Mirror of https://github.com/kata-containers/kata-containers.git (synced 2026-02-22 14:54:23 +00:00)

Compare commits

57 Commits
SHA1:
cfbc834602
ea74df1270
c712057ae7
bc5bbfa60f
0afcc57a92
bcc2ee6e12
bd797eddec
b3760bb3a6
9cf1af873b
4d6ca7623a
719017d688
569ecdbe76
fa8a0ad49b
8fbf862fa6
9141acd94c
9a0ab92f65
f3eac35b55
8a7e0efd14
754308c478
a152f6034e
50bf4434dd
74791ed389
778ebb6e60
b5661e9882
88c13b6823
b8ce291dd0
f5e5ca427d
eaa7ab7462
8d2fd24492
ab83ab6be5
1772df5ac2
2e49586445
e2a8815ba4
63495cf43a
fb44305497
cea5c29e70
20c02528e5
3eb6f5858a
8b0231bec8
8dc8565ed5
740e7e2f77
ef49fa95f7
727f233e2a
619d1b487f
babab160bc
f168555569
af22e71375
b9379521a0
5b3bbc62ba
b0c5f040f0
d44e39e059
43b0e95800
81801888a2
fba39ef32d
57261ec97a
63309514ca
e229a03cc8
.github/workflows/PR-wip-checks.yaml (vendored): 4 lines changed

@@ -9,10 +9,6 @@ on:
       - labeled
       - unlabeled
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 jobs:
   pr_wip_check:
     runs-on: ubuntu-latest
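Several hunks in this comparison drop the same `concurrency` block. For context, a block of this shape makes all runs of one workflow for the same pull request share a group, with a newer run cancelling the in-flight one. A minimal sketch (hypothetical workflow, not from this repo):

    name: concurrency-demo   # illustrative only
    on: pull_request
    concurrency:
      # One group per workflow + PR (falls back to the ref outside of PRs);
      # a new push cancels the still-running job in the same group.
      group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
      cancel-in-progress: true
    jobs:
      noop:
        runs-on: ubuntu-latest
        steps:
          - run: echo "only the newest run per PR survives"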
.github/workflows/add-backport-label.yaml (vendored): 14 lines changed

@@ -10,10 +10,6 @@ on:
       - labeled
       - unlabeled
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 jobs:
   check-issues:
     if: ${{ github.event.label.name != 'auto-backport' }}
@@ -21,7 +17,7 @@ jobs:
     steps:
       - name: Checkout code to allow hub to communicate with the project
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
 
       - name: Install hub extension script
         run: |
@@ -66,15 +62,15 @@ jobs:
           has_backport_needed_label=${{ contains(github.event.pull_request.labels.*.name, 'needs-backport') }}
           has_no_backport_needed_label=${{ contains(github.event.pull_request.labels.*.name, 'no-backport-needed') }}
 
-          echo "add_backport_label=false" >> $GITHUB_OUTPUT
+          echo "::set-output name=add_backport_label::false"
           if [ $has_backport_needed_label = true ] || [ $has_bug = true ]; then
             if [[ $has_no_backport_needed_label = false ]]; then
-              echo "add_backport_label=true" >> $GITHUB_OUTPUT
+              echo "::set-output name=add_backport_label::true"
             fi
           fi
 
           # Do not spam comment, only if auto-backport label is going to be newly added.
-          echo "auto_backport_added=$CONTAINS_AUTO_BACKPORT" >> $GITHUB_OUTPUT
+          echo "::set-output name=auto_backport_added::$CONTAINS_AUTO_BACKPORT"
 
       - name: Add comment
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && steps.add_label.outputs.add_backport_label == 'true' && steps.add_label.outputs.auto_backport_added == 'false' }}
@@ -101,4 +97,4 @@ jobs:
         uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
         with:
          add-labels: "auto-backport"
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
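The paired `echo` lines above capture the step-output migration: current runners append `name=value` to the file named by `$GITHUB_OUTPUT`, while older workflows printed the now-deprecated `::set-output` command to stdout. A minimal sketch with a made-up step id and output name:

    # Hedged sketch of both output styles (step id "demo" is invented):
    - name: Emit an output
      id: demo
      run: |
        echo "add_backport_label=true" >> "$GITHUB_OUTPUT"   # current mechanism
        echo "::set-output name=add_backport_label::true"    # deprecated equivalent
    - name: Consume it
      run: echo "${{ steps.demo.outputs.add_backport_label }}"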
.github/workflows/add-issues-to-project.yaml (vendored): 6 lines changed

@@ -11,10 +11,6 @@ on:
       - opened
       - reopened
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 jobs:
   add-new-issues-to-backlog:
     runs-on: ubuntu-latest
@@ -39,7 +35,7 @@ jobs:
           popd &>/dev/null
 
       - name: Checkout code to allow hub to communicate with the project
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
 
       - name: Add issue to issue backlog
         env:
.github/workflows/add-pr-sizing-label.yaml (vendored): 15 lines changed

@@ -12,25 +12,12 @@ on:
       - reopened
       - synchronize
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 jobs:
   add-pr-size-label:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
+        uses: actions/checkout@v1
 
       - name: Install PR sizing label script
         run: |
.github/workflows/auto-backport.yaml (vendored): 4 lines changed

@@ -2,10 +2,6 @@ on:
   pull_request_target:
     types: ["labeled", "closed"]
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 jobs:
   backport:
     name: Backport PR
.github/workflows/basic-ci-amd64.yaml (vendored): 200 lines changed (file removed)

@@ -1,200 +0,0 @@
-name: CI | Basic amd64 tests
-on:
-  workflow_call:
-    inputs:
-      tarball-suffix:
-        required: false
-        type: string
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  run-cri-containerd:
-    strategy:
-      # We can set this to true whenever we're 100% sure that
-      # the all the tests are not flaky, otherwise we'll fail
-      # all the tests due to a single flaky instance.
-      fail-fast: false
-      matrix:
-        containerd_version: ['lts', 'active']
-        vmm: ['clh', 'qemu']
-    runs-on: garm-ubuntu-2204-smaller
-    env:
-      CONTAINERD_VERSION: ${{ matrix.containerd_version }}
-      GOPATH: ${{ github.workspace }}
-      KATA_HYPERVISOR: ${{ matrix.vmm }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Install dependencies
-        run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-
-      - name: Install kata
-        run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
-
-      - name: Run cri-containerd tests
-        run: bash tests/integration/cri-containerd/gha-run.sh run
-
-  run-containerd-stability:
-    strategy:
-      fail-fast: false
-      matrix:
-        containerd_version: ['lts', 'active']
-        vmm: ['clh', 'qemu']
-    runs-on: garm-ubuntu-2204-smaller
-    env:
-      CONTAINERD_VERSION: ${{ matrix.containerd_version }}
-      GOPATH: ${{ github.workspace }}
-      KATA_HYPERVISOR: ${{ matrix.vmm }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Install dependencies
-        run: bash tests/stability/gha-run.sh install-dependencies
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-
-      - name: Install kata
-        run: bash tests/stability/gha-run.sh install-kata kata-artifacts
-
-      - name: Run containerd-stability tests
-        run: bash tests/stability/gha-run.sh run
-
-  run-nydus:
-    strategy:
-      # We can set this to true whenever we're 100% sure that
-      # the all the tests are not flaky, otherwise we'll fail
-      # all the tests due to a single flaky instance.
-      fail-fast: false
-      matrix:
-        containerd_version: ['lts', 'active']
-        vmm: ['clh', 'qemu', 'dragonball']
-    runs-on: garm-ubuntu-2204-smaller
-    env:
-      CONTAINERD_VERSION: ${{ matrix.containerd_version }}
-      GOPATH: ${{ github.workspace }}
-      KATA_HYPERVISOR: ${{ matrix.vmm }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Install dependencies
-        run: bash tests/integration/nydus/gha-run.sh install-dependencies
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-
-      - name: Install kata
-        run: bash tests/integration/nydus/gha-run.sh install-kata kata-artifacts
-
-      - name: Run nydus tests
-        run: bash tests/integration/nydus/gha-run.sh run
-
-  run-runk:
-    runs-on: garm-ubuntu-2204-smaller
-    env:
-      CONTAINERD_VERSION: lts
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Install dependencies
-        run: bash tests/integration/runk/gha-run.sh install-dependencies
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-
-      - name: Install kata
-        run: bash tests/integration/runk/gha-run.sh install-kata kata-artifacts
-
-      - name: Run tracing tests
-        run: bash tests/integration/runk/gha-run.sh run
-
-  run-vfio:
-    strategy:
-      fail-fast: false
-      matrix:
-        vmm: ['clh', 'qemu']
-    runs-on: garm-ubuntu-2304
-    env:
-      GOPATH: ${{ github.workspace }}
-      KATA_HYPERVISOR: ${{ matrix.vmm }}
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Install dependencies
-        run: bash tests/functional/vfio/gha-run.sh install-dependencies
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-
-      - name: Run vfio tests
-        timeout-minutes: 15
-        run: bash tests/functional/vfio/gha-run.sh run
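The deleted jobs above fan out through a two-axis matrix, one job run per combination. A hedged sketch of the expansion (workflow name and echo step are illustrative only):

    name: matrix-demo   # hypothetical workflow
    on: workflow_dispatch
    jobs:
      demo:
        strategy:
          fail-fast: false        # a flaky cell no longer cancels its siblings
          matrix:
            containerd_version: ['lts', 'active']
            vmm: ['clh', 'qemu']  # 2 x 2 = four independent job runs
        runs-on: ubuntu-latest
        steps:
          - run: echo "${{ matrix.containerd_version }} + ${{ matrix.vmm }}"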
.github/workflows/build-kata-static-tarball-amd64.yaml: file removed

@@ -1,137 +0,0 @@
-name: CI | Build kata-static tarball for amd64
-on:
-  workflow_call:
-    inputs:
-      stage:
-        required: false
-        type: string
-        default: test
-      tarball-suffix:
-        required: false
-        type: string
-      push-to-registry:
-        required: false
-        type: string
-        default: no
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  build-asset:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        asset:
-          - agent
-          - agent-ctl
-          - cloud-hypervisor
-          - cloud-hypervisor-glibc
-          - firecracker
-          - kata-ctl
-          - kernel
-          - kernel-sev
-          - kernel-dragonball-experimental
-          - kernel-tdx-experimental
-          - kernel-nvidia-gpu
-          - kernel-nvidia-gpu-snp
-          - kernel-nvidia-gpu-tdx-experimental
-          - log-parser-rs
-          - nydus
-          - ovmf
-          - ovmf-sev
-          - qemu
-          - qemu-snp-experimental
-          - qemu-tdx-experimental
-          - rootfs-image
-          - rootfs-image-tdx
-          - rootfs-initrd
-          - rootfs-initrd-mariner
-          - rootfs-initrd-sev
-          - runk
-          - shim-v2
-          - tdvf
-          - trace-forwarder
-          - virtiofsd
-        stage:
-          - ${{ inputs.stage }}
-        exclude:
-          - asset: agent
-            stage: release
-          - asset: cloud-hypervisor-glibc
-            stage: release
-    steps:
-      - name: Login to Kata Containers quay.io
-        if: ${{ inputs.push-to-registry == 'yes' }}
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0 # This is needed in order to keep the commit ids history
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Build ${{ matrix.asset }}
-        run: |
-          make "${KATA_ASSET}-tarball"
-          build_dir=$(readlink -f build)
-          # store-artifact does not work with symlink
-          sudo cp -r "${build_dir}" "kata-build"
-        env:
-          KATA_ASSET: ${{ matrix.asset }}
-          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
-          PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
-          ARTEFACT_REGISTRY: ghcr.io
-          ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
-          ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: store-artifact ${{ matrix.asset }}
-        uses: actions/upload-artifact@v3
-        with:
-          name: kata-artifacts-amd64${{ inputs.tarball-suffix }}
-          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
-          retention-days: 1
-          if-no-files-found: error
-
-  create-kata-tarball:
-    runs-on: ubuntu-latest
-    needs: build-asset
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-      - name: get-artifacts
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-artifacts-amd64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-      - name: merge-artifacts
-        run: |
-          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
-      - name: store-artifacts
-        uses: actions/upload-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-          path: kata-static.tar.xz
-          retention-days: 1
-          if-no-files-found: error
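The deleted pipeline above builds one tarball per asset, uploads each as an artifact, then merges them in a second job. A hedged sketch of the equivalent local sequence (the asset name is chosen arbitrarily; the make target and merge script are the ones the workflow itself invokes):

    # Build a single asset tarball (here: virtiofsd), as the build-asset job does.
    make virtiofsd-tarball
    mkdir -p kata-artifacts
    cp build/kata-static-virtiofsd.tar.xz kata-artifacts/
    # Merge whatever asset tarballs are present into one kata-static.tar.xz,
    # as the create-kata-tarball job does.
    ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml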
.github/workflows/build-kata-static-tarball-arm64.yaml: file removed

@@ -1,120 +0,0 @@
-name: CI | Build kata-static tarball for arm64
-on:
-  workflow_call:
-    inputs:
-      stage:
-        required: false
-        type: string
-        default: test
-      tarball-suffix:
-        required: false
-        type: string
-      push-to-registry:
-        required: false
-        type: string
-        default: no
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  build-asset:
-    runs-on: arm64-builder
-    strategy:
-      matrix:
-        asset:
-          - cloud-hypervisor
-          - firecracker
-          - kernel
-          - kernel-dragonball-experimental
-          - nydus
-          - qemu
-          - rootfs-image
-          - rootfs-initrd
-          - shim-v2
-          - virtiofsd
-        stage:
-          - ${{ inputs.stage }}
-    steps:
-      - name: Adjust a permission for repo
-        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
-
-      - name: Login to Kata Containers quay.io
-        if: ${{ inputs.push-to-registry == 'yes' }}
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0 # This is needed in order to keep the commit ids history
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Build ${{ matrix.asset }}
-        run: |
-          make "${KATA_ASSET}-tarball"
-          build_dir=$(readlink -f build)
-          # store-artifact does not work with symlink
-          sudo cp -r "${build_dir}" "kata-build"
-        env:
-          KATA_ASSET: ${{ matrix.asset }}
-          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
-          PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
-          ARTEFACT_REGISTRY: ghcr.io
-          ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
-          ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: store-artifact ${{ matrix.asset }}
-        uses: actions/upload-artifact@v3
-        with:
-          name: kata-artifacts-arm64${{ inputs.tarball-suffix }}
-          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
-          retention-days: 1
-          if-no-files-found: error
-
-  create-kata-tarball:
-    runs-on: arm64-builder
-    needs: build-asset
-    steps:
-      - name: Adjust a permission for repo
-        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-      - name: get-artifacts
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-artifacts-arm64${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-      - name: merge-artifacts
-        run: |
-          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
-      - name: store-artifacts
-        uses: actions/upload-artifact@v3
-        with:
-          name: kata-static-tarball-arm64${{ inputs.tarball-suffix }}
-          path: kata-static.tar.xz
-          retention-days: 1
-          if-no-files-found: error
.github/workflows/build-kata-static-tarball-s390x.yaml: file removed

@@ -1,117 +0,0 @@
-name: CI | Build kata-static tarball for s390x
-on:
-  workflow_call:
-    inputs:
-      stage:
-        required: false
-        type: string
-        default: test
-      tarball-suffix:
-        required: false
-        type: string
-      push-to-registry:
-        required: false
-        type: string
-        default: no
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  build-asset:
-    runs-on: s390x
-    strategy:
-      matrix:
-        asset:
-          - kernel
-          - qemu
-          - rootfs-image
-          - rootfs-initrd
-          - shim-v2
-          - virtiofsd
-        stage:
-          - ${{ inputs.stage }}
-    steps:
-      - name: Adjust a permission for repo
-        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
-
-      - name: Login to Kata Containers quay.io
-        if: ${{ inputs.push-to-registry == 'yes' }}
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0 # This is needed in order to keep the commit ids history
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Build ${{ matrix.asset }}
-        run: |
-          make "${KATA_ASSET}-tarball"
-          build_dir=$(readlink -f build)
-          # store-artifact does not work with symlink
-          sudo cp -r "${build_dir}" "kata-build"
-          sudo chown -R $(id -u):$(id -g) "kata-build"
-        env:
-          KATA_ASSET: ${{ matrix.asset }}
-          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
-          PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
-          ARTEFACT_REGISTRY: ghcr.io
-          ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
-          ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: store-artifact ${{ matrix.asset }}
-        uses: actions/upload-artifact@v3
-        with:
-          name: kata-artifacts-s390x${{ inputs.tarball-suffix }}
-          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
-          retention-days: 1
-          if-no-files-found: error
-
-  create-kata-tarball:
-    runs-on: s390x
-    needs: build-asset
-    steps:
-      - name: Adjust a permission for repo
-        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-      - name: get-artifacts
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-artifacts-s390x${{ inputs.tarball-suffix }}
-          path: kata-artifacts
-      - name: merge-artifacts
-        run: |
-          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
-      - name: store-artifacts
-        uses: actions/upload-artifact@v3
-        with:
-          name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
-          path: kata-static.tar.xz
-          retention-days: 1
-          if-no-files-found: error
.github/workflows/cargo-deny-runner.yaml (vendored): 16 lines changed

@@ -1,17 +1,5 @@
 name: Cargo Crates Check Runner
-on:
-  pull_request:
-    types:
-      - opened
-      - edited
-      - reopened
-      - synchronize
-    paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
+on: [pull_request]
 jobs:
   cargo-deny-runner:
     runs-on: ubuntu-latest
@@ -19,7 +7,7 @@ jobs:
     steps:
       - name: Checkout Code
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
      - name: Generate Action
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: bash cargo-deny-generator.sh
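The generated job ultimately drives cargo-deny over the repository's Rust crates. A hedged sketch of running the same crate checks locally (the subcommands are cargo-deny's own; the crate path is illustrative and assumes a deny.toml is present there):

    cargo install cargo-deny   # one-time install of the cargo-deny subcommand
    cd src/agent               # any Rust crate in the tree carrying a deny.toml
    cargo deny check           # licenses, bans, advisories, sources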
.github/workflows/ci-nightly.yaml (vendored): 19 lines changed (file removed)

@@ -1,19 +0,0 @@
-name: Kata Containers Nightly CI
-on:
-  schedule:
-    - cron: '0 0 * * *'
-  workflow_dispatch:
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  kata-containers-ci-on-push:
-    uses: ./.github/workflows/ci.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      pr-number: "nightly"
-      tag: ${{ github.sha }}-nightly
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
.github/workflows/ci-on-push.yaml (vendored): 32 lines changed (file removed)

@@ -1,32 +0,0 @@
-name: Kata Containers CI
-on:
-  pull_request_target:
-    branches:
-      - 'main'
-      - 'stable-*'
-    types:
-      # Adding 'labeled' to the list of activity types that trigger this event
-      # (default: opened, synchronize, reopened) so that we can run this
-      # workflow when the 'ok-to-test' label is added.
-      # Reference: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
-      - opened
-      - synchronize
-      - reopened
-      - labeled
-    paths-ignore:
-      - 'docs/**'
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  kata-containers-ci-on-push:
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
-    uses: ./.github/workflows/ci.yaml
-    with:
-      commit-hash: ${{ github.event.pull_request.head.sha }}
-      pr-number: ${{ github.event.pull_request.number }}
-      tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
-      target-branch: ${{ github.event.pull_request.base.ref }}
-    secrets: inherit
.github/workflows/ci.yaml (vendored): 185 lines changed (file removed)

@@ -1,185 +0,0 @@
-name: Run the Kata Containers CI
-on:
-  workflow_call:
-    inputs:
-      commit-hash:
-        required: true
-        type: string
-      pr-number:
-        required: true
-        type: string
-      tag:
-        required: true
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  build-kata-static-tarball-amd64:
-    uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
-
-  publish-kata-deploy-payload-amd64:
-    needs: build-kata-static-tarball-amd64
-    uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
-    secrets: inherit
-
-  build-and-publish-tee-confidential-unencrypted-image:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-
-      - name: Login to Kata Containers ghcr.io
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Docker build and push
-        uses: docker/build-push-action@v4
-        with:
-          tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
-          push: true
-          context: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/
-          platforms: linux/amd64, linux/s390x
-          file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
-
-  run-docker-tests-on-garm:
-    needs: build-kata-static-tarball-amd64
-    uses: ./.github/workflows/run-docker-tests-on-garm.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
-
-  run-nerdctl-tests-on-garm:
-    needs: build-kata-static-tarball-amd64
-    uses: ./.github/workflows/run-nerdctl-tests-on-garm.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
-
-  run-kata-deploy-tests-on-aks:
-    needs: publish-kata-deploy-payload-amd64
-    uses: ./.github/workflows/run-kata-deploy-tests-on-aks.yaml
-    with:
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      pr-number: ${{ inputs.pr-number }}
-      target-branch: ${{ inputs.target-branch }}
-    secrets: inherit
-
-  run-kata-deploy-tests-on-garm:
-    needs: publish-kata-deploy-payload-amd64
-    uses: ./.github/workflows/run-kata-deploy-tests-on-garm.yaml
-    with:
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      pr-number: ${{ inputs.pr-number }}
-      target-branch: ${{ inputs.target-branch }}
-    secrets: inherit
-
-  run-kata-monitor-tests:
-    needs: build-kata-static-tarball-amd64
-    uses: ./.github/workflows/run-kata-monitor-tests.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
-
-  run-k8s-tests-on-aks:
-    needs: publish-kata-deploy-payload-amd64
-    uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
-    with:
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      pr-number: ${{ inputs.pr-number }}
-      target-branch: ${{ inputs.target-branch }}
-    secrets: inherit
-
-  run-k8s-tests-on-garm:
-    needs: publish-kata-deploy-payload-amd64
-    uses: ./.github/workflows/run-k8s-tests-on-garm.yaml
-    with:
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      pr-number: ${{ inputs.pr-number }}
-      target-branch: ${{ inputs.target-branch }}
-    secrets: inherit
-
-  run-k8s-tests-with-crio-on-garm:
-    needs: publish-kata-deploy-payload-amd64
-    uses: ./.github/workflows/run-k8s-tests-with-crio-on-garm.yaml
-    with:
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      pr-number: ${{ inputs.pr-number }}
-      target-branch: ${{ inputs.target-branch }}
-    secrets: inherit
-
-  run-kata-coco-tests:
-    needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
-    uses: ./.github/workflows/run-kata-coco-tests.yaml
-    with:
-      registry: ghcr.io
-      repo: ${{ github.repository_owner }}/kata-deploy-ci
-      tag: ${{ inputs.tag }}-amd64
-      commit-hash: ${{ inputs.commit-hash }}
-      pr-number: ${{ inputs.pr-number }}
-      target-branch: ${{ inputs.target-branch }}
-
-  run-metrics-tests:
-    needs: build-kata-static-tarball-amd64
-    uses: ./.github/workflows/run-metrics.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
-
-  run-basic-amd64-tests:
-    needs: build-kata-static-tarball-amd64
-    uses: ./.github/workflows/basic-ci-amd64.yaml
-    with:
-      tarball-suffix: -${{ inputs.tag }}
-      commit-hash: ${{ inputs.commit-hash }}
-      target-branch: ${{ inputs.target-branch }}
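ci.yaml above is a reusable workflow: it declares its `workflow_call` inputs and is invoked from ci-on-push.yaml and ci-nightly.yaml via `uses:` with `secrets: inherit`. A minimal caller/callee sketch of the same wiring (file names and the input are made up):

    # .github/workflows/callee.yaml (hypothetical)
    name: callee
    on:
      workflow_call:
        inputs:
          tag:
            required: true
            type: string
    jobs:
      show:
        runs-on: ubuntu-latest
        steps:
          - run: echo "building ${{ inputs.tag }}"
    ---
    # .github/workflows/caller.yaml (hypothetical)
    name: caller
    on: push
    jobs:
      call:
        uses: ./.github/workflows/callee.yaml
        with:
          tag: demo
        secrets: inherit   # forward the caller's secrets to the callee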
.github/workflows/commit-message-check.yaml (vendored): 9 lines changed

@@ -6,10 +6,6 @@ on:
       - reopened
       - synchronize
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 env:
   error_msg: |+
     See the document below for help on formatting commits for the project.
@@ -66,9 +62,6 @@ jobs:
          # to be specified at the start of the regex as the action is passed
          # the entire commit message.
          #
-          # - This check will pass if the commit message only contains a subject
-          #   line, as other body message properties are enforced elsewhere.
-          #
          # - Body lines *can* be longer than the maximum if they start
          #   with a non-alphabetic character or if there is no whitespace in
          #   the line.
@@ -82,7 +75,7 @@ jobs:
          #
          # - A SoB comment can be any length (as it is unreasonable to penalise
          #   people with long names/email addresses :)
-          pattern: '(^[^\n]+$|^.+(\n([a-zA-Z].{0,150}|[^a-zA-Z\n].*|[^\s\n]*|Signed-off-by:.*|))+$)'
+          pattern: '^.+(\n([a-zA-Z].{0,150}|[^a-zA-Z\n].*|[^\s\n]*|Signed-off-by:.*|))+$'
          error: 'Body line too long (max 150)'
          post_error: ${{ env.error_msg }}
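Per the comments above, a body line that starts with a letter must stay within the length limit, while lines with no whitespace (long URLs, for instance) and Signed-off-by trailers are exempt. A hedged sketch of a message that satisfies those rules (subsystem, text, and URL are invented for illustration):

    cat > /tmp/msg.txt <<'EOF'
    agent: add example feature
    
    Body lines starting with a letter must stay within the 150-character limit.
    https://github.com/kata-containers/kata-containers/issues/0000#a-very-long-anchor-is-fine-because-the-line-contains-no-whitespace
    Signed-off-by: Jane Doe <jane.doe@example.com>
    EOF
    git commit -F /tmp/msg.txt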
.github/workflows/darwin-tests.yaml (vendored): 9 lines changed

@@ -5,11 +5,6 @@ on:
       - edited
       - reopened
      - synchronize
    paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
 name: Darwin tests
 jobs:
@@ -19,8 +14,8 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.3
+          go-version: 1.19.2
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Build utils
         run: ./ci/darwin-test.sh
.github/workflows/docs-url-alive-check.yaml (vendored): 4 lines changed

@@ -14,7 +14,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.3
+          go-version: 1.19.2
         env:
           GOPATH: ${{ runner.workspace }}/kata-containers
       - name: Set env
@@ -22,7 +22,7 @@ jobs:
          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
          fetch-depth: 0
          path: ./src/github.com/${{ github.repository }}
.github/workflows/kata-deploy-push.yaml (vendored, new file): 85 lines added

@@ -0,0 +1,85 @@
+name: kata deploy build
+
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - reopened
+      - synchronize
+    paths:
+      - tools/**
+      - versions.yaml
+
+jobs:
+  build-asset:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        asset:
+          - kernel
+          - shim-v2
+          - qemu
+          - cloud-hypervisor
+          - firecracker
+          - rootfs-image
+          - rootfs-initrd
+          - virtiofsd
+          - nydus
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install docker
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          curl -fsSL https://test.docker.com -o test-docker.sh
+          sh test-docker.sh
+
+      - name: Build ${{ matrix.asset }}
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          make "${KATA_ASSET}-tarball"
+          build_dir=$(readlink -f build)
+          # store-artifact does not work with symlink
+          sudo cp -r --preserve=all "${build_dir}" "kata-build"
+        env:
+          KATA_ASSET: ${{ matrix.asset }}
+
+      - name: store-artifact ${{ matrix.asset }}
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: kata-artifacts
+          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
+          if-no-files-found: error
+
+  create-kata-tarball:
+    runs-on: ubuntu-latest
+    needs: build-asset
+    steps:
+      - uses: actions/checkout@v2
+      - name: get-artifacts
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/download-artifact@v2
+        with:
+          name: kata-artifacts
+          path: build
+      - name: merge-artifacts
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          make merge-builds
+      - name: store-artifacts
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: kata-static-tarball
+          path: kata-static.tar.xz
+
+  make-kata-tarball:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: make kata-tarball
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          make kata-tarball
+          sudo make install-tarball
.github/workflows/kata-deploy-test.yaml (vendored, new file): 150 lines added

@@ -0,0 +1,150 @@
+on:
+  workflow_dispatch: # this is used to trigger the workflow on non-main branches
+  issue_comment:
+    types: [created, edited]
+
+name: test-kata-deploy
+
+jobs:
+  check-comment-and-membership:
+    runs-on: ubuntu-latest
+    if: |
+      github.event.issue.pull_request
+      && github.event_name == 'issue_comment'
+      && github.event.action == 'created'
+      && startsWith(github.event.comment.body, '/test_kata_deploy')
+    steps:
+      - name: Check membership
+        uses: kata-containers/is-organization-member@1.0.1
+        id: is_organization_member
+        with:
+          organization: kata-containers
+          username: ${{ github.event.comment.user.login }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Fail if not member
+        run: |
+          result=${{ steps.is_organization_member.outputs.result }}
+          if [ $result == false ]; then
+            user=${{ github.event.comment.user.login }}
+            echo Either ${user} is not part of the kata-containers organization
+            echo or ${user} has its Organization Visibility set to Private at
+            echo https://github.com/orgs/kata-containers/people?query=${user}
+            echo
+            echo Ensure you change your Organization Visibility to Public and
+            echo trigger the test again.
+            exit 1
+          fi
+
+  build-asset:
+    runs-on: ubuntu-latest
+    needs: check-comment-and-membership
+    strategy:
+      matrix:
+        asset:
+          - cloud-hypervisor
+          - firecracker
+          - kernel
+          - nydus
+          - qemu
+          - rootfs-image
+          - rootfs-initrd
+          - shim-v2
+          - virtiofsd
+    steps:
+      - name: get-PR-ref
+        id: get-PR-ref
+        run: |
+          ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
+          echo "reference for PR: " ${ref}
+          echo "##[set-output name=pr-ref;]${ref}"
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
+
+      - name: Install docker
+        run: |
+          curl -fsSL https://test.docker.com -o test-docker.sh
+          sh test-docker.sh
+
+      - name: Build ${{ matrix.asset }}
+        run: |
+          make "${KATA_ASSET}-tarball"
+          build_dir=$(readlink -f build)
+          # store-artifact does not work with symlink
+          sudo cp -r "${build_dir}" "kata-build"
+        env:
+          KATA_ASSET: ${{ matrix.asset }}
+          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
+
+      - name: store-artifact ${{ matrix.asset }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: kata-artifacts
+          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
+          if-no-files-found: error
+
+  create-kata-tarball:
+    runs-on: ubuntu-latest
+    needs: build-asset
+    steps:
+      - name: get-PR-ref
+        id: get-PR-ref
+        run: |
+          ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
+          echo "reference for PR: " ${ref}
+          echo "##[set-output name=pr-ref;]${ref}"
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
+      - name: get-artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: kata-artifacts
+          path: kata-artifacts
+      - name: merge-artifacts
+        run: |
+          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+      - name: store-artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: kata-static-tarball
+          path: kata-static.tar.xz
+
+  kata-deploy:
+    needs: create-kata-tarball
+    runs-on: ubuntu-latest
+    steps:
+      - name: get-PR-ref
+        id: get-PR-ref
+        run: |
+          ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
+          echo "reference for PR: " ${ref}
+          echo "##[set-output name=pr-ref;]${ref}"
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
+      - name: get-kata-tarball
+        uses: actions/download-artifact@v2
+        with:
+          name: kata-static-tarball
+      - name: build-and-push-kata-deploy-ci
+        id: build-and-push-kata-deploy-ci
+        run: |
+          PR_SHA=$(git log --format=format:%H -n1)
+          mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
+          docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$PR_SHA $GITHUB_WORKSPACE/tools/packaging/kata-deploy
+          docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
+          docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA
+          mkdir -p packaging/kata-deploy
+          ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
+          echo "::set-output name=PKG_SHA::${PR_SHA}"
+      - name: test-kata-deploy-ci-in-aks
+        uses: ./packaging/kata-deploy/action
+        with:
+          packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
+        env:
+          PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
+          AZ_APPID: ${{ secrets.AZ_APPID }}
+          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
+          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
+          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
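Each job above re-derives the PR merge ref from the `issue_comment` payload with jq and sed. A hedged replay of that pipeline on a made-up pull-request URL:

    url='https://api.github.com/repos/kata-containers/kata-containers/pulls/1234'  # hypothetical
    echo "$url" | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#'
    # -> refs/pull/1234/merge  (the ref actions/checkout is told to fetch)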
.github/workflows/kata-runtime-classes-sync.yaml (vendored): 36 lines changed (file removed)

@@ -1,36 +0,0 @@
-on:
-  pull_request:
-    types:
-      - opened
-      - edited
-      - reopened
-      - synchronize
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  kata-deploy-runtime-classes-check:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Ensure the split out runtime classes match the all-in-one file
-        run: |
-          pushd tools/packaging/kata-deploy/runtimeclasses/
-          echo "::group::Combine runtime classes"
-          for runtimeClass in `find . -type f \( -name "*.yaml" -and -not -name "kata-runtimeClasses.yaml" \) | sort`; do
-            echo "Adding ${runtimeClass} to the resultingRuntimeClasses.yaml"
-            cat ${runtimeClass} >> resultingRuntimeClasses.yaml;
-          done
-          echo "::endgroup::"
-          echo "::group::Displaying the content of resultingRuntimeClasses.yaml"
-          cat resultingRuntimeClasses.yaml
-          echo "::endgroup::"
-          echo ""
-          echo "::group::Displaying the content of kata-runtimeClasses.yaml"
-          cat kata-runtimeClasses.yaml
-          echo "::endgroup::"
-          echo ""
-          diff resultingRuntimeClasses.yaml kata-runtimeClasses.yaml

(the next hunk belongs to a different workflow; its file header is missing from the source)

@@ -38,17 +38,7 @@ jobs:
 
       - name: Checkout code to allow hub to communicate with the project
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
+        uses: actions/checkout@v2
 
       - name: Move issue to "In progress"
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
.github/workflows/payload-after-push.yaml (vendored): 91 lines changed (file removed)

@@ -1,91 +0,0 @@
-name: CI | Publish Kata Containers payload
-on:
-  push:
-    branches:
-      - main
-      - stable-*
-  workflow_dispatch:
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  build-assets-amd64:
-    uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      push-to-registry: yes
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
-
-  build-assets-arm64:
-    uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      push-to-registry: yes
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
-
-  build-assets-s390x:
-    uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      push-to-registry: yes
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
-
-  publish-kata-deploy-payload-amd64:
-    needs: build-assets-amd64
-    uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      registry: quay.io
-      repo: kata-containers/kata-deploy-ci
-      tag: kata-containers-amd64
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
-
-  publish-kata-deploy-payload-arm64:
-    needs: build-assets-arm64
-    uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      registry: quay.io
-      repo: kata-containers/kata-deploy-ci
-      tag: kata-containers-arm64
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
-
-  publish-kata-deploy-payload-s390x:
-    needs: build-assets-s390x
-    uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml
-    with:
-      commit-hash: ${{ github.sha }}
-      registry: quay.io
-      repo: kata-containers/kata-deploy-ci
-      tag: kata-containers-s390x
-      target-branch: ${{ github.ref_name }}
-    secrets: inherit
-
-  publish-manifest:
-    runs-on: ubuntu-latest
-    needs: [publish-kata-deploy-payload-amd64, publish-kata-deploy-payload-arm64, publish-kata-deploy-payload-s390x]
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Login to Kata Containers quay.io
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - name: Push multi-arch manifest
-        run: |
-          docker manifest create quay.io/kata-containers/kata-deploy-ci:kata-containers-latest \
-            --amend quay.io/kata-containers/kata-deploy-ci:kata-containers-amd64 \
-            --amend quay.io/kata-containers/kata-deploy-ci:kata-containers-arm64 \
-            --amend quay.io/kata-containers/kata-deploy-ci:kata-containers-s390x
-          docker manifest push quay.io/kata-containers/kata-deploy-ci:kata-containers-latest
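The publish-manifest job stitches the three per-arch images into one multi-arch tag. A hedged, read-only way to confirm the result from any machine with Docker (same tag as in the job above):

    # Lists the amd64/arm64/s390x entries behind the single tag.
    docker manifest inspect quay.io/kata-containers/kata-deploy-ci:kata-containers-latest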
.github/workflows/publish-kata-deploy-payload-amd64.yaml: file removed

@@ -1,66 +0,0 @@
-name: CI | Publish kata-deploy payload for amd64
-on:
-  workflow_call:
-    inputs:
-      tarball-suffix:
-        required: false
-        type: string
-      registry:
-        required: true
-        type: string
-      repo:
-        required: true
-        type: string
-      tag:
-        required: true
-        type: string
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  kata-payload:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
-
-      - name: Login to Kata Containers quay.io
-        if: ${{ inputs.registry == 'quay.io' }}
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - name: Login to Kata Containers ghcr.io
-        if: ${{ inputs.registry == 'ghcr.io' }}
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: build-and-push-kata-payload
-        id: build-and-push-kata-payload
-        run: |
-          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz \
-            ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
.github/workflows/publish-kata-deploy-payload-arm64.yaml: file removed

@@ -1,71 +0,0 @@
-name: CI | Publish kata-deploy payload for arm64
-on:
-  workflow_call:
-    inputs:
-      tarball-suffix:
-        required: false
-        type: string
-      registry:
-        required: true
-        type: string
-      repo:
-        required: true
-        type: string
-      tag:
-        required: true
-        type: string
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  kata-payload:
-    runs-on: arm64-builder
-    steps:
-      - name: Adjust a permission for repo
-        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-arm64${{ inputs.tarball-suffix }}
-
-      - name: Login to Kata Containers quay.io
-        if: ${{ inputs.registry == 'quay.io' }}
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - name: Login to Kata Containers ghcr.io
-        if: ${{ inputs.registry == 'ghcr.io' }}
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: build-and-push-kata-payload
-        id: build-and-push-kata-payload
-        run: |
-          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz \
-            ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
.github/workflows/publish-kata-deploy-payload-s390x.yaml: file removed

@@ -1,70 +0,0 @@
-name: CI | Publish kata-deploy payload for s390x
-on:
-  workflow_call:
-    inputs:
-      tarball-suffix:
-        required: false
-        type: string
-      registry:
-        required: true
-        type: string
-      repo:
-        required: true
-        type: string
-      tag:
-        required: true
-        type: string
-      commit-hash:
-        required: false
-        type: string
-      target-branch:
-        required: false
-        type: string
-        default: ""
-
-jobs:
-  kata-payload:
-    runs-on: s390x
-    steps:
-      - name: Adjust a permission for repo
-        run: |
-          sudo chown -R $USER:$USER $GITHUB_WORKSPACE
-
-      - uses: actions/checkout@v4
-        with:
-          ref: ${{ inputs.commit-hash }}
-          fetch-depth: 0
-
-      - name: Rebase atop of the latest target branch
-        run: |
-          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
-        env:
-          TARGET_BRANCH: ${{ inputs.target-branch }}
-
-      - name: get-kata-tarball
-        uses: actions/download-artifact@v3
-        with:
-          name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
-
-      - name: Login to Kata Containers quay.io
-        if: ${{ inputs.registry == 'quay.io' }}
-        uses: docker/login-action@v2
-        with:
-          registry: quay.io
-          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
-          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
-
-      - name: Login to Kata Containers ghcr.io
-        if: ${{ inputs.registry == 'ghcr.io' }}
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: build-and-push-kata-payload
-        id: build-and-push-kata-payload
-        run: |
-          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
-            $(pwd)/kata-static.tar.xz \
-            ${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
53 .github/workflows/release-amd64.yaml vendored
@@ -1,53 +0,0 @@
name: Publish Kata release artifacts for amd64
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-kata-static-tarball-amd64:
    uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
    with:
      stage: release

  kata-deploy:
    needs: build-kata-static-tarball-amd64
    runs-on: ubuntu-latest
    steps:
      - name: Login to Kata Containers docker.io
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v4
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-amd64

      - name: build-and-push-kata-deploy-ci-amd64
        id: build-and-push-kata-deploy-ci-amd64
        run: |
          # We need this trick here because $GITHUB_REF has the
          # format "refs/tags/<tag>"
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tags=($tag)
          tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
          for tag in ${tags[@]}; do
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
          done
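For reference, here is a minimal sketch of how the tag list in the step above resolves; the version numbers are purely illustrative and not taken from the workflow:

    tag=$(echo "refs/tags/3.2.0-rc0" | cut -d/ -f3-)                 # -> "3.2.0-rc0"
    [[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"   # -> "latest"
    # A pre-release tag (containing "alpha" or "rc") is therefore also
    # published as "latest", while a final release is also published as "stable".

The same derivation is repeated verbatim in the arm64 and s390x release workflows below.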
53 .github/workflows/release-arm64.yaml vendored
@@ -1,53 +0,0 @@
name: Publish Kata release artifacts for arm64
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-kata-static-tarball-arm64:
    uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
    with:
      stage: release

  kata-deploy:
    needs: build-kata-static-tarball-arm64
    runs-on: arm64-builder
    steps:
      - name: Login to Kata Containers docker.io
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v4
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-arm64

      - name: build-and-push-kata-deploy-ci-arm64
        id: build-and-push-kata-deploy-ci-arm64
        run: |
          # We need this trick here because $GITHUB_REF has the
          # format "refs/tags/<tag>"
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tags=($tag)
          tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
          for tag in ${tags[@]}; do
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
          done
53 .github/workflows/release-s390x.yaml vendored
@@ -1,53 +0,0 @@
name: Publish Kata release artifacts for s390x
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-kata-static-tarball-s390x:
    uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
    with:
      stage: release

  kata-deploy:
    needs: build-kata-static-tarball-s390x
    runs-on: s390x
    steps:
      - name: Login to Kata Containers docker.io
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v4
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-s390x

      - name: build-and-push-kata-deploy-ci-s390x
        id: build-and-push-kata-deploy-ci-s390x
        run: |
          # We need this trick here because $GITHUB_REF has the
          # format "refs/tags/<tag>"
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tags=($tag)
          tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
          for tag in ${tags[@]}; do
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              $(pwd)/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              $(pwd)/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
          done
222 .github/workflows/release.yaml vendored
@@ -4,153 +4,157 @@ on:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+*'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  build-and-push-assets-amd64:
    uses: ./.github/workflows/release-amd64.yaml
    with:
      target-arch: amd64
    secrets: inherit

  build-and-push-assets-arm64:
    uses: ./.github/workflows/release-arm64.yaml
    with:
      target-arch: arm64
    secrets: inherit

  build-and-push-assets-s390x:
    uses: ./.github/workflows/release-s390x.yaml
    with:
      target-arch: s390x
    secrets: inherit

  publish-multi-arch-images:
  build-asset:
    runs-on: ubuntu-latest
    needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x]
    strategy:
      matrix:
        asset:
          - cloud-hypervisor
          - firecracker
          - kernel
          - nydus
          - qemu
          - rootfs-image
          - rootfs-initrd
          - shim-v2
          - virtiofsd
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - uses: actions/checkout@v2
      - name: Install docker
        run: |
          curl -fsSL https://test.docker.com -o test-docker.sh
          sh test-docker.sh

      - name: Login to Kata Containers docker.io
        uses: docker/login-action@v2
      - name: Build ${{ matrix.asset }}
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh
          ./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
          name: kata-artifacts
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          if-no-files-found: error

      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
  create-kata-tarball:
    runs-on: ubuntu-latest
    needs: build-asset
    steps:
      - uses: actions/checkout@v2
      - name: get-artifacts
        uses: actions/download-artifact@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
          name: kata-artifacts
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v2
        with:
          name: kata-static-tarball
          path: kata-static.tar.xz

      - name: Push multi-arch manifest
  kata-deploy:
    needs: create-kata-tarball
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: get-kata-tarball
        uses: actions/download-artifact@v2
        with:
          name: kata-static-tarball
      - name: build-and-push-kata-deploy-ci
        id: build-and-push-kata-deploy-ci
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          pushd $GITHUB_WORKSPACE
          git checkout $tag
          pkg_sha=$(git rev-parse HEAD)
          popd
          mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
          docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
          docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
          docker push katadocker/kata-deploy-ci:$pkg_sha
          docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
          docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
          mkdir -p packaging/kata-deploy
          ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
          echo "::set-output name=PKG_SHA::${pkg_sha}"
      - name: test-kata-deploy-ci-in-aks
        uses: ./packaging/kata-deploy/action
        with:
          packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
        env:
          PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
          AZ_APPID: ${{ secrets.AZ_APPID }}
          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
      - name: push-tarball
        run: |
          # tag the container image we created and push to DockerHub
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tags=($tag)
          tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
          # push to quay.io and docker.io
          for tag in ${tags[@]}; do
            docker manifest create quay.io/kata-containers/kata-deploy:${tag} \
              --amend quay.io/kata-containers/kata-deploy:${tag}-amd64 \
              --amend quay.io/kata-containers/kata-deploy:${tag}-arm64 \
              --amend quay.io/kata-containers/kata-deploy:${tag}-s390x

            docker manifest create docker.io/katadocker/kata-deploy:${tag} \
              --amend docker.io/katadocker/kata-deploy:${tag}-amd64 \
              --amend docker.io/katadocker/kata-deploy:${tag}-arm64 \
              --amend docker.io/katadocker/kata-deploy:${tag}-s390x

            docker manifest push quay.io/kata-containers/kata-deploy:${tag}
            docker manifest push docker.io/katadocker/kata-deploy:${tag}
          for tag in ${tags[@]}; do \
            docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag} && \
            docker tag quay.io/kata-containers/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} quay.io/kata-containers/kata-deploy:${tag} && \
            docker push katadocker/kata-deploy:${tag} && \
            docker push quay.io/kata-containers/kata-deploy:${tag}; \
          done

  upload-multi-arch-static-tarball:
    needs: publish-multi-arch-images
  upload-static-tarball:
    needs: kata-deploy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: download-artifacts-amd64
        uses: actions/download-artifact@v3
      - uses: actions/checkout@v2
      - name: download-artifacts
        uses: actions/download-artifact@v2
        with:
          name: kata-static-tarball-amd64
      - name: push amd64 static tarball to github
          name: kata-static-tarball
      - name: install hub
        run: |
          HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
          wget -q -O- https://github.com/github/hub/releases/download/v$HUB_VER/hub-linux-amd64-$HUB_VER.tgz | \
            tar xz --strip-components=2 --wildcards '*/bin/hub' && sudo mv hub /usr/local/bin/hub
      - name: push static tarball to github
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tarball="kata-static-$tag-amd64.tar.xz"
          tarball="kata-static-$tag-x86_64.tar.xz"
          mv kata-static.tar.xz "$GITHUB_WORKSPACE/${tarball}"
          pushd $GITHUB_WORKSPACE
          echo "uploading asset '${tarball}' for tag: ${tag}"
          GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} gh release upload "${tag}" "${tarball}"
          popd

      - name: download-artifacts-arm64
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-arm64
      - name: push arm64 static tarball to github
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tarball="kata-static-$tag-arm64.tar.xz"
          mv kata-static.tar.xz "$GITHUB_WORKSPACE/${tarball}"
          pushd $GITHUB_WORKSPACE
          echo "uploading asset '${tarball}' for tag: ${tag}"
          GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} gh release upload "${tag}" "${tarball}"
          popd

      - name: download-artifacts-s390x
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-s390x
      - name: push s390x static tarball to github
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tarball="kata-static-$tag-s390x.tar.xz"
          mv kata-static.tar.xz "$GITHUB_WORKSPACE/${tarball}"
          pushd $GITHUB_WORKSPACE
          echo "uploading asset '${tarball}' for tag: ${tag}"
          GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} gh release upload "${tag}" "${tarball}"
          popd

  upload-versions-yaml:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: upload versions.yaml
        env:
          GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }}
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          pushd $GITHUB_WORKSPACE
          versions_file="kata-containers-$tag-versions.yaml"
          cp versions.yaml ${versions_file}
          gh release upload "${tag}" "${versions_file}"
          GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
          popd

  upload-cargo-vendored-tarball:
    needs: upload-multi-arch-static-tarball
    needs: upload-static-tarball
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v2
      - name: generate-and-upload-tarball
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          tarball="kata-containers-$tag-vendor.tar.gz"
          pushd $GITHUB_WORKSPACE
          bash -c "tools/packaging/release/generate_vendor.sh ${tarball}"
          GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} gh release upload "${tag}" "${tarball}"
          GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
          popd

  upload-libseccomp-tarball:
    needs: upload-cargo-vendored-tarball
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v2
      - name: download-and-upload-tarball
        env:
          GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }}
@@ -170,6 +174,6 @@ jobs:
          # "-m" option should be empty to re-use the existing release title
          # without opening a text editor.
          # For the details, check https://hub.github.com/hub-release.1.html.
          gh release upload "${tag}" "${tarball}"
          gh release upload "${tag}" "${asc}"
          hub release edit -m "" -a "${tarball}" "${tag}"
          hub release edit -m "" -a "${asc}" "${tag}"
          popd
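As a quick sanity check after the manifest pushes above, the architectures contained in a published manifest list can be inspected; a sketch, assuming jq is available and using an illustrative tag:

    docker manifest inspect quay.io/kata-containers/kata-deploy:3.2.0 \
      | jq -r '.manifests[].platform.architecture'
    # expected output: amd64, arm64, s390x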
16 .github/workflows/require-pr-porting-labels.yaml vendored
@@ -15,10 +15,6 @@ on:
    branches:
      - main

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  check-pr-porting-labels:
    runs-on: ubuntu-latest
@@ -36,17 +32,7 @@ jobs:

      - name: Checkout code to allow hub to communicate with the project
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
        uses: actions/checkout@v2

      - name: Install porting checker script
        run: |
56 .github/workflows/run-docker-tests-on-garm.yaml vendored
@@ -1,56 +0,0 @@
name: CI | Run docker integration tests
on:
  workflow_call:
    inputs:
      tarball-suffix:
        required: false
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-docker-tests:
    strategy:
      # We can set this to true whenever we're 100% sure that
      # all the tests are not flaky, otherwise we'll fail them
      # all due to a single flaky instance.
      fail-fast: false
      matrix:
        vmm:
          - clh
          - qemu
    runs-on: garm-ubuntu-2304-smaller
    env:
      KATA_HYPERVISOR: ${{ matrix.vmm }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Install dependencies
        run: bash tests/integration/docker/gha-run.sh install-dependencies

      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
          path: kata-artifacts

      - name: Install kata
        run: bash tests/integration/docker/gha-run.sh install-kata kata-artifacts

      - name: Run docker smoke test
        timeout-minutes: 5
        run: bash tests/integration/docker/gha-run.sh run
98 .github/workflows/run-k8s-tests-on-aks.yaml vendored
@@ -1,98 +0,0 @@
name: CI | Run kubernetes tests on AKS
on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      repo:
        required: true
        type: string
      tag:
        required: true
        type: string
      pr-number:
        required: true
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-k8s-tests:
    strategy:
      fail-fast: false
      matrix:
        host_os:
          - ubuntu
        vmm:
          - clh
          - dragonball
          - qemu
        instance-type:
          - small
          - normal
        include:
          - host_os: cbl-mariner
            vmm: clh
    runs-on: ubuntu-latest
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      GH_PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HOST_OS: ${{ matrix.host_os }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: "vanilla"
      USING_NFD: "false"
      K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Download Azure CLI
        run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli

      - name: Log into the Azure account
        run: bash tests/integration/kubernetes/gha-run.sh login-azure
        env:
          AZ_APPID: ${{ secrets.AZ_APPID }}
          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}

      - name: Create AKS cluster
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh create-cluster

      - name: Install `bats`
        run: bash tests/integration/kubernetes/gha-run.sh install-bats

      - name: Install `kubectl`
        run: bash tests/integration/kubernetes/gha-run.sh install-kubectl

      - name: Download credentials for the Kubernetes CLI to use them
        run: bash tests/integration/kubernetes/gha-run.sh get-cluster-credentials

      - name: Deploy Kata
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-aks

      - name: Run tests
        timeout-minutes: 60
        run: bash tests/integration/kubernetes/gha-run.sh run-tests

      - name: Delete AKS cluster
        if: always()
        run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
88 .github/workflows/run-k8s-tests-on-garm.yaml vendored
@@ -1,88 +0,0 @@
name: CI | Run kubernetes tests on GARM
on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      repo:
        required: true
        type: string
      tag:
        required: true
        type: string
      pr-number:
        required: true
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-k8s-tests:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - clh #cloud-hypervisor
          - fc #firecracker
          - qemu
        snapshotter:
          - devmapper
        k8s:
          - k3s
        instance:
          - garm-ubuntu-2004
          - garm-ubuntu-2004-smaller
        include:
          - instance: garm-ubuntu-2004
            instance-type: normal
          - instance: garm-ubuntu-2004-smaller
            instance-type: small
    runs-on: ${{ matrix.instance }}
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: ${{ matrix.k8s }}
      SNAPSHOTTER: ${{ matrix.snapshotter }}
      USING_NFD: "false"
      K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Deploy ${{ matrix.k8s }}
        run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s

      - name: Configure the ${{ matrix.snapshotter }} snapshotter
        run: bash tests/integration/kubernetes/gha-run.sh configure-snapshotter

      - name: Deploy Kata
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-garm

      - name: Install `bats`
        run: bash tests/integration/kubernetes/gha-run.sh install-bats

      - name: Run tests
        timeout-minutes: 30
        run: bash tests/integration/kubernetes/gha-run.sh run-tests

      - name: Delete kata-deploy
        if: always()
        run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm
@@ -1,86 +0,0 @@
name: CI | Run kubernetes tests, using CRI-O, on GARM
on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      repo:
        required: true
        type: string
      tag:
        required: true
        type: string
      pr-number:
        required: true
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-k8s-tests:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - qemu
        k8s:
          - k0s
        instance:
          - garm-ubuntu-2004
          - garm-ubuntu-2004-smaller
        include:
          - instance: garm-ubuntu-2004
            instance-type: normal
          - instance: garm-ubuntu-2004-smaller
            instance-type: small
          - k8s: k0s
            k8s-extra-params: '--cri-socket remote:unix:///var/run/crio/crio.sock --kubelet-extra-args --cgroup-driver="systemd"'
    runs-on: ${{ matrix.instance }}
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: ${{ matrix.k8s }}
      KUBERNETES_EXTRA_PARAMS: ${{ matrix.k8s-extra-params }}
      USING_NFD: "false"
      K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Configure CRI-O
        run: bash tests/integration/kubernetes/gha-run.sh setup-crio

      - name: Deploy ${{ matrix.k8s }}
        run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s

      - name: Deploy Kata
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-garm

      - name: Install `bats`
        run: bash tests/integration/kubernetes/gha-run.sh install-bats

      - name: Run tests
        timeout-minutes: 30
        run: bash tests/integration/kubernetes/gha-run.sh run-tests

      - name: Delete kata-deploy
        if: always()
        run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm
176 .github/workflows/run-kata-coco-tests.yaml vendored
@@ -1,176 +0,0 @@
name: CI | Run kata coco tests
on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      repo:
        required: true
        type: string
      tag:
        required: true
        type: string
      pr-number:
        required: true
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-kata-deploy-tests-on-tdx:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - qemu-tdx
    runs-on: tdx
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: "k3s"
      USING_NFD: "true"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Run tests
        run: bash tests/functional/kata-deploy/gha-run.sh run-tests

  run-k8s-tests-on-tdx:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - qemu-tdx
    runs-on: tdx
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: "k3s"
      USING_NFD: "true"
      K8S_TEST_HOST_TYPE: "baremetal"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Deploy Kata
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx

      - name: Run tests
        timeout-minutes: 30
        run: bash tests/integration/kubernetes/gha-run.sh run-tests

      - name: Delete kata-deploy
        if: always()
        run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx

  run-k8s-tests-on-sev:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - qemu-sev
    runs-on: sev
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBECONFIG: /home/kata/.kube/config
      KUBERNETES: "vanilla"
      USING_NFD: "false"
      K8S_TEST_HOST_TYPE: "baremetal"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Deploy Kata
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev

      - name: Run tests
        timeout-minutes: 30
        run: bash tests/integration/kubernetes/gha-run.sh run-tests

      - name: Delete kata-deploy
        if: always()
        run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev

  run-k8s-tests-sev-snp:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - qemu-snp
    runs-on: sev-snp
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBECONFIG: /home/kata/.kube/config
      KUBERNETES: "vanilla"
      USING_NFD: "false"
      K8S_TEST_HOST_TYPE: "baremetal"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Deploy Kata
        timeout-minutes: 10
        run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp

      - name: Run tests
        timeout-minutes: 30
        run: bash tests/integration/kubernetes/gha-run.sh run-tests

      - name: Delete kata-deploy
        if: always()
        run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp
@@ -1,89 +0,0 @@
name: CI | Run kata-deploy tests on AKS
on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      repo:
        required: true
        type: string
      tag:
        required: true
        type: string
      pr-number:
        required: true
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-kata-deploy-tests:
    strategy:
      fail-fast: false
      matrix:
        host_os:
          - ubuntu
        vmm:
          - clh
          - dragonball
          - qemu
        include:
          - host_os: cbl-mariner
            vmm: clh
    runs-on: ubuntu-latest
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      GH_PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HOST_OS: ${{ matrix.host_os }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: "vanilla"
      USING_NFD: "false"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Download Azure CLI
        run: bash tests/functional/kata-deploy/gha-run.sh install-azure-cli

      - name: Log into the Azure account
        run: bash tests/functional/kata-deploy/gha-run.sh login-azure
        env:
          AZ_APPID: ${{ secrets.AZ_APPID }}
          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}

      - name: Create AKS cluster
        timeout-minutes: 10
        run: bash tests/functional/kata-deploy/gha-run.sh create-cluster

      - name: Install `bats`
        run: bash tests/functional/kata-deploy/gha-run.sh install-bats

      - name: Install `kubectl`
        run: bash tests/functional/kata-deploy/gha-run.sh install-kubectl

      - name: Download credentials for the Kubernetes CLI to use them
        run: bash tests/functional/kata-deploy/gha-run.sh get-cluster-credentials

      - name: Run tests
        run: bash tests/functional/kata-deploy/gha-run.sh run-tests

      - name: Delete AKS cluster
        if: always()
        run: bash tests/functional/kata-deploy/gha-run.sh delete-cluster
@@ -1,65 +0,0 @@
name: CI | Run kata-deploy tests on GARM
on:
  workflow_call:
    inputs:
      registry:
        required: true
        type: string
      repo:
        required: true
        type: string
      tag:
        required: true
        type: string
      pr-number:
        required: true
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-kata-deploy-tests:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - clh
          - qemu
        k8s:
          - k0s
          - k3s
          - rke2
    runs-on: garm-ubuntu-2004-smaller
    env:
      DOCKER_REGISTRY: ${{ inputs.registry }}
      DOCKER_REPO: ${{ inputs.repo }}
      DOCKER_TAG: ${{ inputs.tag }}
      PR_NUMBER: ${{ inputs.pr-number }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
      KUBERNETES: ${{ matrix.k8s }}
      USING_NFD: "false"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Deploy ${{ matrix.k8s }}
        run: bash tests/functional/kata-deploy/gha-run.sh deploy-k8s

      - name: Install `bats`
        run: bash tests/functional/kata-deploy/gha-run.sh install-bats

      - name: Run tests
        run: bash tests/functional/kata-deploy/gha-run.sh run-tests
59 .github/workflows/run-kata-monitor-tests.yaml vendored
@@ -1,59 +0,0 @@
name: CI | Run kata-monitor tests
on:
  workflow_call:
    inputs:
      tarball-suffix:
        required: false
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-monitor:
    strategy:
      fail-fast: false
      matrix:
        vmm:
          - qemu
        container_engine:
          - crio
          - containerd
        include:
          - container_engine: containerd
            containerd_version: lts
    runs-on: garm-ubuntu-2204-smaller
    env:
      CONTAINER_ENGINE: ${{ matrix.container_engine }}
      CONTAINERD_VERSION: ${{ matrix.containerd_version }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Install dependencies
        run: bash tests/functional/kata-monitor/gha-run.sh install-dependencies

      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
          path: kata-artifacts

      - name: Install kata
        run: bash tests/functional/kata-monitor/gha-run.sh install-kata kata-artifacts

      - name: Run kata-monitor tests
        run: bash tests/functional/kata-monitor/gha-run.sh run
94 .github/workflows/run-metrics.yaml vendored
@@ -1,94 +0,0 @@
name: CI | Run test metrics
on:
  workflow_call:
    inputs:
      tarball-suffix:
        required: false
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  setup-kata:
    name: Kata Setup
    runs-on: metrics
    env:
      GOPATH: ${{ github.workspace }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
          path: kata-artifacts

      - name: Install kata
        run: bash tests/metrics/gha-run.sh install-kata kata-artifacts

  run-metrics:
    needs: setup-kata
    strategy:
      # We can set this to true whenever we're 100% sure that
      # all the tests are not flaky, otherwise we'll fail them
      # all due to a single flaky instance.
      fail-fast: false
      matrix:
        vmm: ['clh', 'qemu']
      max-parallel: 1
    runs-on: metrics
    env:
      GOPATH: ${{ github.workspace }}
      KATA_HYPERVISOR: ${{ matrix.vmm }}
    steps:
      - name: enabling the hypervisor
        run: bash tests/metrics/gha-run.sh enabling-hypervisor

      - name: run launch times test
        run: bash tests/metrics/gha-run.sh run-test-launchtimes

      - name: run memory footprint test
        run: bash tests/metrics/gha-run.sh run-test-memory-usage

      - name: run memory usage inside container test
        run: bash tests/metrics/gha-run.sh run-test-memory-usage-inside-container

      - name: run blogbench test
        run: bash tests/metrics/gha-run.sh run-test-blogbench

      - name: run tensorflow test
        run: bash tests/metrics/gha-run.sh run-test-tensorflow

      - name: run fio test
        run: bash tests/metrics/gha-run.sh run-test-fio

      - name: run iperf test
        run: bash tests/metrics/gha-run.sh run-test-iperf

      - name: run latency test
        run: bash tests/metrics/gha-run.sh run-test-latency

      - name: make metrics tarball ${{ matrix.vmm }}
        run: bash tests/metrics/gha-run.sh make-tarball-results

      - name: archive metrics results ${{ matrix.vmm }}
        uses: actions/upload-artifact@v3
        with:
          name: metrics-artifacts-${{ matrix.vmm }}
          path: results-${{ matrix.vmm }}.tar.gz
          retention-days: 1
          if-no-files-found: error
57 .github/workflows/run-nerdctl-tests-on-garm.yaml vendored
@@ -1,57 +0,0 @@
name: CI | Run nerdctl integration tests
on:
  workflow_call:
    inputs:
      tarball-suffix:
        required: false
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-nerdctl-tests:
    strategy:
      # We can set this to true whenever we're 100% sure that
      # all the tests are not flaky, otherwise we'll fail them
      # all due to a single flaky instance.
      fail-fast: false
      matrix:
        vmm:
          - clh
          - dragonball
          - qemu
    runs-on: garm-ubuntu-2304-smaller
    env:
      KATA_HYPERVISOR: ${{ matrix.vmm }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Install dependencies
        run: bash tests/integration/nerdctl/gha-run.sh install-dependencies

      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
          path: kata-artifacts

      - name: Install kata
        run: bash tests/integration/nerdctl/gha-run.sh install-kata kata-artifacts

      - name: Run nerdctl smoke test
        timeout-minutes: 5
        run: bash tests/integration/nerdctl/gha-run.sh run
46 .github/workflows/run-runk-tests.yaml vendored
@@ -1,46 +0,0 @@
name: CI | Run runk tests
on:
  workflow_call:
    inputs:
      tarball-suffix:
        required: false
        type: string
      commit-hash:
        required: false
        type: string
      target-branch:
        required: false
        type: string
        default: ""

jobs:
  run-runk:
    runs-on: garm-ubuntu-2204-smaller
    env:
      CONTAINERD_VERSION: lts
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0

      - name: Rebase atop of the latest target branch
        run: |
          ./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
        env:
          TARGET_BRANCH: ${{ inputs.target-branch }}

      - name: Install dependencies
        run: bash tests/integration/runk/gha-run.sh install-dependencies

      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
          path: kata-artifacts

      - name: Install kata
        run: bash tests/integration/runk/gha-run.sh install-kata kata-artifacts

      - name: Run tracing tests
        run: bash tests/integration/runk/gha-run.sh run
42 .github/workflows/snap-release.yaml vendored Normal file
@@ -0,0 +1,42 @@
name: Release Kata in snapcraft store
on:
  push:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+*'

jobs:
  release-snap:
    runs-on: ubuntu-20.04
    steps:
      - name: Check out Git repository
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Install Snapcraft
        uses: samuelmeuli/action-snapcraft@v1
        with:
          snapcraft_token: ${{ secrets.snapcraft_token }}

      - name: Build snap
        run: |
          # Removing man-db, workflow kept failing, fixes: #4480
          sudo apt -y remove --purge man-db
          sudo apt-get install -y git git-extras
          kata_url="https://github.com/kata-containers/kata-containers"
          latest_version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
          current_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
          # Check semantic versioning format (x.y.z) and if the current tag is the latest tag
          if echo "${current_version}" | grep -q "^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+$" && echo -e "$latest_version\n$current_version" | sort -C -V; then
            # Current version is the latest version, build it
            snapcraft snap --debug --destructive-mode
          fi

      - name: Upload snap
        run: |
          snap_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
          snap_file="kata-containers_${snap_version}_amd64.snap"
          # Upload the snap if it exists
          if [ -f ${snap_file} ]; then
            snapcraft upload --release=stable ${snap_file}
          fi
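The "is the pushed tag the latest release?" test in the build step above leans on sort -C -V, which exits 0 only when its input is already in ascending version order; a worked example with illustrative versions:

    latest_version="3.2.0"
    current_version="3.3.0"
    # Succeeds because "3.2.0" followed by "3.3.0" is already sorted,
    # i.e. current_version >= latest_version.
    if echo -e "${latest_version}\n${current_version}" | sort -C -V; then
      echo "current tag is the latest, build the snap"   # taken here
    else
      echo "an older tag was pushed, skip the build"
    fi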
27 .github/workflows/snap.yaml vendored Normal file
@@ -0,0 +1,27 @@
name: snap CI
on:
  pull_request:
    types:
      - opened
      - synchronize
      - reopened
      - edited

jobs:
  test:
    runs-on: ubuntu-20.04
    steps:
      - name: Check out
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Install Snapcraft
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: samuelmeuli/action-snapcraft@v1

      - name: Build snap
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          snapcraft snap --debug --destructive-mode
486 .github/workflows/static-checks.yaml vendored
@@ -6,189 +6,321 @@ on:
    - reopened
    - synchronize

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

name: Static checks
jobs:
  check-kernel-config-version:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout the code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Ensure the kernel config version has been updated
        run: |
          kernel_dir="tools/packaging/kernel/"
          kernel_version_file="${kernel_dir}kata_config_version"
          modified_files=$(git diff --name-only origin/$GITHUB_BASE_REF..HEAD)
          if git diff --name-only origin/$GITHUB_BASE_REF..HEAD "${kernel_dir}" | grep "${kernel_dir}"; then
            echo "Kernel directory has changed, checking if $kernel_version_file has been updated"
            if echo "$modified_files" | grep -v "README.md" | grep "${kernel_dir}" >>"/dev/null"; then
              echo "$modified_files" | grep "$kernel_version_file" >>/dev/null || ( echo "Please bump version in $kernel_version_file" && exit 1)
            else
              echo "Readme file changed, no need for kernel config version update."
            fi
            echo "Check passed"
          fi

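The same check can be approximated locally before pushing a PR; a rough sketch, assuming origin/main stands in for $GITHUB_BASE_REF:

    kernel_dir="tools/packaging/kernel/"
    # Any non-README change under the kernel dir requires a version bump.
    if git diff --name-only origin/main..HEAD -- "${kernel_dir}" | grep -qv "README.md"; then
      git diff --name-only origin/main..HEAD \
        | grep -q "${kernel_dir}kata_config_version" \
        || echo "Please bump the version in ${kernel_dir}kata_config_version"
    fi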
  build-checks:
  check-vendored-code:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        component:
          - agent
          - dragonball
          - runtime
          - runtime-rs
          - agent-ctl
          - kata-ctl
          - log-parser-rs
          - runk
          - trace-forwarder
        command:
          - "make vendor"
          - "make check"
          - "make test"
          - "sudo -E PATH=\"$PATH\" make test"
        include:
          - component: agent
            component-path: src/agent
          - component: dragonball
            component-path: src/dragonball
          - component: runtime
            component-path: src/runtime
          - component: runtime-rs
            component-path: src/runtime-rs
          - component: agent-ctl
            component-path: src/tools/agent-ctl
          - component: kata-ctl
            component-path: src/tools/kata-ctl
          - component: log-parser-rs
            component-path: src/tools/log-parser-rs
          - component: runk
            component-path: src/tools/runk
          - component: trace-forwarder
            component-path: src/tools/trace-forwarder
          - install-libseccomp: no
          - component: agent
            install-libseccomp: yes
          - component: runk
            install-libseccomp: yes
    env:
      TRAVIS: "true"
      TRAVIS_BRANCH: ${{ github.base_ref }}
      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
      RUST_BACKTRACE: "1"
      target_branch: ${{ github.base_ref }}
    steps:
      - name: Checkout the code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install yq
        run: |
          ./ci/install_yq.sh
        env:
          INSTALL_IN_GOPATH: false
      - name: Install golang
        if: ${{ matrix.component == 'runtime' }}
        run: |
          ./tests/install_go.sh -f -p
          echo "/usr/local/go/bin" >> $GITHUB_PATH
      - name: Install rust
        if: ${{ matrix.component != 'runtime' }}
        run: |
          ./tests/install_rust.sh
          echo "${HOME}/.cargo/bin" >> $GITHUB_PATH
      - name: Install musl-tools
        if: ${{ matrix.component != 'runtime' }}
        run: sudo apt-get -y install musl-tools
      - name: Install libseccomp
        if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.install-libseccomp == 'yes' }}
        run: |
          libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
          gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
          ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
          echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
          echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
      - name: Setup XDG_RUNTIME_DIR for the `runtime` tests
        if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.component == 'runtime' }}
        run: |
          XDG_RUNTIME_DIR=$(mktemp -d /tmp/kata-tests-$USER.XXX | tee >(xargs chmod 0700))
          echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> $GITHUB_ENV
      - name: Running `${{ matrix.command }}` for ${{ matrix.component }}
        run: |
          cd ${{ matrix.component-path }}
          ${{ matrix.command }}
        env:
          RUST_BACKTRACE: "1"

  build-checks-depending-on-kvm:
    runs-on: garm-ubuntu-2004-smaller
    strategy:
      fail-fast: false
      matrix:
        component:
          - runtime-rs
        include:
          - component: runtime-rs
            command: "sudo -E env PATH=$PATH LIBC=gnu SUPPORT_VIRTUALIZATION=true make test"
          - component: runtime-rs
            component-path: src/dragonball
    steps:
      - name: Checkout the code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install system deps
        run: |
          sudo apt-get install -y build-essential musl-tools
      - name: Install yq
        run: |
          sudo -E ./ci/install_yq.sh
        env:
          INSTALL_IN_GOPATH: false
      - name: Install rust
        run: |
          export PATH="$PATH:/usr/local/bin"
          ./tests/install_rust.sh
      - name: Running `${{ matrix.command }}` for ${{ matrix.component }}
        run: |
          export PATH="$PATH:${HOME}/.cargo/bin"
          cd ${{ matrix.component-path }}
          ${{ matrix.command }}
        env:
          RUST_BACKTRACE: "1"
      - name: Install Go
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/setup-go@v3
        with:
          go-version: 1.19.2
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
      - name: Setup GOPATH
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
          echo "TRAVIS: ${TRAVIS}"
      - name: Set env
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
      - name: Checkout code
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          path: ./src/github.com/${{ github.repository }}
      - name: Setup travis references
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
          target_branch=${TRAVIS_BRANCH}
      - name: Setup
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
      # Check whether the vendored code is up-to-date & working as the first thing
      - name: Check vendored code
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }} && make vendor

  static-checks:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        cmd:
          - "make static-checks"
    env:
      GOPATH: ${{ github.workspace }}
      TRAVIS: "true"
      TRAVIS_BRANCH: ${{ github.base_ref }}
      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
      RUST_BACKTRACE: "1"
      target_branch: ${{ github.base_ref }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          path: ./src/github.com/${{ github.repository }}
      - name: Install yq
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }}
          ./ci/install_yq.sh
        env:
          INSTALL_IN_GOPATH: false
      - name: Install golang
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }}
          ./tests/install_go.sh -f -p
          echo "/usr/local/go/bin" >> $GITHUB_PATH
      - name: Install system dependencies
        run: |
          sudo apt-get -y install moreutils hunspell pandoc
      - name: Run check
        run: |
          export PATH=${PATH}:${GOPATH}/bin
          cd ${GOPATH}/src/github.com/${{ github.repository }} && ${{ matrix.cmd }}
      - name: Install Go
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/setup-go@v3
        with:
          go-version: 1.19.2
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
      - name: Setup GOPATH
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
          echo "TRAVIS: ${TRAVIS}"
      - name: Set env
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
      - name: Checkout code
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          path: ./src/github.com/${{ github.repository }}
      - name: Setup travis references
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
          target_branch=${TRAVIS_BRANCH}
      - name: Setup
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
      - name: Installing rust
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
          PATH=$PATH:"$HOME/.cargo/bin"
          rustup target add x86_64-unknown-linux-musl
          rustup component add rustfmt clippy
      - name: Setup seccomp
        run: |
          libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
          gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
          echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
          echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
      - name: Static Checks
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }} && make static-checks

  compiler-checks:
    runs-on: ubuntu-20.04
    env:
      TRAVIS: "true"
      TRAVIS_BRANCH: ${{ github.base_ref }}
      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
      RUST_BACKTRACE: "1"
      target_branch: ${{ github.base_ref }}
    steps:
      - name: Install Go
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/setup-go@v3
        with:
          go-version: 1.19.2
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
      - name: Setup GOPATH
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
          echo "TRAVIS: ${TRAVIS}"
      - name: Set env
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
      - name: Checkout code
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          path: ./src/github.com/${{ github.repository }}
      - name: Setup travis references
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
          target_branch=${TRAVIS_BRANCH}
      - name: Setup
        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        run: |
          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
      - name: Installing rust
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
rustup component add rustfmt clippy
|
||||
- name: Setup seccomp
|
||||
run: |
|
||||
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
|
||||
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
|
||||
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
|
||||
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
|
||||
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
|
||||
- name: Run Compiler Checks
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make check
|
||||
|
||||
unit-tests:
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
|
||||
TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
|
||||
RUST_BACKTRACE: "1"
|
||||
target_branch: ${{ github.base_ref }}
|
||||
steps:
|
||||
- name: Install Go
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
|
||||
echo "TRAVIS: ${TRAVIS}"
|
||||
- name: Set env
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- name: Checkout code
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Setup travis references
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
|
||||
target_branch=${TRAVIS_BRANCH}
|
||||
- name: Setup
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Installing rust
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
rustup component add rustfmt clippy
|
||||
- name: Setup seccomp
|
||||
run: |
|
||||
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
|
||||
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
|
||||
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
|
||||
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
|
||||
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
|
||||
- name: Run Unit Tests
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make test
|
||||
|
||||
unit-tests-as-root:
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
|
||||
TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
|
||||
RUST_BACKTRACE: "1"
|
||||
target_branch: ${{ github.base_ref }}
|
||||
steps:
|
||||
- name: Install Go
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
|
||||
echo "TRAVIS: ${TRAVIS}"
|
||||
- name: Set env
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- name: Checkout code
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Setup travis references
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
|
||||
target_branch=${TRAVIS_BRANCH}
|
||||
- name: Setup
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Installing rust
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
rustup component add rustfmt clippy
|
||||
- name: Setup seccomp
|
||||
run: |
|
||||
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
|
||||
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
|
||||
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
|
||||
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
|
||||
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
|
||||
- name: Run Unit Tests As Root User
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && sudo -E PATH="$PATH" make test
|
||||
|
||||
5 changes: .gitignore (vendored)
@@ -4,10 +4,6 @@
**/*.rej
**/target
**/.vscode
**/.idea
**/.fleet
**/*.swp
**/*.swo
pkg/logging/Cargo.lock
src/agent/src/version.rs
src/agent/kata-agent.service
@@ -15,3 +11,4 @@ src/agent/protocols/src/*.rs
!src/agent/protocols/src/lib.rs
build
src/tools/log-parser/kata-log-parser

24 changes: Makefile
@@ -8,7 +8,6 @@ COMPONENTS =

COMPONENTS += libs
COMPONENTS += agent
COMPONENTS += dragonball
COMPONENTS += runtime
COMPONENTS += runtime-rs

@@ -16,17 +15,11 @@ COMPONENTS += runtime-rs

TOOLS =

TOOLS += agent-ctl
TOOLS += kata-ctl
TOOLS += log-parser
TOOLS += log-parser-rs
TOOLS += runk
TOOLS += trace-forwarder
TOOLS += runk
TOOLS += log-parser

STANDARD_TARGETS = build check clean install static-checks-build test vendor

# Variables for the build-and-publish-kata-debug target
KATA_DEBUG_REGISTRY ?= ""
KATA_DEBUG_TAG ?= ""
STANDARD_TARGETS = build check clean install test vendor

default: all

@@ -42,19 +35,18 @@ generate-protocols:
	make -C src/agent generate-protocols

# Some static checks rely on generated source files of components.
static-checks: static-checks-build
static-checks: build
	bash ci/static-checks.sh

docs-url-alive-check:
	bash ci/docs-url-alive-check.sh

build-and-publish-kata-debug:
	bash tools/packaging/kata-debug/kata-debug-build-and-upload-payload.sh ${KATA_DEBUG_REGISTRY} ${KATA_DEBUG_TAG}

.PHONY: \
	all \
	kata-tarball \
	install-tarball \
	binary-tarball \
	default \
	install-binary-tarball \
	static-checks \
	docs-url-alive-check
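
The diff above adds a `build-and-publish-kata-debug` target driven by the two `KATA_DEBUG_*` variables. A minimal invocation might look like the following sketch; the registry and tag values are illustrative placeholders, not project defaults:

```bash
# Hypothetical example: build the kata-debug payload and push it to a registry
# you control. Both values below are placeholders.
$ make build-and-publish-kata-debug \
      KATA_DEBUG_REGISTRY="quay.io/example/kata-debug" \
      KATA_DEBUG_TAG="dev"
```
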
15 changes: README.md
@@ -1,6 +1,4 @@
<img src="https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/kata/SVG/kata-1.svg" width="900">

[](https://github.com/kata-containers/kata-containers/actions/workflows/payload-after-push.yaml) [](https://github.com/kata-containers/kata-containers/actions/workflows/ci-nightly.yaml)
<img src="https://www.openstack.org/assets/kata/kata-vertical-on-white.png" width="150">

# Kata Containers

@@ -121,8 +119,10 @@ The table below lists the core parts of the project:
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
| [runtime-rs](src/runtime-rs) | core | The Rust version runtime. |
| [agent](src/agent) | core | Management process running inside the virtual machine / POD that sets up the container environment. |
| [libraries](src/libs) | core | Library crates shared by multiple Kata Container components or published to [`crates.io`](https://crates.io/index.html) |
| [`dragonball`](src/dragonball) | core | An optional built-in VMM that brings an out-of-the-box Kata Containers experience, with optimizations for container workloads |
| [documentation](docs) | documentation | Documentation common to all components (such as design and install documentation). |
| [libraries](src/libs) | core | Library crates shared by multiple Kata Container components or published to [`crates.io`](https://crates.io/index.html) |
| [tests](https://github.com/kata-containers/tests) | tests | Excludes unit tests which live with the main code. |

### Additional components

@@ -134,10 +134,7 @@ The table below lists the remaining parts of the project:
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
| [kernel](https://www.kernel.org) | kernel | Linux kernel used by the hypervisor to boot the guest image. Patches are stored [here](tools/packaging/kernel). |
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images and kernel for the hypervisor. |
| [kata-debug](tools/packaging/kata-debug/README.md) | infrastructure | Utility tool to gather Kata Containers debug information from Kubernetes clusters. |
| [`agent-ctl`](src/tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
| [`kata-ctl`](src/tools/kata-ctl) | utility | Tool that provides advanced commands and debug facilities. |
| [`log-parser-rs`](src/tools/log-parser-rs) | utility | Tool that aids in analyzing logs from the Kata runtime. |
| [`trace-forwarder`](src/tools/trace-forwarder) | utility | Agent tracing helper. |
| [`runk`](src/tools/runk) | utility | Standard OCI container runtime based on the agent. |
| [`ci`](https://github.com/kata-containers/ci) | CI | Continuous Integration configuration files and scripts. |
@@ -147,10 +144,8 @@ The table below lists the remaining parts of the project:

Kata Containers is now
[available natively for most distributions](docs/install/README.md#packaged-installation-methods).

## Metrics tests

See the [metrics documentation](tests/metrics/README.md).
However, packaging scripts and metadata are still used to generate [snap](snap/local) and GitHub releases. See
the [components](#components) section for further details.

## Glossary of Terms

@@ -7,10 +7,12 @@

set -o errexit

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
script_name="$(basename "${BASH_SOURCE[0]}")"
cidir=$(dirname "$0")
source "${cidir}/lib.sh"

source "${script_dir}/../tests/common.bash"
clone_tests_repo

source "${tests_repo_dir}/.ci/lib.sh"

# The following variables if set on the environment will change the behavior
# of gperf and libseccomp configure scripts, that may lead this script to
@@ -23,11 +25,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
# Variables for libseccomp
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
if [ -z "${libseccomp_version}" ]; then
	libseccomp_version=$(get_from_kata_deps "externals.libseccomp.version")
	libseccomp_version=$(get_version "externals.libseccomp.version")
fi
libseccomp_url="${LIBSECCOMP_URL:-""}"
if [ -z "${libseccomp_url}" ]; then
	libseccomp_url=$(get_from_kata_deps "externals.libseccomp.url")
	libseccomp_url=$(get_version "externals.libseccomp.url")
fi
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
libseccomp_tarball_url="${libseccomp_url}/releases/download/v${libseccomp_version}/${libseccomp_tarball}"
@@ -36,11 +38,11 @@ cflags="-O2"
# Variables for gperf
gperf_version="${GPERF_VERSION:-""}"
if [ -z "${gperf_version}" ]; then
	gperf_version=$(get_from_kata_deps "externals.gperf.version")
	gperf_version=$(get_version "externals.gperf.version")
fi
gperf_url="${GPERF_URL:-""}"
if [ -z "${gperf_url}" ]; then
	gperf_url=$(get_from_kata_deps "externals.gperf.url")
	gperf_url=$(get_version "externals.gperf.url")
fi
gperf_tarball="gperf-${gperf_version}.tar.gz"
gperf_tarball_url="${gperf_url}/${gperf_tarball}"
@@ -85,8 +87,7 @@ build_and_install_libseccomp() {
	curl -sLO "${libseccomp_tarball_url}"
	tar -xf "${libseccomp_tarball}"
	pushd "libseccomp-${libseccomp_version}"
	[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
	CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
	./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
	make
	make install
	popd

|
||||
@@ -43,16 +43,6 @@ function install_yq() {
|
||||
"aarch64")
|
||||
goarch=arm64
|
||||
;;
|
||||
"arm64")
|
||||
# If we're on an apple silicon machine, just assign amd64.
|
||||
# The version of yq we use doesn't have a darwin arm build,
|
||||
# but Rosetta can come to the rescue here.
|
||||
if [ $goos == "Darwin" ]; then
|
||||
goarch=amd64
|
||||
else
|
||||
goarch=arm64
|
||||
fi
|
||||
;;
|
||||
"ppc64le")
|
||||
goarch=ppc64le
|
||||
;;
|
||||
@@ -74,7 +64,7 @@ function install_yq() {
|
||||
fi
|
||||
|
||||
## NOTE: ${var,,} => gives lowercase value of var
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos,,}_${goarch}"
|
||||
curl -o "${yq_path}" -LSsf "${yq_url}"
|
||||
[ $? -ne 0 ] && die "Download ${yq_url} failed"
|
||||
chmod +x "${yq_path}"
|
||||
|
||||
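The `${goos,,}` change above works because bash's `${var,,}` expansion lowercases the variable's value, so the composed URL matches the lowercase OS names used in `yq` release asset filenames. A quick sketch of the difference:

```bash
# Illustration of the ${var,,} expansion used in the fix (requires bash 4+).
goos="$(uname -s)"           # e.g. "Linux" or "Darwin"
echo "yq_${goos}_amd64"      # "yq_Linux_amd64"  - does not match the asset name
echo "yq_${goos,,}_amd64"    # "yq_linux_amd64"  - matches the release asset
```
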
@@ -2,8 +2,6 @@

This document is written **specifically for developers**: it is not intended for end users.

If you want to contribute changes that you have made, please read the [community guidelines](https://github.com/kata-containers/community/blob/main/CONTRIBUTING.md) for information about our processes.

# Assumptions

- You are working on a non-critical test or development system.

@@ -35,41 +33,51 @@ You need to install the following to build Kata Containers components:

- `make`.
- `gcc` (required for building the shim and runtime).

# Build and install Kata Containers
## Build and install the Kata Containers runtime
# Build and install the Kata Containers runtime

```bash
$ git clone https://github.com/kata-containers/kata-containers.git
$ pushd kata-containers/src/runtime
$ make && sudo -E "PATH=$PATH" make install
$ sudo mkdir -p /etc/kata-containers/
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
$ popd
$ go get -d -u github.com/kata-containers/kata-containers
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/runtime
$ make && sudo -E PATH=$PATH make install
```

The build will create the following:

- runtime binary: `/usr/local/bin/kata-runtime` and `/usr/local/bin/containerd-shim-kata-v2`
- configuration file: `/usr/share/defaults/kata-containers/configuration.toml` and `/etc/kata-containers/configuration.toml`
- configuration file: `/usr/share/defaults/kata-containers/configuration.toml`

# Check hardware requirements

You can check if your system is capable of creating a Kata Container by running the following:

```
$ sudo kata-runtime check
```

If your system is *not* able to run Kata Containers, the previous command will error out and explain why.

## Configure to use initrd or rootfs image

Kata Containers can run with either an initrd image or a rootfs image.

If you want to test with `initrd`, make sure you have uncommented `initrd = /usr/share/kata-containers/kata-containers-initrd.img`
in your configuration file, commenting out the `image` line in
`/etc/kata-containers/configuration.toml`. For example:
If you want to test with `initrd`, make sure you have `initrd = /usr/share/kata-containers/kata-containers-initrd.img`
in `/usr/share/defaults/kata-containers/configuration.toml` and comment out the `image` line with the following. For example:

```bash
$ sudo mkdir -p /etc/kata-containers/
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
$ sudo sed -i 's/^\(image =.*\)/# \1/g' /etc/kata-containers/configuration.toml
$ sudo sed -i 's/^# \(initrd =.*\)/\1/g' /etc/kata-containers/configuration.toml
```
You can create the initrd image as shown in the [create an initrd image](#create-an-initrd-image---optional) section.

If you want to test with a rootfs `image`, make sure you have uncommented `image = /usr/share/kata-containers/kata-containers.img`
If you want to test with a rootfs `image`, make sure you have `image = /usr/share/kata-containers/kata-containers.img`
in your configuration file, commenting out the `initrd` line. For example:

```bash
$ sudo mkdir -p /etc/kata-containers/
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
$ sudo sed -i 's/^\(initrd =.*\)/# \1/g' /etc/kata-containers/configuration.toml
```
The rootfs image is created as shown in the [create a rootfs image](#create-a-rootfs-image) section.
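
After switching between the two, it is worth confirming that exactly one of `image` and `initrd` is active. A small sanity check, assuming the `/etc/kata-containers` copy of the configuration file is the one in use:

```bash
# Exactly one of the two entries should print uncommented.
$ grep -E '^(image|initrd) *=' /etc/kata-containers/configuration.toml
```
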
@@ -82,38 +90,19 @@ rootfs `image`(100MB+).

Enable seccomp as follows:

```bash
$ sudo sed -i '/^disable_guest_seccomp/ s/true/false/' /etc/kata-containers/configuration.toml
```

This will pass container seccomp profiles to the kata agent.
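
You can confirm the effect of the `sed` command above; after the change, the setting should read as follows (a sketch, assuming the `/etc/kata-containers` configuration file):

```bash
$ grep '^disable_guest_seccomp' /etc/kata-containers/configuration.toml
disable_guest_seccomp = false
```
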
## Enable SELinux on the guest

> **Note:**
>
> - To enable SELinux on the guest, SELinux MUST be also enabled on the host.
> - You MUST create and build a rootfs image for SELinux in advance.
>   See [Create a rootfs image](#create-a-rootfs-image) and [Build a rootfs image](#build-a-rootfs-image).
> - SELinux on the guest is supported in only a rootfs image currently, so
>   you cannot enable SELinux with the agent init (`AGENT_INIT=yes`) yet.

Enable guest SELinux in Enforcing mode as follows:

```
$ sudo sed -i '/^disable_guest_selinux/ s/true/false/g' /etc/kata-containers/configuration.toml
```

The runtime will automatically add `selinux=1` to the kernel parameters and the `xattr` option to
`virtiofsd` when `disable_guest_selinux` is set to `false`.

If you want to enable SELinux in Permissive mode, add `enforcing=0` to the kernel parameters.
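
One way to add `enforcing=0` is the same `kernel_params` pattern used elsewhere in this guide; this is a sketch, not a command taken from the upstream documentation:

```bash
# Append enforcing=0 to the guest kernel command line (Permissive mode).
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 enforcing=0"/g' /etc/kata-containers/configuration.toml
```
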
## Enable full debug

Enable full debug as follows:

```bash
$ sudo mkdir -p /etc/kata-containers/
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
$ sudo sed -i -e 's/^# *\(enable_debug\).*=.*$/\1 = true/g' /etc/kata-containers/configuration.toml
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.log=debug initcall_debug"/g' /etc/kata-containers/configuration.toml
```

@@ -186,7 +175,7 @@ and offers possible workarounds and fixes.
it stores. When messages are suppressed, it is noted in the logs. This can be checked
for by looking for those notifications, such as:

```bash
$ sudo journalctl --since today | fgrep Suppressed
Jun 29 14:51:17 mymachine systemd-journald[346]: Suppressed 4150 messages from /system.slice/docker.service
```

@@ -211,7 +200,7 @@ RateLimitBurst=0

Restart `systemd-journald` for the changes to take effect:

```bash
$ sudo systemctl restart systemd-journald
```

@@ -225,52 +214,39 @@ $ sudo systemctl restart systemd-journald

The agent is built with a statically linked `musl`. The default `libc` used is `musl`, but on `ppc64le` and `s390x`, `gnu` should be used. To configure this:

```bash
$ export ARCH="$(uname -m)"
$ export ARCH=$(uname -m)
$ if [ "$ARCH" = "ppc64le" -o "$ARCH" = "s390x" ]; then export LIBC=gnu; else export LIBC=musl; fi
$ [ "${ARCH}" == "ppc64le" ] && export ARCH=powerpc64le
$ rustup target add "${ARCH}-unknown-linux-${LIBC}"
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
$ rustup target add ${ARCH}-unknown-linux-${LIBC}
```

To build the agent:

```
$ go get -d -u github.com/kata-containers/kata-containers
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/agent && make
```

The agent is built with seccomp capability by default.
If you want to build the agent without the seccomp capability, you need to run `make` with `SECCOMP=no` as follows.

```bash
$ make -C kata-containers/src/agent SECCOMP=no
```

For building the agent with seccomp support using `musl`, set the environment
variables for the [`libseccomp` crate](https://github.com/libseccomp-rs/libseccomp-rs).

```bash
$ export LIBSECCOMP_LINK_TYPE=static
$ export LIBSECCOMP_LIB_PATH="the path of the directory containing libseccomp.a"
$ make -C kata-containers/src/agent
$ make -C $GOPATH/src/github.com/kata-containers/kata-containers/src/agent SECCOMP=no
```

If the compilation fails when the agent tries to link the `libseccomp` library statically
against `musl`, you will need to build `libseccomp` manually with `-U_FORTIFY_SOURCE`.
You can use [our script](https://github.com/kata-containers/kata-containers/blob/main/ci/install_libseccomp.sh)
to install `libseccomp` for the agent.

```bash
$ mkdir -p ${seccomp_install_path} ${gperf_install_path}
$ kata-containers/ci/install_libseccomp.sh ${seccomp_install_path} ${gperf_install_path}
$ export LIBSECCOMP_LIB_PATH="${seccomp_install_path}/lib"
```

On `ppc64le` and `s390x`, `glibc` is used. You will need to install the `libseccomp` library
provided by your distribution.

> e.g. `libseccomp-dev` for Ubuntu, or `libseccomp-devel` for CentOS

> **Note:**
>
> - If you enable seccomp in the main configuration file but build the agent without seccomp capability,
>   the runtime exits conservatively with an error message.
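
Putting this section together, a full agent build with `libseccomp` statically linked against `musl` might look like the following sketch; the two install directories are arbitrary paths you choose:

```bash
# Sketch: build the agent with static libseccomp on a musl target.
$ export seccomp_install_path="${HOME}/libseccomp-install"   # arbitrary location
$ export gperf_install_path="${HOME}/gperf-install"          # arbitrary location
$ mkdir -p "${seccomp_install_path}" "${gperf_install_path}"
$ kata-containers/ci/install_libseccomp.sh "${seccomp_install_path}" "${gperf_install_path}"
$ export LIBSECCOMP_LINK_TYPE=static
$ export LIBSECCOMP_LIB_PATH="${seccomp_install_path}/lib"
$ make -C kata-containers/src/agent
```
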
## Get the osbuilder

```
$ go get -d -u github.com/kata-containers/kata-containers
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder
```

## Create a rootfs image
### Create a local rootfs

@@ -278,32 +254,24 @@ As a prerequisite, you need to install Docker. Otherwise, you will not be
able to run the `rootfs.sh` script with `USE_DOCKER=true` as expected in
the following example.

```bash
$ export distro="ubuntu" # example
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
$ sudo rm -rf "${ROOTFS_DIR}"
$ pushd kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E USE_DOCKER=true ./rootfs.sh "${distro}"'
$ popd
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
$ sudo rm -rf ${ROOTFS_DIR}
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true ./rootfs.sh ${distro}'
```

You MUST choose a distribution (e.g., `ubuntu`) for `${distro}`.
You can get a list of the supported distributions by running the following.

```bash
$ ./kata-containers/tools/osbuilder/rootfs-builder/rootfs.sh -l
$ ./rootfs.sh -l
```

If you want to build the agent without seccomp capability, you need to run the `rootfs.sh` script with `SECCOMP=no` as follows.

```bash
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${distro}"'
```

If you want to enable SELinux on the guest, you MUST choose `centos` and run the `rootfs.sh` script with `SELINUX=yes` as follows.

```
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SELINUX=yes ./rootfs.sh centos'
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
```

> **Note:**
@@ -319,32 +287,18 @@ $ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SELINUX=yes ./rootfs.sh ce
>
> - You should only do this step if you are testing with the latest version of the agent.

```bash
$ sudo install -o root -g root -m 0550 -t "${ROOTFS_DIR}/usr/bin" "${ROOTFS_DIR}/../../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent"
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-agent.service" "${ROOTFS_DIR}/usr/lib/systemd/system/"
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-containers.target" "${ROOTFS_DIR}/usr/lib/systemd/system/"
$ sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/usr/bin ../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
```

### Build a rootfs image

```bash
$ pushd kata-containers/tools/osbuilder/image-builder
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh "${ROOTFS_DIR}"'
$ popd
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/image-builder
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
```

If you want to enable SELinux on the guest, you MUST run the `image_builder.sh` script with `SELINUX=yes`
to label the guest image as follows.
To label the image on the host, you need to make sure that SELinux is enabled (`selinuxfs` is mounted) on the host
and the rootfs MUST be created by running the `rootfs.sh` with `SELINUX=yes`.

```
$ script -fec 'sudo -E USE_DOCKER=true SELINUX=yes ./image_builder.sh ${ROOTFS_DIR}'
```

Currently, `image_builder.sh` uses `chcon` as an interim solution in order to apply `container_runtime_exec_t`
to the `kata-agent`. Hence, if you run `restorecon` on the guest image after running `image_builder.sh`,
you need to relabel the `kata-agent` as `container_runtime_exec_t` again yourself.

> **Notes:**
>
@@ -355,31 +309,25 @@ the `kata-agent` needs to be labeled `container_runtime_exec_t` again by yoursel
> variable in the previous command and ensure the `qemu-img` command is
> available on your system.
> - If `qemu-img` is not installed, you will likely see errors such as `ERROR: File /dev/loop19p1 is not a block device` and `losetup: /tmp/tmp.bHz11oY851: Warning: file is smaller than 512 bytes; the loop device may be useless or invisible for system tools`. These can be mitigated by installing the `qemu-img` command (available in the `qemu-img` package on Fedora or the `qemu-utils` package on Debian).
> - If the `loop` module is not probed, you will likely see errors such as `losetup: cannot find an unused loop device`. Executing `modprobe loop` should resolve this.


### Install the rootfs image

```bash
$ pushd kata-containers/tools/osbuilder/image-builder
$ commit="$(git log --format=%h -1 HEAD)"
$ date="$(date +%Y-%m-%d-%T.%N%z)"
$ commit=$(git log --format=%h -1 HEAD)
$ date=$(date +%Y-%m-%d-%T.%N%z)
$ image="kata-containers-${date}-${commit}"
$ sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
$ popd
```
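
As a quick sanity check, the `kata-containers.img` symbolic link should now resolve to the image you just installed; the name printed below is illustrative and will match your `${image}` value:

```bash
$ readlink /usr/share/kata-containers/kata-containers.img
kata-containers-2023-01-01-12:00:00.000000000+0000-a1b2c3d
```
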
## Create an initrd image - OPTIONAL
### Create a local rootfs for initrd image

```bash
$ export distro="ubuntu" # example
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
$ sudo rm -rf "${ROOTFS_DIR}"
$ pushd kata-containers/tools/osbuilder/rootfs-builder/
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./rootfs.sh "${distro}"'
$ popd
$ export ROOTFS_DIR="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs"
$ sudo rm -rf ${ROOTFS_DIR}
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true ./rootfs.sh ${distro}'
```
`AGENT_INIT` controls if the guest image uses the Kata agent as the guest `init` process. When you create an initrd image,
always set `AGENT_INIT` to `yes`.

@@ -387,14 +335,14 @@ always set `AGENT_INIT` to `yes`.
You MUST choose a distribution (e.g., `ubuntu`) for `${distro}`.
You can get a list of the supported distributions by running the following.

```bash
$ ./kata-containers/tools/osbuilder/rootfs-builder/rootfs.sh -l
$ ./rootfs.sh -l
```

If you want to build the agent without seccomp capability, you need to run the `rootfs.sh` script with `SECCOMP=no` as follows.

```bash
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${distro}"'
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
```

> **Note:**
@@ -403,31 +351,28 @@ $ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${

Optionally, add your custom agent binary to the rootfs with the following commands. The default `$LIBC` used
is `musl`, but on ppc64le and s390x, `gnu` should be used. Also, Rust refers to ppc64le as `powerpc64le`:
```bash
$ export ARCH="$(uname -m)"
$ export ARCH=$(uname -m)
$ [ "${ARCH}" == "ppc64le" ] || [ "${ARCH}" == "s390x" ] && export LIBC=gnu || export LIBC=musl
$ [ "${ARCH}" == "ppc64le" ] && export ARCH=powerpc64le
$ sudo install -o root -g root -m 0550 -T "${ROOTFS_DIR}/../../../../src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent" "${ROOTFS_DIR}/sbin/init"
$ [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
$ sudo install -o root -g root -m 0550 -T ../../../src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent ${ROOTFS_DIR}/sbin/init
```

### Build an initrd image

```bash
$ pushd kata-containers/tools/osbuilder/initrd-builder
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./initrd_builder.sh "${ROOTFS_DIR}"'
$ popd
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/initrd-builder
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./initrd_builder.sh ${ROOTFS_DIR}'
```

### Install the initrd image

```bash
$ pushd kata-containers/tools/osbuilder/initrd-builder
$ commit="$(git log --format=%h -1 HEAD)"
$ date="$(date +%Y-%m-%d-%T.%N%z)"
$ commit=$(git log --format=%h -1 HEAD)
$ date=$(date +%Y-%m-%d-%T.%N%z)
$ image="kata-containers-initrd-${date}-${commit}"
$ sudo install -o root -g root -m 0640 -D kata-containers-initrd.img "/usr/share/kata-containers/${image}"
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers-initrd.img)
$ popd
```

# Install guest kernel images

@@ -446,44 +391,44 @@ Kata Containers makes use of upstream QEMU branch. The exact version
and repository utilized can be found by looking at the [versions file](../versions.yaml).

Find the correct version of QEMU from the versions file:
```bash
$ source kata-containers/tools/packaging/scripts/lib.sh
$ qemu_version="$(get_from_kata_deps "assets.hypervisor.qemu.version")"
$ echo "${qemu_version}"
$ source ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/scripts/lib.sh
$ qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu.version")
$ echo ${qemu_version}
```
Get source from the matching branch of QEMU:
```bash
$ git clone -b "${qemu_version}" https://github.com/qemu/qemu.git
$ your_qemu_directory="$(realpath qemu)"
$ go get -d github.com/qemu/qemu
$ cd ${GOPATH}/src/github.com/qemu/qemu
$ git checkout ${qemu_version}
$ your_qemu_directory=${GOPATH}/src/github.com/qemu/qemu
```

There are scripts to manage the build and packaging of QEMU. For the examples below, set your
environment as:
```bash
$ packaging_dir="$(realpath kata-containers/tools/packaging)"
$ go get -d github.com/kata-containers/kata-containers
$ packaging_dir="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging"
```

Kata often utilizes patches for not-yet-upstream and/or backported fixes for components,
including QEMU. These can be found in the [packaging/QEMU directory](../tools/packaging/qemu/patches),
and it's *recommended* that you apply them. For example, suppose that you are going to build QEMU
version 5.2.0, do:
```bash
$ "$packaging_dir/scripts/apply_patches.sh" "$packaging_dir/qemu/patches/5.2.x/"
$ cd $your_qemu_directory
$ $packaging_dir/scripts/apply_patches.sh $packaging_dir/qemu/patches/5.2.x/
```

To build utilizing the same options as Kata, you should make use of the `configure-hypervisor.sh` script. For example:
```bash
$ pushd "$your_qemu_directory"
$ "$packaging_dir/scripts/configure-hypervisor.sh" kata-qemu > kata.cfg
$ cd $your_qemu_directory
$ $packaging_dir/scripts/configure-hypervisor.sh kata-qemu > kata.cfg
$ eval ./configure "$(cat kata.cfg)"
$ make -j $(nproc --ignore=1)
# Optional
$ sudo -E make install
$ popd
```

If you do not want to install the respective QEMU version, the configuration file can be modified to point to the correct binary. In `/etc/kata-containers/configuration.toml`, change `path = "/path/to/qemu/build/qemu-system-x86_64"` to point to the correct QEMU binary.
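
For example, a `sed` sketch along these lines would point the `path` entry at a locally built binary; the build location shown is an assumption based on the default QEMU build directory, so adjust it as needed:

```bash
# Point [hypervisor.qemu] at the freshly built binary (illustrative path).
$ sudo sed -i -e 's|^path = ".*"|path = "'"${your_qemu_directory}"'/build/qemu-system-x86_64"|' /etc/kata-containers/configuration.toml
```
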
See the [static-build script for QEMU](../tools/packaging/static-build/qemu/build-static-qemu.sh) for a reference on how to get, setup, configure and build QEMU for Kata.

### Build a custom QEMU for aarch64/arm64 - REQUIRED

@@ -494,33 +439,11 @@ See the [static-build script for QEMU](../tools/packaging/static-build/qemu/buil
> under upstream review for supporting NVDIMM on aarch64.
>
You could build the custom `qemu-system-aarch64` as required with the following command:
```bash
$ git clone https://github.com/kata-containers/tests.git
$ script -fec 'sudo -E tests/.ci/install_qemu.sh'
$ go get -d github.com/kata-containers/tests
$ script -fec 'sudo -E ${GOPATH}/src/github.com/kata-containers/tests/.ci/install_qemu.sh'
```

## Build `virtiofsd`

When using the file system type virtio-fs (default), `virtiofsd` is required.

```bash
$ pushd kata-containers/tools/packaging/static-build/virtiofsd
$ ./build.sh
$ popd
```

Modify `/etc/kata-containers/configuration.toml` and update value `virtio_fs_daemon = "/path/to/kata-containers/tools/packaging/static-build/virtiofsd/virtiofsd/virtiofsd"` to point to the binary.
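
The edit can be scripted with `sed`; the binary path below is the build output location mentioned above, so adjust it to where you cloned the repository:

```bash
$ sudo sed -i -e 's|^virtio_fs_daemon = ".*"|virtio_fs_daemon = "/path/to/kata-containers/tools/packaging/static-build/virtiofsd/virtiofsd/virtiofsd"|' /etc/kata-containers/configuration.toml
```
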
# Check hardware requirements

You can check if your system is capable of creating a Kata Container by running the following:

```bash
$ sudo kata-runtime check
```

If your system is *not* able to run Kata Containers, the previous command will error out and explain why.

# Run Kata Containers with Containerd
Refer to the [How to use Kata Containers and Containerd](how-to/containerd-kata.md) how-to guide.

@@ -551,7 +474,7 @@ See [Set up a debug console](#set-up-a-debug-console).

## Checking Docker default runtime

```bash
$ sudo docker info 2>/dev/null | grep -i "default runtime" | cut -d: -f2- | grep -q runc && echo "SUCCESS" || echo "ERROR: Incorrect default Docker runtime"
```
## Set up a debug console

@@ -568,7 +491,7 @@ contain either `/bin/sh` or `/bin/bash`.

Enable `debug_console_enabled` in the `configuration.toml` configuration file:

```toml
[agent.kata]
debug_console_enabled = true
```

@@ -579,7 +502,7 @@ This will pass `agent.debug_console agent.debug_console_vport=1026` to agent as

For Kata Containers `2.0.x` releases, the `kata-runtime exec` command depends on the `kata-monitor` running, in order to get the sandbox's `vsock` address to connect to. Thus, first start the `kata-monitor` process.

```bash
$ sudo kata-monitor
```

@@ -587,15 +510,10 @@ $ sudo kata-monitor

#### Connect to debug console

You need to start a container, for example:
```bash
$ sudo ctr run --runtime io.containerd.kata.v2 -d docker.io/library/ubuntu:latest testdebug
```

Then, you can use the command `kata-runtime exec <sandbox id>` to connect to the debug console.
Command `kata-runtime exec` is used to connect to the debug console.

```
$ kata-runtime exec testdebug
$ kata-runtime exec 1a9ab65be63b8b03dfd0c75036d27f0ed09eab38abb45337fea83acd3cd7bacd
bash-4.2# id
uid=0(root) gid=0(root) groups=0(root)
bash-4.2# pwd
```
@@ -646,10 +564,10 @@ an additional `coreutils` package.

For example using CentOS:

```bash
$ pushd kata-containers/tools/osbuilder/rootfs-builder
$ export ROOTFS_DIR="$(realpath ./rootfs)"
$ script -fec 'sudo -E USE_DOCKER=true EXTRA_PKGS="bash coreutils" ./rootfs.sh centos'
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true EXTRA_PKGS="bash coreutils" ./rootfs.sh centos'
```

#### Build the debug image

@@ -661,13 +579,12 @@ section when using rootfs, or when using initrd, complete the steps in the [Buil

Install the image:

>**Note**: When using an initrd image, replace the below rootfs image name `kata-containers.img`
>with the initrd image name `kata-containers-initrd.img`.

```bash
$ name="kata-containers-centos-with-debug-console.img"
$ sudo install -o root -g root -m 0640 kata-containers.img "/usr/share/kata-containers/${name}"
$ popd
```

Next, modify the `image=` values in the `[hypervisor.qemu]` section of the
@@ -676,7 +593,7 @@ to specify the full path to the image name specified in the previous code
section. Alternatively, recreate the symbolic link so it points to
the new debug image:

```bash
$ (cd /usr/share/kata-containers && sudo ln -sf "$name" kata-containers.img)
```

@@ -687,7 +604,7 @@ to avoid all subsequently created containers from using the debug image.

Create a container as normal. For example using `crictl`:

```bash
$ sudo crictl run -r kata container.yaml pod.yaml
```

@@ -695,25 +612,25 @@ $ sudo crictl run -r kata container.yaml pod.yaml

The steps required to enable debug console for QEMU slightly differ from
those for firecracker / cloud-hypervisor.

##### Enabling debug console for QEMU

Add `agent.debug_console` to the guest kernel command line to allow the agent process to start a debug console.

```bash
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_console"/g' "${kata_configuration_file}"
```

Here `kata_configuration_file` could point to `/etc/kata-containers/configuration.toml`
or `/usr/share/defaults/kata-containers/configuration.toml`
or `/opt/kata/share/defaults/kata-containers/configuration-{hypervisor}.toml`, if
you installed Kata Containers using `kata-deploy`.
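
Note that `${kata_configuration_file}` is a shell variable you set yourself before running the `sed` commands in this section, for example:

```bash
# Pick whichever configuration file your installation actually uses.
$ kata_configuration_file="/etc/kata-containers/configuration.toml"
```
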
##### Enabling debug console for cloud-hypervisor / firecracker

Slightly different configuration is required in case of firecracker and cloud hypervisor.
Firecracker and cloud-hypervisor don't have a UNIX socket connected to `/dev/console`.
Hence, the kernel command line option `agent.debug_console` will not work for them.
These hypervisors support `hybrid vsocks`, which can be used for communication
between the host and the guest. The kernel command line option `agent.debug_console_vport`
was added to allow developers to specify on which `vsock` port the debugging console should be connected.

@@ -721,12 +638,12 @@ between the host and the guest. The kernel command line option `agent.debug_cons

Add the parameter `agent.debug_console_vport=1026` to the kernel command line
as shown below:
```bash
sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_console_vport=1026"/g' "${kata_configuration_file}"
```

> **Note** Ports 1024 and 1025 are reserved for communication with the agent
> and gathering of agent logs respectively.

##### Connecting to the debug console

@@ -734,7 +651,7 @@ Next, connect to the debug console. The VSOCKS paths vary slightly between each
VMM solution.

In case of cloud-hypervisor, connect to the `vsock` as shown:
```bash
$ sudo su -c 'cd /var/run/vc/vm/${sandbox_id}/root/ && socat stdin unix-connect:clh.sock'
CONNECT 1026
```
@@ -742,7 +659,7 @@ CONNECT 1026
**Note**: You need to type `CONNECT 1026` and press the `RETURN` key after entering the `socat` command.

For firecracker, connect to the `hvsock` as shown:
```bash
$ sudo su -c 'cd /var/run/vc/firecracker/${sandbox_id}/root/ && socat stdin unix-connect:kata.hvsock'
CONNECT 1026
```
@@ -751,7 +668,7 @@ CONNECT 1026

For QEMU, connect to the `vsock` as shown:
```bash
$ sudo su -c 'cd /var/run/vc/vm/${sandbox_id} && socat "stdin,raw,echo=0,escape=0x11" "unix-connect:console.sock"'
```

@@ -764,7 +681,7 @@ If the image is created using
[osbuilder](../tools/osbuilder), the following YAML
file exists and contains details of the image and how it was created:

```bash
$ cat /var/lib/osbuilder/osbuilder.yaml
```


@@ -147,8 +147,7 @@ these commands is potentially challenging.
See issue https://github.com/clearcontainers/runtime/issues/341 and [the constraints challenge](#the-constraints-challenge) for more information.

For CPU resource management see
[CPU constraints (in runtime-go)](design/vcpu-handling-runtime-go.md).
[CPU constraints (in runtime-rs)](design/vcpu-handling-runtime-rs.md).
[CPU constraints](design/vcpu-handling.md).

# Architectural limitations


@@ -28,6 +28,23 @@
$ ./update-repository-version.sh -p "$NEW_VERSION" "$BRANCH"
```

### Point tests repository to stable branch

If you create a new stable branch, i.e. if your release changes a major or minor version number (not a patch release), then
you should modify the `tests` repository to point to that newly created stable branch and not the `main` branch.
The objective is that changes in the CI on the main branch will not impact the stable branch.

In the tests directory, change references to the `main` branch in the following files; a scripted sketch follows the list:
* `README.md`
* `versions.yaml`
* `cmd/github-labels/labels.yaml.in`
* `cmd/pmemctl/pmemctl.sh`
* `.ci/lib.sh`
* `.ci/static-checks.sh`
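
A rough sketch of scripting these edits from the top of the `tests` repository; the branch name below is illustrative, and a blind text replacement should always be reviewed with `git diff` before committing:

```bash
# Replace references to the main branch in the files listed above.
$ new_branch="stable-2.1"   # illustrative value
$ for f in README.md versions.yaml cmd/github-labels/labels.yaml.in \
           cmd/pmemctl/pmemctl.sh .ci/lib.sh .ci/static-checks.sh; do
      sed -i "s/\bmain\b/${new_branch}/g" "$f"
  done
```
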
|
||||
See the commits in [the corresponding PR for stable-2.1](https://github.com/kata-containers/tests/pull/3504) for an example of the changes.
|
||||
|
||||
|
||||
### Merge all bump version Pull requests
|
||||
|
||||
- The above step will create a GitHub pull request in the Kata projects. Trigger the CI using `/test` command on each bump Pull request.
|
||||
@@ -46,24 +63,6 @@
|
||||
$ ./tag_repos.sh -p -b "$BRANCH" tag
|
||||
```
|
||||
|
||||
### Point tests repository to stable branch
|
||||
|
||||
If your release changes a major or minor version number(not a patch release), then the above
|
||||
`./tag_repos.sh` script will create a new stable branch in all the repositories in addition to tagging them.
|
||||
This happens when you are making the first `rc` release for a new major or minor version in Kata.
|
||||
In this case, you should modify the `tests` repository to point to the newly created stable branch and not the `main` branch.
|
||||
The objective is that changes in the CI on the main branch will not impact the stable branch.
|
||||
|
||||
In the test directory, change references of the `main` branch to the new stable branch in:
|
||||
* `README.md`
|
||||
* `versions.yaml`
|
||||
* `cmd/github-labels/labels.yaml.in`
|
||||
* `cmd/pmemctl/pmemctl.sh`
|
||||
* `.ci/lib.sh`
|
||||
* `.ci/static-checks.sh`
|
||||
|
||||
See the commits in [the corresponding PR for stable-2.1](https://github.com/kata-containers/tests/pull/3504) for an example of the changes.
|
||||
|
||||
### Check Git-hub Actions
|
||||
|
||||
We make use of [GitHub actions](https://github.com/features/actions) in this [file](../.github/workflows/release.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.
|
||||
|
||||
@@ -6,19 +6,13 @@ Kata Containers design documents:
|
||||
- [API Design of Kata Containers](kata-api-design.md)
|
||||
- [Design requirements for Kata Containers](kata-design-requirements.md)
|
||||
- [VSocks](VSocks.md)
|
||||
- [VCPU handling(in runtime-go)](vcpu-handling-runtime-go.md)
|
||||
- [VCPU handling(in runtime-rs)](vcpu-handling-runtime-rs.md)
|
||||
- [VCPU threads pinning](vcpu-threads-pinning.md)
|
||||
- [VCPU handling](vcpu-handling.md)
|
||||
- [Host cgroups](host-cgroups.md)
|
||||
- [Agent systemd cgroup](agent-systemd-cgroup.md)
|
||||
- [`Inotify` support](inotify.md)
|
||||
- [`Hooks` support](hooks-handling.md)
|
||||
- [Metrics(Kata 2.0)](kata-2-0-metrics.md)
|
||||
- [Metrics in Rust Runtime(runtime-rs)](kata-metrics-in-runtime-rs.md)
|
||||
- [Design for Kata Containers `Lazyload` ability with `nydus`](kata-nydus-design.md)
|
||||
- [Design for direct-assigned volume](direct-blk-device-assignment.md)
|
||||
- [Design for core-scheduling](core-scheduling.md)
|
||||
- [Virtualization Reference Architecture](kata-vra.md)
|
||||
---
|
||||
|
||||
- [Design proposals](proposals)
|
||||
|
||||
@@ -78,4 +78,4 @@ with the containers is if the VM itself or the `containerd-shim-kata-v2` dies, i
|
||||
the containers are removed automatically.
|
||||
|
||||
[1]: https://wiki.qemu.org/Features/VirtioVsock
|
||||
[2]: ./vcpu-handling-runtime-go.md#virtual-cpus-and-kubernetes-pods
|
||||
[2]: ./vcpu-handling.md#virtual-cpus-and-kubernetes-pods
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
# Systemd Cgroup for Agent
|
||||
|
||||
As we know, we can interact with cgroups in two ways, **`cgroupfs`** and **`systemd`**. The former is achieved by reading and writing cgroup `tmpfs` files under `/sys/fs/cgroup` while the latter is done by configuring a transient unit by requesting systemd. Kata agent uses **`cgroupfs`** by default, unless you pass the parameter `--systemd-cgroup`.
|
||||
|
||||
## Usage
|
||||
|
||||
For systemd, kata agent configures cgroups according to the following `linux.cgroupsPath` format standard provided by `runc` (`[slice]:[prefix]:[name]`). If you don't provide a valid `linux.cgroupsPath`, kata agent will treat it as `"system.slice:kata_agent:<container-id>"`.
|
||||
|
||||
> Here slice is a systemd slice under which the container is placed. If empty, it defaults to system.slice, except when cgroup v2 is used and rootless container is created, in which case it defaults to user.slice.
|
||||
>
|
||||
> Note that slice can contain dashes to denote a sub-slice (e.g. user-1000.slice is a correct notation, meaning a `subslice` of user.slice), but it must not contain slashes (e.g. user.slice/user-1000.slice is invalid).
|
||||
>
|
||||
> A slice of `-` represents a root slice.
|
||||
>
|
||||
> Next, prefix and name are used to compose the unit name, which is `<prefix>-<name>.scope`, unless name has `.slice` suffix, in which case prefix is ignored and the name is used as is.
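
For example, a minimal sketch (container ID and slice are illustrative): with the following fragment in the OCI `config.json`, the agent would ask systemd for a transient scope unit named `kata_agent-abcd1234.scope`, placed under `system.slice`:

```json
{
    "linux": {
        "cgroupsPath": "system.slice:kata_agent:abcd1234"
    }
}
```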
|
||||
|
||||
## Supported Properties
|
||||
|
||||
The kata agent translates the parameters in the `linux.resources` section of `config.json` into systemd unit properties and sends them to systemd for configuration. Since systemd supports only a limited set of properties, only the following parameters in `linux.resources` will be applied. Note that hybrid mode is simply treated as legacy mode.
|
||||
|
||||
- CPU
|
||||
|
||||
- v1
|
||||
|
||||
| runtime spec resource | systemd property name |
|
||||
| --------------------- | --------------------- |
|
||||
| `cpu.shares` | `CPUShares` |
|
||||
|
||||
- v2
|
||||
|
||||
| runtime spec resource | systemd property name |
|
||||
| -------------------------- | -------------------------- |
|
||||
| `cpu.shares` | `CPUShares` |
|
||||
| `cpu.period` | `CPUQuotaPeriodUSec`(v242) |
|
||||
| `cpu.period` & `cpu.quota` | `CPUQuotaPerSecUSec` |
|
||||
|
||||
- MEMORY
|
||||
|
||||
- v1
|
||||
|
||||
| runtime spec resource | systemd property name |
|
||||
| --------------------- | --------------------- |
|
||||
| `memory.limit` | `MemoryLimit` |
|
||||
|
||||
- v2
|
||||
|
||||
| runtime spec resource | systemd property name |
|
||||
| ------------------------------ | --------------------- |
|
||||
| `memory.low` | `MemoryLow` |
|
||||
| `memory.max` | `MemoryMax` |
|
||||
| `memory.swap` & `memory.limit` | `MemorySwapMax` |
|
||||
|
||||
- PIDS
|
||||
|
||||
| runtime spec resource | systemd property name |
|
||||
| --------------------- | --------------------- |
|
||||
| `pids.limit` | `TasksMax` |
|
||||
|
||||
- CPUSET
|
||||
|
||||
| runtime spec resource | systemd property name |
|
||||
| --------------------- | -------------------------- |
|
||||
| `cpuset.cpus` | `AllowedCPUs`(v244) |
|
||||
| `cpuset.mems` | `AllowedMemoryNodes`(v244) |
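
As an illustrative sketch (values made up), a `linux.resources` fragment like the one below would, on a cgroup v2 host, roughly translate into the transient unit properties `CPUQuotaPerSecUSec=2s`, `MemoryMax=536870912`, and `TasksMax=100` according to the tables above:

```json
{
    "linux": {
        "resources": {
            "cpu": { "quota": 200000, "period": 100000 },
            "memory": { "limit": 536870912 },
            "pids": { "limit": 100 }
        }
    }
}
```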
|
||||
|
||||
## Systemd Interface
|
||||
|
||||
`session.rs` and `system.rs` in `src/agent/rustjail/src/cgroups/systemd/interface` are automatically generated by `zbus-xmlgen`, which is an accompanying tool provided by `zbus` to generate Rust code from `D-Bus XML interface descriptions`. The specific commands to generate these two files are as follows:
|
||||
|
||||
```shell
|
||||
# system.rs
zbus-xmlgen --system org.freedesktop.systemd1 /org/freedesktop/systemd1
# session.rs
zbus-xmlgen --session org.freedesktop.systemd1 /org/freedesktop/systemd1
|
||||
```
|
||||
|
||||
The current implementation of `cgroups/systemd` uses `system.rs` while `session.rs` could be used to build rootless containers in the future.
|
||||
|
||||
## References
|
||||
|
||||
- [runc - systemd cgroup driver](https://github.com/opencontainers/runc/blob/main/docs/systemd.md)
|
||||
|
||||
- [systemd.resource-control — Resource control unit settings](https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html)
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 193 KiB |
@@ -3,11 +3,11 @@
|
||||
[Kubernetes](https://github.com/kubernetes/kubernetes/), or K8s, is a popular open source
|
||||
container orchestration engine. In Kubernetes, a set of containers sharing resources
|
||||
such as networking, storage, mount, PID, etc. is called a
|
||||
[pod](https://kubernetes.io/docs/concepts/workloads/pods/).
|
||||
[pod](https://kubernetes.io/docs/user-guide/pods/).
|
||||
|
||||
A node can have multiple pods, but at a minimum, a node within a Kubernetes cluster
|
||||
only needs to run a container runtime and a container agent (called a
|
||||
[Kubelet](https://kubernetes.io/docs/concepts/overview/components/#kubelet)).
|
||||
[Kubelet](https://kubernetes.io/docs/admin/kubelet/)).
|
||||
|
||||
Kata Containers represents a Kubelet pod as a VM.
|
||||
|
||||
|
||||
@@ -36,7 +36,7 @@ compatibility, and performance on par with MACVTAP.
|
||||
Kata Containers has deprecated support for bridge networking due to its lower performance relative to TC-filter and MACVTAP.
|
||||
|
||||
Kata Containers supports both
|
||||
[CNM](https://github.com/moby/libnetwork/blob/master/docs/design.md#the-container-network-model)
|
||||
[CNM](https://github.com/docker/libnetwork/blob/master/docs/design.md#the-container-network-model)
|
||||
and [CNI](https://github.com/containernetworking/cni) for networking management.
|
||||
|
||||
## Network Hotplug
|
||||
|
||||
@@ -64,8 +64,8 @@ The kata-runtime is controlled by TOKIO_RUNTIME_WORKER_THREADS to run the OS thr
|
||||
├─ TTRPC listener thread(M * tokio task)
|
||||
├─ TTRPC client handler thread(7 * M * tokio task)
|
||||
├─ container stdin io thread(M * tokio task)
|
||||
├─ container stdout io thread(M * tokio task)
|
||||
└─ container stderr io thread(M * tokio task)
|
||||
├─ container stdin io thread(M * tokio task)
|
||||
└─ container stdin io thread(M * tokio task)
|
||||
```
|
||||
### Extensible Framework
|
||||
The Kata 3.x runtime is designed so that the service, runtime, and hypervisor layers can be extended, combined with configuration to meet the needs of different scenarios. At present, the service layer provides a register mechanism to support multiple services. Services interact with the runtime through messages, and the runtime handler handles messages from services. To meet the needs of a single binary that supports multiple runtimes and hypervisors, the startup must obtain the runtime handler type and hypervisor type through configuration.
|
||||
|
||||
@@ -81,7 +81,7 @@ Notes: given that the `mountInfo` is persisted to the disk by the Kata runtime,
|
||||
Instead of the CSI node driver writing the mount info into a `csiPlugin.json` file under the volume root,
|
||||
as described in the original proposal, here we propose that the CSI node driver passes the mount information to
|
||||
the Kata Containers runtime through a new `kata-runtime` commandline command. The `kata-runtime` then writes the mount
|
||||
information to a `mountInfo.json` file in a predefined location (`/run/kata-containers/shared/direct-volumes/[volume_path]/`).
|
||||
information to a `mount-info.json` file in a predefined location (`/run/kata-containers/shared/direct-volumes/[volume_path]/`).
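
As a sketch of this flow (the volume path and the mount information payload are illustrative), the CSI node driver would invoke something like:

```bash
$ kata-runtime direct-volume add \
      --volume-path "/my/volume/path" \
      --mount-info '{"volume-type": "block", "device": "/dev/sdX", "fs-type": "ext4"}'
```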
|
||||
|
||||
When the Kata Containers runtime starts a container, it verifies whether a volume mount is a direct-assigned volume by checking
|
||||
whether there is a `mountInfo` file under the computed Kata `direct-volumes` directory. If it is, the runtime parses the `mountInfo` file,
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
# Kata Containers support for `Hooks`
|
||||
|
||||
## Introduction
|
||||
|
||||
During container's lifecycle, different Hooks can be executed to do custom actions. In Kata Containers, we support two types of Hooks, `OCI Hooks` and `Kata Hooks`.
|
||||
|
||||
### OCI Hooks
|
||||
|
||||
The OCI Spec stipulates six hooks that can be executed at different points in time and in different namespaces: `Prestart Hooks`, `CreateRuntime Hooks`, `CreateContainer Hooks`, `StartContainer Hooks`, `Poststart Hooks`, and `Poststop Hooks`. Kata Containers supports these hooks as compatibly as possible.
|
||||
|
||||
The path and arguments of these hooks will be passed to Kata for execution via `bundle/config.json`. For example:
|
||||
```
|
||||
...
|
||||
"hooks": {
|
||||
"prestart": [
|
||||
{
|
||||
"path": "/usr/bin/prestart-hook",
|
||||
"args": ["prestart-hook", "arg1", "arg2"],
|
||||
"env": [ "key1=value1"]
|
||||
}
|
||||
],
|
||||
"createRuntime": [
|
||||
{
|
||||
"path": "/usr/bin/createRuntime-hook",
|
||||
"args": ["createRuntime-hook", "arg1", "arg2"],
|
||||
"env": [ "key1=value1"]
|
||||
}
|
||||
]
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
### Kata Hooks
|
||||
|
||||
In Kata, we support another three kinds of hooks executed in the guest VM: the `Guest Prestart Hook`, `Guest Poststart Hook`, and `Guest Poststop Hook`.
|
||||
|
||||
The executable files for Kata Hooks must be packaged in the *guest rootfs*. The file path to those guest hooks should be specified in the configuration file, and guest hooks must be stored in a subdirectory of `guest_hook_path` according to their hook type. For example:
|
||||
|
||||
+ In configuration file:
|
||||
```
|
||||
guest_hook_path="/usr/share/hooks"
|
||||
```
|
||||
+ In guest rootfs, prestart-hook is stored in `/usr/share/hooks/prestart/prestart-hook`.
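
As a sketch, with the `guest_hook_path` above, a guest rootfs providing one hook of each type would be laid out as follows (binary names illustrative):
```
/usr/share/hooks
├── prestart
│   └── prestart-hook
├── poststart
│   └── poststart-hook
└── poststop
    └── poststop-hook
```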
|
||||
|
||||
## Execution
|
||||
The table below summarizes when and where the different hooks are executed in Kata Containers:
|
||||
|
||||
| Hook Name | Hook Type | Hook Path | Exec Place | Exec Time |
|
||||
|---|---|---|---|---|
|
||||
| `Prestart(deprecated)` | OCI hook | host runtime namespace | host runtime namespace | After VM is started, before container is created. |
|
||||
| `CreateRuntime` | OCI hook | host runtime namespace | host runtime namespace | After VM is started, before container is created, after `Prestart` hooks. |
|
||||
| `CreateContainer` | OCI hook | host runtime namespace | host vmm namespace* | After VM is started, before container is created, after `CreateRuntime` hooks. |
|
||||
| `StartContainer` | OCI hook | guest container namespace | guest container namespace | After container is created, before container is started. |
|
||||
| `Poststart` | OCI hook | host runtime namespace | host runtime namespace | After container is started, before start operation returns. |
|
||||
| `Poststop` | OCI hook | host runtime namespace | host runtime namespace | After container is deleted, before delete operation returns. |
|
||||
| `Guest Prestart` | Kata hook | guest agent namespace | guest agent namespace | During start operation, before container command is executed. |
|
||||
| `Guest Poststart` | Kata hook | guest agent namespace | guest agent namespace | During start operation, after container command is executed, before start operation returns. |
|
||||
| `Guest Poststop` | Kata hook | guest agent namespace | guest agent namespace | During delete operation, after container is deleted, before delete operation returns. |
|
||||
|
||||
+ `Hook Path` specifies where the hook's path is resolved.
|
||||
+ `Exec Place` specifies in which namespace those hooks can be executed.
|
||||
+ For `CreateContainer` hooks, OCI requires them to run inside the container namespace while the hook executable path is resolved in the host runtime, which is a non-starter for VM-based containers. We therefore keep them running in the *host vmm namespace.*
|
||||
+ `Exec Time` specifies at which time point those hooks can be executed.
|
||||
@@ -1,50 +0,0 @@
|
||||
# Kata Metrics in Rust Runtime (runtime-rs)
|
||||
|
||||
The Rust runtime (runtime-rs) is responsible for:

- Gathering metrics about the `shim`.
- Gathering metrics from the `hypervisor` (through a `channel`).
- Getting metrics from the `agent` (through `ttrpc`).
|
||||
|
||||
---
|
||||
|
||||
Here are listed all the metrics gathered by `runtime-rs`.
|
||||
|
||||
> * Current status of each entry is marked as:
|
||||
> * ✅:DONE
|
||||
> * 🚧:TODO
|
||||
|
||||
### Kata Shim
|
||||
|
||||
| STATUS | Metric name | Type | Units | Labels |
|
||||
| ------ | ------------------------------------------------------------ | ----------- | -------------- | ------------------------------------------------------------ |
|
||||
| 🚧 | `kata_shim_agent_rpc_durations_histogram_milliseconds`: <br> RPC latency distributions. | `HISTOGRAM` | `milliseconds` | <ul><li>`action` (RPC actions of Kata agent)<ul><li>`grpc.CheckRequest`</li><li>`grpc.CloseStdinRequest`</li><li>`grpc.CopyFileRequest`</li><li>`grpc.CreateContainerRequest`</li><li>`grpc.CreateSandboxRequest`</li><li>`grpc.DestroySandboxRequest`</li><li>`grpc.ExecProcessRequest`</li><li>`grpc.GetMetricsRequest`</li><li>`grpc.GuestDetailsRequest`</li><li>`grpc.ListInterfacesRequest`</li><li>`grpc.ListProcessesRequest`</li><li>`grpc.ListRoutesRequest`</li><li>`grpc.MemHotplugByProbeRequest`</li><li>`grpc.OnlineCPUMemRequest`</li><li>`grpc.PauseContainerRequest`</li><li>`grpc.RemoveContainerRequest`</li><li>`grpc.ReseedRandomDevRequest`</li><li>`grpc.ResumeContainerRequest`</li><li>`grpc.SetGuestDateTimeRequest`</li><li>`grpc.SignalProcessRequest`</li><li>`grpc.StartContainerRequest`</li><li>`grpc.StatsContainerRequest`</li><li>`grpc.TtyWinResizeRequest`</li><li>`grpc.UpdateContainerRequest`</li><li>`grpc.UpdateInterfaceRequest`</li><li>`grpc.UpdateRoutesRequest`</li><li>`grpc.WaitProcessRequest`</li><li>`grpc.WriteStreamRequest`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_fds`: <br> Kata containerd shim v2 open FDs. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_io_stat`: <br> Kata containerd shim v2 process IO statistics. | `GAUGE` | | <ul><li>`item` (see `/proc/<pid>/io`)<ul><li>`cancelledwritebytes`</li><li>`rchar`</li><li>`readbytes`</li><li>`syscr`</li><li>`syscw`</li><li>`wchar`</li><li>`writebytes`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_netdev`: <br> Kata containerd shim v2 network devices statistics. | `GAUGE` | | <ul><li>`interface` (network device name)</li><li>`item` (see `/proc/net/dev`)<ul><li>`recv_bytes`</li><li>`recv_compressed`</li><li>`recv_drop`</li><li>`recv_errs`</li><li>`recv_fifo`</li><li>`recv_frame`</li><li>`recv_multicast`</li><li>`recv_packets`</li><li>`sent_bytes`</li><li>`sent_carrier`</li><li>`sent_colls`</li><li>`sent_compressed`</li><li>`sent_drop`</li><li>`sent_errs`</li><li>`sent_fifo`</li><li>`sent_packets`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_pod_overhead_cpu`: <br> Kata Pod overhead for CPU resources(percent). | `GAUGE` | percent | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_pod_overhead_memory_in_bytes`: <br> Kata Pod overhead for memory resources(bytes). | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_proc_stat`: <br> Kata containerd shim v2 process statistics. | `GAUGE` | | <ul><li>`item` (see `/proc/<pid>/stat`)<ul><li>`cstime`</li><li>`cutime`</li><li>`stime`</li><li>`utime`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_proc_status`: <br> Kata containerd shim v2 process status. | `GAUGE` | | <ul><li>`item` (see `/proc/<pid>/status`)<ul><li>`hugetlbpages`</li><li>`nonvoluntary_ctxt_switches`</li><li>`rssanon`</li><li>`rssfile`</li><li>`rssshmem`</li><li>`vmdata`</li><li>`vmexe`</li><li>`vmhwm`</li><li>`vmlck`</li><li>`vmlib`</li><li>`vmpeak`</li><li>`vmpin`</li><li>`vmpmd`</li><li>`vmpte`</li><li>`vmrss`</li><li>`vmsize`</li><li>`vmstk`</li><li>`vmswap`</li><li>`voluntary_ctxt_switches`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_cpu_seconds_total`: <br> Total user and system CPU time spent in seconds. | `COUNTER` | `seconds` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_max_fds`: <br> Maximum number of open file descriptors. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_open_fds`: <br> Number of open file descriptors. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_resident_memory_bytes`: <br> Resident memory size in bytes. | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_start_time_seconds`: <br> Start time of the process since `unix` epoch in seconds. | `GAUGE` | `seconds` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_virtual_memory_bytes`: <br> Virtual memory size in bytes. | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_virtual_memory_max_bytes`: <br> Maximum amount of virtual memory available in bytes. | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_rpc_durations_histogram_milliseconds`: <br> RPC latency distributions. | `HISTOGRAM` | `milliseconds` | <ul><li>`action` (Kata shim v2 actions)<ul><li>`checkpoint`</li><li>`close_io`</li><li>`connect`</li><li>`create`</li><li>`delete`</li><li>`exec`</li><li>`kill`</li><li>`pause`</li><li>`pids`</li><li>`resize_pty`</li><li>`resume`</li><li>`shutdown`</li><li>`start`</li><li>`state`</li><li>`stats`</li><li>`update`</li><li>`wait`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_threads`: <br> Kata containerd shim v2 process threads. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
|
||||
### Kata Hypervisor
|
||||
|
||||
Unlike the golang runtime, the hypervisor and shim in runtime-rs belong to the **same process**, so all of the previous hypervisor and shim metrics only need to be gathered once. Thus, we currently only collect those metrics in the kata shim.
|
||||
|
||||
At the same time, we added an interface (`VmmAction::GetHypervisorMetrics`) to gather hypervisor metrics, in case we design tailor-made hypervisor metrics in the future. Here are the metrics exposed from [src/dragonball/src/metric.rs](https://github.com/kata-containers/kata-containers/blob/main/src/dragonball/src/metric.rs).
|
||||
|
||||
| Metric name | Type | Units | Labels |
|
||||
| ------------------------------------------------------------ | ---------- | ----- | ------------------------------------------------------------ |
|
||||
| `kata_hypervisor_scrape_count`: <br> Metrics scrape count | `COUNTER` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| `kata_hypervisor_vcpu`: <br>Hypervisor metrics specific to VCPUs' mode of functioning. | `IntGauge` | | <ul><li>`item`<ul><li>`exit_io_in`</li><li>`exit_io_out`</li><li>`exit_mmio_read`</li><li>`exit_mmio_write`</li><li>`failures`</li><li>`filter_cpuid`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| `kata_hypervisor_seccomp`: <br> Hypervisor metrics for the seccomp filtering. | `IntGauge` | | <ul><li>`item`<ul><li>`num_faults`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| `kata_hypervisor_seccomp`: <br> Hypervisor metrics for the seccomp filtering. | `IntGauge` | | <ul><li>`item`<ul><li>`sigbus`</li><li>`sigsegv`</li></ul></li><li>`sandbox_id`</li></ul> |
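
As a sketch (sample value made up), these metrics are exported in the standard Prometheus exposition format, for example:

```
# HELP kata_hypervisor_scrape_count Metrics scrape count
# TYPE kata_hypervisor_scrape_count counter
kata_hypervisor_scrape_count{sandbox_id="abcd1234"} 42
```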
|
||||
@@ -1,434 +0,0 @@
|
||||
# Virtualization Reference Architecture
|
||||
|
||||
## Subject to Change | © 2022 by NVIDIA Corporation. All rights reserved. | For test and development only
|
||||
|
||||
Before digging deeper into the virtualization reference architecture, let's
|
||||
first look at the various GPUDirect use cases in the following table. We’re
|
||||
distinguishing between two top-tier use cases where the devices are (1)
|
||||
passthrough and (2) virtualized, where a VM gets assigned a virtual function
|
||||
(VF) and not the physical function (PF). A combination of PF and VF would also
|
||||
be possible.
|
||||
|
||||
| Device #1 (passthrough) | Device #2 (passthrough) | P2P Compatibility and Mode |
|
||||
| ------------------------- | ----------------------- | -------------------------------------------- |
|
||||
| GPU PF | GPU PF | GPUDirect P2P |
|
||||
| GPU PF | NIC PF | GPUDirect RDMA |
|
||||
| MIG-slice | MIG-slice | _No GPUDirect P2P_ |
|
||||
| MIG-slice | NIC PF | GPUDirect RDMA |
|
||||
| **Device #1 (virtualized)** | **Device #2 (virtualized)** | **P2P Compatibility and Mode** |
|
||||
| Time-slice vGPU VF | Time-slice vGPU VF | _No GPUDirect P2P but NVLINK P2P available_ |
|
||||
| Time-slice vGPU VF | NIC VF | GPUDirect RDMA |
|
||||
| MIG-slice vGPU | MIG-slice vGPU | _No GPUDirect P2P_ |
|
||||
| MIG-slice vGPU | NIC VF | GPUDirect RDMA |
|
||||
|
||||
In a virtualized environment we have several distinct features that may prevent
|
||||
Peer-to-peer (P2P) communication of two endpoints in a PCI Express topology. The
|
||||
IOMMU translates IO virtual addresses (IOVA) to physical addresses (PA). Each
|
||||
device behind an IOMMU has its own IOVA memory space, usually, no two devices
|
||||
share the same IOVA memory space but it’s up to the hypervisor or OS how it
|
||||
chooses to map devices to IOVA spaces. Any PCI Express DMA transactions will
|
||||
use IOVAs, which the IOMMU must translate. By default, all the traffic is routed
|
||||
to the root complex and not issued directly to the peer device.
|
||||
|
||||
An IOMMU can be used to isolate and protect devices even if virtualization is
|
||||
not used; since devices can only access memory regions that are mapped for it, a
|
||||
DMA from one device to another is not possible. DPDK uses the IOMMU to have
|
||||
better isolation between devices, another benefit is that IOVA space can be
|
||||
represented as a contiguous memory even if the PA space is heavily scattered.
|
||||
|
||||
In the case of virtualization, the IOMMU is responsible for isolating the device
|
||||
and memory between VMs for safe device assignment without compromising the host
|
||||
and other guest OSes. Without an IOMMU, any device can access the entire system
|
||||
and perform DMA transactions _anywhere_.
|
||||
|
||||
The second feature is ACS (Access Control Services), which controls which
|
||||
devices are allowed to communicate with one another and thus avoids improper
|
||||
routing of packets, irrespective of whether the IOMMU is enabled.
|
||||
|
||||
When IOMMU is enabled, ACS is normally configured to force all PCI Express DMA
|
||||
to go through the root complex so IOMMU can translate it, impacting performance
|
||||
between peers with higher latency and reduced bandwidth.
|
||||
|
||||
A way to avoid the performance hit is to enable Address Translation Services
|
||||
(ATS). ATS-capable endpoints can prefetch IOVA -> PA translations from the IOMMU
|
||||
and then perform DMA transactions directly to another endpoint. Hypervisors
|
||||
enable this by enabling ATS in such endpoints, configuring ACS to enable Direct
|
||||
Translated P2P, and configuring the IOMMU to allow Address Translation requests.
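
As a quick check on the host (the BDF is illustrative), one can inspect whether an endpoint exposes the ATS and ACS capabilities this mechanism relies on:

```sh
# Print the ATS and ACS capability/control registers of a device.
$ sudo lspci -vvv -s 41:00.0 | grep -iE 'ATSCap|ATSCtl|ACSCap|ACSCtl'
```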
|
||||
|
||||
Another important factor is that the NVIDIA driver stack will use the PCI
|
||||
Express topology of the system it is running on to determine whether the
|
||||
hardware is capable of supporting P2P. The driver stack qualifies specific
|
||||
chipsets, and PCI Express switches for use with GPUDirect P2P. In virtual
|
||||
environments, the PCI Express topology is flattened and obfuscated to present a
|
||||
uniform environment to the software inside the VM, which breaks the GPUDirect
|
||||
P2P use case.
|
||||
|
||||
On a bare metal machine, the driver stack groups GPUs into cliques that can
|
||||
perform GPUDirect P2P communication, excluding peer mappings where P2P
|
||||
communication is not possible, most prominently when GPUs are attached to multiple CPU
|
||||
sockets.
|
||||
|
||||
CPUs and local memory banks are referred to as NUMA nodes. In a two-socket
|
||||
server, each of the CPUs has a local memory bank for a total of two NUMA nodes.
|
||||
Some servers provide the ability to configure additional NUMA nodes per CPU,
|
||||
which means a CPU socket can have two NUMA nodes (some servers support four
|
||||
NUMA nodes per socket) with local memory banks and L3 NUMA domains for improved
|
||||
performance.
|
||||
|
||||
One of the current solutions is that the hypervisor provides additional topology
|
||||
information that the driver stack can pick up and enable GPUDirect P2P between
|
||||
GPUs, even if the virtualized environment does not directly expose it. The PCI
|
||||
Express virtual P2P approval capability structure in the PCI configuration space
|
||||
is entirely emulated by the hypervisor of passthrough GPU devices.
|
||||
|
||||
A clique ID is provided where GPUs with the same clique ID belong to a group of
|
||||
GPUs capable of P2P communication.
|
||||
|
||||
On vSphere, Azure, and other CSPs, the hypervisor lays down a `topologies.xml`
|
||||
which NCCL can pick up and deduce the right P2P level[^1]. NCCL is leveraging
|
||||
Infiniband (IB) and/or Unified Communication X (UCX) for communication, and
|
||||
GPUDirect P2P and GPUDirect RDMA should just work in this case. The only caveat
is that software or applications that do not use the XML file to deduce the
topology will fail to enable GPUDirect ([`nccl-p2p-level`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/env.html#nccl-p2p-level)).
|
||||
|
||||
## Hypervisor PCI Express Topology
|
||||
|
||||
To enable every part of the accelerator stack, we propose a virtualized
|
||||
reference architecture to enable GPUDirect P2P and GPUDirect RDMA for any
|
||||
hypervisor. The idea is split into two parts to enable the right PCI Express
|
||||
topology. The first part builds upon extending the PCI Express virtual P2P
|
||||
approval capability structure to every device that wants to do P2P in some way
|
||||
and groups devices by clique ID. The other part involves replicating a subset of
|
||||
the host topology so that applications running in the VM do not need to read
|
||||
additional information and enable the P2P capability like in the bare-metal use
|
||||
case described above. The driver stack can then deduce automatically if the
|
||||
topology presented in the VM is capable of P2P communication.
|
||||
|
||||
We will work with the following host topology for the following sections. It is
|
||||
a system with two converged DPUs, each having an `A100X` GPU and two `ConnectX-6`
|
||||
network ports connected to the downstream ports of a PCI Express switch.
|
||||
|
||||
```sh
|
||||
+-00.0-[d8-df]----00.0-[d9-df]--+-00.0-[da-db]--+-00.0 Mellanox Tech MT42822 BlueField-2 integrated ConnectX-6 Dx network
|
||||
| +-00.1 Mellanox Tech MT42822 BlueField-2 integrated ConnectX-6 Dx network
|
||||
| \-00.2 Mellanox Tech MT42822 BlueField-2 SoC Management Interface
|
||||
\-01.0-[dc-df]----00.0-[dd-df]----08.0-[de-df]----00.0 NVIDIA Corporation GA100 [A100X]
|
||||
|
||||
+-00.0-[3b-42]----00.0-[3c-42]--+-00.0-[3d-3e]--+-00.0 Mellanox Tech MT42822 BlueField-2 integrated ConnectX-6 Dx network
|
||||
| +-00.1 Mellanox Tech MT42822 BlueField-2 integrated ConnectX-6 Dx network
|
||||
| \-00.2 Mellanox Tech MT42822 BlueField-2 SoC Management Interface
|
||||
\-01.0-[3f-42]----00.0-[40-42]----08.0-[41-42]----00.0 NVIDIA Corporation GA100 [A100X]
|
||||
```
|
||||
|
||||
The path through the PCI Express switch shown above is the optimal and preferred path for
|
||||
efficient P2P communication.
|
||||
|
||||
## PCI Express Virtual P2P Approval Capability
|
||||
|
||||
Most of the time, the PCI Express topology is flattened and obfuscated to ensure
|
||||
easy migration of the VM image between different physical hardware `topologies`.
|
||||
In Kata, we can configure the hypervisor to use PCI Express root ports to
|
||||
hotplug the VFIO devices one is passing through. A user can select how many PCI
|
||||
Express root ports to allocate depending on how many devices are passed through.
|
||||
A recent addition to Kata will detect the right number of PCI Express devices
|
||||
that need hotplugging and bail out if the number of root ports is insufficient.
|
||||
In Kata, we do not automatically increase the number of root ports, we want the
|
||||
user to be in full control of the topology.
|
||||
|
||||
```toml
|
||||
# /etc/kata-containers/configuration.toml
|
||||
|
||||
# VFIO devices are hotplugged on a bridge by default.
|
||||
# Enable hot-plugging on the root bus. This may be required for devices with
|
||||
# a large PCI bar, as this is a current limitation with hot-plugging on
|
||||
# a bridge.
|
||||
# Default "bridge-port"
|
||||
hotplug_vfio = "root-port"
|
||||
|
||||
# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
|
||||
# Use this parameter when using some large PCI bar devices, such as NVIDIA GPU
|
||||
# The value means the number of pcie_root_port
|
||||
# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
|
||||
# Default 0
|
||||
pcie_root_port = 8
|
||||
```
|
||||
|
||||
VFIO devices are hotplugged on a PCIe-PCI bridge by default. Hotplug of PCI
|
||||
Express devices is only supported on PCI Express root or downstream ports. With
|
||||
this configuration set, if we start up a Kata container, we can inspect our
|
||||
topology and see the allocated PCI Express root ports and the hotplugged
|
||||
devices.
|
||||
|
||||
```sh
|
||||
$ lspci -tv
|
||||
-[0000:00]-+-00.0 Intel Corporation 82G33/G31/P35/P31 Express DRAM Controller
|
||||
+-01.0 Red Hat, Inc. Virtio console
|
||||
+-02.0 Red Hat, Inc. Virtio SCSI
|
||||
+-03.0 Red Hat, Inc. Virtio RNG
|
||||
+-04.0-[01]----00.0 Mellanox Technologies MT42822 BlueField-2 integrated ConnectX-6
|
||||
+-05.0-[02]----00.0 Mellanox Technologies MT42822 BlueField-2 integrated ConnectX-6
|
||||
+-06.0-[03]----00.0 NVIDIA Corporation Device 20b8
|
||||
+-07.0-[04]----00.0 NVIDIA Corporation Device 20b8
|
||||
+-08.0-[05]--
|
||||
+-09.0-[06]--
|
||||
+-0a.0-[07]--
|
||||
+-0b.0-[08]--
|
||||
+-0c.0 Red Hat, Inc. Virtio socket
|
||||
+-0d.0 Red Hat, Inc. Virtio file system
|
||||
+-1f.0 Intel Corporation 82801IB (ICH9) LPC Interface Controller
|
||||
+-1f.2 Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA Controller
|
||||
\-1f.3 Intel Corporation 82801I (ICH9 Family) SMBus Controller
|
||||
```
|
||||
|
||||
For devices with huge BARs (Base Address Registers) like the GPU, the PCI
Express root port must be configured properly and enough memory allocated for
mapping. We have added a heuristic to Kata to deduce the right settings so that
the BARs can be mapped correctly. This functionality is added to
[`nvidia/go-nvlib`](https://gitlab.com/nvidia/cloud-native/go-nvlib), which is part
of Kata now.
|
||||
|
||||
```sh
|
||||
$ sudo dmesg | grep BAR
|
||||
[ 0.179960] pci 0000:00:04.0: BAR 7: assigned [io 0x1000-0x1fff]
|
||||
[ 0.179962] pci 0000:00:05.0: BAR 7: assigned [io 0x2000-0x2fff]
|
||||
[ 0.179963] pci 0000:00:06.0: BAR 7: assigned [io 0x3000-0x3fff]
|
||||
[ 0.179964] pci 0000:00:07.0: BAR 7: assigned [io 0x4000-0x4fff]
|
||||
[ 0.179966] pci 0000:00:08.0: BAR 7: assigned [io 0x5000-0x5fff]
|
||||
[ 0.179967] pci 0000:00:09.0: BAR 7: assigned [io 0x6000-0x6fff]
|
||||
[ 0.179968] pci 0000:00:0a.0: BAR 7: assigned [io 0x7000-0x7fff]
|
||||
[ 0.179969] pci 0000:00:0b.0: BAR 7: assigned [io 0x8000-0x8fff]
|
||||
[ 2.115912] pci 0000:01:00.0: BAR 0: assigned [mem 0x13000000000-0x13001ffffff 64bit pref]
|
||||
[ 2.116203] pci 0000:01:00.0: BAR 2: assigned [mem 0x13002000000-0x130027fffff 64bit pref]
|
||||
[ 2.683132] pci 0000:02:00.0: BAR 0: assigned [mem 0x12000000000-0x12001ffffff 64bit pref]
|
||||
[ 2.683419] pci 0000:02:00.0: BAR 2: assigned [mem 0x12002000000-0x120027fffff 64bit pref]
|
||||
[ 2.959155] pci 0000:03:00.0: BAR 1: assigned [mem 0x11000000000-0x117ffffffff 64bit pref]
|
||||
[ 2.959345] pci 0000:03:00.0: BAR 3: assigned [mem 0x11800000000-0x11801ffffff 64bit pref]
|
||||
[ 2.959523] pci 0000:03:00.0: BAR 0: assigned [mem 0xf9000000-0xf9ffffff]
|
||||
[ 2.966119] pci 0000:04:00.0: BAR 1: assigned [mem 0x10000000000-0x107ffffffff 64bit pref]
|
||||
[ 2.966295] pci 0000:04:00.0: BAR 3: assigned [mem 0x10800000000-0x10801ffffff 64bit pref]
|
||||
[ 2.966472] pci 0000:04:00.0: BAR 0: assigned [mem 0xf7000000-0xf7ffffff]
|
||||
```
|
||||
|
||||
The NVIDIA driver stack in this case would refuse to do P2P communication since
|
||||
(1) the topology is not what it expects, (2) we do not have a qualified
|
||||
chipset. Since our P2P devices are not connected to a PCI Express switch port,
|
||||
we need to provide additional information to support the P2P functionality. One
|
||||
way of providing such meta information would be to annotate the container; most
|
||||
of the settings in Kata's configuration file can be overridden via annotations,
|
||||
but this limits the flexibility, and a user would need to update all the
|
||||
containers that they want to run with Kata. The goal is to make such things as
|
||||
transparent as possible, so we also introduced
|
||||
[CDI](https://github.com/container-orchestrated-devices/container-device-interface)
|
||||
(Container Device Interface) to Kata. CDI is a
[specification](https://github.com/container-orchestrated-devices/container-device-interface/blob/main/SPEC.md)
|
||||
for container runtimes to support third-party devices.
|
||||
|
||||
As written before, we can provide a clique ID for the devices that belong
|
||||
together and are capable of doing P2P. This information is provided to the
|
||||
hypervisor, which will set up things in the VM accordingly. Let's suppose the
|
||||
user wanted to do GPUDirect RDMA with the first GPU and the NIC that reside on
|
||||
the same DPU, one could provide the specification telling the hypervisor that
|
||||
they belong to the same clique.
|
||||
|
||||
```yaml
|
||||
# /etc/cdi/nvidia.yaml
|
||||
cdiVersion: 0.4.0
|
||||
kind: nvidia.com/gpu
|
||||
devices:
|
||||
- name: gpu0
  annotations:
    bdf: "41:00.0"
    clique-id: "0"
  containerEdits:
    deviceNodes:
    - path: "/dev/vfio/71"
|
||||
|
||||
# /etc/cdi/mellanox.yaml
|
||||
cdiVersion: 0.4.0
|
||||
kind: mellanox.com/nic
|
||||
devices:
|
||||
- name: nic0
  annotations:
    bdf: "3d:00.0"
    clique-id: "0"
    attach-pci: "true"
  containerEdits:
    deviceNodes:
    - path: "/dev/vfio/66"
|
||||
```
|
||||
|
||||
Since this setting is bound to the device and not the container, we do not need
to alter the container; we just allocate the right resource and GPUDirect RDMA
is set up correctly. Rather than exposing them separately, an idea would be to
expose a GPUDirect RDMA device via NFD (Node Feature Discovery) that combines
both of them; this way, we could make sure that the right pair is allocated and
used (more on Kubernetes deployment in the next section).
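
As a sketch (device names from the CDI specifications above; a CDI-aware engine such as a recent Podman is assumed), allocating the pair then reduces to requesting the fully qualified CDI device names:

```sh
$ podman run --device nvidia.com/gpu=gpu0 --device mellanox.com/nic=nic0 <image>
```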
|
||||
|
||||
The GPU driver stack is leveraging the PCI Express virtual P2P approval
|
||||
capability, but the NIC stack does not use this now. One of the action items is
|
||||
to enable MOFED to read the P2P approval capability and enable ATS and ACS
|
||||
settings as described above.
|
||||
|
||||
This way, we could enable GPUDirect P2P and GPUDirect RDMA on any topology
|
||||
presented to the VM application. It is the responsibility of the administrator
|
||||
or infrastructure engineer to provide the right information either via
|
||||
annotations or a CDI specification.
|
||||
|
||||
## Host Topology Replication
|
||||
|
||||
The other way to represent the PCI Express topology in the VM is to replicate a
|
||||
subset of the topology needed to support the P2P use case inside the VM. Similar
|
||||
to the configuration for the root ports, we can easily configure the usage of
|
||||
PCI Express switch ports to hotplug the devices.
|
||||
|
||||
```toml
|
||||
# /etc/kata-containers/configuration.toml
|
||||
|
||||
# VFIO devices are hotplugged on a bridge by default.
|
||||
# Enable hot plugging on the root bus. This may be required for devices with
|
||||
# a large PCI bar, as this is a current limitation with hot plugging on
|
||||
# a bridge.
|
||||
# Default "bridge-port"
|
||||
hotplug_vfio = "switch-port"
|
||||
|
||||
# Before hot plugging a PCIe device, you need to add a pcie_switch_port device.
# Use this parameter when using some large PCI bar devices, such as NVIDIA GPU
# The value means the number of pcie_switch_port
# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
# Default 0
pcie_switch_port = 8
|
||||
```
|
||||
|
||||
Each device that is passed through is attached to a PCI Express downstream port
|
||||
as illustrated below. We can even replicate the host’s two DPUs `topologies` with
|
||||
added metadata through the CDI. Most of the time, a container only needs one
|
||||
pair of GPU and NIC for GPUDirect RDMA. This is more of a showcase of what we
|
||||
can do with the power of Kata and CDI. One could even think of adding groups of
|
||||
devices that support P2P, even from different CPU sockets or NUMA nodes, into
|
||||
one container; indeed, the first group is NUMA node 0, and the second
group is NUMA node 1. Since they are grouped correctly, P2P would be
|
||||
enabled naturally inside a group, aka clique ID.
|
||||
|
||||
```sh
|
||||
$ lspci -tv
|
||||
-[0000:00]-+-00.0 Intel Corporation 82G33/G31/P35/P31 Express DRAM Controller
|
||||
+-01.0 Red Hat, Inc. Virtio console
|
||||
+-02.0 Red Hat, Inc. Virtio SCSI
|
||||
+-03.0 Red Hat, Inc. Virtio RNG
|
||||
+-04.0-[01-04]----00.0-[02-04]--+-00.0-[03]----00.0 NVIDIA Corporation Device 20b8
|
||||
| \-01.0-[04]----00.0 Mellanox Tech MT42822 BlueField-2 integrated ConnectX-6 Dx
|
||||
+-05.0-[05-08]----00.0-[06-08]--+-00.0-[07]----00.0 Mellanox Tech MT42822 BlueField-2 integrated ConnectX-6 Dx
|
||||
| \-01.0-[08]----00.0 NVIDIA Corporation Device 20b8
|
||||
+-06.0 Red Hat, Inc. Virtio socket
|
||||
+-07.0 Red Hat, Inc. Virtio file system
|
||||
+-1f.0 Intel Corporation 82801IB (ICH9) LPC Interface Controller
|
||||
+-1f.2 Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA Controller [AHCI mode]
|
||||
\-1f.3 Intel Corporation 82801I (ICH9 Family) SMBus Controller
|
||||
\-1f.3 Intel Corporation 82801I (ICH9 Family) SMBus Controller
|
||||
```
|
||||
|
||||
The configuration of using either the root port or switch port can be applied on
|
||||
a per Container or Pod basis, meaning we can switch PCI Express `topologies` on
|
||||
each run of an application.
|
||||
|
||||
## Hypervisor Resource Limits
|
||||
|
||||
Every hypervisor will have resource limits in terms of how many PCI Express root
|
||||
ports, switch ports, or bridge ports can be created, especially with devices
|
||||
that need to reserve a 4K IO range per the PCI specification. Each instance of a root
or switch port consumes a 4K IO range out of this very limited capacity; 64K is the maximum.
|
||||
|
||||
Simple math brings us to the conclusion that we can have a maximum of 16 PCI
|
||||
Express root ports or 16 PCI Express switch ports in QEMU if devices with IO
|
||||
BARs are used in the PCI Express hierarchy.
|
||||
|
||||
Additionally, one can have 32 slots on the PCI root bus and a maximum of 256
|
||||
slots for the complete PCI(e) topology.
|
||||
|
||||
By default, QEMU will attach a multi-function device in the last slot on the
PCI root bus:
|
||||
|
||||
```sh
|
||||
+-1f.0 Intel Corporation 82801IB (ICH9) LPC Interface Controller
|
||||
+-1f.2 Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA Controller [AHCI mode]
|
||||
\-1f.3 Intel Corporation 82801I (ICH9 Family) SMBus Controller
|
||||
```
|
||||
|
||||
Kata will additionally add `virtio-xxx-pci` devices (5 slots) plus a
PCIe-PCI bridge (1 slot) and a DRAM controller (1 slot), meaning that by default
eight slots are already used. This leaves us 24 slots for adding other devices
|
||||
to the root bus.
|
||||
|
||||
The problem that arises here comes from a customer use-case with recent
RTX GPUs in Kata. The user wanted to pass through eight of these GPUs into one
container and ran into issues: those cards often consist of
|
||||
four individual device nodes: GPU, Audio, and two USB controller devices (some
|
||||
cards have a USB-C output).
|
||||
|
||||
These devices are grouped into one IOMMU group. Since one needs to pass through
|
||||
the complete IOMMU group into the VM, we need to allocate 32 PCI Express root
|
||||
ports or 32 PCI Express switch ports, which is technically impossible due to the
|
||||
resource limits outlined above. Since all the devices appear as PCI Express
|
||||
devices, we need to hotplug those into a root or switch port.
|
||||
|
||||
The solution to this problem is leveraging CDI. For each device, add the
|
||||
information on whether it is going to be hotplugged as a PCI Express or PCI device,
|
||||
which results in either using a PCI Express root/switch port or an ordinary PCI
|
||||
bridge. PCI bridges are not affected by the limited IO range. This way, the GPU
|
||||
is attached as a PCI Express device to a root/switch port and the other three
|
||||
PCI devices to a PCI bridge, leaving enough resources to create the needed PCI
|
||||
Express root/switch ports. For example, we’re going to attach the GPUs to a PCI
|
||||
Express root port and the NICs to a PCI bridge.
|
||||
|
||||
```yaml
# /etc/cdi/mellanox.yaml
cdiVersion: 0.4.0
kind: mellanox.com/nic
devices:
- name: nic0
  annotations:
    bdf: "3d:00.0"
    clique-id: "0"
    attach-pci: "true"
  containerEdits:
    deviceNodes:
    - path: "/dev/vfio/66"
- name: nic1
  annotations:
    bdf: "3d:00.1"
    clique-id: "1"
    attach-pci: "true"
  containerEdits:
    deviceNodes:
    - path: "/dev/vfio/67"
```
|
||||
|
||||
The configuration is set to use eight root ports for the GPUs and attach the
|
||||
NICs to a PCI bridge that is connected to a PCI Express-PCI bridge, which is the
|
||||
preferred way of introducing a PCI topology in a PCI Express machine.
|
||||
|
||||
```sh
|
||||
$ lspci -tv
|
||||
-[0000:00]-+-00.0 Intel Corporation 82G33/G31/P35/P31 Express DRAM Controller
|
||||
+-01.0 Red Hat, Inc. Virtio console
|
||||
+-02.0 Red Hat, Inc. Virtio SCSI
|
||||
+-03.0 Red Hat, Inc. Virtio RNG
|
||||
+-04.0-[01]----00.0 NVIDIA Corporation Device 20b8
|
||||
+-05.0-[02]----00.0 NVIDIA Corporation Device 20b8
|
||||
+-06.0-[03]--
|
||||
+-07.0-[04]--
|
||||
+-08.0-[05]--
|
||||
+-09.0-[06]--
|
||||
+-0a.0-[07]--
|
||||
+-0b.0-[08]--
|
||||
+-0c.0-[09-0a]----00.0-[0a]--+-00.0 Mellanox Tech MT42822 BlueField-2 ConnectX-6
|
||||
| \-01.0 Mellanox Tech MT42822 BlueField-2 ConnectX-6
|
||||
+-0d.0 Red Hat, Inc. Virtio socket
|
||||
+-0e.0 Red Hat, Inc. Virtio file system
|
||||
+-1f.0 Intel Corporation 82801IB (ICH9) LPC Interface Controller
|
||||
+-1f.2 Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA Controller
|
||||
\-1f.3 Intel Corporation 82801I (ICH9 Family) SMBus Controller
|
||||
```
|
||||
|
||||
The PCI devices each consume one of the 256 slots available in the PCI(e)
topology, leaving the scarce IO resources for the PCI Express devices that need them.
|
||||
@@ -1,51 +0,0 @@
|
||||
# Virtual machine vCPU sizing in Kata Containers 3.0
|
||||
|
||||
> Preview:
|
||||
> [Kubernetes(since 1.23)][1] and [Containerd(since 1.6.0-beta4)][2] will help calculate `Sandbox Size` info and pass it to Kata Containers through annotations.
|
||||
> In order to adapt to this beneficial change while remaining compatible with the past, we have implemented a new way of handling vCPUs in `runtime-rs`, which differs slightly from the original `runtime-go` design.
|
||||
|
||||
## When do we need to handle vCPUs size?
|
||||
vCPU sizing should be determined by the container workloads, so throughout the life cycle of Kata Containers there are several points in time at which we need to decide how many vCPUs there should be: mainly `CreateVM`, `CreateContainer`, `UpdateContainer`, and `DeleteContainer`.
|
||||
* `CreateVM`: When creating a sandbox, we need to know how many vCPUs to start the VM with.
|
||||
* `CreateContainer`: When creating a new container in the VM, we may need to hot-plug the vCPUs according to the requirements in container's spec.
|
||||
* `UpdateContainer`: When receiving the `UpdateContainer` request, we may need to update the vCPU resources according to the new requirements of the container.
|
||||
* `DeleteContainer`: When a container is removed from the VM, we may need to hot-unplug the vCPUs to reclaim the vCPU resources introduced by the container.
|
||||
|
||||
## On what basis do we calculate the number of vCPUs?
|
||||
When Kata calculates the number of vCPUs, we have three data sources: the `default_vcpus` and `default_maxvcpus` specified in the configuration file (called `TomlConfig` later in this doc), the `io.kubernetes.cri.sandbox-cpu-quota` and `io.kubernetes.cri.sandbox-cpu-period` annotations passed by the upper-layer runtime, and the CPU resource section in the container's spec when `CreateContainer`/`UpdateContainer`/`DeleteContainer` is requested.
|
||||
|
||||
Our understanding and priority of these resources are as follows, which will affect how we calculate the number of vCPUs later.
|
||||
|
||||
* From `TomlConfig`:
|
||||
* `default_vcpus`: default number of vCPUs when starting a VM.
|
||||
* `default_maxvcpus`: maximum number of vCPUs.
|
||||
* From `Annotation`:
|
||||
* `InitialSize`: we call the resource size passed from the annotations the `InitialSize`. Kubernetes will calculate the sandbox size according to the Pod's statement, and that is the `InitialSize` here. This size should be the one we prioritize.
|
||||
* From `Container Spec`:
|
||||
* The amount of CPU resources that the Container wants to use will be declared through the spec. Including the aforementioned annotations, we mainly consider `cpu quota` and `cpuset` when calculating the number of vCPUs.
|
||||
* `cpu quota`: `cpu quota` is the most common way to declare the amount of CPU resources. The number of vCPUs introduced by the `cpu quota` declared in a container's spec is `vCPUs = ceiling( quota / period )` (see the worked example after this list).
* `cpuset`: `cpuset` is often used to bind the CPUs that tasks can run on. The number of vCPUs that may be introduced by the `cpuset` declared in a container's spec is the number of CPUs specified in the set that do not overlap with other containers.
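
A worked example of the `cpu quota` formula above (numbers illustrative):

```
quota  = 150000   # µs of CPU time granted per period
period = 100000   # µs
vCPUs  = ceiling(150000 / 100000) = ceiling(1.5) = 2
```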
|
||||
|
||||
|
||||
## How to calculate and adjust the vCPUs size
|
||||
There are two types of vCPUs that we need to consider, one is the number of vCPUs when starting the VM (named `Boot Size` in the doc). The second is the number of vCPUs when `CreateContainer`/`UpdateContainer`/`DeleteContainer` request is received (`Real-time Size` in the doc).
|
||||
|
||||
### `Boot Size`
|
||||
The main considerations are `InitialSize` and `default_vcpus`. There are the following principles:
|
||||
`InitialSize` has priority over `default_vcpus` declared in `TomlConfig`.
|
||||
1. When there is such an annotation statement (see the sketch after this list), the original `default_vcpus` will be overridden by the number of vCPUs in the `InitialSize`, which becomes the `Boot Size`. (Because not all runtimes support this annotation for the time being, we still keep `default_vcpus` in `TomlConfig`.)
|
||||
2. When the specs of all containers are aggregated for sandbox size calculation, the method is consistent with the calculation method of `InitialSize` here.
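
A sketch of such an annotation statement (values made up; the annotation keys are the ones listed earlier):

```yaml
annotations:
  io.kubernetes.cri.sandbox-cpu-quota: "300000"
  io.kubernetes.cri.sandbox-cpu-period: "100000"
# InitialSize = ceiling(300000 / 100000) = 3 vCPUs, which becomes the Boot Size.
```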
|
||||
|
||||
### `Real-time Size`
|
||||
When we receive an OCI request, it may be for a single container, but what we have to consider is the number of vCPUs for the entire VM. So we maintain a list, and every time there is a demand for adjustment, the entire list is traversed to calculate the new number of vCPUs. In addition, there are the following principles:
|
||||
1. Do not cut computing power and try to keep the number of vCPUs specified by `InitialSize`.
|
||||
* So the number of vCPUs after an adjustment will not be less than the `Boot Size`.
|
||||
2. `cpu quota` takes precedence over `cpuset`, and the setting history is taken into account.
|
||||
* We think quota describes the CPU time slice that a cgroup can use, while `cpuset` only describes which CPUs the cgroup can run on: a cgroup may run on the specified CPUs yet consume only a small time slice. Quota therefore better describes the amount of CPU a cgroup actually wants to use, so quota takes precedence over `cpuset`.
|
||||
* On the one hand, when both `cpu quota` and `cpuset` are specified, we will calculate the number of vCPUs based on `cpu quota` and ignore `cpuset`. On the other hand, if `cpu quota` was used to control the number of vCPUs in the past, and only `cpuset` was updated during `UpdateContainer`, we will not adjust the number of vCPUs at this time.
|
||||
3. `StaticSandboxResourceMgmt` controls hotplug.
|
||||
* Some VMMs and kernels of some architectures do not support hotplugging. We can accommodate this situation through `StaticSandboxResourceMgmt`. When `StaticSandboxResourceMgmt = true` is set, we don't make any further attempts to update the number of vCPUs after booting.
|
||||
|
||||
|
||||
[1]: https://github.com/kubernetes/kubernetes/pull/104886
|
||||
[2]: https://github.com/containerd/containerd/pull/6155
|
||||
@@ -1,37 +0,0 @@
|
||||
# Design Doc for Kata Containers' VCPUs Pinning Feature
|
||||
|
||||
## Background
|
||||
Currently, the vCPU threads of Kata Containers are scheduled randomly across CPUs, and each pod requests a specific set of CPUs, which we call the CPU set (in the Linux cgroups sense).
|
||||
|
||||
If the number of vCPU threads is equal to the number of CPUs claimed in the CPU set, we can pin each vCPU thread to one specific CPU to reduce the cost of random scheduling.
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### Passing Config Parameters
|
||||
Two ways are provided to use this vCPU thread pinning feature: through the `QEMU` configuration file and through annotations. Either way, the pinning parameter is finally passed to `HypervisorConfig`.
|
||||
|
||||
### Related Linux Thread Scheduling API
|
||||
|
||||
| API Info | Value |
|
||||
|-------------------|-----------------------------------------------------------|
|
||||
| Package | `golang.org/x/sys/unix` |
|
||||
| Method | `unix.SchedSetaffinity(thread_id, &unixCPUSet)` |
|
||||
| Official Doc Page | https://pkg.go.dev/golang.org/x/sys/unix#SchedSetaffinity |
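
A minimal sketch of the pinning call (not the actual `Sandbox.go` code; the thread ID and CPU number are illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var cpus unix.CPUSet
	cpus.Zero()
	cpus.Set(3) // allow only CPU 3

	// A pid of 0 pins the calling thread; the runtime would pass the
	// vCPU thread ID instead.
	if err := unix.SchedSetaffinity(0, &cpus); err != nil {
		fmt.Println("SchedSetaffinity failed:", err)
	}
}
```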
|
||||
|
||||
### When is VCPUs Pinning Checked?
|
||||
|
||||
As shown in Section 1, when `num(vCPU threads) == num(CPUs in CPU set)`, we pin each vCPU thread to a specific CPU; when this condition is broken, we restore the original random scheduling pattern.
So when may `num(CPUs in CPU set)` change? There are five possible scenarios:
|
||||
|
||||
| Possible scenarios | Related Code |
|
||||
|-----------------------------------|--------------------------------------------|
|
||||
| when creating a container | File Sandbox.go, in method `CreateContainer` |
|
||||
| when starting a container | File Sandbox.go, in method `StartContainer` |
|
||||
| when deleting a container | File Sandbox.go, in method `DeleteContainer` |
|
||||
| when updating a container | File Sandbox.go, in method `UpdateContainer` |
|
||||
| when creating multiple containers | File Sandbox.go, in method `createContainers` |
|
||||
|
||||
### Core Pinning Logics
|
||||
|
||||
We can split the whole process into the following steps. The related methods are `checkVCPUsPinning` and `resetVCPUsPinning`, in the file `Sandbox.go`.
|
||||

|
||||
@@ -110,7 +110,7 @@ Devices and features used:
|
||||
- VFIO
|
||||
- hotplug
|
||||
- seccomp filters
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
|
||||
### Summary
|
||||
|
||||
|
||||
@@ -42,7 +42,4 @@
|
||||
- [How to setup swap devices in guest kernel](how-to-setup-swap-devices-in-guest-kernel.md)
|
||||
- [How to run rootless vmm](how-to-run-rootless-vmm.md)
|
||||
- [How to run Docker with Kata Containers](how-to-run-docker-with-kata.md)
|
||||
- [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)
|
||||
- [How to run Kata Containers with AMD SEV-SNP](how-to-run-kata-containers-with-SNP-VMs.md)
|
||||
- [How to use EROFS to build rootfs in Kata Containers](how-to-use-erofs-build-rootfs.md)
|
||||
- [How to run Kata Containers with kinds of Block Volumes](how-to-run-kata-containers-with-kinds-of-Block-Volumes.md)
|
||||
- [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)
|
||||
@@ -77,8 +77,8 @@ $ command -v containerd
|
||||
You can manually install CNI plugins as follows:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/containernetworking/plugins.git
|
||||
$ pushd plugins
|
||||
$ go get github.com/containernetworking/plugins
|
||||
$ pushd $GOPATH/src/github.com/containernetworking/plugins
|
||||
$ ./build_linux.sh
|
||||
$ sudo mkdir /opt/cni
|
||||
$ sudo cp -r bin /opt/cni/
|
||||
@@ -93,8 +93,8 @@ $ popd
|
||||
You can install the `cri-tools` from source code:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/kubernetes-sigs/cri-tools.git
|
||||
$ pushd cri-tools
|
||||
$ go get github.com/kubernetes-sigs/cri-tools
|
||||
$ pushd $GOPATH/src/github.com/kubernetes-sigs/cri-tools
|
||||
$ make
|
||||
$ sudo -E make install
|
||||
$ popd
|
||||
@@ -257,48 +257,6 @@ This launches a BusyBox container named `hello`, and it will be removed by `--rm
|
||||
The `--cni` flag enables CNI networking for the container. Without this flag, a container with just a
|
||||
loopback interface is created.
|
||||
|
||||
### Launch containers using `ctr` command line with rootfs bundle
|
||||
|
||||
#### Get rootfs
|
||||
Use the following script to create the rootfs:
|
||||
```bash
|
||||
ctr i pull quay.io/prometheus/busybox:latest
|
||||
ctr i export rootfs.tar quay.io/prometheus/busybox:latest
|
||||
|
||||
rootfs_tar=rootfs.tar
|
||||
bundle_dir="./bundle"
|
||||
mkdir -p "${bundle_dir}"
|
||||
|
||||
# extract busybox rootfs
|
||||
rootfs_dir="${bundle_dir}/rootfs"
|
||||
mkdir -p "${rootfs_dir}"
|
||||
layers_dir="$(mktemp -d)"
|
||||
tar -C "${layers_dir}" -pxf "${rootfs_tar}"
|
||||
for ((i=0;i<$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers | length");i++)); do
|
||||
tar -C ${rootfs_dir} -xf ${layers_dir}/$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers[${i}]")
|
||||
done
|
||||
```
|
||||
#### Get `config.json`
|
||||
Use `runc spec` to generate `config.json`:
|
||||
```bash
|
||||
cd ./bundle/rootfs
|
||||
runc spec
|
||||
mv config.json ../
|
||||
```
|
||||
Change the root `path` in `config.json` to the absolute path of the rootfs:
|
||||
|
||||
```JSON
|
||||
"root":{
|
||||
"path":"/root/test/bundle/rootfs",
|
||||
"readonly": false
|
||||
},
|
||||
```
|
||||
|
||||
#### Run container
|
||||
```bash
|
||||
sudo ctr run -d --runtime io.containerd.run.kata.v2 --config bundle/config.json hello
|
||||
sudo ctr t exec --exec-id ${ID} -t hello sh
|
||||
```
|
||||
### Launch Pods with `crictl` command line
|
||||
|
||||
With the `crictl` command line of `cri-tools`, you can specify runtime class with `-r` or `--runtime` flag.
|
||||
|
||||
@@ -15,18 +15,6 @@ $ sudo .ci/aarch64/install_rom_aarch64.sh
|
||||
$ popd
|
||||
```
|
||||
|
||||
## Config KATA QEMU
|
||||
|
||||
After executing the above script, two files will be generated under the directory `/usr/share/kata-containers/` by default, namely `kata-flash0.img` and `kata-flash1.img`. Next we need to change the configuration file of `kata qemu`, which is located at `/opt/kata/share/defaults/kata-containers/configuration-qemu.toml` by default, and specify in it that the UEFI ROM installed above should be used. This path applies to a `kata-deploy` installation; for a package-management installation, please use `kata-runtime env` to find the location of the configuration file. Please refer to the following configuration.
|
||||
|
||||
```
|
||||
[hypervisor.qemu]
|
||||
|
||||
# -pflash can add image file to VM. The arguments of it should be in format
|
||||
# of ["/path/to/flash0.img", "/path/to/flash1.img"]
|
||||
pflashes = ["/usr/share/kata-containers/kata-flash0.img", "/usr/share/kata-containers/kata-flash1.img"]
|
||||
```
|
||||
|
||||
## Run for test
|
||||
|
||||
Let's test whether memory hotplug is ready for Kata after installing the UEFI ROM. Make sure containerd is ready to run Kata before testing.
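
A minimal sketch of such a test (image and memory size are illustrative): start a Kata container with a memory limit above the default VM memory so that memory must be hotplugged, then check the memory visible in the guest:

```bash
$ sudo ctr image pull docker.io/library/busybox:latest
$ sudo ctr run --rm --runtime io.containerd.kata.v2 \
      --memory-limit 4294967296 \
      docker.io/library/busybox:latest test-hotplug free -m
```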
|
||||
|
||||
@@ -1,158 +0,0 @@
|
||||
# Kata Containers with AMD SEV-SNP VMs
|
||||
|
||||
## Disclaimer
|
||||
|
||||
This guide is designed for developers and, like the Developer Guide, is not intended for production systems or end users. It is advisable to only follow this guide on non-critical development systems.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To run Kata Containers in SNP-VMs, the following software stack is used.
|
||||
|
||||

|
||||
|
||||
The host BIOS and kernel must be capable of supporting AMD SEV-SNP and configured accordingly. For Kata Containers, the host kernel with branch [`sev-snp-iommu-avic_5.19-rc6_v3`](https://github.com/AMDESE/linux/tree/sev-snp-iommu-avic_5.19-rc6_v3) and commit [`3a88547`](https://github.com/AMDESE/linux/commit/3a885471cf89156ea555341f3b737ad2a8d9d3d0) is known to work in conjunction with SEV Firmware version 1.51.3 (0xh\_1.33.03) available on AMD's [SEV developer website](https://developer.amd.com/sev/). See [AMD's guide](https://github.com/AMDESE/AMDSEV/tree/sev-snp-devel) to configure the host accordingly. Verify that you are able to run SEV-SNP encrypted VMs first. The guest components required for Kata Containers are built as described below.
|
||||
|
||||
**Tip**: It is easiest to first have Kata Containers running on your system and then modify it to run containers in SNP-VMs. Follow the [Developer guide](../Developer-Guide.md#warning) and then the steps below. Alternatively, you can follow this guide from the start.
|
||||
|
||||
## How to build
|
||||
|
||||
Follow all of the steps below to install Kata Containers with SNP support from scratch. These steps mostly follow the Developer Guide, with modifications to support SNP.
|
||||
|
||||
__Steps from the Developer Guide:__
|
||||
- Get all the [required components](../Developer-Guide.md#requirements-to-build-individual-components) for building the kata-runtime
|
||||
- [Build and install the kata-runtime](../Developer-Guide.md#build-and-install-the-kata-containers-runtime)
|
||||
- [Build a custom agent](../Developer-Guide.md#build-a-custom-kata-agent---optional)
|
||||
- [Create an initrd image](../Developer-Guide.md#create-an-initrd-image---optional) by first building a rootfs, then building the initrd based on that rootfs; use a custom agent and install it. `ubuntu` works as the distribution of choice.
|
||||
- Get the [required components](../../tools/packaging/kernel/README.md#requirements) to build a custom kernel
|
||||
|
||||
__SNP-specific steps:__
|
||||
- Build the SNP-specific kernel as shown below (see this [guide](../../tools/packaging/kernel/README.md#build-kata-containers-kernel) for more information)
|
||||
```bash
|
||||
$ pushd kata-containers/tools/packaging/kernel/
|
||||
$ ./build-kernel.sh -a x86_64 -x snp setup
|
||||
$ ./build-kernel.sh -a x86_64 -x snp build
|
||||
$ sudo -E PATH="${PATH}" ./build-kernel.sh -x snp install
|
||||
$ popd
|
||||
```
|
||||
- Build a current OVMF capable of SEV-SNP:
|
||||
```bash
|
||||
$ pushd kata-containers/tools/packaging/static-build/ovmf
|
||||
$ ./build.sh
|
||||
$ tar -xvf edk2-x86_64.tar.gz
|
||||
$ popd
|
||||
```
|
||||
- Build a custom QEMU
|
||||
```bash
|
||||
$ source kata-containers/tools/packaging/scripts/lib.sh
|
||||
$ qemu_url="$(get_from_kata_deps "assets.hypervisor.qemu-snp-experimental.url")"
|
||||
$ qemu_tag="$(get_from_kata_deps "assets.hypervisor.qemu-snp-experimental.tag")"
|
||||
$ git clone "${qemu_url}"
|
||||
$ pushd qemu
|
||||
$ git checkout "${qemu_tag}"
|
||||
$ ./configure --enable-virtfs --target-list=x86_64-softmmu --enable-debug
|
||||
$ make -j "$(nproc)"
|
||||
$ popd
|
||||
```
|
||||
|
||||
### Kata Containers Configuration for SNP
|
||||
|
||||
The configuration file located at `/etc/kata-containers/configuration.toml` must be adapted as follows to support SNP-VMs:
|
||||
- Use the SNP-specific kernel for the guest VM (change path)
|
||||
```toml
|
||||
kernel = "/usr/share/kata-containers/vmlinuz-snp.container"
|
||||
```
|
||||
- Enable the use of an initrd (uncomment)
|
||||
```toml
|
||||
initrd = "/usr/share/kata-containers/kata-containers-initrd.img"
|
||||
```
|
||||
- Disable the use of a rootfs (comment out)
|
||||
```toml
|
||||
# image = "/usr/share/kata-containers/kata-containers.img"
|
||||
```
|
||||
- Use the custom QEMU capable of SNP (change path)
|
||||
```toml
|
||||
path = "/path/to/qemu/build/qemu-system-x86_64"
|
||||
```
|
||||
- Use the `virtio-9p` device, since `virtio-fs` is unsupported due to bugs/shortcomings in QEMU version [`snp-v3`](https://github.com/AMDESE/qemu/tree/snp-v3) for SEV and SEV-SNP (change value)
|
||||
```toml
|
||||
shared_fs = "virtio-9p"
|
||||
```
|
||||
- Disable `virtiofsd` since it is no longer required (comment out)
|
||||
```toml
|
||||
# virtio_fs_daemon = "/usr/libexec/virtiofsd"
|
||||
```
|
||||
- Disable NVDIMM (uncomment)
|
||||
```toml
|
||||
disable_image_nvdimm = true
|
||||
```
|
||||
- Disable shared memory (uncomment)
|
||||
```toml
|
||||
file_mem_backend = ""
|
||||
```
|
||||
- Enable confidential guests (uncomment)
|
||||
```toml
|
||||
confidential_guest = true
|
||||
```
|
||||
- Enable SNP-VMs (uncomment)
|
||||
```toml
|
||||
sev_snp_guest = true
|
||||
```
|
||||
- Configure an OVMF (add path)
|
||||
```toml
|
||||
firmware = "/path/to/kata-containers/tools/packaging/static-build/ovmf/opt/kata/share/ovmf/OVMF.fd"
|
||||
```
|
||||
|
||||
## Test Kata Containers with Containerd
|
||||
|
||||
With Kata Containers configured to support SNP-VMs, we use containerd to test and deploy containers in these VMs.
|
||||
|
||||
### Install Containerd
|
||||
If not already present, follow [this guide](./containerd-kata.md#install) to install containerd and its related components, including `CNI` and the `cri-tools` (skip Kata Containers since we already installed it).
|
||||
|
||||
### Containerd Configuration
|
||||
|
||||
Follow [this guide](./containerd-kata.md#configuration) to configure containerd to use Kata Containers.
|
||||
|
||||
## Run Kata Containers in SNP-VMs
|
||||
|
||||
Run the commands below to start a container. See [this guide](./containerd-kata.md#run) for more information:
|
||||
```bash
|
||||
$ sudo ctr image pull docker.io/library/busybox:latest
|
||||
$ sudo ctr run --cni --runtime io.containerd.run.kata.v2 -t --rm docker.io/library/busybox:latest hello sh
|
||||
```
|
||||
|
||||
### Check for active SNP:
|
||||
|
||||
Inside the running container, run the following commands to check if SNP is active. It should look something like this:
|
||||
```
|
||||
/ # dmesg | grep -i sev
|
||||
[ 0.299242] Memory Encryption Features active: AMD SEV SEV-ES SEV-SNP
|
||||
[ 0.472286] SEV: Using SNP CPUID table, 31 entries present.
|
||||
[ 0.514574] SEV: SNP guest platform device initialized.
|
||||
[ 0.885425] sev-guest sev-guest: Initialized SEV guest driver (using vmpck_id 0)
|
||||
```
|
||||
|
||||
### Obtain an SNP Attestation Report
|
||||
|
||||
To obtain an attestation report inside the container, the `/dev/sev-guest` device must first be configured. As of now, the VM does not perform this step; however, it can be performed inside the container, either in the terminal or in code.
|
||||
|
||||
Example for shell:
|
||||
```
|
||||
/ # SNP_MAJOR=$(cat /sys/devices/virtual/misc/sev-guest/dev | awk -F: '{print $1}')
|
||||
/ # SNP_MINOR=$(cat /sys/devices/virtual/misc/sev-guest/dev | awk -F: '{print $2}')
|
||||
/ # mknod -m 600 /dev/sev-guest c "${SNP_MAJOR}" "${SNP_MINOR}"
|
||||
```
|
||||
|
||||
## Known Issues
|
||||
|
||||
- Support for cgroups v2 is still [work in progress](https://github.com/kata-containers/kata-containers/issues/927). If issues occur due to cgroups v2 becoming the default in newer systems, one possible solution is to downgrade cgroups to v1:
|
||||
```bash
|
||||
sudo sed -i 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 systemd.unified_cgroup_hierarchy=0"/' /etc/default/grub
|
||||
sudo update-grub
|
||||
sudo reboot
|
||||
```
|
||||
- If both SEV and SEV-SNP are supported by the host, Kata Containers uses SEV-SNP by default. You can verify which features are enabled by checking `/sys/module/kvm_amd/parameters/sev` and `/sys/module/kvm_amd/parameters/sev_snp`. This means that Kata Containers cannot run SEV-SNP-VMs and SEV-VMs at the same time. If SEV is to be used by Kata Containers instead, reload the `kvm_amd` kernel module without SNP support; note that this disables SNP support for the entire platform.
|
||||
```bash
|
||||
sudo rmmod kvm_amd && sudo modprobe kvm_amd sev_snp=0
|
||||
```
|
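You can check the current state from the host using the sysfs paths mentioned above:

```bash
$ cat /sys/module/kvm_amd/parameters/sev
$ cat /sys/module/kvm_amd/parameters/sev_snp
```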
||||
|
||||
@@ -1,226 +0,0 @@
|
||||
# A new way for Kata Containers to use various kinds of block volumes
|
||||
|
||||
> **Note:** This guide only applies to runtime-rs with the default hypervisor Dragonball.
|
||||
> Support for other hypervisors is still in progress, and this guide will be updated when they are ready.
|
||||
|
||||
|
||||
## Background
|
||||
|
||||
Currently, there is no widely applicable and convenient method for users to use certain kinds of backend storage with Kata Containers, such as file-on-host based block volumes, SPDK based volumes or VFIO device based volumes, so we adopt [Proposal: Direct Block Device Assignment](https://github.com/kata-containers/kata-containers/blob/main/docs/design/direct-blk-device-assignment.md) to address this.
|
||||
|
||||
## Solution
|
||||
|
||||
According to the proposal, the `kata-ctl direct-volume` command is used to add a directly assigned block volume device to the Kata Containers runtime.
|
||||
|
||||
Then, with the help of the method [get_volume_mount_info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L95), the information in the JSON file `mountinfo.json` is parsed into the structure [Direct Volume Info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L70), which is used to save device-related information.
|
||||
|
||||
We only fill in the `mountinfo.json` fields, such as `device`, `volume_type`, `fs_type`, `metadata` and `options`, which correspond to the fields in [Direct Volume Info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L70), to describe a device.
|
||||
|
||||
The JSON file `mountinfo.json` is placed in a sub-path `/kubelet/kata-test-vol-001/volume001` under the fixed path `/run/kata-containers/shared/direct-volumes/`.
|
||||
The full path thus looks like `/run/kata-containers/shared/direct-volumes/kubelet/kata-test-vol-001/volume001`, but for security reasons the sub-path is base64-
|
||||
encoded as `/run/kata-containers/shared/direct-volumes/L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx`.
|
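The directory name is simply the base64 encoding of the volume sub-path, so it can be computed or reversed from the shell:

```bash
$ echo -n "/kubelet/kata-test-vol-001/volume001" | base64
L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx
$ echo L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx | base64 -d
/kubelet/kata-test-vol-001/volume001
```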
||||
|
||||
Finally, when running a Kata Container with `ctr run --mount type=X,src=Y,dst=Z,options=rbind:rw`, `type=X` should be set to the proprietary type designed for that kind of volume.
|
||||
|
||||
Currently supported types:
|
||||
|
||||
- `directvol` for direct volume
|
||||
- `vfiovol` for VFIO device based volume
|
||||
- `spdkvol` for SPDK/vhost-user based volume
|
||||
|
||||
|
||||
## Setup Device and Run a Kata Container
|
||||
|
||||
### Direct Block Device Based Volume
|
||||
|
||||
#### create raw block based backend storage
|
||||
|
||||
> **Tips:** raw block based backend storage MUST be formatted with `mkfs`.
|
||||
|
||||
```bash
|
||||
$ sudo dd if=/dev/zero of=/tmp/stor/rawdisk01.20g bs=1M count=20480
|
||||
$ sudo mkfs.ext4 /tmp/stor/rawdisk01.20g
|
||||
```
|
||||
|
||||
#### setup direct block device for kata-containers
|
||||
|
||||
```json
|
||||
{
|
||||
"device": "/tmp/stor/rawdisk01.20g",
|
||||
"volume_type": "directvol",
|
||||
"fs_type": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-direct-vol-002/directvol002 "{\"device\": \"/tmp/stor/rawdisk01.20g\", \"volume_type\": \"directvol\", \"fs_type\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ # /kubelet/kata-direct-vol-002/directvol002 <==> /run/kata-containers/shared/direct-volumes/W1lMa2F0ZXQva2F0YS10a2F0DAxvbC0wMDEvdm9sdW1lMDAx
|
||||
$ cat W1lMa2F0ZXQva2F0YS10a2F0DAxvbC0wMDEvdm9sdW1lMDAx/mountInfo.json
|
||||
{"volume_type":"directvol","device":"/tmp/stor/rawdisk01.20g","fs_type":"ext4","metadata":{},"options":[]}
|
||||
```
|
||||
|
||||
#### Run a Kata container with direct block device volume
|
||||
|
||||
```bash
|
||||
$ # type=directvol,src=/kubelet/kata-direct-vol-002/directvol002,dst=/disk002,options=rbind:rw
|
||||
$ sudo ctr run -t --rm --runtime io.containerd.kata.v2 --mount type=directvol,src=/kubelet/kata-direct-vol-002/directvol002,dst=/disk002,options=rbind:rw "$image" kata-direct-vol-xx05302045 /bin/bash
|
||||
```
|
||||
|
||||
|
||||
### VFIO Device Based Block Volume
|
||||
|
||||
#### create VFIO device based backend storage
|
||||
|
||||
> **Tip:** It only supports `vfio-pci` based PCI device passthrough mode.
|
||||
|
||||
In this scenario, the device's host kernel driver is replaced by `vfio-pci`, and an IOMMU group ID is generated.
|
||||
Either the device's BDF or its VFIO IOMMU group path under `/dev/vfio/` works for the `device` field in `mountinfo.json`.
|
||||
|
||||
```bash
|
||||
$ lspci -nn -k -s 45:00.1
|
||||
45:00.1 SCSI storage controller
|
||||
...
|
||||
Kernel driver in use: vfio-pci
|
||||
...
|
||||
$ ls /dev/vfio/110
|
||||
/dev/vfio/110
|
||||
$ ls /sys/kernel/iommu_groups/110/devices/
|
||||
0000:45:00.1
|
||||
```
|
||||
|
||||
#### setup VFIO device for kata-containers
|
||||
|
||||
First, configure the `mountinfo.json`, as below:
|
||||
|
||||
- (1) device with `BB:DD.F`
|
||||
|
||||
```json
|
||||
{
|
||||
"device": "45:00.1",
|
||||
"volume_type": "vfiovol",
|
||||
"fs_type": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
- (2) device with `DDDD:BB:DD.F`
|
||||
|
||||
```json
|
||||
{
|
||||
"device": "0000:45:00.1",
|
||||
"volume_type": "vfiovol",
|
||||
"fs_type": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
- (3) device with `/dev/vfio/X`
|
||||
|
||||
```json
|
||||
{
|
||||
"device": "/dev/vfio/110",
|
||||
"volume_type": "vfiovol",
|
||||
"fs_type": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
Second, add the device to Kata Containers, using `/dev/vfio/110` as an example:
|
||||
|
||||
```bash
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-vfio-vol-003/vfiovol003 "{\"device\": \"/dev/vfio/110\", \"volume_type\": \"vfiovol\", \"fs_type\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ # /kubelet/kata-vfio-vol-003/vfiovol003 <==> /run/kata-containers/shared/direct-volumes/F0va22F0ZvaS12F0YS10a2F0DAxvbC0F0ZXvdm9sdF0Z0YSx
|
||||
$ cat F0va22F0ZvaS12F0YS10a2F0DAxvbC0F0ZXvdm9sdF0Z0YSx/mountInfo.json
|
||||
{"volume_type":"vfiovol","device":"/dev/vfio/110","fs_type":"ext4","metadata":{},"options":[]}
|
||||
```
|
||||
|
||||
#### Run a Kata container with VFIO block device based volume
|
||||
|
||||
```bash
|
||||
$ # type=vfiovol,src=/kubelet/kata-vfio-vol-003/vfiovol003,dst=/disk003,options=rbind:rw
|
||||
$ sudo ctr run -t --rm --runtime io.containerd.kata.v2 --mount type=vfiovol,src=/kubelet/kata-vfio-vol-003/vfiovol003,dst=/disk003,options=rbind:rw "$image" kata-vfio-vol-xx05302245 /bin/bash
|
||||
```
|
||||
|
||||
|
||||
### SPDK Device Based Block Volume
|
||||
|
||||
For SPDK vhost-user devices in runtime-rs, unlike in the Go runtime, there is no need to `mknod` a device node under `/dev/` any more.
|
||||
Simply running `kata-ctl direct-volume add ..` to create a mount info config is enough.
|
||||
|
||||
#### Run SPDK vhost target and Expose vhost block device
|
||||
|
||||
As an example, run an SPDK vhost target and create a vhost-user block controller:
|
||||
|
||||
First, run SPDK vhost target:
|
||||
|
||||
> **Tip:** If the `vfio-pci` driver is supported, you can run SPDK with `DRIVER_OVERRIDE=vfio-pci`.
|
||||
> Otherwise, just run without it: `sudo HUGEMEM=4096 ./scripts/setup.sh`.
|
||||
|
||||
```bash
|
||||
$ SPDK_DEVEL=/xx/spdk
|
||||
$ VHU_UDS_PATH=/tmp/vhu-targets
|
||||
$ RAW_DISKS=/xx/rawdisks
|
||||
$ # Reset first
|
||||
$ ${SPDK_DEVEL}/scripts/setup.sh reset
|
||||
$ sudo sysctl -w vm.nr_hugepages=2048
|
||||
$ #4G Huge Memory for spdk
|
||||
$ sudo HUGEMEM=4096 DRIVER_OVERRIDE=vfio-pci ${SPDK_DEVEL}/scripts/setup.sh
|
||||
$ sudo ${SPDK_DEVEL}/build/bin/spdk_tgt -S $VHU_UDS_PATH -s 1024 -m 0x3 &
|
||||
```
|
||||
|
||||
Second, create a vhost controller:
|
||||
|
||||
```bash
|
||||
$ sudo dd if=/dev/zero of=${RAW_DISKS}/rawdisk01.20g bs=1M count=20480
|
||||
$ sudo ${SPDK_DEVEL}/scripts/rpc.py bdev_aio_create ${RAW_DISKS}/rawdisk01.20g vhu-rawdisk01.20g 512
|
||||
$ sudo ${SPDK_DEVEL}/scripts/rpc.py vhost_create_blk_controller vhost-blk-rawdisk01.sock vhu-rawdisk01.20g
|
||||
```
|
||||
|
||||
Here, a vhost controller `vhost-blk-rawdisk01.sock` is created, and the controller will
|
||||
be passed to a hypervisor, such as Dragonball, Cloud Hypervisor, Firecracker or QEMU.
|
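To confirm the controller was created, you can query the vhost target (a sketch assuming the standard SPDK `rpc.py` helper):

```bash
$ sudo ${SPDK_DEVEL}/scripts/rpc.py vhost_get_controllers
```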
||||
|
||||
|
||||
#### setup vhost-user block device for kata-containers
|
||||
|
||||
|
||||
First, `mkdir` a sub-path `kubelet/kata-test-vol-001/` under `/run/kata-containers/shared/direct-volumes/`.
|
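For example:

```bash
$ sudo mkdir -p /run/kata-containers/shared/direct-volumes/kubelet/kata-test-vol-001
```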
||||
|
||||
Second, fill in the fields of `mountinfo.json`, which looks like the following:
|
||||
```json
|
||||
{
|
||||
"device": "/tmp/vhu-targets/vhost-blk-rawdisk01.sock",
|
||||
"volume_type": "spdkvol",
|
||||
"fs_type": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
Third, use `kata-ctl direct-volume add` to register the block device and generate `mountinfo.json`, then run a Kata container with `--mount`.
|
||||
|
||||
```bash
|
||||
$ # kata-ctl direct-volume add
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-test-vol-001/volume001 "{\"device\": \"/tmp/vhu-targets/vhost-blk-rawdisk01.sock\", \"volume_type\":\"spdkvol\", \"fs_type\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ # /kubelet/kata-test-vol-001/volume001 <==> /run/kata-containers/shared/direct-volumes/L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx
|
||||
$ cat L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx/mountInfo.json
|
||||
$ {"volume_type":"spdkvol","device":"/tmp/vhu-targets/vhost-blk-rawdisk01.sock","fs_type":"ext4","metadata":{},"options":[]}
|
||||
```
|
||||
|
||||
As `/run/kata-containers/shared/direct-volumes/` is a fixed path, we can run a Kata pod with `--mount` and set
|
||||
the `src` sub-path. The `--mount` argument then looks like: `--mount type=spdkvol,src=/kubelet/kata-test-vol-001/volume001,dst=/disk001`.
|
||||
|
||||
|
||||
#### Run a Kata container with SPDK vhost-user block device
|
||||
|
||||
|
||||
In this case, for `ctr run --mount type=X,src=source,dst=dest`, `X` is set to `spdkvol`, a proprietary type specifically designed for SPDK volumes.
|
||||
|
||||
```bash
|
||||
$ # ctr run with --mount type=spdkvol,src=/kubelet/kata-test-vol-001/volume001,dst=/disk001
|
||||
$ sudo ctr run -t --rm --runtime io.containerd.kata.v2 --mount type=spdkvol,src=/kubelet/kata-test-vol-001/volume001,dst=/disk001,options=rbind:rw "$image" kata-spdk-vol-xx0530 /bin/bash
|
||||
```
|
||||
@@ -1,5 +1,5 @@
|
||||
## Introduction
|
||||
To improve security, Kata Container supports running the VMM process (QEMU and cloud-hypervisor) as a non-`root` user.
|
||||
To improve security, Kata Container supports running the VMM process (currently only QEMU) as a non-`root` user.
|
||||
This document describes how to enable the rootless VMM mode and its limitations.
|
||||
|
||||
## Pre-requisites
|
||||
@@ -27,7 +27,7 @@ Another necessary change is to move the hypervisor runtime files (e.g. `vhost-fs
|
||||
## Limitations
|
||||
|
||||
1. Only the VMM process is running as a non-root user. Other processes such as Kata Container shimv2 and `virtiofsd` still run as the root user.
|
||||
2. Currently, this feature is only supported in QEMU and cloud-hypervisor. For firecracker, you can use jailer to run the VMM process with a non-root user.
|
||||
2. Currently, this feature is only supported in QEMU. Still need to bring it to Firecracker and Cloud Hypervisor (see https://github.com/kata-containers/kata-containers/issues/2567).
|
||||
3. Certain features will not work when rootless VMM is enabled, including:
|
||||
1. Passing devices to the guest (`virtio-blk`, `virtio-scsi`) will not work if the non-privileged user does not have permission to access it (leading to a permission denied error). A more permissive permission (e.g. 666) may overcome this issue. However, you need to be aware of the potential security implications of reducing the security on such devices.
|
||||
2. `vfio` devices will also not work because of a permission denied error.
|
||||
@@ -57,7 +57,6 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for the `virtio-scsi` driver |
|
||||
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | enable preallocation of the VM RAM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.vhost_user_reconnect_timeout_sec` | `string`| the timeout for reconnecting the vhost user socket (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.entropy_source` (R) | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
|
||||
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
|
||||
@@ -88,7 +87,7 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.use_vsock` | `boolean` | specify use of `vsock` for agent communication |
|
||||
| `io.katacontainers.config.hypervisor.vhost_user_store_path` (R) | `string` | specify the directory path where vhost-user devices related folders, sockets and device nodes should be (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache_size` | uint32 | virtio-fs DAX cache size in `MiB` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `never` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to `virtiofs` daemon |
|
||||
| `io.katacontainers.config.hypervisor.enable_guest_swap` | `boolean` | enable swap in the guest |
|
||||
|
||||
@@ -17,9 +17,9 @@ Enable setup swap device in guest kernel as follows:
|
||||
$ sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' /etc/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Run a Kata Containers utilizing swap device
|
||||
## Run a Kata Container utilizing swap device
|
||||
|
||||
Use following command to start a Kata Containers with swappiness 60 and 1GB swap device (swap_in_bytes - memory_limit_in_bytes).
|
||||
Use following command to start a Kata Container with swappiness 60 and 1GB swap device (swap_in_bytes - memory_limit_in_bytes).
|
||||
```
|
||||
$ pod_yaml=pod.yaml
|
||||
$ container_yaml=container.yaml
|
||||
@@ -43,12 +43,12 @@ command:
|
||||
- top
|
||||
EOF
|
||||
$ sudo crictl pull $image
|
||||
$ podid=$(sudo crictl runp --runtime kata $pod_yaml)
|
||||
$ podid=$(sudo crictl runp $pod_yaml)
|
||||
$ cid=$(sudo crictl create $podid $container_yaml $pod_yaml)
|
||||
$ sudo crictl start $cid
|
||||
```
|
||||
|
||||
Kata Containers setups swap device for this container only when `io.katacontainers.container.resource.swappiness` is set.
|
||||
Kata Container setups swap device for this container only when `io.katacontainers.container.resource.swappiness` is set.
|
||||
|
||||
The following table shows how the swap size is decided when `io.katacontainers.container.resource.swappiness` is set.
|
||||
|`io.katacontainers.container.resource.swap_in_bytes`|`memory_limit_in_bytes`|swap size|
|
||||
|
||||
@@ -1,90 +0,0 @@
|
||||
# Configure Kata Containers to use an EROFS rootfs
|
||||
|
||||
## Introduction
|
||||
For Kata Containers, the rootfs is used in a read-only way. EROFS can noticeably decrease metadata overhead.
|
||||
|
||||
`mkfs.erofs` can generate compressed and uncompressed EROFS images.
|
||||
|
||||
For uncompressed images, no files are compressed. However, the data blocks at the end of a file can optionally be inlined with the metadata.
|
||||
|
||||
For compressed images, each file is compressed using the lz4 or lz4hc algorithm, after confirming that this saves space. A file is left uncompressed if compression does not save space.
|
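As a sketch (assuming a prepared `rootfs/` directory and hypothetical image names), both variants can be created as follows:

```shell
# uncompressed image; tail data blocks may be inlined with the metadata
$ mkfs.erofs rootfs-uncompressed.img rootfs/
# lz4hc-compressed image; files that do not shrink are stored uncompressed
$ mkfs.erofs -zlz4hc rootfs-lz4hc.img rootfs/
```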
||||
|
||||
## Performance comparison
|
||||
| | EROFS | EXT4 | XFS |
|
||||
|-----------------|-------| --- | --- |
|
||||
| Image Size [MB] | 106(uncompressed) | 256 | 126 |
|
||||
|
||||
|
||||
## Guidance
|
||||
### Install the `erofs-utils`
|
||||
#### `apt/dnf` install
|
||||
On newer `Ubuntu/Debian` systems, it can be installed directly using the `apt` command, and on `Fedora` it can be installed directly using the `dnf` command.
|
||||
|
||||
```shell
|
||||
# Debian/Ubuntu
|
||||
$ apt install erofs-utils
|
||||
# Fedora
|
||||
$ dnf install erofs-utils
|
||||
```
|
||||
|
||||
#### Source install
|
||||
[https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git](https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git)
|
||||
|
||||
##### Compile dependencies
|
||||
If you need to enable the `Lz4` compression feature, `Lz4 1.8.0+` is required, and `Lz4 1.9.3+` is strongly recommended.
|
||||
|
||||
##### Compilation process
|
||||
For some old lz4 versions (lz4-1.8.0 to 1.8.3), the lz4hc algorithm is not supported unless lz4-static is installed (for example, the `lz4-static.x86_64` package on RPM-based distributions). However, these versions have some compression bugs, so using them directly is not recommended.
|
||||
If you use `lz4 1.9.0+`, you can compile directly with the following commands.
|
||||
|
||||
```shell
|
||||
$ ./autogen.sh
|
||||
$ ./configure
|
||||
$ make
|
||||
```
|
||||
|
||||
The compiled `mkfs.erofs` program will be saved in the `mkfs` directory. Afterwards, the generated tools can be installed to a system directory using `make install` (requires root privileges).
|
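For example:

```shell
$ sudo make install
```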
||||
|
||||
### Create a local rootfs
|
||||
```shell
|
||||
$ export distro="ubuntu"
|
||||
$ export FS_TYPE="erofs"
|
||||
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
|
||||
$ sudo rm -rf "${ROOTFS_DIR}"
|
||||
$ pushd kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ script -fec 'sudo -E SECCOMP=no ./rootfs.sh "${distro}"'
|
||||
$ popd
|
||||
```
|
||||
|
||||
### Add a custom agent to the image - OPTIONAL
|
||||
> Note:
|
||||
> - You should only do this step if you are testing with the latest version of the agent.
|
||||
```shell
|
||||
$ sudo install -o root -g root -m 0550 -t "${ROOTFS_DIR}/usr/bin" "${ROOTFS_DIR}/../../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent"
|
||||
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-agent.service" "${ROOTFS_DIR}/usr/lib/systemd/system/"
|
||||
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-containers.target" "${ROOTFS_DIR}/usr/lib/systemd/system/"
|
||||
```
|
||||
|
||||
### Build a rootfs image
|
||||
```shell
|
||||
$ pushd kata-containers/tools/osbuilder/image-builder
|
||||
$ script -fec 'sudo -E ./image_builder.sh "${ROOTFS_DIR}"'
|
||||
$ popd
|
||||
```
|
||||
|
||||
### Install the rootfs image
|
||||
```shell
|
||||
$ pushd kata-containers/tools/osbuilder/image-builder
|
||||
$ commit="$(git log --format=%h -1 HEAD)"
|
||||
$ date="$(date +%Y-%m-%d-%T.%N%z)"
|
||||
$ rootfs="erofs"
|
||||
$ image="kata-containers-${rootfs}-${date}-${commit}"
|
||||
$ sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
|
||||
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
|
||||
$ popd
|
||||
```
|
||||
|
||||
### Use `EROFS` in the runtime
|
||||
```shell
|
||||
$ sudo sed -i -e 's/^# *\(rootfs_type\).*=.*$/\1 = "erofs"/g' /etc/kata-containers/configuration.toml
|
||||
```
|
||||
@@ -104,7 +104,7 @@ sudo dmsetup create "${POOL_NAME}" \
|
||||
|
||||
cat << EOF
|
||||
#
|
||||
# Add this to your config.toml configuration file and restart containerd daemon
|
||||
# Add this to your config.toml configuration file and restart `containerd` daemon
|
||||
#
|
||||
[plugins]
|
||||
[plugins.devmapper]
|
||||
@@ -212,7 +212,7 @@ Next, we need to configure containerd. Add a file in your path (e.g. `/usr/local
|
||||
|
||||
```
|
||||
#!/bin/bash
|
||||
KATA_CONF_FILE=/etc/kata-containers/configuration-fc.toml /usr/local/bin/containerd-shim-kata-v2 $@
|
||||
KATA_CONF_FILE=/etc/containers/configuration-fc.toml /usr/local/bin/containerd-shim-kata-v2 $@
|
||||
```
|
||||
> **Note:** You may need to edit the paths of the configuration file and the `containerd-shim-kata-v2` to correspond to your setup.
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ Packaged installation methods use your distribution's native package format (su
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------|
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give Kata Containers a try on an already up and running Kubernetes cluster. |
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. |
|
||||
| [Using snap](#snap-installation) | Easy to install | yes | Good alternative to official distro packages. |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. |
|
||||
| [Manual](#manual-installation) | Follow a guide step-by-step to install a working system | **No!** | For those who want the latest release with more control. |
|
||||
| [Build from source](#build-from-source-installation) | Build the software components manually | **No!** | Power users and developers only. |
|
||||
@@ -41,6 +42,12 @@ Kata packages are provided by official distribution repositories for:
|
||||
| [CentOS](centos-installation-guide.md) | 8 |
|
||||
| [Fedora](fedora-installation-guide.md) | 34 |
|
||||
|
||||
### Snap Installation
|
||||
|
||||
The snap installation is available for all distributions which support `snapd`.
|
||||
|
||||
[Use snap](snap-installation-guide.md) to install Kata Containers from https://snapcraft.io.
|
||||
|
||||
### Automatic Installation
|
||||
|
||||
[Use `kata-manager`](/utils/README.md) to automatically install a working Kata Containers system.
|
||||
|
||||
@@ -123,7 +123,7 @@ Refer to [this guide](https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-l
|
||||
SSH into the machine
|
||||
|
||||
```bash
|
||||
$ ssh -i MyKeyPair.pem ubuntu@${IP}
|
||||
$ ssh -i MyKeyPair.pen ubuntu@${IP}
|
||||
```
|
||||
|
||||
Go onto the next step.
|
||||
|
||||
@@ -24,17 +24,19 @@ architectures:
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case | Availability
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------|----------- |
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. | Yes |
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. | No |
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. | No |
|
||||
| [Using snap](#snap-installation) | Easy to install | yes | Good alternative to official distro packages. | No |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. | No |
|
||||
| [Manual](#manual-installation) | Follow a guide step-by-step to install a working system | **No!** | For those who want the latest release with more control. | No |
|
||||
| [Build from source](#build-from-source-installation) | Build the software components manually | **No!** | Power users and developers only. | Yes |
|
||||
|
||||
### Kata Deploy Installation
|
||||
|
||||
Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md).
|
||||
`ToDo`
|
||||
### Official packages
|
||||
`ToDo`
|
||||
### Snap Installation
|
||||
`ToDo`
|
||||
### Automatic Installation
|
||||
`ToDo`
|
||||
### Manual Installation
|
||||
@@ -46,14 +48,14 @@ Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md).
|
||||
|
||||
* Download `Rustup` and install `Rust`
|
||||
> **Notes:**
|
||||
> For Rust version, please set `RUST_VERSION` to the value of `languages.rust.meta.newest-version key` in [`versions.yaml`](../../versions.yaml) or, if `yq` is available on your system, run `export RUST_VERSION=$(yq read versions.yaml languages.rust.meta.newest-version)`.
|
||||
> Rust version 1.58 is needed
|
||||
|
||||
Example for `x86_64`
|
||||
```
|
||||
$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
$ source $HOME/.cargo/env
|
||||
$ rustup install ${RUST_VERSION}
|
||||
$ rustup default ${RUST_VERSION}-x86_64-unknown-linux-gnu
|
||||
$ rustup install 1.58
|
||||
$ rustup default 1.58-x86_64-unknown-linux-gnu
|
||||
```
|
||||
|
||||
* Musl support for fully static binary
|
||||
@@ -81,7 +83,7 @@ $ git clone https://github.com/kata-containers/kata-containers.git
|
||||
$ cd kata-containers/src/runtime-rs
|
||||
$ make && sudo make install
|
||||
```
|
||||
After running the command above, the default config file `configuration.toml` will be installed under `/usr/share/defaults/kata-containers/`, the binary file `containerd-shim-kata-v2` will be installed under `/usr/local/bin/` .
|
||||
After running the command above, the default config file `configuration.toml` will be installed under `/usr/share/defaults/kata-containers/`, the binary file `containerd-shim-kata-v2` will be installed under `/user/local/bin` .
|
||||
|
||||
### Build Kata Containers Kernel
|
||||
Follow the [Kernel installation guide](/tools/packaging/kernel/README.md).
|
||||
|
||||
@@ -71,6 +71,12 @@ To use containerd, modify the `--container-runtime` argument:
|
||||
> **Notes:**
|
||||
> - Adjust the `--memory 6144` line to suit your environment and requirements. Kata Containers default to
|
||||
> requesting 2048MB per container. We recommend you supply more than that to the Minikube node.
|
||||
> - Prior to Minikube/Kubernetes v1.14, the beta `RuntimeClass` feature also needed enabling with
|
||||
> the following.
|
||||
>
|
||||
> | what | why |
|
||||
> | ---- | --- |
|
||||
> | `--feature-gates=RuntimeClass=true` | Kata needs to use the `RuntimeClass` Kubernetes feature |
|
||||
|
||||
The full command is therefore:
|
||||
|
||||
@@ -132,9 +138,17 @@ $ kubectl -n kube-system exec ${podname} -- ps -ef | fgrep infinity
|
||||
|
||||
## Enabling Kata Containers
|
||||
|
||||
> **Note:** Only Minikube/Kubernetes versions <= 1.13 require this step. Since version
|
||||
> v1.14, the `RuntimeClass` is enabled by default. Performing this step on Kubernetes > v1.14 is
|
||||
> however benign.
|
||||
|
||||
Now you have installed the Kata Containers components in the Minikube node. Next, you need to configure
|
||||
Kubernetes `RuntimeClass` to know when to use Kata Containers to run a pod.
|
||||
|
||||
```sh
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/node-api/master/manifests/runtimeclass_crd.yaml
|
||||
```
|
||||
|
||||
### Register the runtime
|
||||
|
||||
Now register the `kata qemu` runtime with that class. This should result in no errors:
|
||||
|
||||
52
docs/install/snap-installation-guide.md
Normal file
@@ -0,0 +1,52 @@
|
||||
# Kata Containers snap package
|
||||
|
||||
## Install Kata Containers
|
||||
|
||||
Kata Containers can be installed in any Linux distribution that supports
|
||||
[snapd](https://docs.snapcraft.io/installing-snapd).
|
||||
|
||||
Run the following command to install **Kata Containers**:
|
||||
|
||||
```sh
|
||||
$ sudo snap install kata-containers --stable --classic
|
||||
```
|
||||
|
||||
## Configure Kata Containers
|
||||
|
||||
By default, the Kata Containers snap image is mounted at `/snap/kata-containers` as a
|
||||
read-only file system; therefore, the default configuration file cannot be edited.
|
||||
Fortunately, Kata Containers supports loading a configuration file from a
|
||||
path other than the default.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/kata-containers
|
||||
$ sudo cp /snap/kata-containers/current/usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers/
|
||||
$ $EDITOR /etc/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Integration with shim v2 Container Engines
|
||||
|
||||
The Container engine daemon (`cri-o`, `containerd`, etc) needs to be able to find the
|
||||
`containerd-shim-kata-v2` binary to allow Kata Containers to be created.
|
||||
Run the following command to create a symbolic link to the shim v2 binary.
|
||||
|
||||
```sh
|
||||
$ sudo ln -sf /snap/kata-containers/current/usr/bin/containerd-shim-kata-v2 /usr/local/bin/containerd-shim-kata-v2
|
||||
```
|
||||
|
||||
Once the symbolic link has been created and the engine daemon configured, `io.containerd.kata.v2`
|
||||
can be used as the runtime.
|
||||
|
||||
Read the following documents to learn how to run Kata Containers 2.x with `containerd`.
|
||||
|
||||
* [How to use Kata Containers and Containerd](../how-to/containerd-kata.md)
|
||||
* [Install Kata Containers with containerd](./container-manager/containerd/containerd-install.md)
|
||||
|
||||
|
||||
## Remove Kata Containers snap package
|
||||
|
||||
Run the following command to remove the Kata Containers snap:
|
||||
|
||||
```sh
|
||||
$ sudo snap remove kata-containers
|
||||
```
|
||||
@@ -545,12 +545,6 @@ Create the hook execution file for Kata:
|
||||
/usr/bin/nvidia-container-toolkit -debug $@
|
||||
```
|
||||
|
||||
Make sure the hook script is executable:
|
||||
|
||||
```sh
|
||||
chmod +x $ROOTFS_DIR/usr/share/oci/hooks/prestart/nvidia-container-toolkit.sh
|
||||
```
|
||||
|
||||
As the last step, one can clean up files or package caches. Build the
|
||||
rootfs and configure it for use with Kata according to the development guide.
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ the latest driver.
|
||||
$ export QAT_DRIVER_VER=qat1.7.l.4.14.0-00031.tar.gz
|
||||
$ export QAT_DRIVER_URL=https://downloadmirror.intel.com/30178/eng/${QAT_DRIVER_VER}
|
||||
$ export QAT_CONF_LOCATION=~/QAT_conf
|
||||
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/main/demo/openssl-qat-engine/Dockerfile
|
||||
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/master/demo/openssl-qat-engine/Dockerfile
|
||||
$ export QAT_SRC=~/src/QAT
|
||||
$ export GOPATH=~/src/go
|
||||
$ export KATA_KERNEL_LOCATION=~/kata
|
||||
|
||||
@@ -61,9 +61,6 @@ spec:
|
||||
name: eosgx-demo-job-1
|
||||
image: oeciteam/oe-helloworld:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- mountPath: /dev
|
||||
name: dev-mount
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
|
||||
@@ -197,6 +197,11 @@ vhost_user_store_path = "<Path of the base directory for vhost-user device>"
|
||||
> under `[hypervisor.qemu]` section.
|
||||
|
||||
|
||||
For the subdirectories of `vhost_user_store_path`: `block` is used for block
|
||||
device; `block/sockets` is where we expect UNIX domain sockets for vhost-user
|
||||
block devices to live; `block/devices` is where simulated block device nodes
|
||||
for vhost-user block devices are created.
|
||||
|
||||
For the subdirectories of `vhost_user_store_path`:
|
||||
- `block` is used for block device;
|
||||
- `block/sockets` is where we expect UNIX domain sockets for vhost-user
|
||||
|
||||
101
snap/local/README.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# Kata Containers snap image
|
||||
|
||||
This directory contains the resources needed to build the Kata Containers
|
||||
[snap][1] image.
|
||||
|
||||
## Initial setup
|
||||
|
||||
Kata Containers can be installed in any Linux distribution that supports
|
||||
[snapd](https://docs.snapcraft.io/installing-snapd). For this example, we
|
||||
assume Ubuntu as your base distro.
|
||||
```sh
|
||||
$ sudo apt-get --no-install-recommends install -y apt-utils ca-certificates snapd snapcraft
|
||||
```
|
||||
|
||||
## Install snap
|
||||
|
||||
You can install the Kata Containers snap from the [snapcraft store][8] or by running the following command:
|
||||
|
||||
```sh
|
||||
$ sudo snap install kata-containers --classic
|
||||
```
|
||||
|
||||
## Build and install snap image
|
||||
|
||||
Run the command below, which uses the packaging Makefile to build the snap image:
|
||||
|
||||
```sh
|
||||
$ make -C tools/packaging snap
|
||||
```
|
||||
|
||||
> **Warning:**
|
||||
>
|
||||
> By default, `snapcraft` will create a clean virtual machine
|
||||
> environment to build the snap in using the `multipass` tool.
|
||||
>
|
||||
> However, `multipass` is silently disabled when `--destructive-mode` is
|
||||
> used.
|
||||
>
|
||||
> Since building the Kata Containers package currently requires
|
||||
> `--destructive-mode`, the snap will be built using the host
|
||||
> environment. To avoid parts of the build auto-detecting additional
|
||||
> features to enable (for example for QEMU), we recommend that you
|
||||
> only run the snap build in a minimal host environment.
|
||||
|
||||
To install the resulting snap image, the snap must be put in [classic mode][3] and the
|
||||
security confinement must be disabled (`--classic`). Also, since the resulting snap
|
||||
has not been signed, signature verification must be omitted (`--dangerous`).
|
||||
|
||||
```sh
|
||||
$ sudo snap install --classic --dangerous "kata-containers_${version}_${arch}.snap"
|
||||
```
|
||||
|
||||
Replace `${version}` with the current version of Kata Containers and `${arch}` with
|
||||
the system architecture.
|
||||
|
||||
## Configure Kata Containers
|
||||
|
||||
By default, the Kata Containers snap image is mounted at `/snap/kata-containers` as a
|
||||
read-only file system; therefore, the default configuration file cannot be edited.
|
||||
Fortunately, [`kata-runtime`][4] supports loading a configuration file from a
|
||||
path other than the default.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/kata-containers
|
||||
$ sudo cp /snap/kata-containers/current/usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers/
|
||||
$ $EDITOR /etc/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Integration with docker and Kubernetes
|
||||
|
||||
The path to the runtime provided by the Kata Containers snap image is
|
||||
`/snap/kata-containers/current/usr/bin/kata-runtime`. You should use it to
|
||||
run Kata Containers with [docker][9] and [Kubernetes][10].
|
||||
|
||||
## Remove snap
|
||||
|
||||
You can remove the Kata Containers snap by running the following command:
|
||||
|
||||
```sh
|
||||
$ sudo snap remove kata-containers
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
The [miniOS image][2] is not included in the snap image as it is not possible for
|
||||
QEMU to open a guest RAM backing store on a read-only filesystem. Fortunately,
|
||||
you can start Kata Containers with a Linux initial RAM disk (initrd) that is
|
||||
included in the snap image. If you want to use the miniOS image instead of initrd,
|
||||
then a new configuration file can be [created](#configure-kata-containers)
|
||||
and [configured][7].
|
||||
|
||||
[1]: https://docs.snapcraft.io/snaps/intro
|
||||
[2]: ../../docs/design/architecture/README.md#root-filesystem-image
|
||||
[3]: https://docs.snapcraft.io/reference/confinement#classic
|
||||
[4]: https://github.com/kata-containers/kata-containers/tree/main/src/runtime#configuration
|
||||
[5]: https://docs.docker.com/engine/reference/commandline/dockerd
|
||||
[6]: ../../docs/install/docker/ubuntu-docker-install.md
|
||||
[7]: ../../docs/Developer-Guide.md#configure-to-use-initrd-or-rootfs-image
|
||||
[8]: https://snapcraft.io/kata-containers
|
||||
[9]: ../../docs/Developer-Guide.md#run-kata-containers-with-docker
|
||||
[10]: ../../docs/Developer-Guide.md#run-kata-containers-with-kubernetes
|
||||
114
snap/local/snap-common.sh
Normal file
@@ -0,0 +1,114 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2022 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Description: Idempotent script to be sourced by all parts in a
|
||||
# snapcraft config file.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# XXX: Bash-specific code. zsh doesn't support this option and that *does*
|
||||
# matter if this script is run sourced... since it'll be using zsh! ;)
|
||||
[ -n "$BASH_VERSION" ] && set -o errtrace
|
||||
|
||||
[ -n "${DEBUG:-}" ] && set -o xtrace
|
||||
|
||||
die()
|
||||
{
|
||||
echo >&2 "ERROR: $0: $*"
|
||||
exit 1
}
|
||||
|
||||
[ -n "${SNAPCRAFT_STAGE:-}" ] ||\
|
||||
die "must be sourced from a snapcraft config file"
|
||||
|
||||
snap_yq_version=3.4.1
|
||||
|
||||
snap_common_install_yq()
|
||||
{
|
||||
export yq="${SNAPCRAFT_STAGE}/bin/yq"
|
||||
|
||||
local yq_pkg
|
||||
yq_pkg="github.com/mikefarah/yq"
|
||||
|
||||
local yq_url
|
||||
yq_url="https://${yq_pkg}/releases/download/${snap_yq_version}/yq_${goos}_${goarch}"
|
||||
curl -o "${yq}" -L "${yq_url}"
|
||||
chmod +x "${yq}"
|
||||
}
|
||||
|
||||
# Function that should be called for each snap "part" in
|
||||
# snapcraft.yaml.
|
||||
snap_common_main()
|
||||
{
|
||||
# Architecture
|
||||
arch="$(uname -m)"
|
||||
|
||||
case "${arch}" in
|
||||
aarch64)
|
||||
goarch="arm64"
|
||||
qemu_arch="${arch}"
|
||||
;;
|
||||
|
||||
ppc64le)
|
||||
goarch="ppc64le"
|
||||
qemu_arch="ppc64"
|
||||
;;
|
||||
|
||||
s390x)
|
||||
goarch="${arch}"
|
||||
qemu_arch="${arch}"
|
||||
;;
|
||||
|
||||
x86_64)
|
||||
goarch="amd64"
|
||||
qemu_arch="${arch}"
|
||||
;;
|
||||
|
||||
*) die "unsupported architecture: ${arch}" ;;
|
||||
esac
|
||||
|
||||
dpkg_arch=$(dpkg --print-architecture)
|
||||
|
||||
# golang
|
||||
#
|
||||
# We need the O/S name in golang format, but since we don't
|
||||
# know if the godeps part has run, we don't know if golang is
|
||||
# available yet, hence fall back to a standard system command.
|
||||
goos="$(go env GOOS &>/dev/null || true)"
|
||||
[ -z "$goos" ] && goos=$(uname -s|tr '[A-Z]' '[a-z]')
|
||||
|
||||
export GOROOT="${SNAPCRAFT_STAGE}"
|
||||
export GOPATH="${GOROOT}/gopath"
|
||||
export GO111MODULE="auto"
|
||||
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
export PATH="${GOPATH}/bin:${PATH}"
|
||||
|
||||
# Proxy
|
||||
export http_proxy="${http_proxy:-}"
|
||||
export https_proxy="${https_proxy:-}"
|
||||
|
||||
# Binaries
|
||||
mkdir -p "${SNAPCRAFT_STAGE}/bin"
|
||||
|
||||
export PATH="$PATH:${SNAPCRAFT_STAGE}/bin"
|
||||
|
||||
# YAML query tool
|
||||
export yq="${SNAPCRAFT_STAGE}/bin/yq"
|
||||
|
||||
# Kata paths
|
||||
export kata_dir=$(printf "%s/src/github.com/%s/%s" \
|
||||
"${GOPATH}" \
|
||||
"${SNAPCRAFT_PROJECT_NAME}" \
|
||||
"${SNAPCRAFT_PROJECT_NAME}")
|
||||
|
||||
export versions_file="${kata_dir}/versions.yaml"
|
||||
|
||||
[ -n "${yq:-}" ] && [ -x "${yq:-}" ] || snap_common_install_yq
|
||||
}
|
||||
|
||||
snap_common_main
|
||||
371
snap/snapcraft.yaml
Normal file
@@ -0,0 +1,371 @@
|
||||
name: kata-containers
|
||||
website: https://github.com/kata-containers/kata-containers
|
||||
summary: Build lightweight VMs that seamlessly plug into the containers ecosystem
|
||||
description: |
|
||||
Kata Containers is an open source project and community working to build a
|
||||
standard implementation of lightweight Virtual Machines (VMs) that feel and
|
||||
perform like containers, but provide the workload isolation and security
|
||||
advantages of VMs
|
||||
confinement: classic
|
||||
adopt-info: metadata
|
||||
base: core20
|
||||
|
||||
parts:
|
||||
metadata:
|
||||
plugin: nil
|
||||
prime:
|
||||
- -*
|
||||
build-packages:
|
||||
- git
|
||||
- git-extras
|
||||
override-pull: |
|
||||
source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
|
||||
|
||||
version="9999"
|
||||
|
||||
if echo "${GITHUB_REF:-}" | grep -q -E "^refs/tags"; then
|
||||
version=$(echo ${GITHUB_REF:-} | cut -d/ -f3)
|
||||
git checkout ${version}
|
||||
fi
|
||||
|
||||
snapcraftctl set-grade "stable"
|
||||
snapcraftctl set-version "${version}"
|
||||
|
||||
mkdir -p $(dirname ${kata_dir})
|
||||
ln -sf $(realpath "${SNAPCRAFT_STAGE}/..") ${kata_dir}
|
||||
|
||||
godeps:
|
||||
after: [metadata]
|
||||
plugin: nil
|
||||
prime:
|
||||
- -*
|
||||
build-packages:
|
||||
- curl
|
||||
override-build: |
|
||||
source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
|
||||
|
||||
# put everything in stage
|
||||
cd "${SNAPCRAFT_STAGE}"
|
||||
|
||||
version="$(${yq} r ${kata_dir}/versions.yaml languages.golang.meta.newest-version)"
|
||||
tarfile="go${version}.${goos}-${goarch}.tar.gz"
|
||||
curl -LO https://golang.org/dl/${tarfile}
|
||||
tar -xf ${tarfile} --strip-components=1
|
||||
|
||||
rustdeps:
|
||||
after: [metadata]
|
||||
plugin: nil
|
||||
prime:
|
||||
- -*
|
||||
build-packages:
|
||||
- curl
|
||||
override-build: |
|
||||
source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
|
||||
|
||||
# put everything in stage
|
||||
cd "${SNAPCRAFT_STAGE}"
|
||||
|
||||
version="$(${yq} r ${kata_dir}/versions.yaml languages.rust.meta.newest-version)"
|
||||
if ! command -v rustup > /dev/null; then
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain ${version}
|
||||
fi
|
||||
|
||||
export PATH=${PATH}:${HOME}/.cargo/bin
|
||||
rustup toolchain install ${version}
|
||||
rustup default ${version}
|
||||
if [ "${arch}" == "ppc64le" ] || [ "${arch}" == "s390x" ] ; then
|
||||
[ "${arch}" == "ppc64le" ] && arch="powerpc64le"
|
||||
rustup target add ${arch}-unknown-linux-gnu
|
||||
else
|
||||
rustup target add ${arch}-unknown-linux-musl
|
||||
$([ "$(whoami)" != "root" ] && echo sudo) ln -sf /usr/bin/g++ /bin/musl-g++
|
||||
fi
|
||||
rustup component add rustfmt
|
||||
|
||||
docker:
|
||||
after: [metadata]
|
||||
plugin: nil
|
||||
prime:
|
||||
- -*
|
||||
build-packages:
|
||||
- ca-certificates
|
||||
- containerd
|
||||
- curl
|
||||
- gnupg
|
||||
- lsb-release
|
||||
- runc
|
||||
override-build: |
|
||||
source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
|
||||
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg |\
|
||||
sudo gpg --batch --yes --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
distro_codename=$(lsb_release -cs)
|
||||
echo "deb [arch=${dpkg_arch} signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu ${distro_codename} stable" |\
|
||||
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
|
||||
|
||||
echo "Unmasking docker service"
|
||||
sudo -E systemctl unmask docker.service || true
|
||||
sudo -E systemctl unmask docker.socket || true
|
||||
echo "Adding $USER into docker group"
|
||||
sudo -E gpasswd -a $USER docker
|
||||
echo "Starting docker"
|
||||
sudo -E systemctl start docker || true
|
||||
|
||||
image:
|
||||
after: [godeps, docker, qemu, kernel]
|
||||
plugin: nil
|
||||
build-packages:
|
||||
- docker.io
|
||||
- cpio
|
||||
- git
|
||||
- iptables
|
||||
- software-properties-common
|
||||
- uidmap
|
||||
- gnupg2
|
||||
override-build: |
|
||||
source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"
|
||||
|
||||
[ "${arch}" = "ppc64le" ] || [ "${arch}" = "s390x" ] && sudo apt-get --no-install-recommends install -y protobuf-compiler
|
||||
|
||||
if [ -n "$http_proxy" ]; then
|
||||
echo "Setting proxy $http_proxy"
|
||||
sudo -E systemctl set-environment http_proxy="$http_proxy" || true
|
||||
sudo -E systemctl set-environment https_proxy="$https_proxy" || true
|
||||
fi
|
||||
|
||||
# Copy yq binary. It's used in the container
|
||||
cp -a "${yq}" "${GOPATH}/bin/"
|
||||
|
||||
cd "${kata_dir}/tools/osbuilder"
|
||||
|
||||
# build image
|
||||
export AGENT_INIT=yes
|
||||
export USE_DOCKER=1
|
||||
export DEBUG=1
|
||||
initrd_distro=$(${yq} r -X ${kata_dir}/versions.yaml assets.initrd.architecture.${arch}.name)
|
||||
image_distro=$(${yq} r -X ${kata_dir}/versions.yaml assets.image.architecture.${arch}.name)
|
||||
case "$arch" in
|
||||
x86_64)
|
||||
# In some build systems it's impossible to build a rootfs image, try with the initrd image
|
||||
sudo -E PATH=$PATH make image DISTRO="${image_distro}" || sudo -E PATH="$PATH" make initrd DISTRO="${initrd_distro}"
|
||||
;;
|
||||
|
||||
aarch64|ppc64le|s390x)
|
||||
sudo -E PATH="$PATH" make initrd DISTRO="${initrd_distro}"
|
||||
;;
|
||||
|
||||
*) die "unsupported architecture: ${arch}" ;;
|
||||
esac
|
||||
|
||||
# Install image
|
||||
kata_image_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers"
|
||||
mkdir -p "${kata_image_dir}"
|
||||
cp kata-containers*.img "${kata_image_dir}"
|
||||
|
||||
  runtime:
    after: [godeps, image, cloud-hypervisor]
    plugin: nil
    build-attributes: [no-patchelf]
    override-build: |
      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"

      cd "${kata_dir}/src/runtime"

      qemu_cmd="qemu-system-${qemu_arch}"

      # build and install runtime
      make \
        PREFIX="/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr" \
        SKIP_GO_VERSION_CHECK=1 \
        QEMUCMD="${qemu_cmd}"

      make install \
        PREFIX=/usr \
        DESTDIR="${SNAPCRAFT_PART_INSTALL}" \
        SKIP_GO_VERSION_CHECK=1 \
        QEMUCMD="${qemu_cmd}"

      if [ ! -f ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then
        sed -i -e "s|^image =.*|initrd = \"/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share/kata-containers/kata-containers-initrd.img\"|" \
          ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
      fi

  kernel:
    after: [godeps]
    plugin: nil
    build-packages:
      - libelf-dev
      - curl
      - build-essential
      - bison
      - flex
    override-build: |
      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"

      kernel_version="$(${yq} r $versions_file assets.kernel.version)"
      #Remove extra 'v'
      kernel_version="${kernel_version#v}"

      [ "${arch}" = "s390x" ] && sudo apt-get --no-install-recommends install -y libssl-dev

      cd "${kata_dir}/tools/packaging/kernel"
      kernel_dir_prefix="kata-linux-"

      # Setup and build kernel
      ./build-kernel.sh -v "${kernel_version}" -d setup
      cd ${kernel_dir_prefix}*
      make -j $(nproc ${CI:+--ignore 1}) EXTRAVERSION=".container"

      kernel_suffix="${kernel_version}.container"
      kata_kernel_dir="${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers"
      mkdir -p "${kata_kernel_dir}"

      # Install bz kernel
      make install INSTALL_PATH="${kata_kernel_dir}" EXTRAVERSION=".container" || true
      vmlinuz_name="vmlinuz-${kernel_suffix}"
      ln -sf "${vmlinuz_name}" "${kata_kernel_dir}/vmlinuz.container"

      # Install raw kernel
      vmlinux_path="vmlinux"
      [ "${arch}" = "s390x" ] && vmlinux_path="arch/s390/boot/vmlinux"
      vmlinux_name="vmlinux-${kernel_suffix}"
      cp "${vmlinux_path}" "${kata_kernel_dir}/${vmlinux_name}"
      ln -sf "${vmlinux_name}" "${kata_kernel_dir}/vmlinux.container"

  qemu:
    plugin: make
    after: [godeps]
    build-packages:
      - gcc
      - python3
      - zlib1g-dev
      - libcap-ng-dev
      - libglib2.0-dev
      - libpixman-1-dev
      - libnuma-dev
      - libltdl-dev
      - libcap-dev
      - libattr1-dev
      - libfdt-dev
      - curl
      - libcapstone-dev
      - bc
      - libblkid-dev
      - libffi-dev
      - libmount-dev
      - libseccomp-dev
      - libselinux1-dev
      - ninja-build
    override-build: |
      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"

      branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.version)"
      url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
      commit=""
      patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
      patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"

      # download source
      qemu_dir="${SNAPCRAFT_STAGE}/qemu"
      rm -rf "${qemu_dir}"
      git clone --depth 1 --branch ${branch} --single-branch ${url} "${qemu_dir}"
      cd "${qemu_dir}"
      [ -z "${commit}" ] || git checkout "${commit}"

      [ -n "$(ls -A ui/keycodemapdb)" ] || git clone --depth 1 https://github.com/qemu/keycodemapdb ui/keycodemapdb/
      [ -n "$(ls -A capstone)" ] || git clone --depth 1 https://github.com/qemu/capstone capstone

      # Apply branch patches
      [ -d "${patches_version_dir}" ] || mkdir "${patches_version_dir}"
      ${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_dir}"
      ${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_version_dir}"

      # Only x86_64 supports libpmem
      [ "${arch}" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev

      configure_hypervisor="${kata_dir}/tools/packaging/scripts/configure-hypervisor.sh"
      chmod +x "${configure_hypervisor}"
      # static build. The --prefix, --libdir, --libexecdir, --datadir arguments are
      # based on PREFIX and set by configure-hypervisor.sh
      echo "$(PREFIX=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr ${configure_hypervisor} -s kata-qemu) \
        --disable-rbd " \
        | xargs ./configure

      # Copy QEMU configurations (Kconfigs)
      case "${branch}" in
        "v5.1.0")
          cp -a "${kata_dir}"/tools/packaging/qemu/default-configs/* default-configs
          ;;

        *)
          cp -a "${kata_dir}"/tools/packaging/qemu/default-configs/* configs/devices/
          ;;
      esac

      # build and install
      make -j $(nproc ${CI:+--ignore 1})
      make install DESTDIR="${SNAPCRAFT_PART_INSTALL}"
    prime:
      - -snap/
      - -usr/bin/qemu-ga
      - -usr/bin/qemu-pr-helper
      - -usr/bin/virtfs-proxy-helper
      - -usr/include/
      - -usr/share/applications/
      - -usr/share/icons/
      - -usr/var/
      - usr/*
      - lib/*
    organize:
      # Hack: move qemu to /
      "snap/kata-containers/current/": "./"

  virtiofsd:
    plugin: nil
    after: [godeps, rustdeps, docker]
    override-build: |
      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"

      echo "INFO: Building rust version of virtiofsd"

      cd "${SNAPCRAFT_PROJECT_DIR}"
      # Clean-up build dir in case it already exists
      sudo -E NO_TTY=true make virtiofsd-tarball

      sudo install \
        --owner='root' \
        --group='root' \
        --mode=0755 \
        -D \
        --target-directory="${SNAPCRAFT_PART_INSTALL}/usr/libexec/" \
        build/virtiofsd/builddir/virtiofsd/virtiofsd

  cloud-hypervisor:
    plugin: nil
    after: [godeps, docker]
    override-build: |
      source "${SNAPCRAFT_PROJECT_DIR}/snap/local/snap-common.sh"

      if [ "${arch}" == "aarch64" ] || [ "${arch}" == "x86_64" ]; then
        cd "${SNAPCRAFT_PROJECT_DIR}"
        sudo -E NO_TTY=true make cloud-hypervisor-tarball

        tarfile="${SNAPCRAFT_PROJECT_DIR}/tools/packaging/kata-deploy/local-build/build/kata-static-cloud-hypervisor.tar.xz"
        tmpdir=$(mktemp -d)

        tar -xvJpf "${tarfile}" -C "${tmpdir}"

        install -D "${tmpdir}/opt/kata/bin/cloud-hypervisor" "${SNAPCRAFT_PART_INSTALL}/usr/bin/cloud-hypervisor"

        rm -rf "${tmpdir}"
      fi

apps:
  runtime:
    command: usr/bin/kata-runtime
  shim:
    command: usr/bin/containerd-shim-kata-v2
  collect-data:
    command: usr/bin/kata-collect-data.sh
src/agent/Cargo.lock (generated, 858 lines changed)
File diff suppressed because it is too large
@@ -10,8 +10,8 @@ oci = { path = "../libs/oci" }
rustjail = { path = "rustjail" }
protocols = { path = "../libs/protocols", features = ["async"] }
lazy_static = "1.3.0"
ttrpc = { version = "0.7.1", features = ["async"], default-features = false }
protobuf = "3.2.0"
ttrpc = { version = "0.6.0", features = ["async"], default-features = false }
protobuf = "2.27.0"
libc = "0.2.58"
nix = "0.24.2"
capctl = "0.2.0"
@@ -23,6 +23,7 @@ regex = "1.5.6"
serial_test = "0.5.1"
kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
sysinfo = "0.23.0"

# Async helpers
async-trait = "0.1.42"
@@ -30,7 +31,7 @@ async-recursion = "0.3.2"
futures = "0.3.17"

# Async runtime
tokio = { version = "1.28.1", features = ["full"] }
tokio = { version = "1.14.0", features = ["full"] }
tokio-vsock = "0.3.1"

netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
@@ -43,17 +44,15 @@ ipnetwork = "0.17.0"
logging = { path = "../libs/logging" }
slog = "2.5.2"
slog-scope = "4.1.2"
slog-term = "2.9.0"

# Redirect ttrpc log calls
slog-stdlog = "4.0.0"
log = "0.4.11"

cfg-if = "1.0.0"
prometheus = { version = "0.13.0", features = ["process"] }
procfs = "0.12.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.3.2" }
cgroups = { package = "cgroups-rs", version = "0.2.10" }

# Tracing
tracing = "0.1.26"
@@ -70,7 +69,6 @@ clap = { version = "3.0.1", features = ["derive"] }
[dev-dependencies]
tempfile = "3.1.0"
test-utils = { path = "../libs/test-utils" }
which = "4.3.0"

[workspace]
members = [
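Editor's note: these manifest changes all belong to the protobuf/ttrpc downgrade. rust-protobuf 3.x wraps optional sub-messages in MessageField and lets generated structs be built with ..Default::default(), while 2.x uses SingularPtrField/RepeatedField and exposes the bookkeeping unknown_fields and cached_size fields directly — which is why every struct literal in the hunks below gains those two initializers back. A self-contained sketch of the two construction styles (the types here are hypothetical stand-ins, not the real generated code):

// Illustration only: stand-ins for a rust-protobuf 2.x generated message, to show
// why the revert replaces `..Default::default()` with explicit
// `unknown_fields`/`cached_size` initializers.
#[derive(Default, Debug)]
struct UnknownFields; // rust-protobuf 2.x bookkeeping field (stand-in)
#[derive(Default, Debug)]
struct CachedSize; // rust-protobuf 2.x bookkeeping field (stand-in)

#[derive(Default, Debug)]
struct ThrottlingData {
    periods: u64,
    throttled_periods: u64,
    throttled_time: u64,
    unknown_fields: UnknownFields,
    cached_size: CachedSize,
}

fn main() {
    // rust-protobuf 3.x style: bookkeeping fields are gone from the public API,
    // so struct-update syntax is enough.
    let with_default = ThrottlingData {
        periods: 1,
        ..Default::default()
    };

    // rust-protobuf 2.x style: every generated field, including the bookkeeping
    // ones, is spelled out — this is what the reverted code below goes back to.
    let explicit = ThrottlingData {
        periods: 1,
        throttled_periods: 0,
        throttled_time: 0,
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    };
    println!("{:?} {:?}", with_default, explicit);
}

Nothing else about the call sites changes; once the field wrappers are swapped, the revert is mechanical.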
@@ -26,19 +26,13 @@ export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
EXTRA_RUSTFEATURES :=

##VAR SECCOMP=yes|no define if agent enables seccomp feature
SECCOMP ?= yes
SECCOMP := yes

# Enable seccomp feature of rust build
ifeq ($(SECCOMP),yes)
override EXTRA_RUSTFEATURES += seccomp
endif

include ../../utils.mk

ifeq ($(ARCH), ppc64le)
override ARCH = powerpc64le
endif

##VAR STANDARD_OCI_RUNTIME=yes|no define if agent enables standard oci runtime feature
STANDARD_OCI_RUNTIME := no

@@ -51,10 +45,12 @@ ifneq ($(EXTRA_RUSTFEATURES),)
override EXTRA_RUSTFEATURES := --features "$(EXTRA_RUSTFEATURES)"
endif

include ../../utils.mk

TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)

##VAR DESTDIR=<path> is a directory prepended to each installed target file
DESTDIR ?=
DESTDIR :=
##VAR BINDIR=<path> is a directory for installing executable programs
BINDIR := /usr/bin

@@ -111,8 +107,6 @@ endef
##TARGET default: build code
default: $(TARGET) show-header

static-checks-build: $(GENERATED_CODE)

$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)

$(TARGET_PATH): show-summary
@@ -140,7 +134,7 @@ vendor:


#TARGET test: run cargo tests
test: $(GENERATED_FILES)
test:
	@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture

##TARGET check: run test
@@ -11,32 +11,28 @@ serde_json = "1.0.39"
serde_derive = "1.0.91"
oci = { path = "../../libs/oci" }
protocols = { path ="../../libs/protocols" }
kata-sys-util = { path = "../../libs/kata-sys-util" }
caps = "0.5.0"
nix = "0.24.2"
scopeguard = "1.0.0"
capctl = "0.2.0"
lazy_static = "1.3.0"
libc = "0.2.58"
protobuf = "3.2.0"
protobuf = "2.27.0"
slog = "2.5.2"
slog-scope = "4.1.2"
scan_fmt = "0.2.6"
regex = "1.5.6"
path-absolutize = "1.2.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.3.2" }
cgroups = { package = "cgroups-rs", version = "0.2.10" }
rlimit = "0.5.3"
cfg-if = "0.1.0"

tokio = { version = "1.28.1", features = ["sync", "io-util", "process", "time", "macros", "rt"] }
tokio = { version = "1.2.0", features = ["sync", "io-util", "process", "time", "macros", "rt"] }
futures = "0.3.17"
async-trait = "0.1.31"
inotify = "0.9.2"
libseccomp = { version = "0.3.0", optional = true }
zbus = "2.3.0"
bit-vec= "0.6.3"
xattr = "0.2.3"
libseccomp = { version = "0.2.3", optional = true }

[dev-dependencies]
serial_test = "0.5.0"
@@ -27,28 +27,29 @@ use oci::{
    LinuxNetwork, LinuxPids, LinuxResources,
};

use protobuf::MessageField;
use protobuf::{CachedSize, RepeatedField, SingularPtrField, UnknownFields};
use protocols::agent::{
    BlkioStats, BlkioStatsEntry, CgroupStats, CpuStats, CpuUsage, HugetlbStats, MemoryData,
    MemoryStats, PidsStats, ThrottlingData,
};
use std::any::Any;
use std::collections::HashMap;
use std::fs;
use std::path::Path;

const GUEST_CPUS_PATH: &str = "/sys/devices/system/cpu/online";

// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
    slog_scope::logger().new(o!("subsystem" => "cgroups"))
// Convenience macro to obtain the scope logger
macro_rules! sl {
    () => {
        slog_scope::logger().new(o!("subsystem" => "cgroups"))
    };
}

macro_rules! get_controller_or_return_singular_none {
    ($cg:ident) => {
        match $cg.controller_of() {
            Some(c) => c,
            None => return MessageField::none(),
            None => return SingularPtrField::none(),
        }
    };
}
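Editor's note: functionally the two helpers in this hunk are interchangeable — each call site builds a fresh child logger tagged with the subsystem — and the revert simply restores the macro spelling (sl!()) used by the rest of the older agent code. A minimal sketch of both forms side by side, assuming the slog and slog-scope crates as in the surrounding code (and that a global logger has been installed via slog_scope::set_global_logger):

use slog::{info, o};

// Function form (the newer code): an ordinary item, called as sl().
fn sl() -> slog::Logger {
    slog_scope::logger().new(o!("subsystem" => "cgroups"))
}

// Macro form (what the revert restores): expands to the same expression, called as sl!().
macro_rules! sl {
    () => {
        slog_scope::logger().new(o!("subsystem" => "cgroups"))
    };
}

fn demo() {
    info!(sl(), "via the function");
    info!(sl!(), "via the macro");
}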
@@ -74,13 +75,13 @@ macro_rules! set_resource {

impl CgroupManager for Manager {
    fn apply(&self, pid: pid_t) -> Result<()> {
        self.cgroup.add_task_by_tgid(CgroupPid::from(pid as u64))?;
        self.cgroup.add_task(CgroupPid::from(pid as u64))?;
        Ok(())
    }

    fn set(&self, r: &LinuxResources, update: bool) -> Result<()> {
        info!(
            sl(),
            sl!(),
            "cgroup manager set resources for container. Resources input {:?}", r
        );

@@ -118,7 +119,7 @@ impl CgroupManager for Manager {

        // set devices resources
        set_devices_resources(&self.cgroup, &r.devices, res);
        info!(sl(), "resources after processed {:?}", res);
        info!(sl!(), "resources after processed {:?}", res);

        // apply resources
        self.cgroup.apply(res)?;
@@ -132,10 +133,11 @@ impl CgroupManager for Manager {

        let throttling_data = get_cpu_stats(&self.cgroup);

        let cpu_stats = MessageField::some(CpuStats {
        let cpu_stats = SingularPtrField::some(CpuStats {
            cpu_usage,
            throttling_data,
            ..Default::default()
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        });

        // Memorystats
@@ -157,7 +159,8 @@ impl CgroupManager for Manager {
            pids_stats,
            blkio_stats,
            hugetlb_stats,
            ..Default::default()
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        })
    }

@@ -190,83 +193,6 @@ impl CgroupManager for Manager {

        Ok(result)
    }

    fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
        if guest_cpuset.is_empty() {
            return Ok(());
        }
        info!(sl(), "update_cpuset_path to: {}", guest_cpuset);

        let h = cgroups::hierarchies::auto();
        let root_cg = h.root_control_group();

        let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
        let path = root_cpuset_controller.path();
        let root_path = Path::new(path);
        info!(sl(), "root cpuset path: {:?}", &path);

        let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
        let path = container_cpuset_controller.path();
        let container_path = Path::new(path);
        info!(sl(), "container cpuset path: {:?}", &path);

        let mut paths = vec![];
        for ancestor in container_path.ancestors() {
            if ancestor == root_path {
                break;
            }
            paths.push(ancestor);
        }
        info!(sl(), "parent paths to update cpuset: {:?}", &paths);

        let mut i = paths.len();
        loop {
            if i == 0 {
                break;
            }
            i -= 1;

            // remove cgroup root from path
            let r_path = &paths[i]
                .to_str()
                .unwrap()
                .trim_start_matches(root_path.to_str().unwrap());
            info!(sl(), "updating cpuset for parent path {:?}", &r_path);
            let cg = new_cgroup(cgroups::hierarchies::auto(), r_path)?;
            let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
            cpuset_controller.set_cpus(guest_cpuset)?;
        }

        if !container_cpuset.is_empty() {
            info!(
                sl(),
                "updating cpuset for container path: {:?} cpuset: {}",
                &container_path,
                container_cpuset
            );
            container_cpuset_controller.set_cpus(container_cpuset)?;
        }

        Ok(())
    }

    fn get_cgroup_path(&self, cg: &str) -> Result<String> {
        if cgroups::hierarchies::is_cgroup2_unified_mode() {
            let cg_path = format!("/sys/fs/cgroup/{}", self.cpath);
            return Ok(cg_path);
        }

        // for cgroup v1
        Ok(self.paths.get(cg).map(|s| s.to_string()).unwrap())
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Ok(self)
    }

    fn name(&self) -> &str {
        "cgroupfs"
    }
}

fn set_network_resources(
@@ -274,7 +200,7 @@ fn set_network_resources(
    network: &LinuxNetwork,
    res: &mut cgroups::Resources,
) {
    info!(sl(), "cgroup manager set network");
    info!(sl!(), "cgroup manager set network");

    // set classid
    // description can be found at https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/net_cls.html
@@ -301,7 +227,7 @@ fn set_devices_resources(
    device_resources: &[LinuxDeviceCgroup],
    res: &mut cgroups::Resources,
) {
    info!(sl(), "cgroup manager set devices");
    info!(sl!(), "cgroup manager set devices");
    let mut devices = vec![];

    for d in device_resources.iter() {
@@ -326,28 +252,19 @@ fn set_devices_resources(
}

fn set_hugepages_resources(
    cg: &cgroups::Cgroup,
    _cg: &cgroups::Cgroup,
    hugepage_limits: &[LinuxHugepageLimit],
    res: &mut cgroups::Resources,
) {
    info!(sl(), "cgroup manager set hugepage");
    info!(sl!(), "cgroup manager set hugepage");
    let mut limits = vec![];
    let hugetlb_controller = cg.controller_of::<HugeTlbController>();

    for l in hugepage_limits.iter() {
        if hugetlb_controller.is_some() && hugetlb_controller.unwrap().size_supported(&l.page_size)
        {
            let hr = HugePageResource {
                size: l.page_size.clone(),
                limit: l.limit,
            };
            limits.push(hr);
        } else {
            warn!(
                sl(),
                "{} page size support cannot be verified, dropping requested limit", l.page_size
            );
        }
        let hr = HugePageResource {
            size: l.page_size.clone(),
            limit: l.limit,
        };
        limits.push(hr);
    }
    res.hugepages.limits = limits;
}
@@ -357,7 +274,7 @@ fn set_block_io_resources(
    blkio: &LinuxBlockIo,
    res: &mut cgroups::Resources,
) {
    info!(sl(), "cgroup manager set block io");
    info!(sl!(), "cgroup manager set block io");

    res.blkio.weight = blkio.weight;
    res.blkio.leaf_weight = blkio.leaf_weight;
@@ -385,13 +302,13 @@ fn set_block_io_resources(
}

fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCpu) -> Result<()> {
    info!(sl(), "cgroup manager set cpu");
    info!(sl!(), "cgroup manager set cpu");

    let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();

    if !cpu.cpus.is_empty() {
        if let Err(e) = cpuset_controller.set_cpus(&cpu.cpus) {
            warn!(sl(), "write cpuset failed: {:?}", e);
            warn!(sl!(), "write cpuset failed: {:?}", e);
        }
    }

@@ -422,7 +339,7 @@ fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCpu) -> Result<()> {
}

fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool) -> Result<()> {
    info!(sl(), "cgroup manager set memory");
    info!(sl!(), "cgroup manager set memory");
    let mem_controller: &MemController = cg.controller_of().unwrap();

    if !update {
@@ -442,14 +359,14 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
    let memstat = get_memory_stats(cg)
        .into_option()
        .ok_or_else(|| anyhow!("failed to get the cgroup memory stats"))?;
    let memusage = memstat.usage();
    let memusage = memstat.get_usage();

    // When update memory limit, the kernel would check the current memory limit
    // set against the new swap setting, if the current memory limit is large than
    // the new swap, then set limit first, otherwise the kernel would complain and
    // refused to set; on the other hand, if the current memory limit is smaller than
    // the new swap, then we should set the swap first and then set the memor limit.
    if swap == -1 || memusage.limit() < swap as u64 {
    if swap == -1 || memusage.get_limit() < swap as u64 {
        mem_controller.set_memswap_limit(swap)?;
        set_resource!(mem_controller, set_limit, memory, limit);
    } else {
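Editor's note: the comment in the hunk above describes a real cgroup v1 ordering constraint — the kernel rejects a memory limit larger than the current memory+swap limit (and vice versa) — so the two writes must be ordered by comparing the new swap value against the current limit. A stand-alone sketch of just that decision, with hypothetical setter callbacks standing in for the MemController calls:

// Sketch of the ordering rule from the comment above. `set_limit`/`set_swap`
// are hypothetical stand-ins for mem_controller.set_limit / set_memswap_limit;
// -1 means "unlimited swap".
fn apply_limits<L, S>(current_limit: u64, new_limit: i64, new_swap: i64, mut set_limit: L, mut set_swap: S)
where
    L: FnMut(i64),
    S: FnMut(i64),
{
    if new_swap == -1 || current_limit < new_swap as u64 {
        // Swap headroom grows (or becomes unlimited): raise memsw first, then the limit.
        set_swap(new_swap);
        set_limit(new_limit);
    } else {
        // New swap is at or below the current limit: shrink the limit first, then memsw.
        set_limit(new_limit);
        set_swap(new_swap);
    }
}

fn main() {
    let (mut limit, mut swap) = (0i64, 0i64);
    // Current limit 512 MiB, new limit 256 MiB, new swap 384 MiB: limit is written first.
    apply_limits(512 << 20, 256 << 20, 384 << 20, |v| limit = v, |v| swap = v);
    assert_eq!((limit, swap), (256 << 20, 384 << 20));
}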
@@ -491,7 +408,7 @@
}

fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
    info!(sl(), "cgroup manager set pids");
    info!(sl!(), "cgroup manager set pids");
    let pid_controller: &PidController = cg.controller_of().unwrap();
    let v = if pids.limit > 0 {
        MaxValue::Value(pids.limit)
@@ -653,20 +570,21 @@ lazy_static! {
    };
}

fn get_cpu_stats(cg: &cgroups::Cgroup) -> MessageField<ThrottlingData> {
fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
    let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
    let stat = cpu_controller.cpu().stat;
    let h = lines_to_map(&stat);

    MessageField::some(ThrottlingData {
    SingularPtrField::some(ThrottlingData {
        periods: *h.get("nr_periods").unwrap_or(&0),
        throttled_periods: *h.get("nr_throttled").unwrap_or(&0),
        throttled_time: *h.get("throttled_time").unwrap_or(&0),
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    })
}

fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> MessageField<CpuUsage> {
fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
    if let Some(cpuacct_controller) = cg.controller_of::<CpuAcctController>() {
        let cpuacct = cpuacct_controller.cpuacct();

@@ -680,12 +598,24 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> MessageField<CpuUsage> {

        let percpu_usage = line_to_vec(&cpuacct.usage_percpu);

        return MessageField::some(CpuUsage {
        return SingularPtrField::some(CpuUsage {
            total_usage,
            percpu_usage,
            usage_in_kernelmode,
            usage_in_usermode,
            ..Default::default()
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        });
    }

    if cg.v2() {
        return SingularPtrField::some(CpuUsage {
            total_usage: 0,
            percpu_usage: vec![],
            usage_in_kernelmode: 0,
            usage_in_usermode: 0,
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        });
    }

@@ -698,16 +628,17 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> MessageField<CpuUsage> {
    let total_usage = *h.get("usage_usec").unwrap_or(&0);
    let percpu_usage = vec![];

    MessageField::some(CpuUsage {
    SingularPtrField::some(CpuUsage {
        total_usage,
        percpu_usage,
        usage_in_kernelmode,
        usage_in_usermode,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    })
}

fn get_memory_stats(cg: &cgroups::Cgroup) -> MessageField<MemoryStats> {
fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
    let memory_controller: &MemController = get_controller_or_return_singular_none!(cg);

    // cache from memory stat
@@ -718,49 +649,53 @@
    let value = memory.use_hierarchy;
    let use_hierarchy = value == 1;

    // get memory data
    let usage = MessageField::some(MemoryData {
    // gte memory datas
    let usage = SingularPtrField::some(MemoryData {
        usage: memory.usage_in_bytes,
        max_usage: memory.max_usage_in_bytes,
        failcnt: memory.fail_cnt,
        limit: memory.limit_in_bytes as u64,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    });

    // get swap usage
    let memswap = memory_controller.memswap();

    let swap_usage = MessageField::some(MemoryData {
    let swap_usage = SingularPtrField::some(MemoryData {
        usage: memswap.usage_in_bytes,
        max_usage: memswap.max_usage_in_bytes,
        failcnt: memswap.fail_cnt,
        limit: memswap.limit_in_bytes as u64,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    });

    // get kernel usage
    let kmem_stat = memory_controller.kmem_stat();

    let kernel_usage = MessageField::some(MemoryData {
    let kernel_usage = SingularPtrField::some(MemoryData {
        usage: kmem_stat.usage_in_bytes,
        max_usage: kmem_stat.max_usage_in_bytes,
        failcnt: kmem_stat.fail_cnt,
        limit: kmem_stat.limit_in_bytes as u64,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    });

    MessageField::some(MemoryStats {
    SingularPtrField::some(MemoryStats {
        cache,
        usage,
        swap_usage,
        kernel_usage,
        use_hierarchy,
        stats: memory.stat.raw,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    })
}

fn get_pids_stats(cg: &cgroups::Cgroup) -> MessageField<PidsStats> {
fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
    let pid_controller: &PidController = get_controller_or_return_singular_none!(cg);

    let current = pid_controller.get_pid_current().unwrap_or(0);
@@ -774,10 +709,11 @@
        },
    } as u64;

    MessageField::some(PidsStats {
    SingularPtrField::some(PidsStats {
        current,
        limit,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    })
}

@@ -813,8 +749,8 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8
Total 0
*/

fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> Vec<BlkioStatsEntry> {
    let mut m = Vec::new();
fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStatsEntry> {
    let mut m = RepeatedField::new();
    if blkiodata.is_empty() {
        return m;
    }
@@ -827,15 +763,16 @@ fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> Vec<BlkioStatsEntry> {
            minor: d.minor as u64,
            op: op.clone(),
            value: d.data,
            ..Default::default()
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        });
    }

    m
}

fn get_blkio_stat_ioservice(services: &[IoService]) -> Vec<BlkioStatsEntry> {
    let mut m = Vec::new();
fn get_blkio_stat_ioservice(services: &[IoService]) -> RepeatedField<BlkioStatsEntry> {
    let mut m = RepeatedField::new();

    if services.is_empty() {
        return m;
@@ -859,16 +796,17 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
        minor: minor as u64,
        op: op.to_string(),
        value,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    }
}

fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> MessageField<BlkioStats> {
fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
    let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
    let blkio = blkio_controller.blkio();

    let mut resp = BlkioStats::new();
    let mut blkio_stats = Vec::new();
    let mut blkio_stats = RepeatedField::new();

    let stat = blkio.io_stat;
    for s in stat {
@@ -884,10 +822,10 @@

    resp.io_service_bytes_recursive = blkio_stats;

    MessageField::some(resp)
    SingularPtrField::some(resp)
}

fn get_blkio_stats(cg: &cgroups::Cgroup) -> MessageField<BlkioStats> {
fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
    if cg.v2() {
        return get_blkio_stats_v2(cg);
    }
@@ -920,7 +858,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> MessageField<BlkioStats> {
        m.sectors_recursive = get_blkio_stat_blkiodata(&blkio.sectors_recursive);
    }

    MessageField::some(m)
    SingularPtrField::some(m)
}

fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap<String, HugetlbStats> {
@@ -944,7 +882,8 @@
        usage,
        max_usage,
        failcnt,
        ..Default::default()
        unknown_fields: UnknownFields::default(),
        cached_size: CachedSize::default(),
    },
    );
}
@@ -960,7 +899,7 @@ pub fn get_paths() -> Result<HashMap<String, String>> {
    for l in fs::read_to_string(PATHS)?.lines() {
        let fl: Vec<&str> = l.split(':').collect();
        if fl.len() != 3 {
            info!(sl(), "Corrupted cgroup data!");
            info!(sl!(), "Corrupted cgroup data!");
            continue;
        }

@@ -981,7 +920,7 @@ pub fn get_mounts(paths: &HashMap<String, String>) -> Result<HashMap<String, Str
    let post: Vec<&str> = p[1].split(' ').collect();

    if post.len() != 3 {
        warn!(sl(), "can't parse {} line {:?}", MOUNTS, l);
        warn!(sl!(), "can't parse {} line {:?}", MOUNTS, l);
        continue;
    }

@@ -1001,9 +940,9 @@
    Ok(m)
}

fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Result<Cgroup> {
fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Cgroup {
    let valid_path = path.trim_start_matches('/').to_string();
    cgroups::Cgroup::new(h, valid_path.as_str()).map_err(anyhow::Error::from)
    cgroups::Cgroup::new(h, valid_path.as_str())
}

impl Manager {
@@ -1025,16 +964,83 @@ impl Manager {
            m.insert(key.to_string(), p);
        }

        let cg = new_cgroup(cgroups::hierarchies::auto(), cpath)?;

        Ok(Self {
            paths: m,
            mounts,
            // rels: paths,
            cpath: cpath.to_string(),
            cgroup: cg,
            cgroup: new_cgroup(cgroups::hierarchies::auto(), cpath),
        })
    }

    pub fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
        if guest_cpuset.is_empty() {
            return Ok(());
        }
        info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);

        let h = cgroups::hierarchies::auto();
        let root_cg = h.root_control_group();

        let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
        let path = root_cpuset_controller.path();
        let root_path = Path::new(path);
        info!(sl!(), "root cpuset path: {:?}", &path);

        let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
        let path = container_cpuset_controller.path();
        let container_path = Path::new(path);
        info!(sl!(), "container cpuset path: {:?}", &path);

        let mut paths = vec![];
        for ancestor in container_path.ancestors() {
            if ancestor == root_path {
                break;
            }
            paths.push(ancestor);
        }
        info!(sl!(), "parent paths to update cpuset: {:?}", &paths);

        let mut i = paths.len();
        loop {
            if i == 0 {
                break;
            }
            i -= 1;

            // remove cgroup root from path
            let r_path = &paths[i]
                .to_str()
                .unwrap()
                .trim_start_matches(root_path.to_str().unwrap());
            info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
            let cg = new_cgroup(cgroups::hierarchies::auto(), r_path);
            let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
            cpuset_controller.set_cpus(guest_cpuset)?;
        }

        if !container_cpuset.is_empty() {
            info!(
                sl!(),
                "updating cpuset for container path: {:?} cpuset: {}",
                &container_path,
                container_cpuset
            );
            container_cpuset_controller.set_cpus(container_cpuset)?;
        }

        Ok(())
    }

    pub fn get_cg_path(&self, cg: &str) -> Option<String> {
        if cgroups::hierarchies::is_cgroup2_unified_mode() {
            let cg_path = format!("/sys/fs/cgroup/{}", self.cpath);
            return Some(cg_path);
        }

        // for cgroup v1
        self.paths.get(cg).map(|s| s.to_string())
    }
}

// get the guest's online cpus.
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//

use protobuf::MessageField;
use protobuf::{CachedSize, SingularPtrField, UnknownFields};

use crate::cgroups::Manager as CgroupManager;
use crate::protocols::agent::{BlkioStats, CgroupStats, CpuStats, MemoryStats, PidsStats};
@@ -11,7 +11,6 @@ use anyhow::Result;
use cgroups::freezer::FreezerState;
use libc::{self, pid_t};
use oci::LinuxResources;
use std::any::Any;
use std::collections::HashMap;
use std::string::String;

@@ -33,12 +32,13 @@ impl CgroupManager for Manager {

    fn get_stats(&self) -> Result<CgroupStats> {
        Ok(CgroupStats {
            cpu_stats: MessageField::some(CpuStats::default()),
            memory_stats: MessageField::some(MemoryStats::new()),
            pids_stats: MessageField::some(PidsStats::new()),
            blkio_stats: MessageField::some(BlkioStats::new()),
            cpu_stats: SingularPtrField::some(CpuStats::default()),
            memory_stats: SingularPtrField::some(MemoryStats::new()),
            pids_stats: SingularPtrField::some(PidsStats::new()),
            blkio_stats: SingularPtrField::some(BlkioStats::new()),
            hugetlb_stats: HashMap::new(),
            ..Default::default()
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        })
    }

@@ -53,22 +53,6 @@ impl CgroupManager for Manager {
    fn get_pids(&self) -> Result<Vec<pid_t>> {
        Ok(Vec::new())
    }

    fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
        Ok(())
    }

    fn get_cgroup_path(&self, _: &str) -> Result<String> {
        Ok("".to_string())
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Ok(self)
    }

    fn name(&self) -> &str {
        "mock"
    }
}

impl Manager {
@@ -79,4 +63,12 @@ impl Manager {
            cpath: cpath.to_string(),
        })
    }

    pub fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
        Ok(())
    }

    pub fn get_cg_path(&self, _: &str) -> Option<String> {
        Some("".to_string())
    }
}

@@ -4,10 +4,8 @@
//

use anyhow::{anyhow, Result};
use core::fmt::Debug;
use oci::LinuxResources;
use protocols::agent::CgroupStats;
use std::any::Any;

use cgroups::freezer::FreezerState;

@@ -40,24 +38,4 @@ pub trait Manager {
    fn set(&self, _container: &LinuxResources, _update: bool) -> Result<()> {
        Err(anyhow!("not supported!"))
    }

    fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
        Err(anyhow!("not supported!"))
    }

    fn get_cgroup_path(&self, _: &str) -> Result<String> {
        Err(anyhow!("not supported!"))
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Err(anyhow!("not supported!"))
    }

    fn name(&self) -> &str;
}

impl Debug for dyn Manager + Send + Sync {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", self.name())
    }
}

@@ -16,9 +16,11 @@ use inotify::{Inotify, WatchMask};
use tokio::io::AsyncReadExt;
use tokio::sync::mpsc::{channel, Receiver};

// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
    slog_scope::logger().new(o!("subsystem" => "cgroups_notifier"))
// Convenience macro to obtain the scope logger
macro_rules! sl {
    () => {
        slog_scope::logger().new(o!("subsystem" => "cgroups_notifier"))
    };
}

pub async fn notify_oom(cid: &str, cg_dir: String) -> Result<Receiver<String>> {
@@ -36,7 +38,7 @@ pub async fn notify_oom(cid: &str, cg_dir: String) -> Result<Receiver<String>> {
fn get_value_from_cgroup(path: &Path, key: &str) -> Result<i64> {
    let content = fs::read_to_string(path)?;
    info!(
        sl(),
        sl!(),
        "get_value_from_cgroup file: {:?}, content: {}", &path, &content
    );

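Editor's note: get_value_from_cgroup above, used by the v2 path to read the oom_kill counter, boils down to scanning a flat "key value" file such as memory.events. A minimal stand-alone sketch of that parsing step (the helper name here is illustrative; the file format is the kernel's):

// Sketch: pull one counter out of a flat "key value" cgroup file, e.g.
// memory.events containing lines like "oom 0" and "oom_kill 1".
fn value_for_key(content: &str, key: &str) -> Option<i64> {
    content
        .lines()
        .filter_map(|line| {
            let mut parts = line.split_whitespace();
            Some((parts.next()?, parts.next()?))
        })
        .find(|(k, _)| *k == key)
        .and_then(|(_, v)| v.parse().ok())
}

fn main() {
    let memory_events = "low 0\nhigh 0\nmax 0\noom 0\noom_kill 2\n";
    assert_eq!(value_for_key(memory_events, "oom_kill"), Some(2));
}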
@@ -65,11 +67,11 @@ async fn register_memory_event_v2(
    let event_control_path = Path::new(&cg_dir).join(memory_event_name);
    let cgroup_event_control_path = Path::new(&cg_dir).join(cgroup_event_name);
    info!(
        sl(),
        sl!(),
        "register_memory_event_v2 event_control_path: {:?}", &event_control_path
    );
    info!(
        sl(),
        sl!(),
        "register_memory_event_v2 cgroup_event_control_path: {:?}", &cgroup_event_control_path
    );

@@ -80,8 +82,8 @@ async fn register_memory_event_v2(
    // Because no `unix.IN_DELETE|unix.IN_DELETE_SELF` event for cgroup file system, so watching all process exited
    let cg_wd = inotify.add_watch(&cgroup_event_control_path, WatchMask::MODIFY)?;

    info!(sl(), "ev_wd: {:?}", ev_wd);
    info!(sl(), "cg_wd: {:?}", cg_wd);
    info!(sl!(), "ev_wd: {:?}", ev_wd);
    info!(sl!(), "cg_wd: {:?}", cg_wd);

    let (sender, receiver) = channel(100);
    let containere_id = containere_id.to_string();
@@ -95,17 +97,17 @@ async fn register_memory_event_v2(
    while let Some(event_or_error) = stream.next().await {
        let event = event_or_error.unwrap();
        info!(
            sl(),
            sl!(),
            "container[{}] get event for container: {:?}", &containere_id, &event
        );
        // info!("is1: {}", event.wd == wd1);
        info!(sl(), "event.wd: {:?}", event.wd);
        info!(sl!(), "event.wd: {:?}", event.wd);

        if event.wd == ev_wd {
            let oom = get_value_from_cgroup(&event_control_path, "oom_kill");
            if oom.unwrap_or(0) > 0 {
                let _ = sender.send(containere_id.clone()).await.map_err(|e| {
                    error!(sl(), "send containere_id failed, error: {:?}", e);
                    error!(sl!(), "send containere_id failed, error: {:?}", e);
                });
                return;
            }
@@ -169,13 +171,13 @@ async fn register_memory_event(
    let mut buf = [0u8; 8];
    match eventfd_stream.read(&mut buf).await {
        Err(err) => {
            warn!(sl(), "failed to read from eventfd: {:?}", err);
            warn!(sl!(), "failed to read from eventfd: {:?}", err);
            return;
        }
        Ok(_) => {
            let content = fs::read_to_string(path.clone());
            info!(
                sl(),
                sl!(),
                "cgroup event for container: {}, path: {:?}, content: {:?}",
                &containere_id,
                &path,
@@ -191,7 +193,7 @@ async fn register_memory_event(
    }

    let _ = sender.send(containere_id.clone()).await.map_err(|e| {
        error!(sl(), "send containere_id failed, error: {:?}", e);
        error!(sl!(), "send containere_id failed, error: {:?}", e);
    });
}
});

src/agent/rustjail/src/cgroups/systemd.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::cgroups::Manager as CgroupManager;

pub struct Manager {}

impl CgroupManager for Manager {}
@@ -1,95 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::{anyhow, Result};

use super::common::{DEFAULT_SLICE, SCOPE_SUFFIX, SLICE_SUFFIX};
use std::string::String;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CgroupsPath {
    pub slice: String,
    pub prefix: String,
    pub name: String,
}

impl CgroupsPath {
    pub fn new(cgroups_path_str: &str) -> Result<Self> {
        let path_vec: Vec<&str> = cgroups_path_str.split(':').collect();
        if path_vec.len() != 3 {
            return Err(anyhow!("invalid cpath: {:?}", cgroups_path_str));
        }

        Ok(CgroupsPath {
            slice: if path_vec[0].is_empty() {
                DEFAULT_SLICE.to_string()
            } else {
                path_vec[0].to_owned()
            },
            prefix: path_vec[1].to_owned(),
            name: path_vec[2].to_owned(),
        })
    }

    // ref: https://github.com/opencontainers/runc/blob/main/docs/systemd.md
    // return: (parent_slice, unit_name)
    pub fn parse(&self) -> Result<(String, String)> {
        Ok((
            parse_parent(self.slice.to_owned())?,
            get_unit_name(self.prefix.to_owned(), self.name.to_owned()),
        ))
    }
}

fn parse_parent(slice: String) -> Result<String> {
    if !slice.ends_with(SLICE_SUFFIX) || slice.contains('/') {
        return Err(anyhow!("invalid slice name: {}", slice));
    } else if slice == "-.slice" {
        return Ok(String::new());
    }

    let mut slice_path = String::new();
    let mut prefix = String::new();
    for subslice in slice.trim_end_matches(SLICE_SUFFIX).split('-') {
        if subslice.is_empty() {
            return Err(anyhow!("invalid slice name: {}", slice));
        }
        slice_path = format!("{}/{}{}{}", slice_path, prefix, subslice, SLICE_SUFFIX);
        prefix = format!("{}{}-", prefix, subslice);
    }
    slice_path.remove(0);
    Ok(slice_path)
}

fn get_unit_name(prefix: String, name: String) -> String {
    if name.ends_with(SLICE_SUFFIX) {
        name
    } else if prefix.is_empty() {
        format!("{}{}", name, SCOPE_SUFFIX)
    } else {
        format!("{}-{}{}", prefix, name, SCOPE_SUFFIX)
    }
}

#[cfg(test)]
mod tests {
    use super::CgroupsPath;

    #[test]
    fn test_cgroup_path_parse() {
        let slice = "system.slice";
        let prefix = "kata_agent";
        let name = "123";
        let cgroups_path =
            CgroupsPath::new(format!("{}:{}:{}", slice, prefix, name).as_str()).unwrap();
        assert_eq!(slice, cgroups_path.slice.as_str());
        assert_eq!(prefix, cgroups_path.prefix.as_str());
        assert_eq!(name, cgroups_path.name.as_str());

        let (parent_slice, unit_name) = cgroups_path.parse().unwrap();
        assert_eq!(format!("{}", slice), parent_slice);
        assert_eq!(format!("{}-{}.scope", prefix, name), unit_name);
    }
}
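Editor's note: the deleted module above implements the systemd naming convention from the runc document it references — a slice name encodes its ancestry with dashes, each dash-separated prefix becoming a parent .slice directory, and <slice>:<prefix>:<name> maps to the scope unit <prefix>-<name>.scope. A small worked restatement of parse_parent's expansion, for illustration:

// Worked example of the expansion shown above: "a-b-c.slice" becomes
// "a.slice/a-b.slice/a-b-c.slice". This restates parse_parent's loop
// without the module's error handling, purely for illustration.
fn main() {
    let slice = "a-b-c.slice";
    let mut path = String::new();
    let mut prefix = String::new();
    for sub in slice.trim_end_matches(".slice").split('-') {
        path = format!("{}/{}{}{}", path, prefix, sub, ".slice");
        prefix = format!("{}{}-", prefix, sub);
    }
    path.remove(0); // drop the leading '/'
    assert_eq!(path, "a.slice/a-b.slice/a-b-c.slice");
    println!("{}", path);
}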
Some files were not shown because too many files have changed in this diff.