mirror of
https://github.com/kata-containers/kata-containers.git
synced 2025-06-24 22:43:05 +00:00
Compare commits
No commits in common. "main" and "3.14.0" have entirely different histories.
2
.github/actionlint.yaml
vendored
2
.github/actionlint.yaml
vendored
@ -7,7 +7,6 @@
|
||||
self-hosted-runner:
|
||||
# Labels of self-hosted runner that linter should ignore
|
||||
labels:
|
||||
- arm64-k8s
|
||||
- ubuntu-22.04-arm
|
||||
- garm-ubuntu-2004
|
||||
- garm-ubuntu-2004-smaller
|
||||
@ -18,7 +17,6 @@ self-hosted-runner:
|
||||
- k8s-ppc64le
|
||||
- metrics
|
||||
- ppc64le
|
||||
- riscv-builder
|
||||
- sev
|
||||
- sev-snp
|
||||
- s390x
|
||||
|
@ -21,7 +21,7 @@ runs:
|
||||
override: true
|
||||
|
||||
- name: Cache
|
||||
uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
|
||||
uses: Swatinem/rust-cache@v2
|
||||
|
||||
- name: Install Cargo deny
|
||||
shell: bash
|
||||
|
93
.github/dependabot.yml
vendored
93
.github/dependabot.yml
vendored
@ -1,93 +0,0 @@
|
||||
---
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "cargo"
|
||||
directories:
|
||||
- "/src/agent"
|
||||
- "/src/dragonball"
|
||||
- "/src/libs"
|
||||
- "/src/mem-agent"
|
||||
- "/src/mem-agent/example"
|
||||
- "/src/runtime-rs"
|
||||
- "/src/tools/agent-ctl"
|
||||
- "/src/tools/genpolicy"
|
||||
- "/src/tools/kata-ctl"
|
||||
- "/src/tools/runk"
|
||||
- "/src/tools/trace-forwarder"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
ignore:
|
||||
# rust-vmm repos might cause incompatibilities on patch versions, so
|
||||
# lets handle them manually for now.
|
||||
- dependency-name: "event-manager"
|
||||
- dependency-name: "kvm-bindings"
|
||||
- dependency-name: "kvm-ioctls"
|
||||
- dependency-name: "linux-loader"
|
||||
- dependency-name: "seccompiler"
|
||||
- dependency-name: "vfio-bindings"
|
||||
- dependency-name: "vfio-ioctls"
|
||||
- dependency-name: "virtio-bindings"
|
||||
- dependency-name: "virtio-queue"
|
||||
- dependency-name: "vm-fdt"
|
||||
- dependency-name: "vm-memory"
|
||||
- dependency-name: "vm-superio"
|
||||
- dependency-name: "vmm-sys-util"
|
||||
# As we often have up to 8/9 components that need the same versions bumps
|
||||
# create groups for common dependencies, so they can all go in a single PR
|
||||
# We can extend this as we see more frequent groups
|
||||
groups:
|
||||
atty:
|
||||
patterns:
|
||||
- atty
|
||||
bit-vec:
|
||||
patterns:
|
||||
- bit-vec
|
||||
bumpalo:
|
||||
patterns:
|
||||
- bumpalo
|
||||
clap:
|
||||
patterns:
|
||||
- clap
|
||||
crossbeam:
|
||||
patterns:
|
||||
- crossbeam
|
||||
h2:
|
||||
patterns:
|
||||
- h2
|
||||
idna:
|
||||
patterns:
|
||||
- idna
|
||||
openssl:
|
||||
patterns:
|
||||
- openssl
|
||||
protobuf:
|
||||
patterns:
|
||||
- protobuf
|
||||
rsa:
|
||||
patterns:
|
||||
- rsa
|
||||
rustix:
|
||||
patterns:
|
||||
- rustix
|
||||
time:
|
||||
patterns:
|
||||
- time
|
||||
tokio:
|
||||
patterns:
|
||||
- tokio
|
||||
tracing:
|
||||
patterns:
|
||||
- tracing
|
||||
|
||||
- package-ecosystem: "gomod"
|
||||
directories:
|
||||
- "src/runtime"
|
||||
- "tools/testing/kata-webhook"
|
||||
- "src/tools/csi-kata-directvolume"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
5
.github/workflows/PR-wip-checks.yaml
vendored
5
.github/workflows/PR-wip-checks.yaml
vendored
@ -9,9 +9,6 @@ on:
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
@ -23,7 +20,7 @@ jobs:
|
||||
steps:
|
||||
- name: WIP Check
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: tim-actions/wip-check@1c2a1ca6c110026b3e2297bb2ef39e1747b5a755 # master (2021-06-10)
|
||||
uses: tim-actions/wip-check@1c2a1ca6c110026b3e2297bb2ef39e1747b5a755
|
||||
with:
|
||||
labels: '["do-not-merge", "wip", "rfc"]'
|
||||
keywords: '["WIP", "wip", "RFC", "rfc", "dnm", "DNM", "do-not-merge"]'
|
||||
|
6
.github/workflows/actionlint.yaml
vendored
6
.github/workflows/actionlint.yaml
vendored
@ -11,9 +11,6 @@ on:
|
||||
paths:
|
||||
- '.github/workflows/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
@ -25,10 +22,9 @@ jobs:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install actionlint gh extension
|
||||
run: gh extension install https://github.com/cschleiden/gh-actionlint
|
||||
|
59
.github/workflows/add-issues-to-project.yaml
vendored
Normal file
59
.github/workflows/add-issues-to-project.yaml
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
name: Add newly created issues to the backlog project
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
add-new-issues-to-backlog:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Install hub
|
||||
run: |
|
||||
HUB_ARCH="amd64"
|
||||
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
|
||||
jq -r .tag_name | sed 's/^v//')
|
||||
curl -sL \
|
||||
"https://github.com/github/hub/releases/download/v${HUB_VER}/hub-linux-${HUB_ARCH}-${HUB_VER}.tgz" |\
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && \
|
||||
sudo install hub /usr/local/bin
|
||||
|
||||
- name: Install hub extension script
|
||||
run: |
|
||||
# Clone into a temporary directory to avoid overwriting
|
||||
# any existing github directory.
|
||||
pushd "$(mktemp -d)" &>/dev/null
|
||||
git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
|
||||
sudo install hub-util.sh /usr/local/bin
|
||||
popd &>/dev/null
|
||||
|
||||
- name: Checkout code to allow hub to communicate with the project
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Add issue to issue backlog
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
|
||||
run: |
|
||||
issue=${{ github.event.issue.number }}
|
||||
|
||||
project_name="Issue backlog"
|
||||
project_type="org"
|
||||
project_column="To do"
|
||||
|
||||
hub-util.sh \
|
||||
add-issue \
|
||||
"$issue" \
|
||||
"$project_name" \
|
||||
"$project_type" \
|
||||
"$project_column"
|
53
.github/workflows/add-pr-sizing-label.yaml
vendored
Normal file
53
.github/workflows/add-pr-sizing-label.yaml
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
# Copyright (c) 2022 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
name: Add PR sizing label
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
add-pr-size-label:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
- name: Install PR sizing label script
|
||||
run: |
|
||||
# Clone into a temporary directory to avoid overwriting
|
||||
# any existing github directory.
|
||||
pushd "$(mktemp -d)" &>/dev/null
|
||||
git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
|
||||
sudo install pr-add-size-label.sh /usr/local/bin
|
||||
popd &>/dev/null
|
||||
|
||||
- name: Add PR sizing label
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_PR_SIZE_TOKEN }}
|
||||
run: |
|
||||
pr=${{ github.event.number }}
|
||||
# Removing man-db, workflow kept failing, fixes: #4480
|
||||
sudo apt -y remove --purge man-db
|
||||
sudo apt -y install diffstat patchutils
|
||||
|
||||
pr-add-size-label.sh -p "$pr"
|
109
.github/workflows/basic-ci-amd64.yaml
vendored
109
.github/workflows/basic-ci-amd64.yaml
vendored
@ -13,33 +13,26 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-containerd-sandboxapi:
|
||||
run-cri-containerd:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['active']
|
||||
vmm: ['dragonball', 'cloud-hypervisor', 'qemu-runtime-rs']
|
||||
# TODO: enable me when https://github.com/containerd/containerd/issues/11640 is fixed
|
||||
if: false
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'dragonball', 'qemu', 'stratovirt', 'cloud-hypervisor', 'qemu-runtime-rs']
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
SANDBOXER: "shim"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -51,7 +44,52 @@ jobs:
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
run-containerd-sandboxapi:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['latest']
|
||||
vmm: ['dragonball', 'cloud-hypervisor', 'qemu-runtime-rs']
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
#the latest containerd from 2.0 need to set the CGROUP_DRIVER for e2e testing
|
||||
CGROUP_DRIVER: ""
|
||||
SANDBOXER: "shim"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -76,11 +114,11 @@ jobs:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
SANDBOXER: "podsandbox"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
@ -91,7 +129,7 @@ jobs:
|
||||
run: bash tests/stability/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -118,11 +156,10 @@ jobs:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -134,7 +171,7 @@ jobs:
|
||||
run: bash tests/integration/nydus/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -153,11 +190,10 @@ jobs:
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -169,7 +205,7 @@ jobs:
|
||||
run: bash tests/integration/runk/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -195,11 +231,10 @@ jobs:
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -211,7 +246,7 @@ jobs:
|
||||
run: bash tests/functional/tracing/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -239,11 +274,10 @@ jobs:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -255,7 +289,7 @@ jobs:
|
||||
run: bash tests/functional/vfio/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -280,11 +314,10 @@ jobs:
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -296,7 +329,7 @@ jobs:
|
||||
run: bash tests/integration/docker/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -324,11 +357,10 @@ jobs:
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -337,12 +369,10 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
env:
|
||||
GITHUB_API_TOKEN: ${{ github.token }}
|
||||
run: bash tests/integration/nerdctl/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -360,20 +390,21 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: nerdctl-tests-garm-${{ matrix.vmm }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
run-kata-agent-apis:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -385,7 +416,7 @@ jobs:
|
||||
run: bash tests/functional/kata-agent-apis/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
145
.github/workflows/basic-ci-s390x.yaml
vendored
145
.github/workflows/basic-ci-s390x.yaml
vendored
@ -1,145 +0,0 @@
|
||||
name: CI | Basic s390x tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-containerd-sandboxapi:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['active']
|
||||
vmm: ['qemu-runtime-rs']
|
||||
# TODO: enable me when https://github.com/containerd/containerd/issues/11640 is fixed
|
||||
if: false
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
SANDBOXER: "shim"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run containerd-sandboxapi tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
run-containerd-stability:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['qemu']
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
SANDBOXER: "podsandbox"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/stability/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/stability/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run containerd-stability tests
|
||||
timeout-minutes: 15
|
||||
run: bash tests/stability/gha-run.sh run
|
||||
|
||||
run-docker-tests:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# all the tests are not flaky, otherwise we'll fail them
|
||||
# all due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm: ['qemu']
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/docker/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/docker/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run docker smoke test
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/docker/gha-run.sh run
|
132
.github/workflows/build-checks-preview-riscv64.yaml
vendored
132
.github/workflows/build-checks-preview-riscv64.yaml
vendored
@ -1,132 +0,0 @@
|
||||
# This yaml is designed to be used until all components listed in
|
||||
# `build-checks.yaml` are supported
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
instance:
|
||||
default: "riscv-builder"
|
||||
description: "Default instance when manually triggering"
|
||||
workflow_call:
|
||||
inputs:
|
||||
instance:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
name: Build checks preview riscv64
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ${{ inputs.instance }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
command:
|
||||
- "make vendor"
|
||||
- "make check"
|
||||
- "make test"
|
||||
- "sudo -E PATH=\"$PATH\" make test"
|
||||
component:
|
||||
- name: agent
|
||||
path: src/agent
|
||||
needs:
|
||||
- rust
|
||||
- libdevmapper
|
||||
- libseccomp
|
||||
- protobuf-compiler
|
||||
- clang
|
||||
- name: agent-ctl
|
||||
path: src/tools/agent-ctl
|
||||
needs:
|
||||
- rust
|
||||
- musl-tools
|
||||
- protobuf-compiler
|
||||
- clang
|
||||
- name: trace-forwarder
|
||||
path: src/tools/trace-forwarder
|
||||
needs:
|
||||
- rust
|
||||
- musl-tools
|
||||
- name: genpolicy
|
||||
path: src/tools/genpolicy
|
||||
needs:
|
||||
- rust
|
||||
- musl-tools
|
||||
- protobuf-compiler
|
||||
- name: runtime
|
||||
path: src/runtime
|
||||
needs:
|
||||
- golang
|
||||
- XDG_RUNTIME_DIR
|
||||
- name: runtime-rs
|
||||
path: src/runtime-rs
|
||||
needs:
|
||||
- rust
|
||||
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE" "$HOME"
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/* || { sleep 10 && sudo rm -rf "$GITHUB_WORKSPACE"/*; }
|
||||
sudo rm -f /tmp/kata_hybrid* # Sometime we got leftover from test_setup_hvsock_failed()
|
||||
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install yq
|
||||
run: |
|
||||
./ci/install_yq.sh
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
- name: Install golang
|
||||
if: contains(matrix.component.needs, 'golang')
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
- name: Setup rust
|
||||
if: contains(matrix.component.needs, 'rust')
|
||||
run: |
|
||||
./tests/install_rust.sh
|
||||
echo "${HOME}/.cargo/bin" >> "$GITHUB_PATH"
|
||||
if [ "$(uname -m)" == "x86_64" ] || [ "$(uname -m)" == "aarch64" ]; then
|
||||
sudo apt-get update && sudo apt-get -y install musl-tools
|
||||
fi
|
||||
- name: Install devicemapper
|
||||
if: contains(matrix.component.needs, 'libdevmapper') && matrix.command == 'make check'
|
||||
run: sudo apt-get update && sudo apt-get -y install libdevmapper-dev
|
||||
- name: Install libseccomp
|
||||
if: contains(matrix.component.needs, 'libseccomp') && matrix.command != 'make vendor' && matrix.command != 'make check'
|
||||
run: |
|
||||
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
|
||||
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
|
||||
./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
|
||||
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
|
||||
echo "LIBSECCOMP_LINK_TYPE=static" >> "$GITHUB_ENV"
|
||||
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> "$GITHUB_ENV"
|
||||
- name: Install protobuf-compiler
|
||||
if: contains(matrix.component.needs, 'protobuf-compiler') && matrix.command != 'make vendor'
|
||||
run: sudo apt-get update && sudo apt-get -y install protobuf-compiler
|
||||
- name: Install clang
|
||||
if: contains(matrix.component.needs, 'clang') && matrix.command == 'make check'
|
||||
run: sudo apt-get update && sudo apt-get -y install clang
|
||||
- name: Setup XDG_RUNTIME_DIR
|
||||
if: contains(matrix.component.needs, 'XDG_RUNTIME_DIR') && matrix.command != 'make check'
|
||||
run: |
|
||||
XDG_RUNTIME_DIR=$(mktemp -d "/tmp/kata-tests-$USER.XXX" | tee >(xargs chmod 0700))
|
||||
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> "$GITHUB_ENV"
|
||||
- name: Skip tests that depend on virtualization capable runners when needed
|
||||
if: inputs.instance == 'riscv-builder'
|
||||
run: |
|
||||
echo "GITHUB_RUNNER_CI_NON_VIRT=true" >> "$GITHUB_ENV"
|
||||
- name: Running `${{ matrix.command }}` for ${{ matrix.component.name }}
|
||||
run: |
|
||||
cd ${{ matrix.component.path }}
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
SKIP_GO_VERSION_CHECK: "1"
|
116
.github/workflows/build-checks.yaml
vendored
116
.github/workflows/build-checks.yaml
vendored
@ -5,9 +5,6 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
name: Build checks
|
||||
jobs:
|
||||
check:
|
||||
@ -15,53 +12,40 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
component:
|
||||
- agent
|
||||
- dragonball
|
||||
- runtime
|
||||
- runtime-rs
|
||||
- agent-ctl
|
||||
- kata-ctl
|
||||
- trace-forwarder
|
||||
- genpolicy
|
||||
command:
|
||||
- "make vendor"
|
||||
- "make check"
|
||||
- "make test"
|
||||
- "sudo -E PATH=\"$PATH\" make test"
|
||||
component:
|
||||
- name: agent
|
||||
path: src/agent
|
||||
needs:
|
||||
- rust
|
||||
- libdevmapper
|
||||
- libseccomp
|
||||
- protobuf-compiler
|
||||
- clang
|
||||
- name: dragonball
|
||||
path: src/dragonball
|
||||
needs:
|
||||
- rust
|
||||
- name: runtime
|
||||
path: src/runtime
|
||||
needs:
|
||||
- golang
|
||||
- XDG_RUNTIME_DIR
|
||||
- name: runtime-rs
|
||||
path: src/runtime-rs
|
||||
needs:
|
||||
- rust
|
||||
- name: agent-ctl
|
||||
path: src/tools/agent-ctl
|
||||
needs:
|
||||
- rust
|
||||
- protobuf-compiler
|
||||
- clang
|
||||
- name: kata-ctl
|
||||
path: src/tools/kata-ctl
|
||||
needs:
|
||||
- rust
|
||||
- name: trace-forwarder
|
||||
path: src/tools/trace-forwarder
|
||||
needs:
|
||||
- rust
|
||||
- name: genpolicy
|
||||
path: src/tools/genpolicy
|
||||
needs:
|
||||
- rust
|
||||
- protobuf-compiler
|
||||
|
||||
include:
|
||||
- component: agent
|
||||
component-path: src/agent
|
||||
- component: dragonball
|
||||
component-path: src/dragonball
|
||||
- component: runtime
|
||||
component-path: src/runtime
|
||||
- component: runtime-rs
|
||||
component-path: src/runtime-rs
|
||||
- component: agent-ctl
|
||||
component-path: src/tools/agent-ctl
|
||||
- component: kata-ctl
|
||||
component-path: src/tools/kata-ctl
|
||||
- component: trace-forwarder
|
||||
component-path: src/tools/trace-forwarder
|
||||
- install-libseccomp: no
|
||||
- component: agent
|
||||
install-libseccomp: yes
|
||||
- component: genpolicy
|
||||
component-path: src/tools/genpolicy
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
@ -70,10 +54,9 @@ jobs:
|
||||
sudo rm -f /tmp/kata_hybrid* # Sometime we got leftover from test_setup_hvsock_failed()
|
||||
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Install yq
|
||||
run: |
|
||||
@ -81,23 +64,23 @@ jobs:
|
||||
env:
|
||||
INSTALL_IN_GOPATH: false
|
||||
- name: Install golang
|
||||
if: contains(matrix.component.needs, 'golang')
|
||||
if: ${{ matrix.component == 'runtime' }}
|
||||
run: |
|
||||
./tests/install_go.sh -f -p
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
- name: Setup rust
|
||||
if: contains(matrix.component.needs, 'rust')
|
||||
- name: Install rust
|
||||
if: ${{ matrix.component != 'runtime' }}
|
||||
run: |
|
||||
./tests/install_rust.sh
|
||||
echo "${HOME}/.cargo/bin" >> "$GITHUB_PATH"
|
||||
if [ "$(uname -m)" == "x86_64" ] || [ "$(uname -m)" == "aarch64" ]; then
|
||||
sudo apt-get update && sudo apt-get -y install musl-tools
|
||||
fi
|
||||
- name: Install musl-tools
|
||||
if: ${{ matrix.component != 'runtime' }}
|
||||
run: sudo apt-get -y install musl-tools
|
||||
- name: Install devicemapper
|
||||
if: contains(matrix.component.needs, 'libdevmapper') && matrix.command == 'make check'
|
||||
run: sudo apt-get update && sudo apt-get -y install libdevmapper-dev
|
||||
if: ${{ matrix.command == 'make check' && matrix.component == 'agent' }}
|
||||
run: sudo apt-get -y install libdevmapper-dev
|
||||
- name: Install libseccomp
|
||||
if: contains(matrix.component.needs, 'libseccomp') && matrix.command != 'make vendor' && matrix.command != 'make check'
|
||||
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.install-libseccomp == 'yes' }}
|
||||
run: |
|
||||
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
|
||||
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
|
||||
@ -106,25 +89,20 @@ jobs:
|
||||
echo "LIBSECCOMP_LINK_TYPE=static" >> "$GITHUB_ENV"
|
||||
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> "$GITHUB_ENV"
|
||||
- name: Install protobuf-compiler
|
||||
if: contains(matrix.component.needs, 'protobuf-compiler') && matrix.command != 'make vendor'
|
||||
run: sudo apt-get update && sudo apt-get -y install protobuf-compiler
|
||||
if: ${{ matrix.command != 'make vendor' && (matrix.component == 'agent' || matrix.component == 'genpolicy' || matrix.component == 'agent-ctl') }}
|
||||
run: sudo apt-get -y install protobuf-compiler
|
||||
- name: Install clang
|
||||
if: contains(matrix.component.needs, 'clang') && matrix.command == 'make check'
|
||||
run: sudo apt-get update && sudo apt-get -y install clang
|
||||
- name: Setup XDG_RUNTIME_DIR
|
||||
if: contains(matrix.component.needs, 'XDG_RUNTIME_DIR') && matrix.command != 'make check'
|
||||
if: ${{ matrix.command == 'make check' && (matrix.component == 'agent' || matrix.component == 'agent-ctl') }}
|
||||
run: sudo apt-get -y install clang
|
||||
- name: Setup XDG_RUNTIME_DIR for the `runtime` tests
|
||||
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.component == 'runtime' }}
|
||||
run: |
|
||||
XDG_RUNTIME_DIR=$(mktemp -d "/tmp/kata-tests-$USER.XXX" | tee >(xargs chmod 0700))
|
||||
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> "$GITHUB_ENV"
|
||||
- name: Skip tests that depend on virtualization capable runners when needed
|
||||
if: ${{ endsWith(inputs.instance, '-arm') }}
|
||||
- name: Running `${{ matrix.command }}` for ${{ matrix.component }}
|
||||
run: |
|
||||
echo "GITHUB_RUNNER_CI_NON_VIRT=true" >> "$GITHUB_ENV"
|
||||
- name: Running `${{ matrix.command }}` for ${{ matrix.component.name }}
|
||||
run: |
|
||||
cd ${{ matrix.component.path }}
|
||||
cd ${{ matrix.component-path }}
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
SKIP_GO_VERSION_CHECK: "1"
|
||||
|
@ -20,12 +20,6 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: false
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
@ -60,7 +54,6 @@ jobs:
|
||||
- pause-image
|
||||
- qemu
|
||||
- qemu-snp-experimental
|
||||
- qemu-tdx-experimental
|
||||
- stratovirt
|
||||
- trace-forwarder
|
||||
- virtiofsd
|
||||
@ -74,17 +67,16 @@ jobs:
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -117,20 +109,20 @@ jobs:
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: oras-project/setup-oras@5c0b487ce3fe0ce3ab0d034e63669e426e294e4d # v1.2.2
|
||||
- uses: oras-project/setup-oras@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
version: "1.2.0"
|
||||
|
||||
# for pushing attestations to the registry
|
||||
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- uses: docker/login-action@v3
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4
|
||||
- uses: actions/attest-build-provenance@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
subject-name: ${{ steps.parse-oci-segments.outputs.oci-name }}
|
||||
@ -138,7 +130,7 @@ jobs:
|
||||
push-to-registry: true
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
@ -147,7 +139,7 @@ jobs:
|
||||
|
||||
- name: store-extratarballs-artifact ${{ matrix.asset }}
|
||||
if: ${{ startsWith(matrix.asset, 'kernel-nvidia-gpu') }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset }}-headers${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.xz
|
||||
@ -157,9 +149,6 @@ jobs:
|
||||
build-asset-rootfs:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
@ -173,17 +162,16 @@ jobs:
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -192,7 +180,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: kata-artifacts-amd64-*${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -217,7 +205,7 @@ jobs:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
@ -237,7 +225,7 @@ jobs:
|
||||
- kernel-nvidia-gpu-confidential-headers
|
||||
- pause-image
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
@ -250,7 +238,7 @@ jobs:
|
||||
asset:
|
||||
- agent
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
if: ${{ inputs.stage == 'release' }}
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
@ -258,23 +246,19 @@ jobs:
|
||||
build-asset-shim-v2:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts, remove-rootfs-binary-artifacts-for-release]
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -283,7 +267,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: kata-artifacts-amd64-*${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -309,7 +293,7 @@ jobs:
|
||||
MEASURED_ROOTFS: yes
|
||||
|
||||
- name: store-artifact shim-v2
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-amd64-shim-v2${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-shim-v2.tar.xz
|
||||
@ -319,22 +303,18 @@ jobs:
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: kata-artifacts-amd64-*${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -343,7 +323,7 @@ jobs:
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-static.tar.xz
|
||||
|
@ -20,12 +20,6 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: false
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
@ -46,7 +40,6 @@ jobs:
|
||||
- kernel-dragonball-experimental
|
||||
- kernel-nvidia-gpu
|
||||
- nydus
|
||||
- ovmf
|
||||
- qemu
|
||||
- stratovirt
|
||||
- virtiofsd
|
||||
@ -55,17 +48,16 @@ jobs:
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -97,20 +89,20 @@ jobs:
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: oras-project/setup-oras@5c0b487ce3fe0ce3ab0d034e63669e426e294e4d # v1.2.2
|
||||
- uses: oras-project/setup-oras@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
version: "1.2.0"
|
||||
|
||||
# for pushing attestations to the registry
|
||||
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- uses: docker/login-action@v3
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4
|
||||
- uses: actions/attest-build-provenance@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
subject-name: ${{ steps.parse-oci-segments.outputs.oci-name }}
|
||||
@ -118,7 +110,7 @@ jobs:
|
||||
push-to-registry: true
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
@ -127,7 +119,7 @@ jobs:
|
||||
|
||||
- name: store-extratarballs-artifact ${{ matrix.asset }}
|
||||
if: ${{ startsWith(matrix.asset, 'kernel-nvidia-gpu') }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset }}-headers${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.xz
|
||||
@ -137,9 +129,6 @@ jobs:
|
||||
build-asset-rootfs:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
@ -149,17 +138,16 @@ jobs:
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -168,7 +156,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: kata-artifacts-arm64-*${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -192,7 +180,7 @@ jobs:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
@ -209,7 +197,7 @@ jobs:
|
||||
- busybox
|
||||
- kernel-nvidia-gpu-headers
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
@ -222,7 +210,7 @@ jobs:
|
||||
asset:
|
||||
- agent
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
if: ${{ inputs.stage == 'release' }}
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
@ -230,23 +218,19 @@ jobs:
|
||||
build-asset-shim-v2:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts, remove-rootfs-binary-artifacts-for-release]
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -255,7 +239,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: kata-artifacts-arm64-*${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -279,7 +263,7 @@ jobs:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
|
||||
- name: store-artifact shim-v2
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-arm64-shim-v2${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-shim-v2.tar.xz
|
||||
@ -289,22 +273,18 @@ jobs:
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
pattern: kata-artifacts-arm64-*${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -313,7 +293,7 @@ jobs:
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-arm64${{ inputs.tarball-suffix }}
|
||||
path: kata-static.tar.xz
|
||||
|
@ -20,18 +20,9 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ppc64le
|
||||
strategy:
|
||||
matrix:
|
||||
@ -43,19 +34,24 @@ jobs:
|
||||
stage:
|
||||
- ${{ inputs.stage }}
|
||||
steps:
- name: Prepare the self-hosted runner
timeout-minutes: 15
run: |
"${HOME}/scripts/prepare_runner.sh"
sudo rm -rf "$GITHUB_WORKSPACE"/*

- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -80,7 +76,7 @@ jobs:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-ppc64le-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
@ -90,9 +86,6 @@ jobs:
build-asset-rootfs:
runs-on: ppc64le
needs: build-asset
permissions:
contents: read
packages: write
strategy:
matrix:
asset:
@ -100,19 +93,24 @@ jobs:
stage:
- ${{ inputs.stage }}
steps:
- name: Prepare the self-hosted runner
timeout-minutes: 15
run: |
"${HOME}/scripts/prepare_runner.sh"
sudo rm -rf "$GITHUB_WORKSPACE"/*

- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -121,7 +119,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-ppc64le-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -145,7 +143,7 @@ jobs:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-ppc64le-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
@ -161,7 +159,7 @@ jobs:
asset:
- agent
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
- uses: geekyeggo/delete-artifact@v5
if: ${{ inputs.stage == 'release' }}
with:
name: kata-artifacts-ppc64le-${{ matrix.asset}}${{ inputs.tarball-suffix }}
@ -169,23 +167,25 @@ jobs:
build-asset-shim-v2:
runs-on: ppc64le
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
permissions:
contents: read
packages: write
steps:
- name: Prepare the self-hosted runner
timeout-minutes: 15
run: |
"${HOME}/scripts/prepare_runner.sh"
sudo rm -rf "$GITHUB_WORKSPACE"/*

- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -194,7 +194,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-ppc64le-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -218,7 +218,7 @@ jobs:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}

- name: store-artifact shim-v2
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-ppc64le-shim-v2${{ inputs.tarball-suffix }}
path: kata-build/kata-static-shim-v2.tar.xz
@ -228,26 +228,22 @@ jobs:
create-kata-tarball:
runs-on: ppc64le
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
permissions:
contents: read
packages: write
steps:
- name: Adjust a permission for repo
run: |
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-ppc64le-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -256,7 +252,7 @@ jobs:
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
- name: store-artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-static-tarball-ppc64le${{ inputs.tarball-suffix }}
path: kata-static.tar.xz
@ -1,86 +0,0 @@
name: CI | Build kata-static tarball for riscv64
on:
workflow_call:
inputs:
stage:
required: false
type: string
default: test
tarball-suffix:
required: false
type: string
push-to-registry:
required: false
type: string
default: no
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""
secrets:
QUAY_DEPLOYER_PASSWORD:
required: true

permissions:
contents: read

jobs:
build-asset:
runs-on: riscv-builder
permissions:
contents: read
packages: write
id-token: write
attestations: write
strategy:
matrix:
asset:
- kernel
- virtiofsd
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Build ${{ matrix.asset }}
run: |
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
env:
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
ARTEFACT_REGISTRY: ghcr.io
ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-riscv64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@ -20,15 +20,6 @@ on:
required: false
type: string
default: ""
secrets:
CI_HKD_PATH:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true

permissions:
contents: read

jobs:
build-asset:
@ -53,17 +44,16 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -97,14 +87,14 @@ jobs:
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"

# for pushing attestations to the registry
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
- uses: docker/login-action@v3
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4
- uses: actions/attest-build-provenance@v1
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
subject-name: ${{ steps.parse-oci-segments.outputs.oci-name }}
@ -112,7 +102,7 @@ jobs:
push-to-registry: true

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-s390x-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
@ -122,9 +112,6 @@ jobs:
build-asset-rootfs:
runs-on: s390x
needs: build-asset
permissions:
contents: read
packages: write
strategy:
matrix:
asset:
@ -135,17 +122,16 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -154,7 +140,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-s390x-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -179,7 +165,7 @@ jobs:
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-s390x-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
@ -189,13 +175,9 @@ jobs:
build-asset-boot-image-se:
runs-on: s390x
needs: [build-asset, build-asset-rootfs]
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/checkout@v4

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
@ -203,7 +185,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-s390x-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -227,7 +209,7 @@ jobs:
HKD_PATH: "host-key-document"

- name: store-artifact boot-image-se
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-s390x${{ inputs.tarball-suffix }}
path: kata-build/kata-static-boot-image-se.tar.xz
@ -245,7 +227,7 @@ jobs:
- coco-guest-components
- pause-image
steps:
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
- uses: geekyeggo/delete-artifact@v5
if: ${{ inputs.stage == 'release' }}
with:
name: kata-artifacts-s390x-${{ matrix.asset}}${{ inputs.tarball-suffix }}
@ -253,23 +235,19 @@ jobs:
build-asset-shim-v2:
runs-on: s390x
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
permissions:
contents: read
packages: write
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -278,7 +256,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-s390x-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -301,10 +279,10 @@ jobs:
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
MEASURED_ROOTFS: no
MEASURED_ROOTFS: yes

- name: store-artifact shim-v2
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-s390x-shim-v2${{ inputs.tarball-suffix }}
path: kata-build/kata-static-shim-v2.tar.xz
@ -318,22 +296,18 @@ jobs:
- build-asset-rootfs
- build-asset-boot-image-se
- build-asset-shim-v2
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
pattern: kata-artifacts-s390x-*${{ inputs.tarball-suffix }}
path: kata-artifacts
@ -342,7 +316,7 @@ jobs:
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
- name: store-artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
path: kata-static.tar.xz
7
.github/workflows/cargo-deny-runner.yaml
vendored
@ -11,9 +11,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

permissions:
contents: read

jobs:
cargo-deny-runner:
runs-on: ubuntu-22.04
@ -21,9 +18,7 @@ jobs:
steps:
- name: Checkout Code
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
uses: actions/checkout@v4
- name: Generate Action
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: bash cargo-deny-generator.sh
15
.github/workflows/ci-coco-stability.yaml
vendored
@ -8,25 +8,12 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

permissions:
contents: read

jobs:
kata-containers-ci-on-push:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/ci-weekly.yaml
with:
commit-hash: ${{ github.sha }}
pr-number: "weekly"
tag: ${{ github.sha }}-weekly
target-branch: ${{ github.ref_name }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit
23
.github/workflows/ci-devel.yaml
vendored
@ -2,33 +2,12 @@ name: Kata Containers CI (manually triggered)
on:
workflow_dispatch:

permissions:
contents: read

jobs:
kata-containers-ci-on-push:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/ci.yaml
with:
commit-hash: ${{ github.sha }}
pr-number: "dev"
tag: ${{ github.sha }}-dev
target-branch: ${{ github.ref_name }}

secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

build-checks:
uses: ./.github/workflows/build-checks.yaml
with:
instance: ubuntu-22.04
secrets: inherit
5
.github/workflows/ci-nightly-s390x.yaml
vendored
@ -3,10 +3,6 @@ on:
- cron: '0 5 * * *'

name: Nightly CI for s390x

permissions:
contents: read

jobs:
check-internal-test-result:
runs-on: s390x
@ -15,7 +11,6 @@ jobs:
matrix:
test_title:
- kata-vfio-ap-e2e-tests
- cc-vfio-ap-e2e-tests
- cc-se-e2e-tests
steps:
- name: Fetch a test result for {{ matrix.test_title }}
17
.github/workflows/ci-nightly.yaml
vendored
@ -7,27 +7,12 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

permissions:
contents: read

jobs:
kata-containers-ci-on-push:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/ci.yaml
with:
commit-hash: ${{ github.sha }}
pr-number: "nightly"
tag: ${{ github.sha }}-nightly
target-branch: ${{ github.ref_name }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit
18
.github/workflows/ci-on-push.yaml
vendored
@ -14,10 +14,6 @@ on:
- reopened
- labeled

permissions:
contents: read
id-token: write

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
@ -33,11 +29,6 @@ jobs:
kata-containers-ci-on-push:
needs: skipper
if: ${{ needs.skipper.outputs.skip_build != 'yes' }}
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/ci.yaml
with:
commit-hash: ${{ github.event.pull_request.head.sha }}
@ -45,11 +36,4 @@ jobs:
tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
target-branch: ${{ github.event.pull_request.base.ref }}
skip-test: ${{ needs.skipper.outputs.skip_test }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit
53
.github/workflows/ci-weekly.yaml
vendored
@ -15,29 +15,9 @@ on:
required: false
type: string
default: ""
secrets:
AUTHENTICATED_IMAGE_PASSWORD:
required: true

AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
AZ_SUBSCRIPTION_ID:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true

permissions:
contents: read

jobs:
build-kata-static-tarball-amd64:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
@ -46,10 +26,7 @@ jobs:

publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@ -57,23 +34,16 @@ jobs:
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04
arch: amd64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit

build-and-publish-tee-confidential-unencrypted-image:
permissions:
contents: read
packages: write
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -82,20 +52,20 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v3

- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Docker build and push
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
uses: docker/build-push-action@v5
with:
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
push: true
@ -114,11 +84,4 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
tarball-suffix: -${{ inputs.tag }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
permissions:
contents: read
id-token: write
secrets: inherit
260
.github/workflows/ci.yaml
vendored
@ -19,34 +19,9 @@ on:
required: false
type: string
default: no
secrets:
AUTHENTICATED_IMAGE_PASSWORD:
required: true

AZ_APPID:
required: true
AZ_TENANT_ID:
required: true
AZ_SUBSCRIPTION_ID:
required: true
CI_HKD_PATH:
required: true
ITA_KEY:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true

permissions:
contents: read
id-token: write

jobs:
build-kata-static-tarball-amd64:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
@ -55,10 +30,7 @@ jobs:

publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@ -66,17 +38,9 @@ jobs:
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04
arch: amd64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit

build-kata-static-tarball-arm64:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
@ -85,10 +49,7 @@ jobs:

publish-kata-deploy-payload-arm64:
needs: build-kata-static-tarball-arm64
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@ -96,58 +57,26 @@ jobs:
tag: ${{ inputs.tag }}-arm64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04-arm
arch: arm64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit

build-kata-static-tarball-s390x:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
CI_HKD_PATH: ${{ secrets.ci_hkd_path }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit

build-kata-static-tarball-ppc64le:
permissions:
contents: read
packages: write
uses: ./.github/workflows/build-kata-static-tarball-ppc64le.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

build-kata-static-tarball-riscv64:
permissions:
contents: read
packages: write
id-token: write
attestations: write
uses: ./.github/workflows/build-kata-static-tarball-riscv64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

publish-kata-deploy-payload-s390x:
needs: build-kata-static-tarball-s390x
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@ -155,17 +84,11 @@ jobs:
tag: ${{ inputs.tag }}-s390x
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: s390x
arch: s390x
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit

publish-kata-deploy-payload-ppc64le:
needs: build-kata-static-tarball-ppc64le
permissions:
contents: read
packages: write
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
uses: ./.github/workflows/publish-kata-deploy-payload-ppc64le.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@ -173,23 +96,16 @@ jobs:
tag: ${{ inputs.tag }}-ppc64le
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ppc64le
arch: ppc64le
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
secrets: inherit

build-and-publish-tee-confidential-unencrypted-image:
permissions:
contents: read
packages: write
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -198,20 +114,20 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v3

- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Docker build and push
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
uses: docker/build-push-action@v5
with:
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
push: true
@ -221,17 +137,13 @@ jobs:

publish-csi-driver-amd64:
needs: build-kata-static-tarball-amd64
permissions:
contents: read
packages: write
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false

- name: Rebase atop of the latest target branch
run: |
@ -240,7 +152,7 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: get-kata-tarball
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@v4
with:
name: kata-static-tarball-amd64-${{ inputs.tag }}
path: kata-artifacts
@ -255,17 +167,17 @@ jobs:
cp /opt/kata/bin/csi-kata-directvolume src/tools/csi-kata-directvolume/bin/directvolplugin

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v3

- name: Login to Kata Containers ghcr.io
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Docker build and push
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
uses: docker/build-push-action@v5
with:
tags: ghcr.io/kata-containers/csi-kata-directvolume:${{ inputs.pr-number }}
push: true
@ -294,10 +206,7 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
secrets: inherit

run-k8s-tests-on-amd64:
if: ${{ inputs.skip-test != 'yes' }}
@ -310,18 +219,7 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-k8s-tests-on-arm64:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-arm64
uses: ./.github/workflows/run-k8s-tests-on-arm64.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-arm64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit

run-kata-coco-tests:
if: ${{ inputs.skip-test != 'yes' }}
@ -338,12 +236,7 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
ITA_KEY: ${{ secrets.ITA_KEY }}
secrets: inherit

run-k8s-tests-on-zvsi:
if: ${{ inputs.skip-test != 'yes' }}
@ -356,8 +249,7 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
secrets: inherit

run-k8s-tests-on-ppc64le:
if: ${{ inputs.skip-test != 'yes' }}
@ -371,30 +263,13 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-kata-deploy-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: [publish-kata-deploy-payload-amd64]
uses: ./.github/workflows/run-kata-deploy-tests.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-metrics-tests:
# Skip metrics tests whilst runner is broken
if: false
# if: ${{ inputs.skip-test != 'yes' }}
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-metrics.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-basic-amd64-tests:
@ -406,99 +281,20 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}

run-basic-s390x-tests:
run-cri-containerd-tests-s390x:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-s390x
uses: ./.github/workflows/basic-ci-s390x.yaml
uses: ./.github/workflows/run-cri-containerd-tests-s390x.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}

run-cri-containerd-amd64:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: lts, vmm: clh },
{ containerd_version: lts, vmm: dragonball },
{ containerd_version: lts, vmm: qemu },
{ containerd_version: lts, vmm: stratovirt },
{ containerd_version: lts, vmm: cloud-hypervisor },
{ containerd_version: lts, vmm: qemu-runtime-rs },
{ containerd_version: active, vmm: clh },
{ containerd_version: active, vmm: dragonball },
{ containerd_version: active, vmm: qemu },
{ containerd_version: active, vmm: stratovirt },
{ containerd_version: active, vmm: cloud-hypervisor },
{ containerd_version: active, vmm: qemu-runtime-rs },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04
arch: amd64
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}

run-cri-containerd-s390x:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-s390x
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
{ containerd_version: active, vmm: qemu-runtime-rs },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: s390x-large
arch: s390x
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}

run-cri-containerd-tests-ppc64le:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-ppc64le
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
uses: ./.github/workflows/run-cri-containerd-tests-ppc64le.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ppc64le
arch: ppc64le
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}

run-cri-containerd-tests-arm64:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-arm64
strategy:
fail-fast: false
matrix:
params: [
{ containerd_version: active, vmm: qemu },
]
uses: ./.github/workflows/run-cri-containerd-tests.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: arm64-non-k8s
arch: arm64
containerd_version: ${{ matrix.params.containerd_version }}
vmm: ${{ matrix.params.vmm }}
20
.github/workflows/cleanup-resources.yaml
vendored
@ -4,25 +4,19 @@ on:
- cron: "0 0 * * *"
workflow_dispatch:

permissions:
contents: read
id-token: write

jobs:
cleanup-resources:
runs-on: ubuntu-22.04
environment: ci
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/checkout@v4

- name: Log into Azure
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
with:
client-id: ${{ secrets.AZ_APPID }}
tenant-id: ${{ secrets.AZ_TENANT_ID }}
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
env:
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
run: bash tests/integration/kubernetes/gha-run.sh login-azure

- name: Install Python dependencies
run: |
100
.github/workflows/codeql.yml
vendored
@ -1,100 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
schedule:
- cron: '45 0 * * 1'

permissions:
contents: read

jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ubuntu-24.04
permissions:
# required for all workflows
security-events: write

# required to fetch internal or private CodeQL packs
packages: read

# only required for workflows in private repositories
actions: read
contents: read

strategy:
fail-fast: false
matrix:
include:
- language: go
build-mode: manual
- language: python
build-mode: none
# CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

# Add any setup steps before running the `github/codeql-action/init` action.
# This includes steps like installing compilers or runtimes (`actions/setup-node`
# or others). This is typically only required for manual builds.
# - name: Setup runtime (example)
# uses: actions/setup-example@v1

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.

# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality

# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual' && matrix.language == 'go'
shell: bash
run: |
make -C src/runtime

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
15
.github/workflows/commit-message-check.yaml
vendored
@ -6,9 +6,6 @@ on:
- reopened
- synchronize

permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
@ -29,7 +26,7 @@ jobs:
- name: Get PR Commits
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@c64db31d359214d244884dd68f971a110b29ab83 # v1.2.0
uses: tim-actions/get-pr-commits@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
# Filter out revert commits
@ -44,19 +41,19 @@ jobs:

- name: DCO Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20 # master (2020-04-28)
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Commit Body Missing Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-body-check@d2e0e8e1f0332b3281c98867c42a2fbe25ad3f15 # v1.0.2
uses: tim-actions/commit-body-check@v1.0.2
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Check Subject Line Length
if: ${{ (env.PR_AUTHOR != 'dependabot[bot]') && !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
pattern: '^.{0,75}(\n.*)*$'
@ -65,7 +62,7 @@ jobs:

- name: Check Body Line Length
if: ${{ (env.PR_AUTHOR != 'dependabot[bot]') && !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
# Notes:
@ -96,7 +93,7 @@ jobs:

- name: Check Subsystem
if: ${{ (env.PR_AUTHOR != 'dependabot[bot]') && !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:'
11
.github/workflows/darwin-tests.yaml
vendored
@ -6,9 +6,6 @@ on:
- reopened
- synchronize

permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
@ -19,12 +16,10 @@ jobs:
runs-on: macos-latest
steps:
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v5
with:
go-version: 1.23.10
go-version: 1.22.11
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
uses: actions/checkout@v4
- name: Build utils
run: ./ci/darwin-test.sh
10
.github/workflows/docs-url-alive-check.yaml
vendored
@ -2,9 +2,6 @@ on:
schedule:
- cron: '0 23 * * 0'

permissions:
contents: read

name: Docs URL Alive Check
jobs:
test:
@ -15,9 +12,9 @@ jobs:
target_branch: ${{ github.base_ref }}
steps:
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v5
with:
go-version: 1.23.10
go-version: 1.22.11
env:
GOPATH: ${{ github.workspace }}/kata-containers
- name: Set env
@ -25,10 +22,9 @@ jobs:
echo "GOPATH=${{ github.workspace }}" >> "$GITHUB_ENV"
echo "${{ github.workspace }}/bin" >> "$GITHUB_PATH"
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
path: ./src/github.com/${{ github.repository }}
# docs url alive check
- name: Docs URL Alive Check
5
.github/workflows/gatekeeper-skipper.yaml
vendored
@ -31,8 +31,6 @@ on:
skip_static:
value: ${{ jobs.skipper.outputs.skip_static }}

permissions:
contents: read

jobs:
skipper:
@ -42,11 +40,10 @@ jobs:
skip_test: ${{ steps.skipper.outputs.skip_test }}
skip_static: ${{ steps.skipper.outputs.skip_static }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0
persist-credentials: false
- id: skipper
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
11
.github/workflows/gatekeeper.yaml
vendored
@ -12,9 +12,6 @@ on:
- reopened
- labeled

permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
@ -22,17 +19,11 @@ concurrency:
jobs:
gatekeeper:
runs-on: ubuntu-22.04
permissions:
actions: read
contents: read
issues: read
pull-requests: read
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
persist-credentials: false
- id: gatekeeper
env:
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
50
.github/workflows/govulncheck.yaml
vendored
@ -1,50 +0,0 @@
on:
workflow_call:

name: Govulncheck

permissions:
contents: read

jobs:
govulncheck:
runs-on: ubuntu-22.04
strategy:
matrix:
include:
- binary: "kata-runtime"
make_target: "runtime"
- binary: "containerd-shim-kata-v2"
make_target: "containerd-shim-v2"
- binary: "kata-monitor"
make_target: "monitor"
fail-fast: false

steps:
- name: Checkout the code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 0
persist-credentials: false

- name: Install golang
run: |
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "${GITHUB_PATH}"

- name: Install govulncheck
run: |
go install golang.org/x/vuln/cmd/govulncheck@latest
echo "${HOME}/go/bin" >> "${GITHUB_PATH}"

- name: Build runtime binaries
run: |
cd src/runtime
make ${{ matrix.make_target }}
env:
SKIP_GO_VERSION_CHECK: "1"

- name: Run govulncheck on ${{ matrix.binary }}
run: |
cd src/runtime
bash ../../tests/govulncheck-runner.sh "./${{ matrix.binary }}"
@ -6,9 +6,6 @@ on:
- reopened
- synchronize

permissions:
contents: read

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
@ -18,9 +15,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
uses: actions/checkout@v4
- name: Ensure the split out runtime classes match the all-in-one file
run: |
pushd tools/packaging/kata-deploy/runtimeclasses/
91
.github/workflows/move-issues-to-in-progress.yaml
vendored
Normal file
91
.github/workflows/move-issues-to-in-progress.yaml
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
name: Move issues to "In progress" in backlog project when referenced by a PR
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
|
||||
jobs:
|
||||
move-linked-issues-to-in-progress:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Install hub
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
HUB_ARCH="amd64"
|
||||
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
|
||||
jq -r .tag_name | sed 's/^v//')
|
||||
curl -sL \
|
||||
"https://github.com/github/hub/releases/download/v${HUB_VER}/hub-linux-${HUB_ARCH}-${HUB_VER}.tgz" |\
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && \
|
||||
sudo install hub /usr/local/bin
|
||||
|
||||
- name: Install hub extension script
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
# Clone into a temporary directory to avoid overwriting
|
||||
# any existing github directory.
|
||||
pushd "$(mktemp -d)" &>/dev/null
|
||||
git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
|
||||
sudo install hub-util.sh /usr/local/bin
|
||||
popd &>/dev/null
|
||||
|
||||
- name: Checkout code to allow hub to communicate with the project
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
- name: Move issue to "In progress"
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
|
||||
run: |
|
||||
pr=${{ github.event.pull_request.number }}
|
||||
|
||||
linked_issue_urls=$(hub-util.sh \
|
||||
list-issues-for-pr "$pr" |\
|
||||
grep -v "^\#" |\
|
||||
cut -d';' -f3 || true)
|
||||
|
||||
# PR doesn't have any linked issues, handle it only if it exists
|
||||
[ -z "$linked_issue_urls" ] && {
|
||||
echo "::warning::No linked issues for PR $pr"
|
||||
exit 0
|
||||
}
|
||||
|
||||
project_name="Issue backlog"
|
||||
project_type="org"
|
||||
project_column="In progress"
|
||||
|
||||
for issue_url in $linked_issue_urls
|
||||
do
|
||||
issue=$(echo "$issue_url"| awk -F/ '{print $NF}' || true)
|
||||
|
||||
[ -z "$issue" ] && {
|
||||
echo "::error::Cannot determine issue number from $issue_url for PR $pr"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Move the issue to the correct column on the project board
|
||||
hub-util.sh \
|
||||
move-issue \
|
||||
"$issue" \
|
||||
"$project_name" \
|
||||
"$project_type" \
|
||||
"$project_column"
|
||||
done
|
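Every step of the new move-issues workflow repeats the same force-skip-ci label guard. A hypothetical refactor (not present in either branch) hoists the condition to the job level so it is evaluated once:

jobs:
  move-linked-issues-to-in-progress:
    runs-on: ubuntu-22.04
    # Hypothetical: skip the whole job when the PR carries the force-skip-ci label.
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
    steps:
      - name: Install hub
        run: echo "individual steps as in the workflow above, without per-step guards"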
87 .github/workflows/payload-after-push.yaml (vendored)
@ -5,153 +5,98 @@ on:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
|
||||
jobs:
|
||||
build-assets-amd64:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
push-to-registry: yes
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
build-assets-arm64:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
push-to-registry: yes
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
build-assets-s390x:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
push-to-registry: yes
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets:
|
||||
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
secrets: inherit
|
||||
|
||||
build-assets-ppc64le:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
uses: ./.github/workflows/build-kata-static-tarball-ppc64le.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
push-to-registry: yes
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-amd64:
|
||||
needs: build-assets-amd64
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-amd64
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ubuntu-22.04
|
||||
arch: amd64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-arm64:
|
||||
needs: build-assets-arm64
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-arm64
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ubuntu-22.04-arm
|
||||
arch: arm64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-s390x:
|
||||
needs: build-assets-s390x
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-s390x
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: s390x
|
||||
arch: s390x
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-ppc64le:
|
||||
needs: build-assets-ppc64le
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-ppc64le.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-ppc64le
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ppc64le
|
||||
arch: ppc64le
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
publish-manifest:
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
needs: [publish-kata-deploy-payload-amd64, publish-kata-deploy-payload-arm64, publish-kata-deploy-payload-s390x, publish-kata-deploy-payload-ppc64le]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Push multi-arch manifest
|
||||
|
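On the main side of payload-after-push.yaml, the per-architecture publish jobs call a single reusable workflow with least-privilege permissions and an explicitly forwarded secret, instead of the per-arch workflows and blanket `secrets: inherit` used on 3.14.0. One such caller, reassembled from the lines above:

  publish-kata-deploy-payload-amd64:
    needs: build-assets-amd64
    permissions:
      contents: read
      packages: write
    uses: ./.github/workflows/publish-kata-deploy-payload.yaml
    with:
      commit-hash: ${{ github.sha }}
      registry: quay.io
      repo: kata-containers/kata-deploy-ci
      tag: kata-containers-latest-amd64
      target-branch: ${{ github.ref_name }}
      runner: ubuntu-22.04
      arch: amd64
    secrets:
      QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}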
66 .github/workflows/publish-kata-deploy-payload-amd64.yaml (vendored, new file)
@ -0,0 +1,66 @@
|
||||
name: CI | Publish kata-deploy payload for amd64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz \
|
||||
${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
|
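A pattern visible throughout this compare: on main, every action reference is pinned to a full commit SHA with the release version kept only as a trailing comment, and checkout avoids persisting the token. A minimal sketch of the checkout step as it appears on the main side:

    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          ref: ${{ inputs.commit-hash }}
          fetch-depth: 0
          persist-credentials: false   # do not leave the token in .git/config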
66 .github/workflows/publish-kata-deploy-payload-arm64.yaml (vendored, new file)
@ -0,0 +1,66 @@
|
||||
name: CI | Publish kata-deploy payload for arm64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-arm64${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz \
|
||||
${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
|
@ -1,4 +1,4 @@
|
||||
name: CI | Publish kata-deploy payload
|
||||
name: CI | Publish kata-deploy payload for ppc64le
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -21,34 +21,25 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
runner:
|
||||
default: 'ubuntu-22.04'
|
||||
description: The runner to execute the workflow on. Defaults to 'ubuntu-22.04'.
|
||||
required: false
|
||||
type: string
|
||||
arch:
|
||||
description: The arch of the tarball.
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ${{ inputs.runner }}
|
||||
runs-on: ppc64le
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
"${HOME}/scripts/prepare_runner.sh"
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -56,28 +47,28 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball for ${{ inputs.arch }}
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-${{ inputs.arch}}${{ inputs.tarball-suffix }}
|
||||
name: kata-static-tarball-ppc64le${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload for ${{ inputs.arch }}
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
66 .github/workflows/publish-kata-deploy-payload-s390x.yaml (vendored, new file)
@ -0,0 +1,66 @@
|
||||
name: CI | Publish kata-deploy payload for s390x
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: s390x
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz \
|
||||
${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
|
38 .github/workflows/release-amd64.yaml (vendored)
@ -5,12 +5,6 @@ on:
|
||||
target-arch:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-amd64:
|
||||
@ -18,40 +12,28 @@ jobs:
|
||||
with:
|
||||
push-to-registry: yes
|
||||
stage: release
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
secrets: inherit
|
||||
|
||||
kata-deploy:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/checkout@v4
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64
|
||||
|
||||
@ -69,7 +51,7 @@ jobs:
|
||||
fi
|
||||
for tag in "${tags[@]}"; do
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
|
||||
"$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
|
||||
"${tag}-${{ inputs.target-arch }}"
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
|
||||
|
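The release-amd64.yaml hunk above is split across the two sides of the diff; reassembled, the main-side loop pushes each release tag to ghcr.io and quay.io (3.14.0 pushed to docker.io and quay.io instead). The tag argument of the second invocation is an assumption, mirroring the first, since the hunk is truncated at that point:

      - name: Build and push kata-deploy payloads
        run: |
          for tag in "${tags[@]}"; do
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              "$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"
            ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
              "$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
              "${tag}-${{ inputs.target-arch }}"   # assumed: same tag format as the ghcr.io push
          done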
38 .github/workflows/release-arm64.yaml (vendored)
@ -5,12 +5,6 @@ on:
|
||||
target-arch:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-arm64:
|
||||
@ -18,40 +12,28 @@ jobs:
|
||||
with:
|
||||
push-to-registry: yes
|
||||
stage: release
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
secrets: inherit
|
||||
|
||||
kata-deploy:
|
||||
needs: build-kata-static-tarball-arm64
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/checkout@v4
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-arm64
|
||||
|
||||
@ -69,7 +51,7 @@ jobs:
|
||||
fi
|
||||
for tag in "${tags[@]}"; do
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
|
||||
"$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
|
||||
"${tag}-${{ inputs.target-arch }}"
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
|
||||
|
44 .github/workflows/release-ppc64le.yaml (vendored)
@ -5,12 +5,6 @@ on:
|
||||
target-arch:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-ppc64le:
|
||||
@ -18,40 +12,34 @@ jobs:
|
||||
with:
|
||||
push-to-registry: yes
|
||||
stage: release
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
secrets: inherit
|
||||
|
||||
kata-deploy:
|
||||
needs: build-kata-static-tarball-ppc64le
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ppc64le
|
||||
steps:
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
bash "${HOME}/scripts/prepare_runner.sh"
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/checkout@v4
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-ppc64le
|
||||
|
||||
@ -69,7 +57,7 @@ jobs:
|
||||
fi
|
||||
for tag in "${tags[@]}"; do
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
|
||||
"$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
|
||||
"${tag}-${{ inputs.target-arch }}"
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
|
||||
|
42 .github/workflows/release-s390x.yaml (vendored)
@ -5,14 +5,6 @@ on:
|
||||
target-arch:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
CI_HKD_PATH:
|
||||
required: true
|
||||
QUAY_DEPLOYER_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-s390x:
|
||||
@ -20,42 +12,28 @@ jobs:
|
||||
with:
|
||||
push-to-registry: yes
|
||||
stage: release
|
||||
secrets:
|
||||
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
|
||||
secrets: inherit
|
||||
|
||||
kata-deploy:
|
||||
needs: build-kata-static-tarball-s390x
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: s390x
|
||||
steps:
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: actions/checkout@v4
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-s390x
|
||||
|
||||
@ -73,7 +51,7 @@ jobs:
|
||||
fi
|
||||
for tag in "${tags[@]}"; do
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "ghcr.io/kata-containers/kata-deploy" \
|
||||
"$(pwd)"/kata-static.tar.xz "docker.io/katadocker/kata-deploy" \
|
||||
"${tag}-${{ inputs.target-arch }}"
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz "quay.io/kata-containers/kata-deploy" \
|
||||
|
123 .github/workflows/release.yaml (vendored)
@ -2,20 +2,14 @@ name: Release Kata Containers
|
||||
on:
|
||||
workflow_dispatch
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release create` command
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Create a new release
|
||||
run: |
|
||||
@ -25,81 +19,50 @@ jobs:
|
||||
|
||||
build-and-push-assets-amd64:
|
||||
needs: release
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/release-amd64.yaml
|
||||
with:
|
||||
target-arch: amd64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
build-and-push-assets-arm64:
|
||||
needs: release
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/release-arm64.yaml
|
||||
with:
|
||||
target-arch: arm64
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
build-and-push-assets-s390x:
|
||||
needs: release
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/release-s390x.yaml
|
||||
with:
|
||||
target-arch: s390x
|
||||
secrets:
|
||||
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
build-and-push-assets-ppc64le:
|
||||
needs: release
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
uses: ./.github/workflows/release-ppc64le.yaml
|
||||
with:
|
||||
target-arch: ppc64le
|
||||
secrets:
|
||||
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
secrets: inherit
|
||||
|
||||
publish-multi-arch-images:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le]
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
packages: write # needed to push the multi-arch manifest to ghcr.io
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ vars.QUAY_DEPLOYER_USERNAME }}
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Get the image tags
|
||||
@ -107,22 +70,18 @@ jobs:
|
||||
release_version=$(./tools/packaging/release/release.sh release-version)
|
||||
echo "KATA_DEPLOY_IMAGE_TAGS=$release_version latest" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Publish multi-arch manifest on quay.io & ghcr.io
|
||||
- name: Publish multi-arch manifest on docker.io and quay.io
|
||||
run: |
|
||||
./tools/packaging/release/release.sh publish-multiarch-manifest
|
||||
env:
|
||||
KATA_DEPLOY_REGISTRIES: "quay.io/kata-containers/kata-deploy ghcr.io/kata-containers/kata-deploy"
|
||||
KATA_DEPLOY_REGISTRIES: "quay.io/kata-containers/kata-deploy docker.io/katadocker/kata-deploy"
|
||||
|
||||
upload-multi-arch-static-tarball:
|
||||
needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le]
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set KATA_STATIC_TARBALL env var
|
||||
run: |
|
||||
@ -130,7 +89,7 @@ jobs:
|
||||
echo "KATA_STATIC_TARBALL=${tarball}" >> "$GITHUB_ENV"
|
||||
|
||||
- name: Download amd64 artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64
|
||||
|
||||
@ -142,7 +101,7 @@ jobs:
|
||||
ARCHITECTURE: amd64
|
||||
|
||||
- name: Download arm64 artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-arm64
|
||||
|
||||
@ -154,7 +113,7 @@ jobs:
|
||||
ARCHITECTURE: arm64
|
||||
|
||||
- name: Download s390x artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-s390x
|
||||
|
||||
@ -166,7 +125,7 @@ jobs:
|
||||
ARCHITECTURE: s390x
|
||||
|
||||
- name: Download ppc64le artifacts
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-ppc64le
|
||||
|
||||
@ -180,13 +139,9 @@ jobs:
|
||||
upload-versions-yaml:
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Upload versions.yaml to GitHub
|
||||
run: |
|
||||
@ -197,13 +152,9 @@ jobs:
|
||||
upload-cargo-vendored-tarball:
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Generate and upload vendored code tarball
|
||||
run: |
|
||||
@ -214,13 +165,9 @@ jobs:
|
||||
upload-libseccomp-tarball:
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Download libseccomp tarball and upload it to GitHub
|
||||
run: |
|
||||
@ -231,17 +178,12 @@ jobs:
|
||||
upload-helm-chart-tarball:
|
||||
needs: release
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
packages: write # needed to push the helm chart to ghcr.io
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0
|
||||
uses: azure/setup-helm@v4.2.0
|
||||
id: install
|
||||
|
||||
- name: Generate and upload helm chart tarball
|
||||
@ -250,27 +192,12 @@ jobs:
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Login to the OCI registries
|
||||
run: |
|
||||
echo "${{ secrets.QUAY_DEPLOYER_PASSWORD }}" | helm registry login quay.io --username "${{ vars.QUAY_DEPLOYER_USERNAME }}" --password-stdin
|
||||
echo "${{ github.token }}" | helm registry login ghcr.io --username $ --password-stdin
|
||||
|
||||
- name: Push helm chart to the OCI registries
|
||||
run: |
|
||||
release_version=$(./tools/packaging/release/release.sh release-version)
|
||||
helm push "kata-deploy-${release_version}.tgz" oci://quay.io/kata-containers/kata-deploy-charts
|
||||
helm push "kata-deploy-${release_version}.tgz" oci://ghcr.io/kata-containers/kata-deploy-charts
|
||||
|
||||
publish-release:
|
||||
needs: [ build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le, publish-multi-arch-images, upload-multi-arch-static-tarball, upload-versions-yaml, upload-cargo-vendored-tarball, upload-libseccomp-tarball ]
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: write # needed for the `gh release` commands
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Publish a release
|
||||
run: |
|
||||
|
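The release.yaml hunk above logs helm into both OCI registries and pushes the chart twice. In the extracted text the ghcr.io username is truncated to a bare `$`; the value below is an assumption (`github.actor` is the conventional choice). Reassembled sketch:

      - name: Push the helm chart to the OCI registries
        run: |
          release_version=$(./tools/packaging/release/release.sh release-version)
          echo "${{ secrets.QUAY_DEPLOYER_PASSWORD }}" | helm registry login quay.io \
            --username "${{ vars.QUAY_DEPLOYER_USERNAME }}" --password-stdin
          echo "${{ github.token }}" | helm registry login ghcr.io \
            --username "${{ github.actor }}" --password-stdin   # username assumed
          helm push "kata-deploy-${release_version}.tgz" oci://quay.io/kata-containers/kata-deploy-charts
          helm push "kata-deploy-${release_version}.tgz" oci://ghcr.io/kata-containers/kata-deploy-charts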
69 .github/workflows/run-cri-containerd-tests-ppc64le.yaml (vendored, new file)
@ -0,0 +1,69 @@
|
||||
name: CI | Run cri-containerd tests on ppc64le
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
run-cri-containerd:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['active']
|
||||
vmm: ['qemu']
|
||||
runs-on: ppc64le
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"
|
||||
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
bash "${HOME}/scripts/prepare_runner.sh" cri-containerd
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-ppc64le${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
- name: Cleanup actions for the self hosted runner
|
||||
run: bash "${HOME}/scripts/cleanup_runner.sh"
|
56 .github/workflows/run-cri-containerd-tests-s390x.yaml (vendored, new file)
@ -0,0 +1,56 @@
|
||||
name: CI | Run cri-containerd tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
run-cri-containerd:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['active']
|
||||
vmm: ['qemu', 'qemu-runtime-rs']
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
74 .github/workflows/run-cri-containerd-tests.yaml (vendored)
@ -1,74 +0,0 @@
|
||||
name: CI | Run cri-containerd tests
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
runner:
|
||||
description: The runner to execute the workflow on.
|
||||
required: true
|
||||
type: string
|
||||
arch:
|
||||
description: The arch of the tarball.
|
||||
required: true
|
||||
type: string
|
||||
containerd_version:
|
||||
description: The version of containerd for testing.
|
||||
required: true
|
||||
type: string
|
||||
vmm:
|
||||
description: The kata hypervisor for testing.
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
run-cri-containerd:
|
||||
name: run-cri-containerd-${{ inputs.arch }} (${{ inputs.containerd_version }}, ${{ inputs.vmm }})
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: ${{ inputs.runner }}
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ inputs.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ inputs.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 15
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball for ${{ inputs.arch }}
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
with:
|
||||
name: kata-static-tarball-${{ inputs.arch }}${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run cri-containerd tests for ${{ inputs.arch }}
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
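The workflow above (present on main, removed on 3.14.0) is the consolidated variant: runner, arch, containerd_version and vmm arrive as inputs rather than being hard-coded per architecture. Its caller is not part of this hunk, so the following invocation is purely hypothetical, showing how an orchestrating workflow might call it:

  run-cri-containerd-amd64:
    uses: ./.github/workflows/run-cri-containerd-tests.yaml
    with:
      # Hypothetical values; the real caller is not shown in this compare.
      commit-hash: ${{ github.sha }}
      target-branch: ${{ github.ref_name }}
      runner: ubuntu-22.04
      arch: amd64
      containerd_version: active
      vmm: qemu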
32 .github/workflows/run-k8s-tests-on-aks.yaml (vendored)
@ -24,19 +24,6 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
|
||||
AZ_APPID:
|
||||
required: true
|
||||
AZ_TENANT_ID:
|
||||
required: true
|
||||
AZ_SUBSCRIPTION_ID:
|
||||
required: true
|
||||
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
@ -71,7 +58,6 @@ jobs:
|
||||
instance-type: normal
|
||||
auto-generate-policy: yes
|
||||
runs-on: ubuntu-22.04
|
||||
environment: ci
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@ -85,11 +71,10 @@ jobs:
|
||||
GENPOLICY_PULL_METHOD: ${{ matrix.genpolicy-pull-method }}
|
||||
AUTO_GENERATE_POLICY: ${{ matrix.auto-generate-policy }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -98,7 +83,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -110,14 +95,15 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
|
||||
|
||||
- name: Log into the Azure account
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh login-azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
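On main, the AKS jobs authenticate to Azure with the federated azure/login action (pinned by SHA) instead of exporting AZ_* values into a login shell script; the `id-token: write` permission added in the hunk above is what enables the OIDC flow. Consolidated from the lines above:

      - name: Log into the Azure account
        # Requires `id-token: write` at the workflow level, as declared above.
        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
        with:
          client-id: ${{ secrets.AZ_APPID }}
          tenant-id: ${{ secrets.AZ_TENANT_ID }}
          subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}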
|
@ -22,9 +22,6 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-amd64:
|
||||
strategy:
|
||||
@ -61,11 +58,10 @@ jobs:
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
CONTAINER_RUNTIME: ${{ matrix.container_runtime }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -101,7 +97,7 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.snapshotter }}-${{ matrix.k8s }}-${{ inputs.tag }}
|
||||
path: /tmp/artifacts
|
||||
|
87 .github/workflows/run-k8s-tests-on-arm64.yaml (vendored)
@ -1,87 +0,0 @@
|
||||
name: CI | Run kubernetes tests on arm64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-on-arm64:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu
|
||||
k8s:
|
||||
- kubeadm
|
||||
runs-on: arm64-k8s
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
TARGET_ARCH: "aarch64"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.k8s }}-${{ inputs.tag }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
12 .github/workflows/run-k8s-tests-on-ppc64le.yaml (vendored)
@ -22,9 +22,6 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
strategy:
|
||||
@ -46,11 +43,16 @@ jobs:
|
||||
USING_NFD: "false"
|
||||
TARGET_ARCH: "ppc64le"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
bash "${HOME}/scripts/prepare_runner.sh" kubernetes
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
|
11 .github/workflows/run-k8s-tests-on-zvsi.yaml (vendored)
@ -21,12 +21,6 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
@ -78,14 +72,13 @@ jobs:
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: ${{ matrix.using-nfd }}
|
||||
TARGET_ARCH: "s390x"
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
|
@ -24,20 +24,6 @@ on:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
secrets:
|
||||
|
||||
AZ_APPID:
|
||||
required: true
|
||||
AZ_TENANT_ID:
|
||||
required: true
|
||||
AZ_SUBSCRIPTION_ID:
|
||||
required: true
|
||||
AUTHENTICATED_IMAGE_PASSWORD:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
# Generate jobs for testing CoCo on non-TEE environments
|
||||
@ -52,7 +38,6 @@ jobs:
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: ubuntu-22.04
|
||||
environment: ci
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@ -65,16 +50,15 @@ jobs:
|
||||
KBS_INGRESS: "aks"
|
||||
KUBERNETES: "vanilla"
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -83,7 +67,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -95,14 +79,15 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
|
||||
|
||||
- name: Log into the Azure account
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh login-azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
|
55 .github/workflows/run-kata-coco-tests.yaml (vendored)
@ -24,21 +24,6 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
AUTHENTICATED_IMAGE_PASSWORD:
|
||||
required: true
|
||||
AZ_APPID:
|
||||
required: true
|
||||
AZ_TENANT_ID:
|
||||
required: true
|
||||
AZ_SUBSCRIPTION_ID:
|
||||
required: true
|
||||
ITA_KEY:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-on-tdx:
|
||||
@ -65,16 +50,15 @@ jobs:
|
||||
KBS_INGRESS: "nodeport"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
ITA_KEY: ${{ secrets.ITA_KEY }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -152,15 +136,14 @@ jobs:
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -224,7 +207,6 @@ jobs:
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: ubuntu-22.04
|
||||
environment: ci
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@ -237,22 +219,16 @@ jobs:
|
||||
KBS_INGRESS: "aks"
|
||||
KUBERNETES: "vanilla"
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
# Caution: current ingress controller used to expose the KBS service
|
||||
# requires many vCPUs, leaving only a few for the tests. Depending on the
# host type chosen, this can result in the creation of a cluster with
|
||||
# insufficient resources.
|
||||
K8S_TEST_HOST_TYPE: "all"
|
||||
USING_NFD: "false"
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -261,7 +237,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
@ -273,14 +249,15 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
|
||||
|
||||
- name: Log into the Azure account
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh login-azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
@ -321,10 +298,6 @@ jobs:
|
||||
timeout-minutes: 80
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Report tests
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh report-tests
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
|
||||
|
@ -21,17 +21,6 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
secrets:
|
||||
AZ_APPID:
|
||||
required: true
|
||||
AZ_TENANT_ID:
|
||||
required: true
|
||||
AZ_SUBSCRIPTION_ID:
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
run-kata-deploy-tests:
|
||||
@ -49,7 +38,6 @@ jobs:
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
runs-on: ubuntu-22.04
|
||||
environment: ci
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@ -60,11 +48,10 @@ jobs:
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -76,14 +63,15 @@ jobs:
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh install-azure-cli
|
||||
|
||||
- name: Log into the Azure account
|
||||
uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZ_APPID }}
|
||||
tenant-id: ${{ secrets.AZ_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh login-azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
|
@ -1,4 +1,4 @@
|
||||
name: CI | Run kata-deploy tests
|
||||
name: CI | Run kata-deploy tests on GARM
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -22,22 +22,23 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-kata-deploy-tests:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- clh
|
||||
- qemu
|
||||
k8s:
|
||||
- k0s
|
||||
- k3s
|
||||
- rke2
|
||||
- microk8s
|
||||
runs-on: ubuntu-22.04
|
||||
# TODO: There are a couple of vmm/k8s combination failing (https://github.com/kata-containers/kata-containers/issues/9854)
|
||||
# and we will put the entire kata-deploy-tests on GARM on maintenance.
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2004-smaller
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@ -47,11 +48,10 @@ jobs:
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
@ -13,9 +13,6 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-monitor:
|
||||
strategy:
|
||||
@ -40,11 +37,10 @@ jobs:
|
||||
#CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -56,7 +52,7 @@ jobs:
|
||||
run: bash tests/functional/kata-monitor/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
95
.github/workflows/run-metrics.yaml
vendored
@ -2,17 +2,8 @@ name: CI | Run test metrics
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
@ -22,11 +13,35 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
setup-kata:
|
||||
name: Kata Setup
|
||||
runs-on: metrics
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
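The rebase step above delegates to `tests/git-helper.sh`; conceptually it amounts to something like the following (an approximation for orientation, not the helper's verbatim contents):

    # Replay the PR commits on top of the tip of the target branch.
    git config user.email "ci@example.com"   # placeholder identity for the rebase
    git config user.name "Kata CI"
    git fetch origin "${TARGET_BRANCH}"
    git rebase "origin/${TARGET_BRANCH}"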
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/metrics/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
run-metrics:
|
||||
needs: setup-kata
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
@ -39,91 +54,41 @@ jobs:
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
USING_NFD: "false"
|
||||
KUBERNETES: kubeadm
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-kubeadm
|
||||
|
||||
- name: Install check metrics
|
||||
run: bash tests/metrics/gha-run.sh install-checkmetrics
|
||||
|
||||
- name: enabling the hypervisor
|
||||
run: bash tests/metrics/gha-run.sh enabling-hypervisor
|
||||
|
||||
- name: run launch times test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-launchtimes
|
||||
|
||||
- name: run memory foot print test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-memory-usage
|
||||
|
||||
- name: run memory usage inside container test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-memory-usage-inside-container
|
||||
|
||||
- name: run blogbench test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-blogbench
|
||||
|
||||
- name: run tensorflow test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-tensorflow
|
||||
|
||||
- name: run fio test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-fio
|
||||
|
||||
- name: run iperf test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-iperf
|
||||
|
||||
- name: run latency test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-latency
|
||||
|
||||
- name: check metrics
|
||||
run: bash tests/metrics/gha-run.sh check-metrics
|
||||
|
||||
- name: make metrics tarball ${{ matrix.vmm }}
|
||||
run: bash tests/metrics/gha-run.sh make-tarball-results
|
||||
|
||||
- name: archive metrics results ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: metrics-artifacts-${{ matrix.vmm }}
|
||||
path: results-${{ matrix.vmm }}.tar.gz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Delete kata-deploy
|
||||
timeout-minutes: 10
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-kubeadm
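Every step in this job funnels through `tests/metrics/gha-run.sh`, so the same suite can be driven by hand on a host that already has Kata deployed on a kubeadm cluster, as in the earlier steps. A rough sketch:

    export KATA_HYPERVISOR=qemu   # or another hypervisor from the workflow matrix
    bash tests/metrics/gha-run.sh install-checkmetrics
    bash tests/metrics/gha-run.sh enabling-hypervisor
    for t in launchtimes memory-usage memory-usage-inside-container \
             blogbench tensorflow fio iperf latency; do
        bash tests/metrics/gha-run.sh "run-test-${t}"
    done
    bash tests/metrics/gha-run.sh check-metrics
    bash tests/metrics/gha-run.sh make-tarball-results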
|
||||
|
8
.github/workflows/run-runk-tests.yaml
vendored
@ -13,9 +13,6 @@ on:
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run-runk:
|
||||
# Skip runk tests as we have no maintainers. TODO: Decide when to remove altogether
|
||||
@ -24,11 +21,10 @@ jobs:
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
@ -40,7 +36,7 @@ jobs:
|
||||
run: bash tests/integration/runk/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
13
.github/workflows/shellcheck.yaml
vendored
@ -10,9 +10,6 @@ on:
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
@ -22,11 +19,11 @@ jobs:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master (2024-06-20)
|
||||
with:
|
||||
ignore_paths: "**/vendor/**"
|
||||
uses: ludeeus/action-shellcheck@master
|
||||
|
||||
|
35
.github/workflows/shellcheck_required.yaml
vendored
@ -1,35 +0,0 @@
|
||||
|
||||
# https://github.com/marketplace/actions/shellcheck
|
||||
name: Shellcheck required
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
shellcheck-required:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master (2024-06-20)
|
||||
with:
|
||||
severity: error
|
||||
ignore_paths: "**/vendor/**"
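Both ShellCheck jobs wrap `ludeeus/action-shellcheck`, which ultimately runs `shellcheck` over the repository's scripts. A rough local equivalent of the required (error-severity) variant, assuming `shellcheck` is installed:

    # Mirror the job's ignore_paths by skipping vendored code.
    find . -name '*.sh' -not -path '*/vendor/*' -print0 \
        | xargs -0 shellcheck --severity=error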
|
5
.github/workflows/stale.yaml
vendored
@ -4,14 +4,11 @@ on:
|
||||
- cron: '0 0 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
|
||||
- uses: actions/stale@v9
|
||||
with:
|
||||
stale-pr-message: 'This PR has been opened with no activity for 180 days. Comment on the PR, otherwise it will be closed in 7 days'
|
||||
days-before-pr-stale: 180
|
||||
|
15
.github/workflows/static-checks-self-hosted.yaml
vendored
@ -6,9 +6,6 @@ on:
|
||||
- reopened
|
||||
- labeled # a workflow runs only when the 'ok-to-test' label is added
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
@ -35,15 +32,3 @@ jobs:
|
||||
uses: ./.github/workflows/build-checks.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
|
||||
build-checks-preview:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
instance:
|
||||
- "riscv-builder"
|
||||
uses: ./.github/workflows/build-checks-preview-riscv64.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
|
37
.github/workflows/static-checks.yaml
vendored
@ -5,10 +5,6 @@ on:
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
@ -28,10 +24,9 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- name: Ensure the kernel config version has been updated
|
||||
run: |
|
||||
kernel_dir="tools/packaging/kernel/"
|
||||
@ -70,13 +65,12 @@ jobs:
|
||||
component-path: src/dragonball
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
- name: Install system deps
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get install -y build-essential musl-tools
|
||||
sudo apt-get install -y build-essential musl-tools
|
||||
- name: Install yq
|
||||
run: |
|
||||
sudo -E ./ci/install_yq.sh
|
||||
@ -93,7 +87,6 @@ jobs:
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
|
||||
static-checks:
|
||||
runs-on: ubuntu-22.04
|
||||
@ -106,15 +99,11 @@ jobs:
|
||||
- "make static-checks"
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
permissions:
|
||||
contents: read # for checkout
|
||||
packages: write # for push to ghcr.io
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Install yq
|
||||
run: |
|
||||
@ -129,24 +118,8 @@ jobs:
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
|
||||
- name: Install open-policy-agent
|
||||
run: |
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}"
|
||||
./tests/install_opa.sh
|
||||
- name: Install regorus
|
||||
env:
|
||||
ARTEFACT_REPOSITORY: "${{ github.repository }}"
|
||||
ARTEFACT_REGISTRY_USERNAME: "${{ github.actor }}"
|
||||
ARTEFACT_REGISTRY_PASSWORD: "${{ secrets.GITHUB_TOKEN }}"
|
||||
run: |
|
||||
"${GOPATH}/src/github.com/${{ github.repository }}/tests/install_regorus.sh"
|
||||
sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
|
||||
- name: Run check
|
||||
run: |
|
||||
export PATH="${PATH}:${GOPATH}/bin"
|
||||
cd "${GOPATH}/src/github.com/${{ github.repository }}" && ${{ matrix.cmd }}
|
||||
|
||||
govulncheck:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
uses: ./.github/workflows/govulncheck.yaml
|
||||
|
2
.gitignore
vendored
@ -16,5 +16,3 @@ src/agent/protocols/src/*.rs
|
||||
build
|
||||
src/tools/log-parser/kata-log-parser
|
||||
tools/packaging/static-build/agent/install_libseccomp.sh
|
||||
.envrc
|
||||
.direnv
|
||||
|
26
ci/README.md
@ -172,28 +172,12 @@ For new jobs, or jobs that haven't been marked as required recently,
|
||||
the criteria to be initially marked as required is ten days
of passing tests, with no relevant PR failures reported in that time.
Required jobs also need one or more nominated maintainers that are
responsible for the stability of their jobs. Maintainers can be registered
in [`maintainers.yml`](https://github.com/kata-containers/kata-containers.github.io/blob/main/maintainers.yml)
and will then show on the CI Dashboard.
responsible for the stability of their jobs.

To add transparency to making jobs required/non-required and to keep the
GitHub UI in sync with the [Gatekeeper job](../tools/testing/gatekeeper),
the process to update a job's required state is as follows:
1. Create a PR to update `maintainers.yml`, if new maintainers are being
   declared on a CI job.
1. Create a PR which updates
   [`required-tests.yaml`](../tools/testing/gatekeeper/required-tests.yaml)
   adding the new job and listing the evidence that the job meets the
   requirements above. Ensure that all maintainers and
   @kata-containers/architecture-committee are notified to give them the
   opportunity to review the PR. See
   [#11015](https://github.com/kata-containers/kata-containers/pull/11015)
   as an example.
1. The maintainers and Architecture Committee get a chance to review the PR.
   It can be discussed in an AC meeting to get broader input.
1. Once the PR has been merged, a Kata Containers admin should be notified
   to ensure that the GitHub UI is updated to reflect the change in
   `required-tests.yaml`.
> [!NOTE]
> We don't currently have a good place to record the job maintainers, but
> once we have this, the intention is to show it on the CI Dashboard so
> people can find the contact easily.
|
||||
|
||||
#### Expectation of required job maintainers
|
||||
|
||||
|
@ -7,16 +7,16 @@
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
runtimedir=${cidir}/../src/runtime
|
||||
runtimedir=$cidir/../src/runtime
|
||||
|
||||
build_working_packages() {
|
||||
# working packages:
|
||||
device_api=${runtimedir}/pkg/device/api
|
||||
device_config=${runtimedir}/pkg/device/config
|
||||
device_drivers=${runtimedir}/pkg/device/drivers
|
||||
device_manager=${runtimedir}/pkg/device/manager
|
||||
rc_pkg_dir=${runtimedir}/pkg/resourcecontrol/
|
||||
utils_pkg_dir=${runtimedir}/virtcontainers/utils
|
||||
device_api=$runtimedir/pkg/device/api
|
||||
device_config=$runtimedir/pkg/device/config
|
||||
device_drivers=$runtimedir/pkg/device/drivers
|
||||
device_manager=$runtimedir/pkg/device/manager
|
||||
rc_pkg_dir=$runtimedir/pkg/resourcecontrol/
|
||||
utils_pkg_dir=$runtimedir/virtcontainers/utils
|
||||
|
||||
# broken packages :( :
|
||||
#katautils=$runtimedir/pkg/katautils
|
||||
@ -24,15 +24,15 @@ build_working_packages() {
|
||||
#vc=$runtimedir/virtcontainers
|
||||
|
||||
pkgs=(
|
||||
"${device_api}"
|
||||
"${device_config}"
|
||||
"${device_drivers}"
|
||||
"${device_manager}"
|
||||
"${utils_pkg_dir}"
|
||||
"${rc_pkg_dir}")
|
||||
"$device_api"
|
||||
"$device_config"
|
||||
"$device_drivers"
|
||||
"$device_manager"
|
||||
"$utils_pkg_dir"
|
||||
"$rc_pkg_dir")
|
||||
for pkg in "${pkgs[@]}"; do
|
||||
echo building "${pkg}"
|
||||
pushd "${pkg}" &>/dev/null
|
||||
echo building "$pkg"
|
||||
pushd "$pkg" &>/dev/null
|
||||
go build
|
||||
go test
|
||||
popd &>/dev/null
|
||||
|
@ -10,7 +10,7 @@ set -o errtrace
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
[[ -n "${DEBUG:-}" ]] && set -o xtrace
|
||||
[ -n "${DEBUG:-}" ] && set -o xtrace
|
||||
|
||||
script_name=${0##*/}
|
||||
|
||||
@ -25,7 +25,7 @@ die()
|
||||
usage()
|
||||
{
|
||||
cat <<EOF
|
||||
Usage: ${script_name} [OPTIONS] [command] [arguments]
|
||||
Usage: $script_name [OPTIONS] [command] [arguments]
|
||||
|
||||
Description: Utility to expand the abilities of the GitHub CLI tool, gh.
|
||||
|
||||
@ -48,7 +48,7 @@ Examples:
|
||||
|
||||
- List issues for a Pull Request 123 in kata-containers/kata-containers repo
|
||||
|
||||
$ ${script_name} list-issues-for-pr 123
|
||||
$ $script_name list-issues-for-pr 123
|
||||
EOF
|
||||
}
|
||||
|
||||
@ -57,12 +57,11 @@ list_issues_for_pr()
|
||||
local pr="${1:-}"
|
||||
local repo="${2:-kata-containers/kata-containers}"
|
||||
|
||||
[[ -z "${pr}" ]] && die "need PR"
|
||||
[ -z "$pr" ] && die "need PR"
|
||||
|
||||
local commits
|
||||
commits=$(gh pr view "${pr}" --repo "${repo}" --json commits --jq .commits[].messageBody)
|
||||
local commits=$(gh pr view ${pr} --repo ${repo} --json commits --jq .commits[].messageBody)
|
||||
|
||||
[[ -z "${commits}" ]] && die "cannot determine commits for PR ${pr}"
|
||||
[ -z "$commits" ] && die "cannot determine commits for PR $pr"
|
||||
|
||||
# Extract the issue number(s) from the commits.
|
||||
#
|
||||
@ -79,8 +78,7 @@ list_issues_for_pr()
|
||||
#
|
||||
# "<git-commit> <git-commit-msg>"
|
||||
#
|
||||
local issues
|
||||
issues=$(echo "${commits}" |\
|
||||
local issues=$(echo "$commits" |\
|
||||
grep -v -E "^( | )" |\
|
||||
grep -i -E "fixes:* *(#*[0-9][0-9]*)" |\
|
||||
tr ' ' '\n' |\
|
||||
@ -88,16 +86,16 @@ list_issues_for_pr()
|
||||
sed 's/[.,\#]//g' |\
|
||||
sort -nu || true)
|
||||
|
||||
[[ -z "${issues}" ]] && die "cannot determine issues for PR ${pr}"
|
||||
[ -z "$issues" ] && die "cannot determine issues for PR $pr"
|
||||
|
||||
echo "# Issues linked to PR"
|
||||
echo "#"
|
||||
echo "# Fields: issue_number"
|
||||
|
||||
local issue
|
||||
echo "${issues}" | while read -r issue
|
||||
echo "$issues"|while read issue
|
||||
do
|
||||
printf "%s\n" "${issue}"
|
||||
printf "%s\n" "$issue"
|
||||
done
|
||||
}
|
||||
|
||||
@ -105,21 +103,20 @@ list_labels_for_issue()
|
||||
{
|
||||
local issue="${1:-}"
|
||||
|
||||
[[ -z "${issue}" ]] && die "need issue number"
|
||||
[ -z "$issue" ] && die "need issue number"
|
||||
|
||||
local labels
|
||||
labels=$(gh issue view "${issue}" --repo kata-containers/kata-containers --json labels)
|
||||
local labels=$(gh issue view ${issue} --repo kata-containers/kata-containers --json labels)
|
||||
|
||||
[[ -z "${labels}" ]] && die "cannot determine labels for issue ${issue}"
|
||||
[ -z "$labels" ] && die "cannot determine labels for issue $issue"
|
||||
|
||||
echo "${labels}"
|
||||
printf "$labels"
|
||||
}
|
||||
|
||||
setup()
|
||||
{
|
||||
for cmd in gh jq
|
||||
do
|
||||
command -v "${cmd}" &>/dev/null || die "need command: ${cmd}"
|
||||
command -v "$cmd" &>/dev/null || die "need command: $cmd"
|
||||
done
|
||||
}
|
||||
|
||||
@ -127,28 +124,29 @@ handle_args()
|
||||
{
|
||||
setup
|
||||
|
||||
local show_all="false"
|
||||
local opt
|
||||
|
||||
while getopts "hr:" opt "$@"
|
||||
while getopts "ahr:" opt "$@"
|
||||
do
|
||||
case "${opt}" in
|
||||
case "$opt" in
|
||||
a) show_all="true" ;;
|
||||
h) usage && exit 0 ;;
|
||||
r) repo="${OPTARG}" ;;
|
||||
*) echo "use '-h' to get list of supprted aruments" && exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $((OPTIND - 1))
|
||||
shift $(($OPTIND - 1))
|
||||
|
||||
local repo="${repo:-kata-containers/kata-containers}"
|
||||
local cmd="${1:-}"
|
||||
|
||||
case "${cmd}" in
|
||||
case "$cmd" in
|
||||
list-issues-for-pr) ;;
|
||||
list-labels-for-issue) ;;
|
||||
|
||||
"") usage && exit 0 ;;
|
||||
*) die "invalid command: '${cmd}'" ;;
|
||||
*) die "invalid command: '$cmd'" ;;
|
||||
esac
|
||||
|
||||
# Consume the command name
|
||||
@ -157,20 +155,20 @@ handle_args()
|
||||
local issue=""
|
||||
local pr=""
|
||||
|
||||
case "${cmd}" in
|
||||
case "$cmd" in
|
||||
list-issues-for-pr)
|
||||
pr="${1:-}"
|
||||
|
||||
list_issues_for_pr "${pr}" "${repo}"
|
||||
list_issues_for_pr "$pr" "${repo}"
|
||||
;;
|
||||
|
||||
list-labels-for-issue)
|
||||
issue="${1:-}"
|
||||
|
||||
list_labels_for_issue "${issue}"
|
||||
list_labels_for_issue "$issue"
|
||||
;;
|
||||
|
||||
*) die "impossible situation: cmd: '${cmd}'" ;;
|
||||
*) die "impossible situation: cmd: '$cmd'" ;;
|
||||
esac
|
||||
|
||||
exit 0
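The heart of `list_issues_for_pr()` is a single `gh` query plus text filtering for `Fixes: #N` lines; stripped of the script plumbing it is roughly the following (a sketch; requires an authenticated `gh`):

    pr=123
    repo="kata-containers/kata-containers"
    gh pr view "${pr}" --repo "${repo}" --json commits --jq '.commits[].messageBody' \
        | grep -i -E "fixes:* *(#*[0-9][0-9]*)" \
        | grep -o -E "[0-9][0-9]*" \
        | sort -nu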
|
||||
|
@ -8,6 +8,7 @@
|
||||
set -o errexit
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
script_name="$(basename "${BASH_SOURCE[0]}")"
|
||||
|
||||
source "${script_dir}/../tests/common.bash"
|
||||
|
||||
@ -21,11 +22,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
|
||||
|
||||
# Variables for libseccomp
|
||||
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
|
||||
if [[ -z "${libseccomp_version}" ]]; then
|
||||
if [ -z "${libseccomp_version}" ]; then
|
||||
libseccomp_version=$(get_from_kata_deps ".externals.libseccomp.version")
|
||||
fi
|
||||
libseccomp_url="${LIBSECCOMP_URL:-""}"
|
||||
if [[ -z "${libseccomp_url}" ]]; then
|
||||
if [ -z "${libseccomp_url}" ]; then
|
||||
libseccomp_url=$(get_from_kata_deps ".externals.libseccomp.url")
|
||||
fi
|
||||
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
|
||||
@ -34,21 +35,19 @@ cflags="-O2"
|
||||
|
||||
# Variables for gperf
|
||||
gperf_version="${GPERF_VERSION:-""}"
|
||||
if [[ -z "${gperf_version}" ]]; then
|
||||
if [ -z "${gperf_version}" ]; then
|
||||
gperf_version=$(get_from_kata_deps ".externals.gperf.version")
|
||||
fi
|
||||
gperf_url="${GPERF_URL:-""}"
|
||||
if [[ -z "${gperf_url}" ]]; then
|
||||
if [ -z "${gperf_url}" ]; then
|
||||
gperf_url=$(get_from_kata_deps ".externals.gperf.url")
|
||||
fi
|
||||
gperf_tarball="gperf-${gperf_version}.tar.gz"
|
||||
gperf_tarball_url="${gperf_url}/${gperf_tarball}"
|
||||
|
||||
# We need to build the libseccomp library from sources to create a static
|
||||
# library for the musl libc.
|
||||
# However, ppc64le, riscv64 and s390x have no musl targets in Rust. Hence, we do
|
||||
# not set cflags for the musl libc.
|
||||
if [[ "${arch}" != "ppc64le" ]] && [[ "${arch}" != "riscv64" ]] && [[ "${arch}" != "s390x" ]]; then
|
||||
# We need to build the libseccomp library from sources to create a static library for the musl libc.
|
||||
# However, ppc64le and s390x have no musl targets in Rust. Hence, we do not set cflags for the musl libc.
|
||||
if ([ "${arch}" != "ppc64le" ] && [ "${arch}" != "s390x" ]); then
|
||||
# Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2
|
||||
cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2"
|
||||
fi
|
||||
@ -72,10 +71,10 @@ build_and_install_gperf() {
|
||||
tar -xf "${gperf_tarball}"
|
||||
pushd "gperf-${gperf_version}"
|
||||
# Unset $CC for configure, we will always use native for gperf
|
||||
CC="" ./configure --prefix="${gperf_install_dir}"
|
||||
CC= ./configure --prefix="${gperf_install_dir}"
|
||||
make
|
||||
make install
|
||||
export PATH=${PATH}:"${gperf_install_dir}"/bin
|
||||
export PATH=$PATH:"${gperf_install_dir}"/bin
|
||||
popd
|
||||
echo "Gperf installed successfully"
|
||||
}
|
||||
@ -86,7 +85,7 @@ build_and_install_libseccomp() {
|
||||
curl -sLO "${libseccomp_tarball_url}"
|
||||
tar -xf "${libseccomp_tarball}"
|
||||
pushd "libseccomp-${libseccomp_version}"
|
||||
[[ "${arch}" == $(uname -m) ]] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
|
||||
make
|
||||
make install
|
||||
@ -98,11 +97,11 @@ main() {
|
||||
local libseccomp_install_dir="${1:-}"
|
||||
local gperf_install_dir="${2:-}"
|
||||
|
||||
if [[ -z "${libseccomp_install_dir}" ]] || [[ -z "${gperf_install_dir}" ]]; then
|
||||
if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then
|
||||
die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
|
||||
fi
|
||||
|
||||
pushd "${workdir}"
|
||||
pushd "$workdir"
|
||||
# gperf is required for building the libseccomp.
|
||||
build_and_install_gperf
|
||||
build_and_install_libseccomp
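A minimal invocation of this helper looks like the following sketch; the two arguments are the install prefixes validated in `main()`, and the path to the script is an assumption about where it lives in the tree:

    libseccomp_dir="$(mktemp -d)"
    gperf_dir="$(mktemp -d)"
    ./ci/install_libseccomp.sh "${libseccomp_dir}" "${gperf_dir}"
    ls "${libseccomp_dir}/lib"   # a static libseccomp.a should now be present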
|
||||
|
@ -5,20 +5,20 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
[[ -n "${DEBUG}" ]] && set -o xtrace
|
||||
[ -n "$DEBUG" ] && set -o xtrace
|
||||
|
||||
# If we fail for any reason a message will be displayed
|
||||
die() {
|
||||
msg="$*"
|
||||
echo "ERROR: ${msg}" >&2
|
||||
echo "ERROR: $msg" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
function verify_yq_exists() {
|
||||
local yq_path=$1
|
||||
local yq_version=$2
|
||||
local expected="yq (https://github.com/mikefarah/yq/) version ${yq_version}"
|
||||
if [[ -x "${yq_path}" ]] && [[ "$(${yq_path} --version)"X == "${expected}"X ]]; then
|
||||
local expected="yq (https://github.com/mikefarah/yq/) version $yq_version"
|
||||
if [ -x "${yq_path}" ] && [ "$($yq_path --version)"X == "$expected"X ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
@ -34,20 +34,20 @@ function install_yq() {
|
||||
local yq_path=""
|
||||
INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
|
||||
|
||||
if [[ "${INSTALL_IN_GOPATH}" == "true" ]]; then
|
||||
if [ "${INSTALL_IN_GOPATH}" == "true" ]; then
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
yq_path="${GOPATH}/bin/yq"
|
||||
else
|
||||
yq_path="/usr/local/bin/yq"
|
||||
fi
|
||||
if verify_yq_exists "${yq_path}" "${yq_version}"; then
|
||||
if verify_yq_exists "$yq_path" "$yq_version"; then
|
||||
echo "yq is already installed in correct version"
|
||||
return
|
||||
fi
|
||||
if [[ "${yq_path}" == "/usr/local/bin/yq" ]]; then
|
||||
if [ "${yq_path}" == "/usr/local/bin/yq" ]; then
|
||||
# Check if we need sudo to install yq
|
||||
if [[ ! -w "/usr/local/bin" ]]; then
|
||||
if [ ! -w "/usr/local/bin" ]; then
|
||||
# Check if we have sudo privileges
|
||||
if ! sudo -n true 2>/dev/null; then
|
||||
die "Please provide sudo privileges to install yq"
|
||||
@ -76,7 +76,7 @@ function install_yq() {
|
||||
# If we're on an apple silicon machine, just assign amd64.
|
||||
# The version of yq we use doesn't have a darwin arm build,
|
||||
# but Rosetta can come to the rescue here.
|
||||
if [[ ${goos} == "Darwin" ]]; then
|
||||
if [ $goos == "Darwin" ]; then
|
||||
goarch=amd64
|
||||
else
|
||||
goarch=arm64
|
||||
@ -107,7 +107,8 @@ function install_yq() {
|
||||
|
||||
## NOTE: ${var,,} => gives lowercase value of var
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
|
||||
${precmd} curl -o "${yq_path}" -LSsf "${yq_url}" || die "Download ${yq_url} failed"
|
||||
${precmd} curl -o "${yq_path}" -LSsf "${yq_url}"
|
||||
[ $? -ne 0 ] && die "Download ${yq_url} failed"
|
||||
${precmd} chmod +x "${yq_path}"
|
||||
|
||||
if ! command -v "${yq_path}" >/dev/null; then
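Because the helper installs into `${GOPATH}/bin` by default, a typical use from another script is roughly (a sketch of the calling convention, not a verbatim excerpt):

    export INSTALL_IN_GOPATH=true
    source ./ci/install_yq.sh   # provides install_yq()
    install_yq
    "${GOPATH:-${HOME}/go}/bin/yq" --version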
|
||||
|
@ -147,11 +147,3 @@ all images with a single MCP update instead of per-image MCP update.
|
||||
You can check the bisection progress during/after execution by running
|
||||
``bisecter log`` from the current directory. Before starting a new
|
||||
bisection you need to execute ``bisecter reset``.
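In other words, a quick way to inspect or restart a bisection from the working directory used above (only these two sub-commands are taken from the text; anything else about bisecter's interface is out of scope here):

    bisecter log     # show the progress of the current bisection
    bisecter reset   # clear the state before starting a new bisection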
|
||||
|
||||
|
||||
Peer pods
|
||||
=========
|
||||
|
||||
It's possible to run similar testing on peer-pods using cloud-api-adaptor.
|
||||
Our CI configuration to run inside azure's OCP is in ``peer-pods-azure.sh``
|
||||
and can be used to replace the `test.sh` step in snippets above.
|
||||
|
@ -3,28 +3,25 @@
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
if [[ "$#" -gt 2 ]] || [[ "$#" -lt 1 ]] ; then
|
||||
if [ "$#" -gt 2 ] || [ "$#" -lt 1 ] ; then
|
||||
echo "Usage: $0 GOOD [BAD]"
|
||||
echo "Prints list of available kata-deploy-ci tags between GOOD and BAD commits (by default BAD is the latest available tag)"
|
||||
exit 255
|
||||
fi
|
||||
GOOD="$1"
|
||||
[[ -n "$2" ]] && BAD="$2"
|
||||
[ -n "$2" ] && BAD="$2"
|
||||
ARCH=amd64
|
||||
REPO="quay.io/kata-containers/kata-deploy-ci"
|
||||
|
||||
TAGS=$(skopeo list-tags "docker://${REPO}")
|
||||
# For testing
|
||||
#echo "$TAGS" > tags
|
||||
#TAGS=$(cat tags)
|
||||
TAGS=$(skopeo list-tags "docker://$REPO")
|
||||
# Only amd64
|
||||
TAGS=$(echo "${TAGS}" | jq '.Tags' | jq "map(select(endswith(\"${ARCH}\")))" | jq -r '.[]')
|
||||
TAGS=$(echo "$TAGS" | jq '.Tags' | jq "map(select(endswith(\"$ARCH\")))" | jq -r '.[]')
|
||||
# Sort by git
|
||||
SORTED=""
|
||||
[[ -n "${BAD}" ]] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
|
||||
for TAG in $(git log --merges --pretty=format:%H --reverse "${LOG_ARGS}"); do
|
||||
[[ "${TAGS}" =~ ${TAG} ]] && SORTED+="
|
||||
kata-containers-${TAG}-${ARCH}"
|
||||
[ -n "$BAD" ] && LOG_ARGS="$GOOD~1..$BAD" || LOG_ARGS="$GOOD~1.."
|
||||
for TAG in $(git log --merges --pretty=format:%H --reverse $LOG_ARGS); do
|
||||
[[ "$TAGS" =~ "$TAG" ]] && SORTED+="
|
||||
kata-containers-$TAG-$ARCH"
|
||||
done
|
||||
# Comma separated tags with repo
|
||||
echo "${SORTED}" | tail -n +2 | sed -e "s@^@${REPO}:@" | paste -s -d, -
|
||||
echo "$SORTED" | tail -n +2 | sed -e "s@^@$REPO:@" | paste -s -d, -
|
||||
|
@ -7,14 +7,11 @@
|
||||
# This script tries to removes most of the resources added by `test.sh` script
|
||||
# from the cluster.
|
||||
|
||||
scripts_dir=$(dirname "$0")
|
||||
scripts_dir=$(dirname $0)
|
||||
deployments_dir=${scripts_dir}/cluster/deployments
|
||||
configs_dir=${scripts_dir}/configs
|
||||
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${scripts_dir}/lib.sh"
|
||||
|
||||
# Set your katacontainers repo dir location
|
||||
[[ -z "${katacontainers_repo_dir}" ]] && echo "Please set katacontainers_repo_dir variable to your kata repo"
|
||||
source ${scripts_dir}/lib.sh
|
||||
|
||||
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
|
||||
# workers.
|
||||
@ -39,23 +36,24 @@ oc delete -f "${scripts_dir}/smoke/http-server.yaml"
|
||||
|
||||
# Delete test.sh resources
|
||||
oc delete -f "${deployments_dir}/relabel_selinux.yaml"
|
||||
if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
|
||||
if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
|
||||
oc delete -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
|
||||
oc delete -f "${deployments_dir}/workaround-9206-crio.yaml"
|
||||
fi
|
||||
[[ ${SELINUX_PERMISSIVE} == "yes" ]] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
|
||||
[ ${SELINUX_PERMISSIVE} == "yes" ] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
|
||||
|
||||
# Delete kata-containers
|
||||
pushd "${katacontainers_repo_dir}/tools/packaging/kata-deploy" || { echo "Failed to push to ${katacontainers_repo_dir}/tools/packaging/kata-deploy"; exit 125; }
|
||||
pushd "$katacontainers_repo_dir/tools/packaging/kata-deploy"
|
||||
oc delete -f kata-deploy/base/kata-deploy.yaml
|
||||
oc -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
|
||||
oc apply -f kata-cleanup/base/kata-cleanup.yaml
|
||||
echo "Wait for all related pods to be gone"
|
||||
( repeats=1; for _ in $(seq 1 600); do
|
||||
( repeats=1; for i in $(seq 1 600); do
|
||||
oc get pods -l name="kubelet-kata-cleanup" --no-headers=true -n kube-system 2>&1 | grep "No resources found" -q && ((repeats++)) || repeats=1
|
||||
[[ "${repeats}" -gt 5 ]] && echo kata-cleanup finished && break
|
||||
[ "$repeats" -gt 5 ] && echo kata-cleanup finished && break
|
||||
sleep 1
|
||||
done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit 1; }
|
||||
done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit -1; }
|
||||
oc delete -f kata-cleanup/base/kata-cleanup.yaml
|
||||
oc delete -f kata-rbac/base/kata-rbac.yaml
|
||||
oc delete -f runtimeclasses/kata-runtimeClasses.yaml
|
||||
|
||||
|
@ -13,9 +13,8 @@ set -e
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
script_dir="$(realpath "$(dirname "$0")")"
|
||||
script_dir="$(realpath $(dirname $0))"
|
||||
webhook_dir="${script_dir}/../../../tools/testing/kata-webhook"
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${script_dir}/../lib.sh"
|
||||
KATA_RUNTIME=${KATA_RUNTIME:-kata-ci}
|
||||
|
||||
@ -24,11 +23,14 @@ pushd "${webhook_dir}" >/dev/null
|
||||
#
|
||||
info "Builds the kata-webhook"
|
||||
./create-certs.sh
|
||||
info "Override our KATA_RUNTIME ConfigMap"
|
||||
sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: ${KATA_RUNTIME}/g"
|
||||
info "Deploys the kata-webhook"
|
||||
oc apply -f deploy/
|
||||
|
||||
info "Override our KATA_RUNTIME ConfigMap"
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" \
|
||||
envsubst < "${script_dir}/deployments/configmap_kata-webhook.yaml.in" \
|
||||
| oc apply -f -
|
||||
|
||||
# Check the webhook was deployed and is working.
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" ./webhook-check.sh
|
||||
popd >/dev/null
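After the deployment, the override can be double-checked directly against the ConfigMap defined below (a quick, optional verification; it assumes the namespace used by the webhook manifests):

    oc get configmap kata-webhook -o jsonpath='{.data.runtime_class}{"\n"}'
    # expected output: the value of ${KATA_RUNTIME}, e.g. kata-ci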
|
||||
|
@ -0,0 +1,12 @@
|
||||
# Copyright (c) 2021 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Apply customizations to the kata-webhook.
|
||||
#
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kata-webhook
|
||||
data:
|
||||
runtime_class: ${RUNTIME_CLASS}
|
@ -7,15 +7,11 @@
|
||||
# This script installs the built kata-containers in the test cluster,
|
||||
# and configure a runtime.
|
||||
|
||||
scripts_dir=$(dirname "$0")
|
||||
scripts_dir=$(dirname $0)
|
||||
deployments_dir=${scripts_dir}/deployments
|
||||
configs_dir=${scripts_dir}/configs
|
||||
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${scripts_dir}/../lib.sh"
|
||||
|
||||
# Set your katacontainers repo dir location
|
||||
[[ -z "${katacontainers_repo_dir}" ]] && echo "Please set katacontainers_repo_dir variable to your kata repo"
|
||||
source ${scripts_dir}/../lib.sh
|
||||
|
||||
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
|
||||
# workers.
|
||||
@ -44,18 +40,18 @@ WORKAROUND_9206_CRIO=${WORKAROUND_9206_CRIO:-no}
|
||||
#
|
||||
apply_kata_deploy() {
|
||||
local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
|
||||
pushd "${katacontainers_repo_dir}" || die
|
||||
sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "${deploy_file}"
|
||||
pushd "$katacontainers_repo_dir"
|
||||
sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "$deploy_file"
|
||||
|
||||
info "Applying kata-deploy"
|
||||
oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
|
||||
oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
|
||||
oc apply -f "${deploy_file}"
|
||||
oc apply -f "$deploy_file"
|
||||
oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
|
||||
|
||||
info "Adding the kata runtime classes"
|
||||
oc apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
|
||||
popd || die
|
||||
popd
|
||||
}
|
||||
|
||||
|
||||
@ -68,32 +64,32 @@ wait_for_reboot() {
|
||||
local delta="${1:-900}"
|
||||
local sleep_time=60
|
||||
declare -A BOOTIDS
|
||||
local workers
|
||||
mapfile -t workers < <(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
|
||||
local workers=($(oc get nodes | \
|
||||
awk '{if ($3 == "worker") { print $1 } }'))
|
||||
# Get the boot ID to compared it changed over time.
|
||||
for node in "${workers[@]}"; do
|
||||
BOOTIDS[${node}]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
|
||||
"node/${node}")
|
||||
echo "Wait ${node} reboot"
|
||||
for node in ${workers[@]}; do
|
||||
BOOTIDS[$node]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
|
||||
node/$node)
|
||||
echo "Wait $node reboot"
|
||||
done
|
||||
|
||||
echo "Set timeout to ${delta} seconds"
|
||||
echo "Set timeout to $delta seconds"
|
||||
timer_start=$(date +%s)
|
||||
while [[ ${#workers[@]} -gt 0 ]]; do
|
||||
sleep "${sleep_time}"
|
||||
while [ ${#workers[@]} -gt 0 ]; do
|
||||
sleep $sleep_time
|
||||
now=$(date +%s)
|
||||
if [[ $((timer_start + delta)) -lt ${now} ]]; then
|
||||
if [ $(($timer_start + $delta)) -lt $now ]; then
|
||||
echo "Timeout: not all workers rebooted"
|
||||
return 1
|
||||
fi
|
||||
echo "Checking after $((now - timer_start)) seconds"
|
||||
for i in "${!workers[@]}"; do
|
||||
echo "Checking after $(($now - $timer_start)) seconds"
|
||||
for i in ${!workers[@]}; do
|
||||
current_id=$(oc get \
|
||||
-o jsonpath='{.status.nodeInfo.bootID}' \
|
||||
"node/${workers[i]}")
|
||||
if [[ "${current_id}" != "${BOOTIDS[${workers[i]}]}" ]]; then
|
||||
node/${workers[i]})
|
||||
if [ "$current_id" != ${BOOTIDS[${workers[i]}]} ]; then
|
||||
echo "${workers[i]} rebooted"
|
||||
unset "workers[i]"
|
||||
unset workers[i]
|
||||
fi
|
||||
done
|
||||
done
|
||||
@ -106,34 +102,32 @@ wait_mcp_update() {
|
||||
# and none are degraded.
|
||||
local ready_count=0
|
||||
local degraded_count=0
|
||||
local machine_count
|
||||
machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
|
||||
local machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
|
||||
|
||||
if [[ -z "${machine_count}" && "${machine_count}" -lt 1 ]]; then
|
||||
if [[ -z "$machine_count" && "$machine_count" -lt 1 ]]; then
|
||||
warn "Unabled to obtain the machine count"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Set timeout to ${delta} seconds"
|
||||
local deadline=$(($(date +%s) + delta))
|
||||
local now
|
||||
echo "Set timeout to $delta seconds"
|
||||
local deadline=$(($(date +%s) + $delta))
|
||||
# The ready count might not have changed yet, so wait a little.
|
||||
while [[ "${ready_count}" != "${machine_count}" && \
|
||||
"${degraded_count}" == 0 ]]; do
|
||||
while [[ "$ready_count" != "$machine_count" && \
|
||||
"$degraded_count" == 0 ]]; do
|
||||
# Let's check it hit the timeout (or not).
|
||||
now=$(date +%s)
|
||||
if [[ ${deadline} -lt ${now} ]]; then
|
||||
local now=$(date +%s)
|
||||
if [ $deadline -lt $now ]; then
|
||||
echo "Timeout: not all workers updated" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep "${sleep_time}"
|
||||
sleep $sleep_time
|
||||
ready_count=$(oc get mcp worker \
|
||||
-o jsonpath='{.status.readyMachineCount}')
|
||||
degraded_count=$(oc get mcp worker \
|
||||
-o jsonpath='{.status.degradedMachineCount}')
|
||||
echo "check machineconfigpool - ready_count: ${ready_count} degraded_count: ${degraded_count}"
|
||||
echo "check machineconfigpool - ready_count: $ready_count degraded_count: $degraded_count"
|
||||
done
|
||||
[[ ${degraded_count} -eq 0 ]]
|
||||
[ $degraded_count -eq 0 ]
|
||||
}
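# For reference, the polling above is equivalent to watching these
# machineconfigpool fields by hand (a sketch, not part of the original flow):
#   oc get mcp worker -o jsonpath='{.status.machineCount}'
#   oc get mcp worker -o jsonpath='{.status.readyMachineCount}'
#   oc get mcp worker -o jsonpath='{.status.degradedMachineCount}'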
|
||||
|
||||
# Enable the RHCOS extension for the Sandboxed Containers.
|
||||
@ -141,10 +135,10 @@ wait_mcp_update() {
|
||||
enable_sandboxedcontainers_extension() {
|
||||
info "Enabling the RHCOS extension for Sandboxed Containers"
|
||||
local deployment_file="${deployments_dir}/machineconfig_sandboxedcontainers_extension.yaml"
|
||||
oc apply -f "${deployment_file}"
|
||||
oc get -f "${deployment_file}" || \
|
||||
oc apply -f ${deployment_file}
|
||||
oc get -f ${deployment_file} || \
|
||||
die "Sandboxed Containers extension machineconfig not found"
|
||||
wait_mcp_update 3600 || die "Failed to update the machineconfigpool"
|
||||
wait_mcp_update || die "Failed to update the machineconfigpool"
|
||||
}
|
||||
|
||||
# Print useful information for debugging.
|
||||
@ -154,8 +148,8 @@ enable_sandboxedcontainers_extension() {
|
||||
debug_pod() {
|
||||
local pod="$1"
|
||||
info "Debug pod: ${pod}"
|
||||
oc describe pods "${pod}"
|
||||
oc logs "${pod}"
|
||||
oc describe pods "$pod"
|
||||
oc logs "$pod"
|
||||
}
|
||||
|
||||
# Wait for all pods of the app label to contain expected message
|
||||
@ -172,32 +166,31 @@ wait_for_app_pods_message() {
|
||||
local message="$3"
|
||||
local timeout="$4"
|
||||
local namespace="$5"
|
||||
[[ -z "${pod_count}" ]] && pod_count=1
|
||||
[[ -z "${timeout}" ]] && timeout=60
|
||||
[[ -n "${namespace}" ]] && namespace=" -n ${namespace} "
|
||||
[ -z "$pod_count" ] && pod_count=1
|
||||
[ -z "$timeout" ] && timeout=60
|
||||
[ -n "$namespace" ] && namespace=" -n $namespace "
|
||||
local pod
|
||||
local pods
|
||||
local i
|
||||
SECONDS=0
|
||||
while :; do
|
||||
mapfile -t pods < <(oc get pods -l app="${app}" --no-headers=true "${namespace}" | awk '{print $1}')
|
||||
[[ "${#pods}" -ge "${pod_count}" ]] && break
|
||||
if [[ "${SECONDS}" -gt "${timeout}" ]]; then
|
||||
printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
|
||||
return 1
|
||||
pods=($(oc get pods -l app="$app" --no-headers=true $namespace | awk '{print $1}'))
|
||||
[ "${#pods}" -ge "$pod_count" ] && break
|
||||
if [ "$SECONDS" -gt "$timeout" ]; then
|
||||
echo "Unable to find ${pod_count} pods for '-l app=\"$app\"' in ${SECONDS}s (${pods[@]})"
|
||||
return -1
|
||||
fi
|
||||
done
|
||||
local log
|
||||
for pod in "${pods[@]}"; do
|
||||
while :; do
|
||||
log=$(oc logs "${namespace}" "${pod}")
|
||||
echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
|
||||
if [[ "${SECONDS}" -gt "${timeout}" ]]; then
|
||||
echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
|
||||
printf "pods after ${SECONDS}s :(%s)\n" "${pods[@]}"
|
||||
echo "Pod ${pod}'s output so far:"
|
||||
echo "${log}"
|
||||
return 1
|
||||
local log=$(oc logs $namespace "$pod")
|
||||
echo "$log" | grep "$message" -q && echo "Found $(echo "$log" | grep "$message") in $pod's log ($SECONDS)" && break;
|
||||
if [ "$SECONDS" -gt "$timeout" ]; then
|
||||
echo -n "Message '$message' not present in '${pod}' pod of the '-l app=\"$app\"' "
|
||||
echo "pods after ${SECONDS}s (${pods[@]})"
|
||||
echo "Pod $pod's output so far:"
|
||||
echo "$log"
|
||||
return -1
|
||||
fi
|
||||
sleep 1;
|
||||
done
|
||||
@ -207,45 +200,46 @@ wait_for_app_pods_message() {
|
||||
oc config set-context --current --namespace=default
|
||||
|
||||
worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
|
||||
num_nodes=$(echo "${worker_nodes}" | wc -w)
|
||||
[[ ${num_nodes} -ne 0 ]] || \
|
||||
num_nodes=$(echo $worker_nodes | wc -w)
|
||||
[ $num_nodes -ne 0 ] || \
|
||||
die "No worker nodes detected. Something is wrong with the cluster"
|
||||
|
||||
if [[ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]]; then
|
||||
if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
|
||||
# QEMU is deployed on the workers via RCHOS extension.
|
||||
enable_sandboxedcontainers_extension
|
||||
oc apply -f "${deployments_dir}/configmap_installer_qemu.yaml"
|
||||
oc apply -f ${deployments_dir}/configmap_installer_qemu.yaml
|
||||
fi
|
||||
|
||||
if [[ "${KATA_WITH_HOST_KERNEL}" == "yes" ]]; then
|
||||
oc apply -f "${deployments_dir}/configmap_installer_kernel.yaml"
|
||||
if [ "${KATA_WITH_HOST_KERNEL}" == "yes" ]; then
|
||||
oc apply -f ${deployments_dir}/configmap_installer_kernel.yaml
|
||||
fi
|
||||
|
||||
apply_kata_deploy
|
||||
|
||||
# Set SELinux to permissive mode
|
||||
if [[ ${SELINUX_PERMISSIVE} == "yes" ]]; then
|
||||
if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
|
||||
info "Configuring SELinux"
|
||||
if [[ -z "${SELINUX_CONF_BASE64}" ]]; then
|
||||
SELINUX_CONF_BASE64=$(base64 -w0 < "${configs_dir}/selinux.conf")
|
||||
export SELINUX_CONF_BASE64
|
||||
if [ -z "$SELINUX_CONF_BASE64" ]; then
|
||||
export SELINUX_CONF_BASE64=$(echo \
|
||||
$(cat $configs_dir/selinux.conf|base64) | \
|
||||
sed -e 's/\s//g')
|
||||
fi
|
||||
envsubst < "${deployments_dir}"/machineconfig_selinux.yaml.in | \
|
||||
envsubst < ${deployments_dir}/machineconfig_selinux.yaml.in | \
|
||||
oc apply -f -
|
||||
oc get machineconfig/51-kata-selinux || \
|
||||
die "SELinux machineconfig not found"
|
||||
# The new SELinux configuration will trigger another reboot.
|
||||
wait_for_reboot 900
|
||||
wait_for_reboot
|
||||
fi
|
||||
|
||||
if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
|
||||
if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
|
||||
info "Applying workaround to enable skip_mount_home in crio on OCP 4.13"
|
||||
oc apply -f "${deployments_dir}/workaround-9206-crio.yaml"
|
||||
oc apply -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
|
||||
wait_for_app_pods_message workaround-9206-crio-ds "${num_nodes}" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
|
||||
wait_for_app_pods_message workaround-9206-crio-ds "$num_nodes" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
|
||||
fi
|
||||
|
||||
# FIXME: Remove when https://github.com/kata-containers/kata-containers/pull/8417 is resolved
|
||||
# Selinux context is currently not handled by kata-deploy
|
||||
oc apply -f "${deployments_dir}/relabel_selinux.yaml"
|
||||
wait_for_app_pods_message restorecon "${num_nodes}" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
|
||||
oc apply -f ${deployments_dir}/relabel_selinux.yaml
|
||||
wait_for_app_pods_message restorecon "$num_nodes" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
|
||||
|
@ -10,12 +10,11 @@ if command -v go > /dev/null; then
|
||||
export GOPATH=${GOPATH:-$(go env GOPATH)}
|
||||
else
|
||||
# if go isn't installed, set default location for GOPATH
|
||||
export GOPATH="${GOPATH:-${HOME}/go}"
|
||||
export GOPATH="${GOPATH:-$HOME/go}"
|
||||
fi
|
||||
|
||||
lib_dir=$(dirname "${BASH_SOURCE[0]}")
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${lib_dir}/../../tests/common.bash"
|
||||
source "$lib_dir/../../tests/common.bash"
|
||||
|
||||
export katacontainers_repo=${katacontainers_repo:="github.com/kata-containers/kata-containers"}
|
||||
export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
|
||||
|
@ -1,254 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Copyright (c) 2025 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Setup peer-pods using cloud-api-adaptor on azure
|
||||
#
|
||||
# WARNING: When running outside "eastus" region this script creates a new
|
||||
# resource group in "eastus" region and peers the network. You
|
||||
# have to remove these manually (or use temporary accounts)
|
||||
|
||||
SCRIPT_DIR=$(dirname "$0")
|
||||
|
||||
###############################
|
||||
# Disable security to allow e2e
|
||||
###############################
|
||||
|
||||
# Disable security
|
||||
oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts
|
||||
oc adm policy add-scc-to-group anyuid system:authenticated system:serviceaccounts
|
||||
oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
|
||||
|
||||
|
||||
####################################
|
||||
# Get basic credentials from cluster
|
||||
####################################
|
||||
|
||||
oc -n kube-system get secret azure-credentials -o json > azure_credentials.json
|
||||
AZURE_CLIENT_ID="$(jq -r .data.azure_client_id azure_credentials.json|base64 -d)"
|
||||
AZURE_CLIENT_SECRET="$(jq -r .data.azure_client_secret azure_credentials.json|base64 -d)"
|
||||
AZURE_TENANT_ID="$(jq -r .data.azure_tenant_id azure_credentials.json|base64 -d)"
|
||||
AZURE_SUBSCRIPTION_ID="$(jq -r .data.azure_subscription_id azure_credentials.json|base64 -d)"
|
||||
rm -f azure_credentials.json
|
||||
AZURE_RESOURCE_GROUP=$(oc get infrastructure/cluster -o jsonpath='{.status.platformStatus.azure.resourceGroupName}')
|
||||
az login --service-principal -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"
|
||||
# Recommended on az sites to refresh the subscription
|
||||
az account set --subscription "${AZURE_SUBSCRIPTION_ID}"
|
||||
# This command still sometimes fails directly after login
|
||||
for I in {1..30}; do
|
||||
AZURE_VNET_NAME=$(az network vnet list --resource-group "${AZURE_RESOURCE_GROUP}" --query "[].{Name:name}" --output tsv ||:)
|
||||
if [[ -z "${AZURE_VNET_NAME}" ]]; then
|
||||
sleep "${I}"
|
||||
else # VNET set, we are done
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [[ -z "${AZURE_VNET_NAME}" ]]; then
|
||||
echo "Failed to get AZURE_VNET_NAME in 30 iterations"
|
||||
exit 1
|
||||
fi
|
||||
AZURE_SUBNET_NAME=$(az network vnet subnet list --resource-group "${AZURE_RESOURCE_GROUP}" --vnet-name "${AZURE_VNET_NAME}" --query "[].{Id:name} | [? contains(Id, 'worker')]" --output tsv)
|
||||
AZURE_SUBNET_ID=$(az network vnet subnet list --resource-group "${AZURE_RESOURCE_GROUP}" --vnet-name "${AZURE_VNET_NAME}" --query "[].{Id:id} | [? contains(Id, 'worker')]" --output tsv)
|
||||
AZURE_REGION=$(az group show --resource-group "${AZURE_RESOURCE_GROUP}" --query "{Location:location}" --output tsv)
|
||||
|
||||
# Create workload identity
|
||||
AZURE_WORKLOAD_IDENTITY_NAME="caa-${AZURE_CLIENT_ID}"
|
||||
az identity create --name "${AZURE_WORKLOAD_IDENTITY_NAME}" --resource-group "${AZURE_RESOURCE_GROUP}" --location "${AZURE_REGION}"
|
||||
USER_ASSIGNED_CLIENT_ID="$(az identity show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_WORKLOAD_IDENTITY_NAME}" --query 'clientId' -otsv)"
|
||||
|
||||
|
||||
#############################
|
||||
# Ensure we can run in eastus
|
||||
#############################
|
||||
|
||||
PP_REGION=eastus
|
||||
if [[ "${AZURE_REGION}" == "${PP_REGION}" ]]; then
|
||||
echo "Using the current region ${AZURE_REGION}"
|
||||
PEERING=0
|
||||
PP_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP}"
|
||||
PP_VNET_NAME="${AZURE_VNET_NAME}"
|
||||
PP_SUBNET_NAME="${AZURE_SUBNET_NAME}"
|
||||
PP_SUBNET_ID="${AZURE_SUBNET_ID}"
|
||||
else
|
||||
echo "Creating peering between ${AZURE_REGION} and ${PP_REGION}"
|
||||
PEERING=1
|
||||
PP_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP}-eastus"
|
||||
PP_VNET_NAME="${AZURE_VNET_NAME}-eastus"
|
||||
PP_SUBNET_NAME="${AZURE_SUBNET_NAME}-eastus"
|
||||
PP_NSG_NAME="${AZURE_VNET_NAME}-nsg-eastus"
|
||||
echo " creating new PP_RESOURCE_GROUP=${PP_RESOURCE_GROUP}"
|
||||
az group create --name "${PP_RESOURCE_GROUP}" --location "${PP_REGION}"
|
||||
az network vnet create --resource-group "${PP_RESOURCE_GROUP}" --name "${PP_VNET_NAME}" --location "${PP_REGION}" --address-prefixes 10.2.0.0/16 --subnet-name "${PP_SUBNET_NAME}" --subnet-prefixes 10.2.1.0/24
|
||||
az network nsg create --resource-group "${PP_RESOURCE_GROUP}" --name "${PP_NSG_NAME}" --location "${PP_REGION}"
|
||||
az network vnet subnet update --resource-group "${PP_RESOURCE_GROUP}" --vnet-name "${PP_VNET_NAME}" --name "${PP_SUBNET_NAME}" --network-security-group "${PP_NSG_NAME}"
|
||||
AZURE_VNET_ID=$(az network vnet show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_VNET_NAME}" --query id --output tsv)
|
||||
PP_VNET_ID=$(az network vnet show --resource-group "${PP_RESOURCE_GROUP}" --name "${PP_VNET_NAME}" --query id --output tsv)
|
||||
az network vnet peering create --name westus-to-eastus --resource-group "${AZURE_RESOURCE_GROUP}" --vnet-name "${AZURE_VNET_NAME}" --remote-vnet "${PP_VNET_ID}" --allow-vnet-access
|
||||
az network vnet peering create --name eastus-to-westus --resource-group "${PP_RESOURCE_GROUP}" --vnet-name "${PP_VNET_NAME}" --remote-vnet "${AZURE_VNET_ID}" --allow-vnet-access
|
||||
PP_SUBNET_ID=$(az network vnet subnet list --resource-group "${PP_RESOURCE_GROUP}" --vnet-name "${PP_VNET_NAME}" --query "[].{Id:id} | [? contains(Id, 'worker')]" --output tsv)
|
||||
fi
|
||||
|
||||
# Peer-pod requires gateway
|
||||
az network public-ip create \
|
||||
--resource-group "${PP_RESOURCE_GROUP}" \
|
||||
--name MyPublicIP \
|
||||
--sku Standard \
|
||||
--allocation-method Static
|
||||
az network nat gateway create \
|
||||
--resource-group "${PP_RESOURCE_GROUP}" \
|
||||
--name MyNatGateway \
|
||||
--public-ip-addresses MyPublicIP \
|
||||
--idle-timeout 10
|
||||
az network vnet subnet update \
|
||||
--resource-group "${PP_RESOURCE_GROUP}" \
|
||||
--vnet-name "${PP_VNET_NAME}" \
|
||||
--name "${PP_SUBNET_NAME}" \
|
||||
--nat-gateway MyNatGateway
|
||||
|
||||
|
||||
##########################################
|
||||
# Setup CAA
|
||||
#########################################
|
||||
|
||||
# Label the nodes
|
||||
for NODE_NAME in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}'); do [[ "${NODE_NAME}" =~ 'worker' ]] && kubectl label node "${NODE_NAME}" node.kubernetes.io/worker=; done
|
||||
|
||||
# CAA artifacts
|
||||
CAA_IMAGE="quay.io/confidential-containers/cloud-api-adaptor"
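# Resolve a pinned CAA tag: look up the digest behind the moving "latest-amd64" tag and use the versioned tag that shares that digest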
|
||||
TAGS="$(curl https://quay.io/api/v1/repository/confidential-containers/cloud-api-adaptor/tag/?onlyActiveTags=true)"
|
||||
DIGEST=$(echo "${TAGS}" | jq -r '.tags[] | select(.name | contains("latest-amd64")) | .manifest_digest')
|
||||
CAA_TAG="$(echo "${TAGS}" | jq -r '.tags[] | select(.manifest_digest | contains("'"${DIGEST}"'")) | .name' | grep -v "latest")"
|
||||
|
||||
# Get latest PP image
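# The nightly Azure build publishes podvm image versions named after the build date (YYYY.MM.DD), so derive the version from the last successful workflow run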
|
||||
SUCCESS_TIME=$(curl -s \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/confidential-containers/cloud-api-adaptor/actions/workflows/azure-nightly-build.yml/runs?status=success" \
|
||||
| jq -r '.workflow_runs[0].updated_at')
|
||||
PP_IMAGE_ID="/CommunityGalleries/cocopodvm-d0e4f35f-5530-4b9c-8596-112487cdea85/Images/podvm_image0/Versions/$(date -u -jf "%Y-%m-%dT%H:%M:%SZ" "${SUCCESS_TIME}" "+%Y.%m.%d" 2>/dev/null || date -d "${SUCCESS_TIME}" +%Y.%m.%d)"
|
||||
|
||||
echo "AZURE_REGION: \"${AZURE_REGION}\""
|
||||
echo "PP_REGION: \"${PP_REGION}\""
|
||||
echo "AZURE_RESOURCE_GROUP: \"${AZURE_RESOURCE_GROUP}\""
|
||||
echo "PP_RESOURCE_GROUP: \"${PP_RESOURCE_GROUP}\""
|
||||
echo "PP_SUBNET_ID: \"${PP_SUBNET_ID}\""
|
||||
echo "CAA_TAG: \"${CAA_TAG}\""
|
||||
echo "PP_IMAGE_ID: \"${PP_IMAGE_ID}\""
|
||||
|
||||
# Clone and configure caa
|
||||
git clone --depth 1 --no-checkout https://github.com/confidential-containers/cloud-api-adaptor.git
|
||||
pushd cloud-api-adaptor
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set src/cloud-api-adaptor/install/
|
||||
git checkout
|
||||
echo "CAA_GIT_SHA: \"$(git rev-parse HEAD)\""
|
||||
pushd src/cloud-api-adaptor
|
||||
cat <<EOF > install/overlays/azure/workload-identity.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: cloud-api-adaptor-daemonset
|
||||
namespace: confidential-containers-system
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
azure.workload.identity/use: "true"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cloud-api-adaptor
|
||||
namespace: confidential-containers-system
|
||||
annotations:
|
||||
azure.workload.identity/client-id: "${USER_ASSIGNED_CLIENT_ID}"
|
||||
EOF
|
||||
PP_INSTANCE_SIZE="Standard_D2as_v5"
|
||||
DISABLECVM="true"
|
||||
cat <<EOF > install/overlays/azure/kustomization.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
bases:
|
||||
- ../../yamls
|
||||
images:
|
||||
- name: cloud-api-adaptor
|
||||
newName: "${CAA_IMAGE}"
|
||||
newTag: "${CAA_TAG}"
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
configMapGenerator:
|
||||
- name: peer-pods-cm
|
||||
namespace: confidential-containers-system
|
||||
literals:
|
||||
- CLOUD_PROVIDER="azure"
|
||||
- AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID}"
|
||||
- AZURE_REGION="${PP_REGION}"
|
||||
- AZURE_INSTANCE_SIZE="${PP_INSTANCE_SIZE}"
|
||||
- AZURE_RESOURCE_GROUP="${PP_RESOURCE_GROUP}"
|
||||
- AZURE_SUBNET_ID="${PP_SUBNET_ID}"
|
||||
- AZURE_IMAGE_ID="${PP_IMAGE_ID}"
|
||||
- DISABLECVM="${DISABLECVM}"
|
||||
- PEERPODS_LIMIT_PER_NODE="50"
|
||||
secretGenerator:
|
||||
- name: peer-pods-secret
|
||||
namespace: confidential-containers-system
|
||||
envs:
|
||||
- service-principal.env
|
||||
- name: ssh-key-secret
|
||||
namespace: confidential-containers-system
|
||||
files:
|
||||
- id_rsa.pub
|
||||
patchesStrategicMerge:
|
||||
- workload-identity.yaml
|
||||
EOF
|
||||
ssh-keygen -t rsa -f install/overlays/azure/id_rsa -N ''
|
||||
echo "AZURE_CLIENT_ID=${AZURE_CLIENT_ID}" > install/overlays/azure/service-principal.env
|
||||
echo "AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}" >> install/overlays/azure/service-principal.env
|
||||
echo "AZURE_TENANT_ID=${AZURE_TENANT_ID}" >> install/overlays/azure/service-principal.env
|
||||
|
||||
# Deploy Operator
|
||||
git clone --depth 1 --no-checkout https://github.com/confidential-containers/operator
|
||||
pushd operator
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set "config/"
|
||||
git checkout
|
||||
echo "OPERATOR_SHA: \"$(git rev-parse HEAD)\""
|
||||
oc apply -k "config/release"
|
||||
oc apply -k "config/samples/ccruntime/peer-pods"
|
||||
popd
|
||||
|
||||
# Deploy CAA
|
||||
kubectl apply -k "install/overlays/azure"
|
||||
popd
|
||||
popd
|
||||
|
||||
# Wait for runtimeclass
|
||||
SECONDS=0
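# (SECONDS is a bash builtin that increments automatically, so the loop below gives up after roughly 360s)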
|
||||
( while [[ "${SECONDS}" -lt 360 ]]; do
|
||||
kubectl get runtimeclass | grep -q kata-remote && exit 0
|
||||
done; exit 1 ) || { echo "kata-remote runtimeclass not initialized in 360s"; kubectl -n confidential-containers-system get all; echo; echo CAA; kubectl -n confidential-containers-system logs daemonset.apps/cloud-api-adaptor-daemonset; echo pre-install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-pre-install-daemon; echo install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-daemon-install; exit 1; }
|
||||
|
||||
|
||||
################
|
||||
# Deploy webhook
|
||||
################
|
||||
pushd "${SCRIPT_DIR}/cluster/"
|
||||
kubectl create ns default || true
|
||||
kubectl config set-context --current --namespace=default
|
||||
KATA_RUNTIME=kata-remote ./deploy_webhook.sh
|
||||
popd
|
||||
|
||||
|
||||
##################################
|
||||
# Log warning when peering created
|
||||
##################################
|
||||
if [[ ${PEERING} -ne 0 ]]; then
|
||||
echo "This script created additional resources to create peering between ${AZURE_REGION} and ${PP_REGION}. Ensure you release those resources after the testing (or use temporary subscription)"
|
||||
PP_VARS=("PP_RESOURCE_GROUP" "PP_VNET_NAME" "PP_SUBNET_NAME" "PP_NSG_NAME" "AZURE_VNET_ID" "PP_VNET_ID" "PP_SUBNET_ID")
|
||||
for PP_VAR in "${PP_VARS[@]}"; do
|
||||
echo "${PP_VAR}=${!PP_VAR}"
|
||||
done
|
||||
echo
|
||||
echo "by running 'az group delete --name ${PP_RESOURCE_GROUP}'"
|
||||
fi
|
@ -7,16 +7,15 @@
|
||||
# Run a smoke test.
|
||||
#
|
||||
|
||||
script_dir=$(dirname "$0")
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${script_dir}/lib.sh"
|
||||
script_dir=$(dirname $0)
|
||||
source ${script_dir}/lib.sh
|
||||
|
||||
pod='http-server'
|
||||
|
||||
# Create a pod.
|
||||
#
|
||||
info "Creating the ${pod} pod"
|
||||
[[ -z "${KATA_RUNTIME}" ]] && die "Please set the KATA_RUNTIME first"
|
||||
[ -z "$KATA_RUNTIME" ] && die "Please set the KATA_RUNTIME first"
|
||||
envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
|
||||
oc apply -f - || \
|
||||
die "failed to create ${pod} pod"
|
||||
@ -28,10 +27,10 @@ sleep_time=5
|
||||
cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
|
||||
grep running > /dev/null"
|
||||
info "Wait until the pod gets running"
|
||||
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" || timed_out=$?
|
||||
if [[ -n "${timed_out}" ]]; then
|
||||
oc describe "pod/${pod}"
|
||||
oc delete "pod/${pod}"
|
||||
waitForProcess $wait_time $sleep_time "$cmd" || timed_out=$?
|
||||
if [ -n "$timed_out" ]; then
|
||||
oc describe pod/${pod}
|
||||
oc delete pod/${pod}
|
||||
die "${pod} not running"
|
||||
fi
|
||||
info "${pod} is running"
|
||||
@ -40,13 +39,13 @@ info "${pod} is running"
|
||||
#
|
||||
hello_file=/tmp/hello
|
||||
hello_msg='Hello World'
|
||||
oc exec "${pod}" -- sh -c "echo ${hello_msg} > ${hello_file}"
|
||||
oc exec ${pod} -- sh -c "echo $hello_msg > $hello_file"
|
||||
|
||||
info "Creating the service and route"
|
||||
if oc apply -f "${script_dir}/smoke/service.yaml"; then
|
||||
if oc apply -f ${script_dir}/smoke/service.yaml; then
|
||||
# Likely on OCP, use service
|
||||
is_ocp=1
|
||||
host=$(oc get route/http-server-route -o jsonpath="{.spec.host}")
|
||||
host=$(oc get route/http-server-route -o jsonpath={.spec.host})
|
||||
port=80
|
||||
else
|
||||
# Likely on plain kubernetes, test using another container
|
||||
@ -55,13 +54,13 @@ else
|
||||
oc apply -f "${script_dir}/smoke/service_kubernetes.yaml"
|
||||
# For some reason kcli's cluster lists external IP as internal IP, try both
|
||||
host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')
|
||||
[[ -z "${host}" ]] && host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
|
||||
[ -z "$host"] && host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
|
||||
port=$(oc get service/http-server-service -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
fi
|
||||
|
||||
info "Wait for the HTTP server to respond"
|
||||
tempfile=$(mktemp)
|
||||
check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '${tempfile}' | grep -q '${hello_msg}'"
|
||||
check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '$tempfile' | grep -q '$hello_msg'"
|
||||
if waitForProcess 60 1 "${check_cmd}"; then
|
||||
test_status=0
|
||||
info "HTTP server is working"
|
||||
@ -79,17 +78,17 @@ else
|
||||
echo "::endgroup::"
|
||||
info "HTTP server is unreachable"
|
||||
fi
|
||||
rm -f "${tempfile}"
|
||||
rm -f "$tempfile"
|
||||
|
||||
# Delete the resources.
|
||||
#
|
||||
info "Deleting the service/route"
|
||||
if [[ "${is_ocp}" -eq 0 ]]; then
|
||||
oc delete -f "${script_dir}/smoke/service_kubernetes.yaml"
|
||||
if [ "$is_ocp" -eq 0 ]; then
|
||||
oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
|
||||
else
|
||||
oc delete -f "${script_dir}/smoke/service.yaml"
|
||||
oc delete -f ${script_dir}/smoke/service.yaml
|
||||
fi
|
||||
info "Deleting the ${pod} pod"
|
||||
oc delete "pod/${pod}" || test_status=$?
|
||||
oc delete pod/${pod} || test_status=$?
|
||||
|
||||
exit "${test_status}"
|
||||
exit $test_status
|
||||
|
@ -7,7 +7,7 @@
|
||||
# afterwards OCP cluster using kata-containers primarily created for use
|
||||
# with https://github.com/ldoktor/bisecter
|
||||
|
||||
[[ "$#" -ne 1 ]] && echo "Provide image as the first and only argument" && exit 255
|
||||
[ "$#" -ne 1 ] && echo "Provide image as the first and only argument" && exit 255
|
||||
export KATA_DEPLOY_IMAGE="$1"
|
||||
OCP_DIR="${OCP_DIR:-/path/to/your/openshift/release/}"
|
||||
E2E_TEST="${E2E_TEST:-'"[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]"'}"
|
||||
@ -17,12 +17,12 @@ export KATA_RUNTIME="${KATA_RUNTIME:-kata-qemu}"
|
||||
## SETUP
|
||||
# Deploy kata
|
||||
SETUP=0
|
||||
pushd "${KATA_CI_DIR}" || { echo "Failed to cd to '${KATA_CI_DIR}'"; exit 255; }
|
||||
pushd "$KATA_CI_DIR" || { echo "Failed to cd to '$KATA_CI_DIR'"; exit 255; }
|
||||
./test.sh || SETUP=125
|
||||
cluster/deploy_webhook.sh || SETUP=125
|
||||
if [[ ${SETUP} != 0 ]]; then
|
||||
if [ $SETUP != 0 ]; then
|
||||
./cleanup.sh
|
||||
exit "${SETUP}"
|
||||
exit "$SETUP"
|
||||
fi
|
||||
popd || true
|
||||
# Disable security
|
||||
@ -32,19 +32,19 @@ oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged po
|
||||
|
||||
## TEST EXECUTION
|
||||
# Run the testing
|
||||
pushd "${OCP_DIR}" || { echo "Failed to cd to '${OCP_DIR}'"; exit 255; }
|
||||
echo "${E2E_TEST}" > /tmp/tsts
|
||||
pushd "$OCP_DIR" || { echo "Failed to cd to '$OCP_DIR'"; exit 255; }
|
||||
echo "$E2E_TEST" > /tmp/tsts
|
||||
# Remove previously-existing temporary files as well as previous results
|
||||
OUT=RESULTS/tmp
|
||||
rm -Rf /tmp/*test* /tmp/e2e-*
|
||||
rm -R "${OUT}"
|
||||
mkdir -p "${OUT}"
|
||||
rm -R $OUT
|
||||
mkdir -p $OUT
|
||||
# Run the tests ignoring the monitor health checks
|
||||
./openshift-tests run --provider azure -o "${OUT}/job.log" --junit-dir "${OUT}" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
|
||||
./openshift-tests run --provider azure -o "$OUT/job.log" --junit-dir "$OUT" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
|
||||
RET=$?
|
||||
popd || true
|
||||
|
||||
## CLEANUP
|
||||
./cleanup.sh
|
||||
exit "${RET}"
|
||||
exit "$RET"
|
||||
|
||||
|
@ -8,29 +8,25 @@
|
||||
# The kata shim to be used
|
||||
export KATA_RUNTIME=${KATA_RUNTIME:-kata-qemu}
|
||||
|
||||
script_dir=$(dirname "$0")
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${script_dir}/lib.sh"
|
||||
script_dir=$(dirname $0)
|
||||
source ${script_dir}/lib.sh
|
||||
|
||||
suite=$1
|
||||
if [[ -z "$1" ]]; then
|
||||
if [ -z "$1" ]; then
|
||||
suite='smoke'
|
||||
fi
|
||||
|
||||
# Make oc and kubectl visible
|
||||
export PATH=/tmp/shared:${PATH}
|
||||
export PATH=/tmp/shared:$PATH
|
||||
|
||||
oc version || die "Test cluster is unreachable"
|
||||
|
||||
info "Install and configure kata into the test cluster"
|
||||
export SELINUX_PERMISSIVE="no"
|
||||
"${script_dir}/cluster/install_kata.sh" || die "Failed to install kata-containers"
|
||||
${script_dir}/cluster/install_kata.sh || die "Failed to install kata-containers"
|
||||
|
||||
info "Overriding KATA_RUNTIME cpu resources"
|
||||
oc patch "runtimeclass/${KATA_RUNTIME}" -p '{"overhead": {"podFixed": {"cpu": "50m"}}}'
|
||||
|
||||
info "Run test suite: ${suite}"
|
||||
info "Run test suite: $suite"
|
||||
test_status='PASS'
|
||||
"${script_dir}/run_${suite}_test.sh" || test_status='FAIL'
|
||||
info "Test suite: ${suite}: ${test_status}"
|
||||
[[ "${test_status}" == "PASS" ]]
|
||||
${script_dir}/run_${suite}_test.sh || test_status='FAIL'
|
||||
info "Test suite: $suite: $test_status"
|
||||
[ "$test_status" == "PASS" ]
|
||||
|
@ -32,7 +32,6 @@ See the [how-to documentation](how-to).
|
||||
* [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md)
|
||||
* [SPDK vhost-user with Kata](./use-cases/using-SPDK-vhostuser-and-kata.md)
|
||||
* [Intel SGX with Kata](./use-cases/using-Intel-SGX-and-kata.md)
|
||||
* [IBM Crypto Express passthrough with Confidential Containers](./use-cases/CEX-passthrough-and-coco.md)
|
||||
|
||||
## Developer Guide
|
||||
|
||||
|
BIN
docs/design/arch-images/guest-image-management-architecture.png
Normal file
BIN
docs/design/arch-images/guest-image-management-architecture.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 61 KiB |
BIN
docs/design/arch-images/guest-image-management-details.png
Normal file
BIN
docs/design/arch-images/guest-image-management-details.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 122 KiB |
@ -49,58 +49,13 @@ Pull the container image directly from the guest VM using `nydus snapshotter` ba
|
||||
#### Architecture
|
||||
|
||||
The following diagram provides an overview of the architecture for pulling images in the guest, along with the key components.
|
||||
```mermaid
|
||||
flowchart LR
|
||||
Kubelet[kubelet]--> |1\. Pull image request & metadata|Containerd
|
||||
Containerd-->|2\. Pull image metadata| E
|
||||
Containerd-->Snapshotter[Nydus Snapshotter]
|
||||
Snapshotter-->|3\. Pack image info| Containerd
|
||||
Containerd-->Runtime[Kata Runtime]
|
||||
Runtime-->Hypervisor
|
||||
Hypervisor-->TEE
|
||||
Runtime-->|4\. Pass image info to VM| Agent
|
||||
CDH1-->|6\. Pull image with image info|E[Container Images Registry]
|
||||
subgraph TEE [Virtual Machine]
|
||||
Images[Container Images]-->|7\. Prepare container rootfs|H[Container]
|
||||
|
||||
subgraph CDH [Confidential Data Hub]
|
||||
CDH1[Image Mgmt]
|
||||
end
|
||||
|
||||
CDH-->Images
|
||||
Agent[Kata Agent]-->|5\. Call image pull RPC|CDH
|
||||
end
|
||||
```
|
||||

|
||||
|
||||
#### Sequence diagrams
|
||||
|
||||
The sequence diagram below offers a detailed overview of the messages/calls exchanged to pull an unencrypted, unsigned image from an unauthenticated container registry. This involves the kata-runtime, the kata-agent, and the guest-components’ image-rs using the guest pull mechanism.
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
par Hosts Side
|
||||
Containerd/Kubelet->>runtime.kata_agent: createContainer(ctx,sandbox,c)
|
||||
runtime.kata_agent->>runtime.fs_share_linux: ShareRootFilesystem(ctx,c)
|
||||
runtime.fs_share_linux->>runtime.kata_agent: handleVirtualVolumeStorageObject(c,...,KataVolumeType)
|
||||
runtime.kata_agent->>runtime.kata_agent: handleImageGuestPullBlockVolume(c,virtVolume,vol)
|
||||
runtime.kata_agent->>runtime.fs_share_linux: ret:storage
|
||||
runtime.fs_share_linux->>runtime.kata_agent: ret:sharedFile
|
||||
and Guest Side
|
||||
runtime.kata_agent->>agent.rpc: CreateContainerRequest(cid,...,storages,...,oci,...)
|
||||
agent.rpc->>agent.storage: add_storage(storages...)
|
||||
agent.storage->>agent.storage: StorageHandler.handler(driver)
|
||||
agent.storage->>agent.storage.StorageHandler.ImagePullHandler: create_device(storage)
|
||||
agent.storage.StorageHandler.ImagePullHandler->>agent.confidential_data_hub: pull_image(img,cid,img_metadata)
|
||||
agent.confidential_data_hub->>Confidential Data Hub: pull_image(img,bundle_path)
|
||||
Confidential Data Hub->>agent.confidential_data_hub: ret
|
||||
agent.confidential_data_hub->>agent.storage.StorageHandler.ImagePullHandler: ret: bundle_path
|
||||
agent.storage.StorageHandler.ImagePullHandler->>agent.storage: ret: device
|
||||
agent.storage->>agent.rpc: ret: mount_list
|
||||
and Return
|
||||
agent.rpc->>runtime.kata_agent: ret: ok
|
||||
runtime.kata_agent->>Containerd/Kubelet: ret: ok
|
||||
end
|
||||
```
|
||||

|
||||
|
||||
First and foremost, the guest pull code path is only activated when `nydus snapshotter` requires the handling of a volume whose type is `image_guest_pull`, as can be seen in the message below:
|
||||
```json
|
||||
@ -153,10 +108,10 @@ Below is an example of storage information packaged in the message sent to the k
|
||||
```
|
||||
Next, the kata-agent's RPC module handles the create container request which, among other things, involves adding storages to the sandbox. The storage module contains implementations of the `StorageHandler` interface for various storage types, with `ImagePullHandler` in charge of handling the storage object for the container image (the storage manager instantiates the handler based on the value of the "driver" field).
|
||||
|
||||
`ImagePullHandler` delegates the image pulling operation to `confidential_data_hub.pull_image()`, which creates the image's bundle directory on the guest filesystem and, in turn, calls the `ImagePullService` of the Confidential Data Hub to fetch, uncompress and mount the image's rootfs.
|
||||
`ImagePullHandler` delegates the image pulling operation to `ImageService.pull_image()`, which creates the image's bundle directory on the guest filesystem and, in turn, calls the image-rs to actually fetch and uncompress the image's bundle.
|
||||
|
||||
> **Notes:**
|
||||
> In this flow, `confidential_data_hub.pull_image()` parses the image metadata, looking for either the `io.kubernetes.cri.container-type: sandbox` or `io.kubernetes.cri-o.ContainerType: sandbox` (CRI-O case) annotation. If one of them is found, it never calls the `pull_image()` RPC of the Confidential Data Hub, because the pause image is expected to already be inside the guest's filesystem; instead, `confidential_data_hub.unpack_pause_image()` is called.
|
||||
> In this flow, `ImageService.pull_image()` parses the image metadata, looking for either the `io.kubernetes.cri.container-type: sandbox` or `io.kubernetes.cri-o.ContainerType: sandbox` (CRI-O case) annotation. If one of them is found, it never calls `image-rs.pull_image()`, because the pause image is expected to already be inside the guest's filesystem; instead, `ImageService.unpack_pause_image()` is called.
|
||||
|
||||
## Using guest image pull with `nerdctl`
|
||||
|
||||
@ -166,6 +121,6 @@ nerdctl run --runtime io.containerd.kata.v2 --snapshotter nydus --label io.kuber
|
||||
```
|
||||
|
||||
References:
|
||||
1. [[RFC] Image management proposal for hosting sharing and peer pods](https://github.com/confidential-containers/confidential-containers/issues/137)
|
||||
2. https://github.com/containerd/containerd/blob/main/docs/content-flow.md
|
||||
3. [Move guest pull ability to a configurable component](https://github.com/kata-containers/kata-containers/issues/9266)
|
||||
[1] [[RFC] Image management proposal for hosting sharing and peer pods](https://github.com/confidential-containers/confidential-containers/issues/137)
|
||||
[2] https://github.com/containerd/containerd/blob/main/docs/content-flow.md
|
||||
|
||||
|
@ -47,4 +47,3 @@
|
||||
- [How to run Kata Containers with kinds of Block Volumes](how-to-run-kata-containers-with-kinds-of-Block-Volumes.md)
|
||||
- [How to use the Kata Agent Policy](how-to-use-the-kata-agent-policy.md)
|
||||
- [How to pull images in the guest](how-to-pull-images-in-guest-with-kata.md)
|
||||
- [How to use mem-agent to decrease the memory usage of Kata container](how-to-use-memory-agent.md)
|
||||
|
@ -28,7 +28,6 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.runtime.sandbox_cgroup_only`| `boolean` | determines if Kata processes are managed only in sandbox cgroup |
|
||||
| `io.katacontainers.config.runtime.enable_pprof` | `boolean` | enables Golang `pprof` for `containerd-shim-kata-v2` process |
|
||||
| `io.katacontainers.config.runtime.create_container_timeout` | `uint64` | the timeout for creating a container in `seconds`, default is `60` |
|
||||
| `io.katacontainers.config.runtime.experimental_force_guest_pull` | `boolean` | forces the runtime to pull the image in the guest VM, default is `false`. This is an experimental feature and might be removed in the future. |
|
||||
|
||||
## Agent Options
|
||||
| Key | Value Type | Comments |
|
||||
|
@ -2,8 +2,6 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes how to set up a swap device with runtime-golang. See [How to use mem-agent to decrease the memory usage of Kata container](how-to-use-memory-agent.md#setup-guest-swap) to learn how to set up and use guest swap with runtime-rs.
|
||||
|
||||
Setting up a swap device in the guest kernel can help increase memory capacity, mitigate some memory issues, and sometimes improve file access speed.
|
||||
Kata Containers can insert a raw file into the guest as the swap device.
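As a minimal sketch, assuming a kata-deploy style installation where the golang runtime configuration lives under `/opt/kata/share/defaults/kata-containers/` and exposes a commented `#enable_guest_swap` option, guest swap can be switched on like this (adjust the path to your installation):

```bash
$ config_file="/opt/kata/share/defaults/kata-containers/configuration.toml"
$ sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' "$config_file"
```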
|
||||
|
||||
|
@ -1,351 +0,0 @@
|
||||
# How to use mem-agent to decrease the memory usage of Kata container
|
||||
## Introduction
|
||||
mem-agent is a component designed for managing memory in Linux environments.<br>
|
||||
The mem-agent has been integrated into the kata-agent to reduce memory usage in Kata containers.
|
||||
|
||||
## Enable mem-agent in the configuration
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#mem_agent_enable.*$/mem_agent_enable = true/g' $config_file
|
||||
```
|
||||
|
||||
## Enable reclaim_guest_freed_memory in the configuration
|
||||
Enabling this option results in the VM balloon device having `f_reporting=on` set.<br>
|
||||
Then the hypervisor will use it to reclaim guest freed memory.
|
||||
|
||||
When the mem-agent reclaims memory in the guest, this function reclaims the freed guest memory on the host.
|
||||
|
||||
**To use the mem-agent, you must enable reclaim_guest_freed_memory in the configuration.**
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#reclaim_guest_freed_memory.*$/reclaim_guest_freed_memory = true/g' $config_file
|
||||
```
|
||||
|
||||
## Sub-feature psi
|
||||
During memory reclamation and compaction, mem-agent monitors system pressure using Pressure Stall Information (PSI).<br>
|
||||
If the system pressure becomes too high, memory reclamation or compaction will automatically stop.
|
||||
|
||||
This feature helps the mem-agent reduce its overhead on system performance.
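PSI itself is a standard Linux kernel interface. As a quick, illustrative check (assuming the guest kernel was built with `CONFIG_PSI`), the pressure data the mem-agent evaluates can be inspected directly:

```bash
$ # memory and IO stall information used to judge system pressure
$ cat /proc/pressure/memory
$ cat /proc/pressure/io
```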
|
||||
|
||||
## Sub-feature memcg
|
||||
This sub-feature uses the Linux kernel MgLRU feature to monitor each cgroup's memory usage and periodically reclaim cold memory.
|
||||
|
||||
During each run period, memcg calls the run_aging function of MgLRU for each cgroup to mark the hot and cold states of the pages within it.<br>
|
||||
Then, it calls the run_eviction function of MgLRU for each cgroup to reclaim a portion of the cold pages that have not been accessed for three periods.
|
||||
|
||||
After the run period, the memcg will enter a sleep period. Once the sleep period is over, it will transition into the next run period, and this cycle will continue.
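As an illustrative check (the sysfs path below is the standard MgLRU interface and assumes a guest kernel with MgLRU built in), you can verify inside the guest that MgLRU is available and enabled:

```bash
$ # a non-zero value means MgLRU is enabled
$ cat /sys/kernel/mm/lru_gen/enabled
```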
|
||||
|
||||
**The following are the configurations of the sub-feature memcg:**
|
||||
|
||||
### memcg_disable
|
||||
Controls whether the mem-agent memcg function is enabled or disabled.<br>
|
||||
Default to false.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_disable.*$/memcg_disable = true/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"disabled":true}'
|
||||
```
|
||||
|
||||
### memcg_swap
|
||||
If this feature is disabled, the mem-agent will only track and reclaim file cache pages. If this feature is enabled, the mem-agent will handle both file cache pages and anonymous pages.<br>
|
||||
Default to false.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_swap.*$/memcg_swap = true/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"swap":true}'
|
||||
```
|
||||
|
||||
#### setup guest swap
|
||||
memcg_swap should be used together with the guest swap function.<br>
|
||||
The guest swap function will create a separate swap task that will create and insert swap files into the guest as needed.<br>
|
||||
Only dragonball and cloud-hypervisor support guest swap.
|
||||
|
||||
Use the following configuration to enable guest swap.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' $config_file
|
||||
```
|
||||
|
||||
By default, swap files are created in the /run/kata-containers/swap directory. You can use the following configuration to create swap files in a different directory.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#guest_swap_path.*$/guest_swap_path = \"\/run\/kata-containers\/swap\"/g' $config_file
|
||||
```
|
||||
|
||||
By default, the inserted swap file will match the current memory size, which is set to 100%. You can modify the percentage of the swap size relative to the current memory size using the configuration below.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#guest_swap_size_percent.*$/guest_swap_size_percent = 100/g' $config_file
|
||||
```
|
||||
|
||||
The swap task will wait for 60 seconds before determining the memory size and creating swap files. This approach helps prevent interference with the startup performance of the kata container during its initial creation and avoids frequent insertion of swap files when the guest memory size is adjusted frequently. You can configure the waiting time using the option below.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#guest_swap_create_threshold_secs.*$/guest_swap_create_threshold_secs = 60/g' $config_file
|
||||
```
|
||||
|
||||
### memcg_swappiness_max
|
||||
The usage of this value is similar to the swappiness in the Linux kernel, applying a ratio of swappiness_max/200 when utilized.<br>
|
||||
At the beginning of the eviction memory process for a cgroup in each run period, the coldest anonymous pages are assigned a maximum eviction value based on swappiness_max/200.<br>
|
||||
When the run_eviction function of MgLRU is actually called, if the comparison ratio between the current coldest anonymous pages and file cache pages exceeds this value, then this value will be used as the swappiness.<br>
|
||||
Default to 50.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_swappiness_max.*$/memcg_swappiness_max = 50/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"swappiness_max":50}'
|
||||
```
|
||||
|
||||
### memcg_period_secs
|
||||
Control the mem-agent memcg function wait period seconds.<br>
|
||||
Default to 600.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_period_secs.*$/memcg_period_secs = 600/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"period_secs":600}'
|
||||
```
|
||||
|
||||
### memcg_period_psi_percent_limit
|
||||
Control the mem-agent memcg wait period PSI percent limit.<br>
|
||||
If the percentage of memory and IO PSI stall time within the memcg waiting period for a cgroup exceeds this value, then the memcg run period for this cgroup will not be executed after this waiting period.<br>
|
||||
Default to 1.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_period_psi_percent_limit.*$/memcg_period_psi_percent_limit = 1/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"period_psi_percent_limit":1}'
|
||||
```
|
||||
|
||||
### memcg_eviction_psi_percent_limit
|
||||
Control the mem-agent memcg eviction PSI percent limit.<br>
|
||||
If the percentage of memory and IO PSI stall time for a cgroup exceeds this value during an eviction cycle, the eviction for this cgroup will immediately stop and will not resume until the next memcg waiting period.<br>
|
||||
Default to 1.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_eviction_psi_percent_limit.*$/memcg_eviction_psi_percent_limit = 1/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"eviction_psi_percent_limit":1}'
|
||||
```
|
||||
|
||||
### memcg_eviction_run_aging_count_min
|
||||
Control the mem-agent memcg eviction run aging count min.<br>
|
||||
A cgroup will only perform eviction when the number of aging cycles in memcg is greater than or equal to memcg_eviction_run_aging_count_min.<br>
|
||||
Default to 3.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_eviction_run_aging_count_min.*$/memcg_eviction_run_aging_count_min = 3/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"eviction_run_aging_count_min":3}'
|
||||
```
|
||||
|
||||
## Sub-feature compact
|
||||
The memory control group (memcg) functionality may release a significant number of small pages, but the VM balloon free page reporting feature used by reclaim_guest_freed_memory requires at least a contiguous block of order 10 pages (a page block) to be released from the host.<br>
|
||||
The sub-feature compact is designed to address the issue of fragmented pages.<br>
|
||||
|
||||
During each run period, the compact sub-feature checks the continuity of free pages within the system. If necessary, it will invoke the Linux compaction feature to reorganize fragmented pages.<br>
|
||||
After the run period, the compact sub-feature will enter a sleep period. Once the sleep period is over, it will transition into the next run period, and this cycle will continue.
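For reference, the same mechanisms can be exercised manually inside the guest. The sketch below is illustrative only and assumes a kernel with `CONFIG_COMPACTION`; it shows the per-order free page counts that fragmentation checks look at, and how a full manual compaction can be triggered for comparison:

```bash
$ # free pages per order (columns are order 0..10); low counts in high orders indicate fragmentation
$ cat /proc/buddyinfo
$ # force a one-off compaction of all zones
$ echo 1 | sudo tee /proc/sys/vm/compact_memory
```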
|
||||
|
||||
*The VM balloon free page reporting feature on arm64 with a 64k base page size (arm64_64k) reports order 5 pages. The following is the comment from the Linux kernel:*
|
||||
```
|
||||
/*
|
||||
* The default page reporting order is @pageblock_order, which
|
||||
* corresponds to 512MB in size on ARM64 when 64KB base page
|
||||
* size is used. The page reporting won't be triggered if the
|
||||
* freeing page can't come up with a free area like that huge.
|
||||
* So we specify the page reporting order to 5, corresponding
|
||||
* to 2MB. It helps to avoid THP splitting if 4KB base page
|
||||
* size is used by host.
|
||||
*
|
||||
* Ideally, the page reporting order is selected based on the
|
||||
* host's base page size. However, it needs more work to report
|
||||
* that value. The hard-coded order would be fine currently.
|
||||
*/
|
||||
```
|
||||
|
||||
**The following are the configurations of the sub-feature compact:**
|
||||
|
||||
### compact_disable
|
||||
Controls whether the mem-agent compact function is enabled or disabled.<br>
|
||||
Default to false.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_disable.*$/compact_disable = true/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"disabled":false}'
|
||||
```
|
||||
|
||||
### compact_period_secs
|
||||
Control the mem-agent compaction function wait period seconds.<br>
|
||||
Default to 600.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_period_secs.*$/compact_period_secs = 600/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"period_secs":600}'
|
||||
```
|
||||
|
||||
### compact_period_psi_percent_limit
|
||||
Control the mem-agent compaction function wait period PSI percent limit.<br>
|
||||
If the percentage of memory and IO PSI stall time within the compaction waiting period exceeds this value, then the compaction will not be executed after this waiting period.<br>
|
||||
Default to 1.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_period_psi_percent_limit.*$/compact_period_psi_percent_limit = 1/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"period_psi_percent_limit":1}'
|
||||
```
|
||||
|
||||
### compact_psi_percent_limit
|
||||
Control the mem-agent compaction function compact PSI percent limit.<br>
|
||||
During compaction, the percentage of memory and IO PSI stall time is checked every second. If this percentage exceeds compact_psi_percent_limit, the compaction process will stop.<br>
|
||||
Default to 5.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_psi_percent_limit.*$/compact_psi_percent_limit = 5/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_psi_percent_limit":5}'
|
||||
```
|
||||
|
||||
### compact_sec_max
|
||||
Controls the maximum number of seconds for each compaction run of the mem-agent compact function.<br>
|
||||
If a compaction has been running for longer than compact_sec_max seconds during a compact run period, it is stopped at once.
|
||||
|
||||
Default to 180.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_sec_max.*$/compact_sec_max = 180/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_sec_max":180}'
|
||||
```
|
||||
|
||||
### compact_order
|
||||
compact_order is used together with compact_threshold.<br>
|
||||
The compact_order parameter determines the size (order) of the contiguous page blocks that the mem-agent's compaction functionality aims to achieve.<br>
|
||||
For example, if compact_order is set to 10 in a Kata container guest environment, the compaction function will target acquiring more contiguous pages of order 10, which will allow reclaim_guest_freed_memory to release additional pages.<br>
|
||||
If the goal is to have more free pages of order 9 in the system to ensure a higher likelihood of obtaining transparent huge pages during memory allocation, then setting compact_order to 9 would be appropriate.
|
||||
Default to 9.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_order.*$/compact_order = 9/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_order":9}'
|
||||
```
|
||||
|
||||
### compact_threshold
|
||||
Control the mem-agent compaction function compact threshold.<br>
|
||||
compact_threshold is a number of pages.<br>
|
||||
When examining /proc/pagetypeinfo, compaction is performed during the current run period only if, compared with the state after the previous compaction period, the number of movable pages of orders smaller than compact_order has increased by more than compact_threshold pages, or the number of free pages has decreased by compact_threshold. Otherwise, the current compact run period skips compaction because there are not enough fragmented pages to be worth compacting.<br>
|
||||
This design aims to minimize the impact of unnecessary compaction calls on system performance.<br>
|
||||
Default to 1024.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_threshold.*$/compact_threshold = 1024/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_threshold":1024}'
|
||||
```
|
||||
|
||||
### compact_force_times
|
||||
Control the mem-agent compaction function force compact times.<br>
|
||||
After one compaction during a run period, if there are consecutive instances of compact_force_times run periods where no compaction occurs, a compaction will be forced regardless of the system's memory state.<br>
|
||||
If compact_force_times is set to 0, a force compaction will be done each period.<br>
|
||||
If compact_force_times is set to 18446744073709551615, a force compaction will never be done.<br>
|
||||
Default to 18446744073709551615.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_force_times.*$/compact_force_times = 18446744073709551615/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_force_times":18446744073709551615}'
|
||||
```
|
@ -22,14 +22,16 @@ The packaged installation method uses your distribution's native package format
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------|
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. |
|
||||
| [Using kata-deploy Helm chart](#kata-deploy-helm-chart) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. |
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. |
|
||||
|
||||
### Kata Deploy Helm Chart
|
||||
### Kata Deploy Installation
|
||||
|
||||
The Kata Deploy Helm chart is a convenient way to install all of the binaries and
|
||||
artifacts required to run Kata Containers on Kubernetes.
|
||||
Kata Deploy provides a Dockerfile, which contains all of the binaries and
|
||||
artifacts required to run Kata Containers, as well as reference DaemonSets,
|
||||
which can be utilized to install Kata Containers on a running Kubernetes
|
||||
cluster.
|
||||
|
||||
[Use Kata Deploy Helm Chart](/tools/packaging/kata-deploy/helm-chart/README.md) to install Kata Containers on a Kubernetes Cluster.
|
||||
[Use Kata Deploy](/tools/packaging/kata-deploy/README.md) to install Kata Containers on a Kubernetes Cluster.
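As an illustrative example only (the chart location below is an assumption based on the linked README; adjust it to wherever you obtained the chart), an installation from a checkout of this repository could look like:

```bash
$ git clone https://github.com/kata-containers/kata-containers.git
$ helm install kata-deploy kata-containers/tools/packaging/kata-deploy/helm-chart/kata-deploy
```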
|
||||
|
||||
### Official packages
|
||||
|
||||
|
@ -1,96 +0,0 @@
|
||||
# Using IBM Crypto Express with Confidential Containers
|
||||
|
||||
On IBM Z (s390x), IBM Crypto Express (CEX) hardware security modules (HSM) can be passed through to virtual guests.
|
||||
This VFIO pass-through is domain-wise, i.e. guests can securely share one physical card.
|
||||
For the Accelerator and Enterprise PKCS #11 (EP11) modes of CEX, on IBM z16 and up, pass-through is also supported when using the IBM Secure Execution trusted execution environment.
|
||||
To maintain confidentiality when using EP11 within Secure Execution, additional steps are required.
|
||||
When using Secure Execution within Kata Containers, some of these steps are managed by the Kata agent, but preparation is required to make pass-through work.
|
||||
The Kata agent will expect required confidential information at runtime via [Confidential Data Hub](https://github.com/confidential-containers/guest-components/tree/main/confidential-data-hub) from Confidential Containers, and this guide assumes Confidential Containers components as a means of secret provisioning.
|
||||
|
||||
At the time of writing, devices for trusted execution environments are only supported via the `--device` option of e.g. `ctr`, `docker`, or `podman`, but **not** via Kubernetes.
|
||||
Refer to [KEP 4113](https://github.com/kubernetes/enhancements/pull/4113) for details.
|
||||
|
||||
Using a CEX card in Accelerator mode is much simpler and does not require the steps below.
|
||||
To do so, prepare [Kata for Secure Execution](../how-to/how-to-run-kata-containers-with-SE-VMs.md), set `vfio_mode = "vfio"` and `cold_plug_vfio = "bridge-port"` in the Kata `configuration.toml` file and use a [mediated device](../../src/runtime/virtcontainers/README.md#how-to-pass-a-device-using-vfio-ap-passthrough) similar to operating without Secure Execution.
|
||||
The Kata agent will do the [Secure Execution bind](https://www.ibm.com/docs/en/linux-on-systems?topic=adapters-accelerator-mode) automatically.
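As a minimal sketch of those two settings (the configuration path below assumes a kata-deploy style installation and may differ on your system, as may the exact commented-out form of the options):

```
[host]# config_file=/opt/kata/share/defaults/kata-containers/configuration.toml
[host]# sed -i -e 's|^#\?vfio_mode.*|vfio_mode = "vfio"|' \
               -e 's|^#\?cold_plug_vfio.*|cold_plug_vfio = "bridge-port"|' "$config_file"
```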
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- A host kernel that supports adjunct processor (AP) pass-through with Secure Execution. [Official support](https://www.ibm.com/docs/en/linux-on-systems?topic=restrictions-required-software) exists as of Ubuntu 24.04, RHEL 8.10 and 9.4, and SLES 15 SP6.
|
||||
- An EP11 domain with a master key set up. In this process, you will need the master key verification pattern (MKVP) [1].
|
||||
- A [mediated device](../../src/runtime/virtcontainers/README.md#how-to-pass-a-device-using-vfio-ap-passthrough), created from this domain, to pass through.
|
||||
- Working [Kata Containers with Secure Execution](../how-to/how-to-run-kata-containers-with-SE-VMs.md).
|
||||
- Working access to a [key broker service (KBS) with the IBM Secure Execution verifier](https://github.com/confidential-containers/trustee/blob/main/deps/verifier/src/se/README.md) from a Kata container. The provided Secure Execution header must match the Kata guest image and a policy to allow the appropriate secrets for this guest must be set up.
|
||||
- In Kata's `configuration.toml`, set `vfio_mode = "vfio"` and `cold_plug_vfio = "bridge-port"`
|
||||
|
||||
## Prepare an association secret
|
||||
|
||||
An EP11 Secure Execution workload requires an [association secret](https://www.ibm.com/docs/en/linux-on-systems?topic=adapters-ep11-mode) to be inserted in the guest and associated with the adjunct processor (AP) queue.
|
||||
In Kata Containers, this secret must be created and made available via Trustee, whereas the Kata agent performs the actual secret insertion and association.
|
||||
On a trusted system, to create an association secret named `my secret` using the host key document (HKD) `z16.crt`, a guest header `hdr.bin`, a CA certificate `DigiCertCA.crt`, and an IBM signing key `ibm-z-host-key-signing-gen2.crt`, letting the command generate the random association secret and save it to `my_random_secret`, run:
|
||||
|
||||
```
|
||||
[trusted]# pvsecret create -k z16.crt --hdr hdr.bin -o my_addsecreq \
|
||||
--crt DigiCertCA.crt --crt ibm-z-host-key-signing-gen2.crt \
|
||||
association "my secret" --output-secret my_random_secret
|
||||
```
|
||||
|
||||
using `pvsecret` from the [s390-tools](https://github.com/ibm-s390-linux/s390-tools) suite.
|
||||
`hdr.bin` **must** be the Secure Execution header matching the Kata guest image, i.e. the one also provided to Trustee.
|
||||
This command saves the add-secret request itself to `my_addsecreq`, and information on the secret, including the secret ID, to `my_secret.yaml`.
|
||||
This secret ID must be provided alongside the secret.
|
||||
Write it to `my_addsecid`, with or without a leading `0x`, for example using `yq`:
|
||||
|
||||
```
|
||||
[trusted]# yq ".id" my_secret.yaml > my_addsecid
|
||||
```
|
||||
|
||||
## Provision the association secret with Trustee
|
||||
|
||||
The secret and secret ID must be provided via Trustee with respect to the MKVP.
|
||||
The paths where the Kata agent will expect this info are `vfio_ap/${mkvp}/secret` and `vfio_ap/${mkvp}/secret_id`, where `$mkvp` is the first 16 bytes (32 hex numbers) without leading `0x` of the MKVP.
|
||||
|
||||
For example, if your MKVPs read [1] as
|
||||
|
||||
```
|
||||
WK CUR: valid 0xdb3c3b3c3f097dd55ec7eb0e7fdbcb933b773619640a1a75a9161cec00000000
|
||||
WK NEW: empty -
|
||||
```
|
||||
|
||||
use `db3c3b3c3f097dd55ec7eb0e7fdbcb93` in the provision for Trustee.
|
||||
With a KBS running at `127.0.0.1:8080`, to store the secret and ID created above in the KBS with the authentication key `kbs.key` and this MKVP, run:
|
||||
|
||||
```
|
||||
[trusted]# kbs-client --url http://127.0.0.1:8080 config \
|
||||
--auth-private-key kbs.key set-resource \
|
||||
--path vfio_ap/db3c3b3c3f097dd55ec7eb0e7fdbcb93/secret \
|
||||
--resource-file my_addsecreq
|
||||
[trusted]# kbs-client --url http://127.0.0.1:8080 config \
|
||||
--auth-private-key kbs.key set-resource \
|
||||
--path vfio_ap/db3c3b3c3f097dd55ec7eb0e7fdbcb93/secret_id \
|
||||
--resource-file my_addsecid
|
||||
```
|
||||
|
||||
## Run the workload
|
||||
|
||||
Assuming the mediated device exists at `/dev/vfio/0`, run e.g.
|
||||
|
||||
```
|
||||
[host]# docker run --rm --runtime io.containerd.run.kata.v2 --device /dev/vfio/0 -it ubuntu
|
||||
```
|
||||
|
||||
If you have [s390-tools](https://github.com/ibm-s390-linux/s390-tools) available in the container, you can see the available CEX domains including Secure Execution info using `lszcrypt -V`:
|
||||
|
||||
```
|
||||
[container]# lszcrypt -V
|
||||
CARD.DOM TYPE MODE STATUS REQUESTS PENDING HWTYPE QDEPTH FUNCTIONS DRIVER SESTAT
|
||||
--------------------------------------------------------------------------------------------------------
|
||||
03 CEX8P EP11-Coproc online 2 0 14 08 -----XN-F- cex4card -
|
||||
03.0041 CEX8P EP11-Coproc online 2 0 14 08 -----XN-F- cex4queue usable
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
[1] If you have access to the host, the MKVP can be read at `/sys/bus/ap/card${cardno}/${apqn}/mkvps`, where `${cardno}` is the two-digit hexadecimal identification for the card, and `${apqn}` is the APQN of the domain you want to pass, e.g. `card03/03.0041` for the domain 0x41 on card 3.
|
||||
This information is only readable when card and domain are not yet masked for use with VFIO.
|
||||
If you do not have access to the host, you should receive the MKVP from your HSM domain administrator.
|
@ -1,3 +0,0 @@
|
||||
[toolchain]
|
||||
# Keep in sync with versions.yaml
|
||||
channel = "1.85.1"
|
17
shellcheckrc
17
shellcheckrc
@ -1,17 +0,0 @@
|
||||
# Allow opening any 'source'd file, even if not specified as input
|
||||
external-sources=true
|
||||
|
||||
# Turn on warnings for unquoted variables with safe values
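# e.g. write echo "${PWD}" rather than echo $PWD, even though the value contains no special characters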
|
||||
enable=quote-safe-variables
|
||||
|
||||
# Turn on warnings for unassigned uppercase variables
|
||||
enable=check-unassigned-uppercase
|
||||
|
||||
# Enforces braces around variable expansions to avoid ambiguity or confusion.
|
||||
# e.g. ${filename} rather than $filename
|
||||
enable=require-variable-braces
|
||||
|
||||
# Requires double-bracket syntax [[ expr ]] for safer, more consistent tests.
|
||||
# NO: if [ "$var" = "value" ]
|
||||
# YES: if [[ $var == "value" ]]
|
||||
enable=require-double-brackets
|
4130
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
@ -1,45 +1,52 @@
[workspace]
members = ["rustjail", "policy", "vsock-exporter"]

[workspace.package]
[package]
name = "kata-agent"
version = "0.1.0"
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
edition = "2018"
license = "Apache-2.0"
rust-version = "1.85.1"

[workspace.dependencies]
[dependencies]
runtime-spec = { path = "../libs/runtime-spec" }
mem-agent = { path = "../mem-agent" }
oci-spec = { version = "0.6.8", features = ["runtime"] }
rustjail = { path = "rustjail" }
protocols = { path = "../libs/protocols", features = ["async", "with-serde"] }
lazy_static = "1.3.0"
ttrpc = { version = "0.8.4", features = ["async"], default-features = false }
protobuf = "=3.7.1"
libc = "0.2.94"
protobuf = "3.2.0"
libc = "0.2.58"
nix = "0.24.2"
capctl = "0.2.0"
scan_fmt = "0.2.6"
serde_json = "1.0.39"
scan_fmt = "0.2.3"
scopeguard = "1.0.0"
thiserror = "1.0.26"
regex = "1.10.5"
serial_test = "0.5.1"
url = "2.5.0"
derivative = "2.2.0"
kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
safe-path = { path = "../libs/safe-path" }
const_format = "0.2.30"

# Async helpers
async-trait = "0.1.50"
async-trait = "0.1.42"
async-recursion = "0.3.2"
futures = "0.3.30"

# Async runtime
tokio = { version = "1.44.2", features = ["full"] }
tokio = { version = "1.39.0", features = ["full"] }
tokio-vsock = "0.3.4"

netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
rtnetlink = "0.14.0"
netlink-packet-route = "0.19.0"
netlink-packet-core = "0.7.0"
rtnetlink = "0.8.0"
netlink-packet-utils = "0.4.1"
ipnetwork = "0.17.0"


# Note: this crate sets the slog 'max_*' features which allows the log level
# to be modified at runtime.
logging = { path = "../libs/logging" }
slog = "2.5.2"
slog-scope = "4.1.2"
slog-term = "2.9.0"
@ -54,154 +61,55 @@ procfs = "0.12.0"

anyhow = "1"

cgroups = { package = "cgroups-rs", git = "https://github.com/kata-containers/cgroups-rs", rev = "v0.3.5" }
cgroups = { package = "cgroups-rs", version = "0.3.3" }

# Tracing
tracing = "0.1.41"
tracing = "0.1.26"
tracing-subscriber = "0.2.18"
tracing-opentelemetry = "0.13.0"
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"] }
vsock-exporter = { path = "vsock-exporter" }

# Configuration
serde = { version = "1.0.129", features = ["derive"] }
serde_json = "1.0.39"
toml = "0.5.8"
clap = { version = "4.5.40", features = ["derive"] }
clap = { version = "3.0.1", features = ["derive"] }
strum = "0.26.2"
strum_macros = "0.26.2"

tempfile = "3.19.1"
# Image pull/decrypt
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "514c561d933cb11a0f1628621a0b930157af76cd", default-features = false, optional = true }

# Agent Policy
regorus = { version = "0.2.6", default-features = false, features = [
    "arc",
    "regex",
    "std",
], optional = true }
cdi = { git = "https://github.com/cncf-tags/container-device-interface-rs", rev = "fba5677a8e7cc962fc6e495fcec98d7d765e332a" }
json-patch = "2.0.0"
kata-agent-policy = { path = "policy" }

[dev-dependencies]
tempfile = "3.1.0"
test-utils = { path = "../libs/test-utils" }
which = "4.3.0"
rstest = "0.18.0"
async-std = { version = "1.12.0", features = ["attributes"] }

# Local dependencies
kata-agent-policy = { path = "policy" }
rustjail = { path = "rustjail" }
vsock-exporter = { path = "vsock-exporter" }

mem-agent = { path = "../mem-agent" }

kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
# Note: this crate sets the slog 'max_*' features which allows the log level
# to be modified at runtime.
logging = { path = "../libs/logging" }
protocols = { path = "../libs/protocols" }
runtime-spec = { path = "../libs/runtime-spec" }
safe-path = { path = "../libs/safe-path" }
test-utils = { path = "../libs/test-utils" }


[package]
name = "kata-agent"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
oci-spec.workspace = true
lazy_static.workspace = true
ttrpc.workspace = true
protobuf.workspace = true
libc.workspace = true
nix.workspace = true
capctl.workspace = true
serde_json.workspace = true
scan_fmt.workspace = true
scopeguard.workspace = true
thiserror.workspace = true
regex.workspace = true
serial_test.workspace = true
url.workspace = true
derivative.workspace = true
const_format.workspace = true

# Async helpers
async-trait.workspace = true
async-recursion.workspace = true
futures.workspace = true

# Async runtime
tokio.workspace = true
tokio-vsock.workspace = true

netlink-sys.workspace = true
rtnetlink.workspace = true
netlink-packet-route.workspace = true
netlink-packet-core.workspace = true
ipnetwork.workspace = true

slog.workspace = true
slog-scope.workspace = true
slog-term.workspace = true

# Redirect ttrpc log calls
slog-stdlog.workspace = true
log.workspace = true

cfg-if.workspace = true
prometheus.workspace = true
procfs.workspace = true

anyhow.workspace = true

cgroups.workspace = true

# Tracing
tracing.workspace = true
tracing-subscriber.workspace = true
tracing-opentelemetry.workspace = true
opentelemetry.workspace = true

# Configuration
serde.workspace = true
toml.workspace = true
clap.workspace = true
strum.workspace = true
strum_macros.workspace = true

# Agent Policy
cdi = { git = "https://github.com/cncf-tags/container-device-interface-rs", rev = "fba5677a8e7cc962fc6e495fcec98d7d765e332a" }

# Local dependencies
kata-agent-policy = { workspace = true, optional = true }
mem-agent.workspace = true
rustjail.workspace = true
protocols = { workspace = true, features = ["async", "with-serde"] }
kata-sys-util.workspace = true
kata-types.workspace = true
runtime-spec.workspace = true
safe-path.workspace = true
# Note: this crate sets the slog 'max_*' features which allows the log level
# to be modified at runtime.
logging.workspace = true
vsock-exporter.workspace = true

# Initdata
base64 = "0.22"
sha2 = "0.10.8"
async-compression = { version = "0.4.22", features = ["tokio", "gzip"] }

[target.'cfg(target_arch = "s390x")'.dependencies]
pv_core = { git = "https://github.com/ibm-s390-linux/s390-tools", rev = "4942504a9a2977d49989a5e5b7c1c8e07dc0fa41", package = "s390_pv_core" }

[dev-dependencies]
tempfile.workspace = true
which.workspace = true
rstest.workspace = true
async-std.workspace = true

test-utils.workspace = true
[workspace]
members = ["rustjail", "policy"]

[profile.release]
lto = true

[features]
# The default-pull feature would support all pull types, including sharing images by virtio-fs and pulling images in the guest
default-pull = ["guest-pull"]
seccomp = ["rustjail/seccomp"]
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
agent-policy = ["kata-agent-policy"]
agent-policy = ["regorus"]
guest-pull = ["image-rs/kata-cc-rustls-tls"]

[[bin]]
name = "kata-agent"
@ -41,8 +41,22 @@ ifeq ($(AGENT_POLICY),yes)
override EXTRA_RUSTFEATURES += agent-policy
endif

##VAR PULL_TYPE=default|guest-pull define if agent enables the guest pull image feature
PULL_TYPE ?= default
ifeq ($(PULL_TYPE),default)
override EXTRA_RUSTFEATURES += default-pull
# Enable guest pull image feature of rust build
else ifeq ($(PULL_TYPE),guest-pull)
override EXTRA_RUSTFEATURES += guest-pull
endif


include ../../utils.mk

ifeq ($(ARCH), ppc64le)
override ARCH = powerpc64le
endif

##VAR STANDARD_OCI_RUNTIME=yes|no define if agent enables standard oci runtime feature
STANDARD_OCI_RUNTIME := no
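
For context, the new `PULL_TYPE` knob above is selected at build time like any other Makefile variable; an illustrative (not prescriptive) invocation from the repository root would be:

```
$ make -C src/agent AGENT_POLICY=yes PULL_TYPE=guest-pull
```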
@ -129,7 +129,6 @@ The kata agent has the ability to configure agent options in guest kernel comman
| `agent.guest_components_procs` | guest-components processes | Attestation-related processes that should be spawned as children of the guest. Valid values are `none`, `attestation-agent`, `confidential-data-hub` (implies `attestation-agent`), `api-server-rest` (implies `attestation-agent` and `confidential-data-hub`) | string | `api-server-rest` |
| `agent.hotplug_timeout` | Hotplug timeout | Allow to configure hotplug timeout(seconds) of block devices | integer | `3` |
| `agent.cdh_api_timeout` | Confidential Data Hub (CDH) API timeout | Allow to configure CDH API timeout(seconds) | integer | `50` |
| `agent.image_pull_timeout` | Confidential Data Hub (CDH) Image Pull API timeout | Allow to configure CDH API image pull timeout(seconds) | integer | `1200` |
| `agent.https_proxy` | HTTPS proxy | Allow to configure `https_proxy` in the guest | string | `""` |
| `agent.image_registry_auth` | Image registry credential URI | The URI to where image-rs can find the credentials for pulling images from private registries e.g. `file:///root/.docker/config.json` to read from a file in the guest image, or `kbs:///default/credentials/test` to get the file from the KBS| string | `""` |
| `agent.enable_signature_verification` | Image security policy flag | Whether enable image security policy enforcement. If `true`, the resource indexed by URI `agent.image_policy_file` will be got to work as image pulling policy. | string | `""` |
@ -149,7 +148,7 @@ The kata agent has the ability to configure agent options in guest kernel comman
> The agent will fail to start if the configuration file is not present,
> or if it can't be parsed properly.
> - `agent.devmode`: true | false
> - `agent.hotplug_timeout`, `agent.image_pull_timeout` and `agent.cdh_api_timeout`: a whole number of seconds
> - `agent.hotplug_timeout` and `agent.cdh_api_timeout`: a whole number of seconds
> - `agent.log`: "critical"("fatal" | "panic") | "error" | "warn"("warning") | "info" | "debug"
> - `agent.server_addr`: "{VSOCK_ADDR}:{VSOCK_PORT}"
> - `agent.trace`: true | false
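
For illustration only (this is not part of the diff): the `agent.*` options above are plain `key=value` pairs on the guest kernel command line, typically injected via the `kernel_params` setting of the Kata runtime configuration, with example values such as:

```
agent.log=debug agent.hotplug_timeout=10 agent.cdh_api_timeout=60 agent.image_pull_timeout=600
```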
@ -15,7 +15,7 @@ Wants=kata-containers.target
StandardOutput=tty
Type=simple
ExecStart=@BINDIR@/@AGENT_NAME@
LimitNOFILE=1073741824
LimitNOFILE=1048576
# ExecStop is required for static agent tracing; in all other scenarios
# the runtime handles shutting down the VM.
ExecStop=/bin/sync ; /usr/bin/systemctl --force poweroff
Some files were not shown because too many files have changed in this diff