Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-05-05 06:57:26 +00:00)
Compare commits
505 Commits
Commit list: 505 commits, fbf7faa9f4 … e82fdee20f (SHA1 column only; author, date, and message columns empty).
Changed paths:
  .github/
    actionlint.yaml
    cargo-deny-composite-action/
    workflows/: PR-wip-checks.yaml, add-pr-sizing-label.yaml, basic-ci-amd64.yaml, basic-ci-s390x.yaml, build-checks-preview-riscv64.yaml, build-checks.yaml, build-kata-static-tarball-amd64.yaml, build-kata-static-tarball-arm64.yaml, build-kata-static-tarball-ppc64le.yaml, build-kata-static-tarball-riscv64.yaml, build-kata-static-tarball-s390x.yaml, ci-devel.yaml, ci-nightly-s390x.yaml, ci-weekly.yaml, ci.yaml, codeql.yml, commit-message-check.yaml, darwin-tests.yaml, docs-url-alive-check.yaml, payload-after-push.yaml, publish-kata-deploy-payload-amd64.yaml, publish-kata-deploy-payload-arm64.yaml, publish-kata-deploy-payload-s390x.yaml, publish-kata-deploy-payload.yaml, release-amd64.yaml, release-arm64.yaml, release-ppc64le.yaml, release-s390x.yaml, release.yaml, run-cri-containerd-tests-ppc64le.yaml, run-cri-containerd-tests-s390x.yaml, run-k8s-tests-on-aks.yaml, run-k8s-tests-on-arm64.yaml, run-k8s-tests-on-ppc64le.yaml, run-kata-coco-stability-tests.yaml, run-kata-coco-tests.yaml, run-kata-deploy-tests-on-aks.yaml, run-kata-deploy-tests.yaml, run-metrics.yaml, shellcheck.yaml, shellcheck_required.yaml, static-checks-self-hosted.yaml, static-checks.yaml
  VERSION
  ci/
  docs/how-to/
  rust-toolchain.toml
  shellcheckrc
  src/
    agent/
    dragonball/
.github/actionlint.yaml (2 lines changed, vendored)

@@ -7,6 +7,7 @@
self-hosted-runner:
# Labels of self-hosted runner that linter should ignore
labels:
- arm64-k8s
- ubuntu-22.04-arm
- garm-ubuntu-2004
- garm-ubuntu-2004-smaller
@@ -17,6 +18,7 @@ self-hosted-runner:
- k8s-ppc64le
- metrics
- ppc64le
- riscv-builder
- sev
- sev-snp
- s390x
@@ -21,7 +21,7 @@ runs:
override: true

- name: Cache
uses: Swatinem/rust-cache@v2
uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7

- name: Install Cargo deny
shell: bash
.github/workflows/PR-wip-checks.yaml (2 lines changed, vendored)

@@ -20,7 +20,7 @@ jobs:
steps:
- name: WIP Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/wip-check@1c2a1ca6c110026b3e2297bb2ef39e1747b5a755
uses: tim-actions/wip-check@1c2a1ca6c110026b3e2297bb2ef39e1747b5a755 # master (2021-06-10)
with:
labels: '["do-not-merge", "wip", "rfc"]'
keywords: '["WIP", "wip", "RFC", "rfc", "dnm", "DNM", "do-not-merge"]'
.github/workflows/add-pr-sizing-label.yaml (2 lines changed, vendored)

@@ -48,6 +48,6 @@ jobs:
pr=${{ github.event.number }}
# Removing man-db, workflow kept failing, fixes: #4480
sudo apt -y remove --purge man-db
sudo apt -y install diffstat patchutils
sudo apt update && sudo apt -y install diffstat patchutils

pr-add-size-label.sh -p "$pr"
.github/workflows/basic-ci-amd64.yaml (6 lines changed, vendored)

@@ -63,15 +63,13 @@ jobs:
# all the tests due to a single flaky instance.
fail-fast: false
matrix:
containerd_version: ['latest']
containerd_version: ['active']
vmm: ['dragonball', 'cloud-hypervisor', 'qemu-runtime-rs']
runs-on: ubuntu-22.04
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
#the latest containerd from 2.0 need to set the CGROUP_DRIVER for e2e testing
CGROUP_DRIVER: ""
SANDBOXER: "shim"
steps:
- uses: actions/checkout@v4
@@ -369,6 +367,8 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Install dependencies
env:
GITHUB_API_TOKEN: ${{ github.token }}
run: bash tests/integration/nerdctl/gha-run.sh install-dependencies

- name: get-kata-tarball
.github/workflows/basic-ci-s390x.yaml (178 lines added, vendored, new file)

@@ -0,0 +1,178 @@
name: CI | Basic s390x tests
on:
workflow_call:
inputs:
tarball-suffix:
required: false
type: string
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""

jobs:
run-cri-containerd:
strategy:
# We can set this to true whenever we're 100% sure that
# the all the tests are not flaky, otherwise we'll fail
# all the tests due to a single flaky instance
fail-fast: false
matrix:
containerd_version: ['active']
vmm: ['qemu', 'qemu-runtime-rs']
runs-on: s390x-large
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies

- name: get-kata-tarball
uses: actions/download-artifact@v4
with:
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
path: kata-artifacts

- name: Install kata
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts

- name: Run cri-containerd tests
run: bash tests/integration/cri-containerd/gha-run.sh run

run-containerd-sandboxapi:
strategy:
# We can set this to true whenever we're 100% sure that
# the all the tests are not flaky, otherwise we'll fail
# all the tests due to a single flaky instance.
fail-fast: false
matrix:
containerd_version: ['active']
vmm: ['qemu-runtime-rs']
runs-on: s390x-large
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
SANDBOXER: "shim"
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Install dependencies
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies

- name: get-kata-tarball
uses: actions/download-artifact@v4
with:
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
path: kata-artifacts

- name: Install kata
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts

- name: Run containerd-sandboxapi tests
timeout-minutes: 10
run: bash tests/integration/cri-containerd/gha-run.sh run

run-containerd-stability:
strategy:
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['qemu']
runs-on: s390x-large
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
GOPATH: ${{ github.workspace }}
KATA_HYPERVISOR: ${{ matrix.vmm }}
SANDBOXER: "podsandbox"
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Install dependencies
run: bash tests/stability/gha-run.sh install-dependencies

- name: get-kata-tarball
uses: actions/download-artifact@v4
with:
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
path: kata-artifacts

- name: Install kata
run: bash tests/stability/gha-run.sh install-kata kata-artifacts

- name: Run containerd-stability tests
timeout-minutes: 15
run: bash tests/stability/gha-run.sh run

run-docker-tests:
strategy:
# We can set this to true whenever we're 100% sure that
# all the tests are not flaky, otherwise we'll fail them
# all due to a single flaky instance.
fail-fast: false
matrix:
vmm: ['qemu']
runs-on: s390x-large
env:
KATA_HYPERVISOR: ${{ matrix.vmm }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Install dependencies
run: bash tests/integration/docker/gha-run.sh install-dependencies

- name: get-kata-tarball
uses: actions/download-artifact@v4
with:
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
path: kata-artifacts

- name: Install kata
run: bash tests/integration/docker/gha-run.sh install-kata kata-artifacts

- name: Run docker smoke test
timeout-minutes: 5
run: bash tests/integration/docker/gha-run.sh run
.github/workflows/build-checks-preview-riscv64.yaml (120 lines added, vendored, new file)

@@ -0,0 +1,120 @@
# This yaml is designed to be used until all components listed in
# `build-checks.yaml` are supported
on:
workflow_dispatch:
inputs:
instance:
default: "riscv-builder"
description: "Default instance when manually triggering"
workflow_call:
inputs:
instance:
required: true
type: string

name: Build checks preview riscv64
jobs:
check:
runs-on: ${{ inputs.instance }}
strategy:
fail-fast: false
matrix:
command:
- "make vendor"
- "make check"
- "make test"
- "sudo -E PATH=\"$PATH\" make test"
component:
- name: agent-ctl
path: src/tools/agent-ctl
needs:
- rust
- musl-tools
- protobuf-compiler
- clang
- name: trace-forwarder
path: src/tools/trace-forwarder
needs:
- rust
- musl-tools
- name: genpolicy
path: src/tools/genpolicy
needs:
- rust
- musl-tools
- protobuf-compiler
- name: runtime
path: src/runtime
needs:
- golang
- XDG_RUNTIME_DIR
- name: runtime-rs
path: src/runtime-rs
needs:
- rust

steps:
- name: Adjust a permission for repo
run: |
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE" "$HOME"
sudo rm -rf "$GITHUB_WORKSPACE"/* || { sleep 10 && sudo rm -rf "$GITHUB_WORKSPACE"/*; }
sudo rm -f /tmp/kata_hybrid* # Sometime we got leftover from test_setup_hvsock_failed()

- name: Checkout the code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Install yq
run: |
./ci/install_yq.sh
env:
INSTALL_IN_GOPATH: false
- name: Install golang
if: contains(matrix.component.needs, 'golang')
run: |
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
- name: Setup rust
if: contains(matrix.component.needs, 'rust')
run: |
./tests/install_rust.sh
echo "${HOME}/.cargo/bin" >> "$GITHUB_PATH"
if [ "$(uname -m)" == "x86_64" ] || [ "$(uname -m)" == "aarch64" ]; then
sudo apt-get update && sudo apt-get -y install musl-tools
fi
- name: Install devicemapper
if: contains(matrix.component.needs, 'libdevmapper') && matrix.command == 'make check'
run: sudo apt-get update && sudo apt-get -y install libdevmapper-dev
- name: Install libseccomp
if: contains(matrix.component.needs, 'libseccomp') && matrix.command != 'make vendor' && matrix.command != 'make check'
run: |
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
echo "LIBSECCOMP_LINK_TYPE=static" >> "$GITHUB_ENV"
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> "$GITHUB_ENV"
- name: Install protobuf-compiler
if: contains(matrix.component.needs, 'protobuf-compiler') && matrix.command != 'make vendor'
run: sudo apt-get update && sudo apt-get -y install protobuf-compiler
- name: Install clang
if: contains(matrix.component.needs, 'clang') && matrix.command == 'make check'
run: sudo apt-get update && sudo apt-get -y install clang
- name: Setup XDG_RUNTIME_DIR
if: contains(matrix.component.needs, 'XDG_RUNTIME_DIR') && matrix.command != 'make check'
run: |
XDG_RUNTIME_DIR=$(mktemp -d "/tmp/kata-tests-$USER.XXX" | tee >(xargs chmod 0700))
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> "$GITHUB_ENV"
- name: Skip tests that depend on virtualization capable runners when needed
if: inputs.instance == 'riscv-builder'
run: |
echo "GITHUB_RUNNER_CI_NON_VIRT=true" >> "$GITHUB_ENV"
- name: Running `${{ matrix.command }}` for ${{ matrix.component.name }}
run: |
cd ${{ matrix.component.path }}
${{ matrix.command }}
env:
RUST_BACKTRACE: "1"
RUST_LIB_BACKTRACE: "0"
SKIP_GO_VERSION_CHECK: "1"
.github/workflows/build-checks.yaml (110 lines changed, vendored)

@@ -12,40 +12,53 @@ jobs:
strategy:
fail-fast: false
matrix:
component:
- agent
- dragonball
- runtime
- runtime-rs
- agent-ctl
- kata-ctl
- trace-forwarder
- genpolicy
command:
- "make vendor"
- "make check"
- "make test"
- "sudo -E PATH=\"$PATH\" make test"
include:
- component: agent
component-path: src/agent
- component: dragonball
component-path: src/dragonball
- component: runtime
component-path: src/runtime
- component: runtime-rs
component-path: src/runtime-rs
- component: agent-ctl
component-path: src/tools/agent-ctl
- component: kata-ctl
component-path: src/tools/kata-ctl
- component: trace-forwarder
component-path: src/tools/trace-forwarder
- install-libseccomp: no
- component: agent
install-libseccomp: yes
- component: genpolicy
component-path: src/tools/genpolicy
component:
- name: agent
path: src/agent
needs:
- rust
- libdevmapper
- libseccomp
- protobuf-compiler
- clang
- name: dragonball
path: src/dragonball
needs:
- rust
- name: runtime
path: src/runtime
needs:
- golang
- XDG_RUNTIME_DIR
- name: runtime-rs
path: src/runtime-rs
needs:
- rust
- name: agent-ctl
path: src/tools/agent-ctl
needs:
- rust
- protobuf-compiler
- clang
- name: kata-ctl
path: src/tools/kata-ctl
needs:
- rust
- name: trace-forwarder
path: src/tools/trace-forwarder
needs:
- rust
- name: genpolicy
path: src/tools/genpolicy
needs:
- rust
- protobuf-compiler

steps:
- name: Adjust a permission for repo
run: |
@@ -64,23 +77,23 @@ jobs:
env:
INSTALL_IN_GOPATH: false
- name: Install golang
if: ${{ matrix.component == 'runtime' }}
if: contains(matrix.component.needs, 'golang')
run: |
./tests/install_go.sh -f -p
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
- name: Install rust
if: ${{ matrix.component != 'runtime' }}
- name: Setup rust
if: contains(matrix.component.needs, 'rust')
run: |
./tests/install_rust.sh
echo "${HOME}/.cargo/bin" >> "$GITHUB_PATH"
- name: Install musl-tools
if: ${{ matrix.component != 'runtime' }}
run: sudo apt-get -y install musl-tools
if [ "$(uname -m)" == "x86_64" ] || [ "$(uname -m)" == "aarch64" ]; then
sudo apt-get update && sudo apt-get -y install musl-tools
fi
- name: Install devicemapper
if: ${{ matrix.command == 'make check' && matrix.component == 'agent' }}
run: sudo apt-get -y install libdevmapper-dev
if: contains(matrix.component.needs, 'libdevmapper') && matrix.command == 'make check'
run: sudo apt-get update && sudo apt-get -y install libdevmapper-dev
- name: Install libseccomp
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.install-libseccomp == 'yes' }}
if: contains(matrix.component.needs, 'libseccomp') && matrix.command != 'make vendor' && matrix.command != 'make check'
run: |
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
@@ -89,20 +102,25 @@ jobs:
echo "LIBSECCOMP_LINK_TYPE=static" >> "$GITHUB_ENV"
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> "$GITHUB_ENV"
- name: Install protobuf-compiler
if: ${{ matrix.command != 'make vendor' && (matrix.component == 'agent' || matrix.component == 'genpolicy' || matrix.component == 'agent-ctl') }}
run: sudo apt-get -y install protobuf-compiler
if: contains(matrix.component.needs, 'protobuf-compiler') && matrix.command != 'make vendor'
run: sudo apt-get update && sudo apt-get -y install protobuf-compiler
- name: Install clang
if: ${{ matrix.command == 'make check' && (matrix.component == 'agent' || matrix.component == 'agent-ctl') }}
run: sudo apt-get -y install clang
- name: Setup XDG_RUNTIME_DIR for the `runtime` tests
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.component == 'runtime' }}
if: contains(matrix.component.needs, 'clang') && matrix.command == 'make check'
run: sudo apt-get update && sudo apt-get -y install clang
- name: Setup XDG_RUNTIME_DIR
if: contains(matrix.component.needs, 'XDG_RUNTIME_DIR') && matrix.command != 'make check'
run: |
XDG_RUNTIME_DIR=$(mktemp -d "/tmp/kata-tests-$USER.XXX" | tee >(xargs chmod 0700))
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> "$GITHUB_ENV"
- name: Running `${{ matrix.command }}` for ${{ matrix.component }}
- name: Skip tests that depend on virtualization capable runners when needed
if: ${{ endsWith(inputs.instance, '-arm') }}
run: |
cd ${{ matrix.component-path }}
echo "GITHUB_RUNNER_CI_NON_VIRT=true" >> "$GITHUB_ENV"
- name: Running `${{ matrix.command }}` for ${{ matrix.component.name }}
run: |
cd ${{ matrix.component.path }}
${{ matrix.command }}
env:
RUST_BACKTRACE: "1"
RUST_LIB_BACKTRACE: "0"
SKIP_GO_VERSION_CHECK: "1"
@@ -54,6 +54,7 @@ jobs:
- pause-image
- qemu
- qemu-snp-experimental
- qemu-tdx-experimental
- stratovirt
- trace-forwarder
- virtiofsd
@@ -67,7 +68,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -109,13 +110,13 @@ jobs:
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"

- uses: oras-project/setup-oras@v1
- uses: oras-project/setup-oras@5c0b487ce3fe0ce3ab0d034e63669e426e294e4d # v1.2.2
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
version: "1.2.0"

# for pushing attestations to the registry
- uses: docker/login-action@v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
registry: ghcr.io
@@ -162,7 +163,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -225,7 +226,7 @@ jobs:
- kernel-nvidia-gpu-confidential-headers
- pause-image
steps:
- uses: geekyeggo/delete-artifact@v5
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
with:
name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}

@@ -238,7 +239,7 @@ jobs:
asset:
- agent
steps:
- uses: geekyeggo/delete-artifact@v5
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
with:
name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
@@ -249,7 +250,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -48,7 +48,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -89,13 +89,13 @@ jobs:
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"

- uses: oras-project/setup-oras@v1
- uses: oras-project/setup-oras@5c0b487ce3fe0ce3ab0d034e63669e426e294e4d # v1.2.2
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
version: "1.2.0"

# for pushing attestations to the registry
- uses: docker/login-action@v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
registry: ghcr.io
@@ -138,7 +138,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -197,7 +197,7 @@ jobs:
- busybox
- kernel-nvidia-gpu-headers
steps:
- uses: geekyeggo/delete-artifact@v5
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
with:
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}

@@ -210,7 +210,7 @@ jobs:
asset:
- agent
steps:
- uses: geekyeggo/delete-artifact@v5
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
with:
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
@@ -221,7 +221,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -34,15 +34,9 @@ jobs:
stage:
- ${{ inputs.stage }}
steps:
- name: Prepare the self-hosted runner
timeout-minutes: 15
run: |
"${HOME}/scripts/prepare_runner.sh"
sudo rm -rf "$GITHUB_WORKSPACE"/*

- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -93,15 +87,9 @@ jobs:
stage:
- ${{ inputs.stage }}
steps:
- name: Prepare the self-hosted runner
timeout-minutes: 15
run: |
"${HOME}/scripts/prepare_runner.sh"
sudo rm -rf "$GITHUB_WORKSPACE"/*

- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -159,7 +147,7 @@ jobs:
asset:
- agent
steps:
- uses: geekyeggo/delete-artifact@v5
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
with:
name: kata-artifacts-ppc64le-${{ matrix.asset}}${{ inputs.tarball-suffix }}
@@ -168,15 +156,9 @@ jobs:
runs-on: ppc64le
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
steps:
- name: Prepare the self-hosted runner
timeout-minutes: 15
run: |
"${HOME}/scripts/prepare_runner.sh"
sudo rm -rf "$GITHUB_WORKSPACE"/*

- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
.github/workflows/build-kata-static-tarball-riscv64.yaml (79 lines added, vendored, new file)

@@ -0,0 +1,79 @@
name: CI | Build kata-static tarball for riscv64
on:
workflow_call:
inputs:
stage:
required: false
type: string
default: test
tarball-suffix:
required: false
type: string
push-to-registry:
required: false
type: string
default: no
commit-hash:
required: false
type: string
target-branch:
required: false
type: string
default: ""

jobs:
build-asset:
runs-on: riscv-builder
permissions:
contents: read
packages: write
id-token: write
attestations: write
strategy:
matrix:
asset:
- kernel
- virtiofsd
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

- uses: actions/checkout@v4
with:
ref: ${{ inputs.commit-hash }}
fetch-depth: 0 # This is needed in order to keep the commit ids history

- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Build ${{ matrix.asset }}
run: |
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
mkdir -p kata-build && cp "${build_dir}"/kata-static-"${KATA_ASSET}"*.tar.* kata-build/.
env:
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
PUSH_TO_REGISTRY: ${{ inputs.push-to-registry }}
ARTEFACT_REGISTRY: ghcr.io
ARTEFACT_REGISTRY_USERNAME: ${{ github.actor }}
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@v4
with:
name: kata-artifacts-arm64-${{ matrix.asset }}${{ inputs.tarball-suffix }}
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
retention-days: 15
if-no-files-found: error
@@ -44,7 +44,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -87,7 +87,7 @@ jobs:
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"

# for pushing attestations to the registry
- uses: docker/login-action@v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
with:
registry: ghcr.io
@@ -122,7 +122,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -227,7 +227,7 @@ jobs:
- coco-guest-components
- pause-image
steps:
- uses: geekyeggo/delete-artifact@v5
- uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
if: ${{ inputs.stage == 'release' }}
with:
name: kata-artifacts-s390x-${{ matrix.asset}}${{ inputs.tarball-suffix }}
@@ -238,7 +238,7 @@ jobs:
steps:
- name: Login to Kata Containers quay.io
if: ${{ inputs.push-to-registry == 'yes' }}
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: quay.io
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
@@ -279,7 +279,7 @@ jobs:
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
MEASURED_ROOTFS: yes
MEASURED_ROOTFS: no

- name: store-artifact shim-v2
uses: actions/upload-artifact@v4
.github/workflows/ci-devel.yaml (5 lines changed, vendored)

@@ -11,3 +11,8 @@ jobs:
tag: ${{ github.sha }}-dev
target-branch: ${{ github.ref_name }}
secrets: inherit

build-checks:
uses: ./.github/workflows/build-checks.yaml
with:
instance: ubuntu-22.04
.github/workflows/ci-nightly-s390x.yaml (1 line changed, vendored)

@@ -11,6 +11,7 @@ jobs:
matrix:
test_title:
- kata-vfio-ap-e2e-tests
- cc-vfio-ap-e2e-tests
- cc-se-e2e-tests
steps:
- name: Fetch a test result for {{ matrix.test_title }}
.github/workflows/ci-weekly.yaml (12 lines changed, vendored)

@@ -26,7 +26,7 @@ jobs:

publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -34,6 +34,8 @@ jobs:
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04
arch: amd64
secrets: inherit

build-and-publish-tee-confidential-unencrypted-image:
@@ -52,20 +54,20 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

- name: Login to Kata Containers ghcr.io
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Docker build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
push: true
.github/workflows/ci.yaml (75 lines changed, vendored)

@@ -30,7 +30,7 @@ jobs:

publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -38,6 +38,8 @@ jobs:
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04
arch: amd64
secrets: inherit

build-kata-static-tarball-arm64:
@@ -49,7 +51,7 @@ jobs:

publish-kata-deploy-payload-arm64:
needs: build-kata-static-tarball-arm64
uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -57,6 +59,8 @@ jobs:
tag: ${{ inputs.tag }}-arm64
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ubuntu-22.04-arm
arch: arm64
secrets: inherit

build-kata-static-tarball-s390x:
@@ -74,9 +78,17 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}

build-kata-static-tarball-riscv64:
uses: ./.github/workflows/build-kata-static-tarball-riscv64.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets: inherit

publish-kata-deploy-payload-s390x:
needs: build-kata-static-tarball-s390x
uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -84,11 +96,13 @@ jobs:
tag: ${{ inputs.tag }}-s390x
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: s390x
arch: s390x
secrets: inherit

publish-kata-deploy-payload-ppc64le:
needs: build-kata-static-tarball-ppc64le
uses: ./.github/workflows/publish-kata-deploy-payload-ppc64le.yaml
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
@@ -96,6 +110,8 @@ jobs:
tag: ${{ inputs.tag }}-ppc64le
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
runner: ppc64le
arch: ppc64le
secrets: inherit

build-and-publish-tee-confidential-unencrypted-image:
@@ -114,20 +130,20 @@ jobs:
TARGET_BRANCH: ${{ inputs.target-branch }}

- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

- name: Login to Kata Containers ghcr.io
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Docker build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
push: true
@@ -167,17 +183,17 @@ jobs:
cp /opt/kata/bin/csi-kata-directvolume src/tools/csi-kata-directvolume/bin/directvolplugin

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0

- name: Login to Kata Containers ghcr.io
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Docker build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
with:
tags: ghcr.io/kata-containers/csi-kata-directvolume:${{ inputs.pr-number }}
push: true
@@ -221,6 +237,18 @@ jobs:
target-branch: ${{ inputs.target-branch }}
secrets: inherit

run-k8s-tests-on-arm64:
if: ${{ inputs.skip-test != 'yes' }}
needs: publish-kata-deploy-payload-arm64
uses: ./.github/workflows/run-k8s-tests-on-arm64.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-arm64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-kata-coco-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs:
@@ -263,13 +291,30 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-metrics-tests:
run-kata-deploy-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: [publish-kata-deploy-payload-amd64]
uses: ./.github/workflows/run-kata-deploy-tests.yaml
with:
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-metrics-tests:
# Skip metrics tests whilst runner is broken
if: false
# if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-amd64
uses: ./.github/workflows/run-metrics.yaml
with:
tarball-suffix: -${{ inputs.tag }}
registry: ghcr.io
repo: ${{ github.repository_owner }}/kata-deploy-ci
tag: ${{ inputs.tag }}-amd64
commit-hash: ${{ inputs.commit-hash }}
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}

run-basic-amd64-tests:
@@ -281,10 +326,10 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}

run-cri-containerd-tests-s390x:
run-basic-s390x-tests:
if: ${{ inputs.skip-test != 'yes' }}
needs: build-kata-static-tarball-s390x
uses: ./.github/workflows/run-cri-containerd-tests-s390x.yaml
uses: ./.github/workflows/basic-ci-s390x.yaml
with:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
.github/workflows/codeql.yml (95 lines added, vendored, new file)

@@ -0,0 +1,95 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
schedule:
- cron: '45 0 * * 1'

jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ubuntu-24.04
permissions:
# required for all workflows
security-events: write

# required to fetch internal or private CodeQL packs
packages: read

# only required for workflows in private repositories
actions: read
contents: read

strategy:
fail-fast: false
matrix:
include:
- language: go
build-mode: manual
- language: python
build-mode: none
# CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4

# Add any setup steps before running the `github/codeql-action/init` action.
# This includes steps like installing compilers or runtimes (`actions/setup-node`
# or others). This is typically only required for manual builds.
# - name: Setup runtime (example)
#   uses: actions/setup-example@v1

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.

# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality

# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual' && matrix.language == 'go'
shell: bash
run: |
make -C src/runtime

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
.github/workflows/commit-message-check.yaml (12 lines changed, vendored)

@@ -26,7 +26,7 @@ jobs:
- name: Get PR Commits
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@v1.2.0
uses: tim-actions/get-pr-commits@c64db31d359214d244884dd68f971a110b29ab83 # v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
# Filter out revert commits
@@ -41,19 +41,19 @@ jobs:

- name: DCO Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20 # master (2020-04-28)
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Commit Body Missing Check
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-body-check@v1.0.2
uses: tim-actions/commit-body-check@d2e0e8e1f0332b3281c98867c42a2fbe25ad3f15 # v1.0.2
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

- name: Check Subject Line Length
if: ${{ (env.PR_AUTHOR != 'dependabot[bot]') && !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
pattern: '^.{0,75}(\n.*)*$'
@@ -62,7 +62,7 @@ jobs:

- name: Check Body Line Length
if: ${{ (env.PR_AUTHOR != 'dependabot[bot]') && !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
# Notes:
@@ -93,7 +93,7 @@ jobs:

- name: Check Subsystem
if: ${{ (env.PR_AUTHOR != 'dependabot[bot]') && !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') && ( success() || failure() ) }}
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
uses: tim-actions/commit-message-checker-with-regex@d6d9770051dd6460679d1cab1dcaa8cffc5c2bbd # v0.3.1
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}
pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:'
2
.github/workflows/darwin-tests.yaml
vendored
@ -18,7 +18,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.11
|
||||
go-version: 1.23.7
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Build utils
|
||||
|
2
.github/workflows/docs-url-alive-check.yaml
vendored
@ -14,7 +14,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.11
|
||||
go-version: 1.23.7
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}/kata-containers
|
||||
- name: Set env
|
||||
|
20
.github/workflows/payload-after-push.yaml
vendored
@ -32,7 +32,7 @@ jobs:
|
||||
push-to-registry: yes
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets: inherit
|
||||
|
||||
|
||||
build-assets-ppc64le:
|
||||
uses: ./.github/workflows/build-kata-static-tarball-ppc64le.yaml
|
||||
with:
|
||||
@ -43,46 +43,54 @@ jobs:
|
||||
|
||||
publish-kata-deploy-payload-amd64:
|
||||
needs: build-assets-amd64
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-amd64
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ubuntu-22.04
|
||||
arch: amd64
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-arm64:
|
||||
needs: build-assets-arm64
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-arm64
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ubuntu-22.04-arm
|
||||
arch: arm64
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-s390x:
|
||||
needs: build-assets-s390x
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-s390x.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-s390x
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: s390x
|
||||
arch: s390x
|
||||
secrets: inherit
|
||||
|
||||
publish-kata-deploy-payload-ppc64le:
|
||||
needs: build-assets-ppc64le
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-ppc64le.yaml
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
registry: quay.io
|
||||
repo: kata-containers/kata-deploy-ci
|
||||
tag: kata-containers-latest-ppc64le
|
||||
target-branch: ${{ github.ref_name }}
|
||||
runner: ppc64le
|
||||
arch: ppc64le
|
||||
secrets: inherit
|
||||
|
||||
publish-manifest:
|
||||
@ -93,7 +101,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
|
@ -1,66 +0,0 @@
|
||||
name: CI | Publish kata-deploy payload for amd64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz \
|
||||
${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
|
@ -1,66 +0,0 @@
|
||||
name: CI | Publish kata-deploy payload for arm64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-arm64${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz \
|
||||
${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
|
@ -1,66 +0,0 @@
|
||||
name: CI | Publish kata-deploy payload for s390x
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: s390x
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
||||
"$(pwd)"/kata-static.tar.xz \
|
||||
${{ inputs.registry }}/${{ inputs.repo }} ${{ inputs.tag }}
|
@ -1,4 +1,4 @@
|
||||
name: CI | Publish kata-deploy payload for ppc64le
|
||||
name: CI | Publish kata-deploy payload
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -21,21 +21,20 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
runner:
|
||||
default: 'ubuntu-22.04'
|
||||
description: The runner to execute the workflow on. Defaults to 'ubuntu-22.04'.
|
||||
required: false
|
||||
type: string
|
||||
arch:
|
||||
description: The arch of the tarball.
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: ppc64le
|
||||
runs-on: ${{ inputs.runner }}
|
||||
steps:
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
"${HOME}/scripts/prepare_runner.sh"
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
@ -47,14 +46,14 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
- name: get-kata-tarball for ${{ inputs.arch }}
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-ppc64le${{ inputs.tarball-suffix }}
|
||||
name: kata-static-tarball-${{ inputs.arch}}${{ inputs.tarball-suffix }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.registry == 'quay.io' }}
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
@ -62,13 +61,13 @@ jobs:
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
if: ${{ inputs.registry == 'ghcr.io' }}
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
- name: build-and-push-kata-payload for ${{ inputs.arch }}
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
|
4
.github/workflows/release-amd64.yaml
vendored
@ -19,13 +19,13 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
|
4
.github/workflows/release-arm64.yaml
vendored
@ -19,13 +19,13 @@ jobs:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
|
10
.github/workflows/release-ppc64le.yaml
vendored
@ -18,20 +18,14 @@ jobs:
|
||||
needs: build-kata-static-tarball-ppc64le
|
||||
runs-on: ppc64le
|
||||
steps:
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
bash "${HOME}/scripts/prepare_runner.sh"
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
|
4
.github/workflows/release-s390x.yaml
vendored
@ -19,13 +19,13 @@ jobs:
|
||||
runs-on: s390x
|
||||
steps:
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
|
6
.github/workflows/release.yaml
vendored
@ -53,13 +53,13 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
@ -183,7 +183,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install helm
|
||||
uses: azure/setup-helm@v4.2.0
|
||||
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0
|
||||
id: install
|
||||
|
||||
- name: Generate and upload helm chart tarball
|
||||
|
@ -29,15 +29,6 @@ jobs:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"
|
||||
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
bash "${HOME}/scripts/prepare_runner.sh" cri-containerd
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
@ -64,6 +55,3 @@ jobs:
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
- name: Cleanup actions for the self hosted runner
|
||||
run: bash "${HOME}/scripts/cleanup_runner.sh"
|
||||
|
@ -1,56 +0,0 @@
|
||||
name: CI | Run cri-containerd tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
run-cri-containerd:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['active']
|
||||
vmm: ['qemu', 'qemu-runtime-rs']
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-s390x${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
2
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
@ -103,7 +103,7 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@v3
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
|
83
.github/workflows/run-k8s-tests-on-arm64.yaml
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
name: CI | Run kubernetes tests on arm64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
run-k8s-tests-on-arm64:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu
|
||||
k8s:
|
||||
- kubeadm
|
||||
runs-on: arm64-k8s
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
TARGET_ARCH: "aarch64"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.k8s }}-${{ inputs.tag }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
@ -43,12 +43,6 @@ jobs:
|
||||
USING_NFD: "false"
|
||||
TARGET_ARCH: "ppc64le"
|
||||
steps:
|
||||
- name: Prepare the self-hosted runner
|
||||
timeout-minutes: 15
|
||||
run: |
|
||||
bash "${HOME}/scripts/prepare_runner.sh" kubernetes
|
||||
sudo rm -rf "$GITHUB_WORKSPACE"/*
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
@ -87,7 +87,7 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@v3
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
|
7
.github/workflows/run-kata-coco-tests.yaml
vendored
@ -222,6 +222,11 @@ jobs:
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
# Caution: the current ingress controller used to expose the KBS service
# requires many vCPUs, leaving only a few for the tests. Depending on the
# host type chosen, this can result in the creation of a cluster with
# insufficient resources.
|
||||
K8S_TEST_HOST_TYPE: "all"
|
||||
USING_NFD: "false"
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
@ -257,7 +262,7 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@v3
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
|
@ -71,7 +71,7 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
uses: nick-fields/retry@v3
|
||||
uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
|
@ -1,4 +1,4 @@
|
||||
name: CI | Run kata-deploy tests on GARM
|
||||
name: CI | Run kata-deploy tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -28,17 +28,13 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- clh
|
||||
- qemu
|
||||
k8s:
|
||||
- k0s
|
||||
- k3s
|
||||
- rke2
|
||||
# TODO: There are a couple of vmm/k8s combination failing (https://github.com/kata-containers/kata-containers/issues/9854)
|
||||
# and we will put the entire kata-deploy-tests on GARM on maintenance.
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2004-smaller
|
||||
- microk8s
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
89
.github/workflows/run-metrics.yaml
vendored
@ -2,8 +2,17 @@ name: CI | Run test metrics
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
@ -14,34 +23,7 @@ on:
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
setup-kata:
|
||||
name: Kata Setup
|
||||
runs-on: metrics
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/metrics/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
run-metrics:
|
||||
needs: setup-kata
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# all the tests are not flaky, otherwise we'll fail
|
||||
@ -54,34 +36,78 @@ jobs:
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
USING_NFD: "false"
|
||||
KUBERNETES: kubeadm
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-kubeadm
|
||||
|
||||
- name: Install check metrics
|
||||
run: bash tests/metrics/gha-run.sh install-checkmetrics
|
||||
|
||||
- name: enabling the hypervisor
|
||||
run: bash tests/metrics/gha-run.sh enabling-hypervisor
|
||||
|
||||
- name: run launch times test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-launchtimes
|
||||
|
||||
- name: run memory foot print test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-memory-usage
|
||||
|
||||
- name: run memory usage inside container test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-memory-usage-inside-container
|
||||
|
||||
- name: run blogbench test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-blogbench
|
||||
|
||||
- name: run tensorflow test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-tensorflow
|
||||
|
||||
- name: run fio test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-fio
|
||||
|
||||
- name: run iperf test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-iperf
|
||||
|
||||
- name: run latency test
|
||||
timeout-minutes: 15
|
||||
continue-on-error: true
|
||||
run: bash tests/metrics/gha-run.sh run-test-latency
|
||||
|
||||
- name: check metrics
|
||||
run: bash tests/metrics/gha-run.sh check-metrics
|
||||
|
||||
- name: make metrics tarball ${{ matrix.vmm }}
|
||||
run: bash tests/metrics/gha-run.sh make-tarball-results
|
||||
|
||||
@ -92,3 +118,8 @@ jobs:
|
||||
path: results-${{ matrix.vmm }}.tar.gz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Delete kata-deploy
|
||||
timeout-minutes: 10
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-kubeadm
|
||||
|
5
.github/workflows/shellcheck.yaml
vendored
@ -25,5 +25,6 @@ jobs:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@master
|
||||
|
||||
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master (2024-06-20)
|
||||
with:
|
||||
ignore_paths: "**/vendor/**"
|
||||
|
32
.github/workflows/shellcheck_required.yaml
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
|
||||
# https://github.com/marketplace/actions/shellcheck
|
||||
name: Shellcheck required
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
shellcheck-required:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@00b27aa7cb85167568cb48a3838b75f4265f2bca # master (2024-06-20)
|
||||
with:
|
||||
severity: error
|
||||
ignore_paths: "**/vendor/**"
|
12
.github/workflows/static-checks-self-hosted.yaml
vendored
@ -32,3 +32,15 @@ jobs:
|
||||
uses: ./.github/workflows/build-checks.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
|
||||
build-checks-preview:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
instance:
|
||||
- "riscv-builder"
|
||||
uses: ./.github/workflows/build-checks-preview-riscv64.yaml
|
||||
with:
|
||||
instance: ${{ matrix.instance }}
|
||||
|
5
.github/workflows/static-checks.yaml
vendored
@ -70,7 +70,7 @@ jobs:
|
||||
fetch-depth: 0
|
||||
- name: Install system deps
|
||||
run: |
|
||||
sudo apt-get install -y build-essential musl-tools
|
||||
sudo apt-get update && sudo apt-get install -y build-essential musl-tools
|
||||
- name: Install yq
|
||||
run: |
|
||||
sudo -E ./ci/install_yq.sh
|
||||
@ -87,6 +87,7 @@ jobs:
|
||||
${{ matrix.command }}
|
||||
env:
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_LIB_BACKTRACE: "0"
|
||||
|
||||
static-checks:
|
||||
runs-on: ubuntu-22.04
|
||||
@ -118,7 +119,7 @@ jobs:
|
||||
echo "/usr/local/go/bin" >> "$GITHUB_PATH"
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
|
||||
sudo apt-get update && sudo apt-get -y install moreutils hunspell hunspell-en-gb hunspell-en-us pandoc
|
||||
- name: Run check
|
||||
run: |
|
||||
export PATH="${PATH}:${GOPATH}/bin"
|
||||
|
26
ci/README.md
@ -172,12 +172,28 @@ For new jobs, or jobs that haven't been marked as required recently,
the criteria to be initially marked as required is ten days
of passing tests, with no relevant PR failures reported in that time.
Required jobs also need one or more nominated maintainers that are
responsible for the stability of their jobs.
responsible for the stability of their jobs. Maintainers can be registered
in [`maintainers.yml`](https://github.com/kata-containers/kata-containers.github.io/blob/main/maintainers.yml)
and will then show on the CI Dashboard.

> [!NOTE]
> We don't currently have a good place to record the job maintainers, but
> once we have this, the intention is to show it on the CI Dashboard so
> people can find the contact easily.
To add transparency to making jobs required/non-required and to keep the
GitHub UI in sync with the [Gatekeeper job](../tools/testing/gatekeeper),
the process to update a job's required state is as follows:
1. Create a PR to update `maintainers.yml`, if new maintainers are being
   declared on a CI job.
1. Create a PR which updates
   [`required-tests.yaml`](../tools/testing/gatekeeper/required-tests.yaml)
   adding the new job and listing the evidence that the job meets the
   requirements above. Ensure that all maintainers and
   @kata-containers/architecture-committee are notified to give them the
   opportunity to review the PR. See
   [#11015](https://github.com/kata-containers/kata-containers/pull/11015)
   as an example.
1. The maintainers and Architecture Committee get a chance to review the PR.
   It can be discussed in an AC meeting to get broader input.
1. Once the PR has been merged, a Kata Containers admin should be notified
   to ensure that the GitHub UI is updated to reflect the change in
   `required-tests.yaml`.

#### Expectation of required job maintainers
|
||||
|
||||
|
@ -7,16 +7,16 @@
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
runtimedir=$cidir/../src/runtime
|
||||
runtimedir=${cidir}/../src/runtime
|
||||
|
||||
build_working_packages() {
|
||||
# working packages:
|
||||
device_api=$runtimedir/pkg/device/api
|
||||
device_config=$runtimedir/pkg/device/config
|
||||
device_drivers=$runtimedir/pkg/device/drivers
|
||||
device_manager=$runtimedir/pkg/device/manager
|
||||
rc_pkg_dir=$runtimedir/pkg/resourcecontrol/
|
||||
utils_pkg_dir=$runtimedir/virtcontainers/utils
|
||||
device_api=${runtimedir}/pkg/device/api
|
||||
device_config=${runtimedir}/pkg/device/config
|
||||
device_drivers=${runtimedir}/pkg/device/drivers
|
||||
device_manager=${runtimedir}/pkg/device/manager
|
||||
rc_pkg_dir=${runtimedir}/pkg/resourcecontrol/
|
||||
utils_pkg_dir=${runtimedir}/virtcontainers/utils
|
||||
|
||||
# broken packages :( :
|
||||
#katautils=$runtimedir/pkg/katautils
|
||||
@ -24,15 +24,15 @@ build_working_packages() {
|
||||
#vc=$runtimedir/virtcontainers
|
||||
|
||||
pkgs=(
|
||||
"$device_api"
|
||||
"$device_config"
|
||||
"$device_drivers"
|
||||
"$device_manager"
|
||||
"$utils_pkg_dir"
|
||||
"$rc_pkg_dir")
|
||||
"${device_api}"
|
||||
"${device_config}"
|
||||
"${device_drivers}"
|
||||
"${device_manager}"
|
||||
"${utils_pkg_dir}"
|
||||
"${rc_pkg_dir}")
|
||||
for pkg in "${pkgs[@]}"; do
|
||||
echo building "$pkg"
|
||||
pushd "$pkg" &>/dev/null
|
||||
echo building "${pkg}"
|
||||
pushd "${pkg}" &>/dev/null
|
||||
go build
|
||||
go test
|
||||
popd &>/dev/null
|
||||
|
@ -10,7 +10,7 @@ set -o errtrace
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
[ -n "${DEBUG:-}" ] && set -o xtrace
|
||||
[[ -n "${DEBUG:-}" ]] && set -o xtrace
|
||||
|
||||
script_name=${0##*/}
|
||||
|
||||
@ -25,7 +25,7 @@ die()
|
||||
usage()
|
||||
{
|
||||
cat <<EOF
|
||||
Usage: $script_name [OPTIONS] [command] [arguments]
|
||||
Usage: ${script_name} [OPTIONS] [command] [arguments]
|
||||
|
||||
Description: Utility to expand the abilities of the GitHub CLI tool, gh.
|
||||
|
||||
@ -48,7 +48,7 @@ Examples:
|
||||
|
||||
- List issues for a Pull Request 123 in kata-containers/kata-containers repo
|
||||
|
||||
$ $script_name list-issues-for-pr 123
|
||||
$ ${script_name} list-issues-for-pr 123
|
||||
EOF
|
||||
}
|
||||
|
||||
@ -57,11 +57,12 @@ list_issues_for_pr()
|
||||
local pr="${1:-}"
|
||||
local repo="${2:-kata-containers/kata-containers}"
|
||||
|
||||
[ -z "$pr" ] && die "need PR"
|
||||
[[ -z "${pr}" ]] && die "need PR"
|
||||
|
||||
local commits=$(gh pr view ${pr} --repo ${repo} --json commits --jq .commits[].messageBody)
|
||||
local commits
|
||||
commits=$(gh pr view "${pr}" --repo "${repo}" --json commits --jq .commits[].messageBody)
|
||||
|
||||
[ -z "$commits" ] && die "cannot determine commits for PR $pr"
|
||||
[[ -z "${commits}" ]] && die "cannot determine commits for PR ${pr}"
|
||||
|
||||
# Extract the issue number(s) from the commits.
|
||||
#
|
||||
@ -78,7 +79,8 @@ list_issues_for_pr()
|
||||
#
|
||||
# "<git-commit> <git-commit-msg>"
|
||||
#
|
||||
local issues=$(echo "$commits" |\
|
||||
local issues
|
||||
issues=$(echo "${commits}" |\
|
||||
grep -v -E "^( | )" |\
|
||||
grep -i -E "fixes:* *(#*[0-9][0-9]*)" |\
|
||||
tr ' ' '\n' |\
|
||||
@ -86,16 +88,16 @@ list_issues_for_pr()
|
||||
sed 's/[.,\#]//g' |\
|
||||
sort -nu || true)
|
||||
|
||||
[ -z "$issues" ] && die "cannot determine issues for PR $pr"
|
||||
[[ -z "${issues}" ]] && die "cannot determine issues for PR ${pr}"
|
||||
|
||||
echo "# Issues linked to PR"
|
||||
echo "#"
|
||||
echo "# Fields: issue_number"
|
||||
|
||||
local issue
|
||||
echo "$issues"|while read issue
|
||||
echo "${issues}" | while read -r issue
|
||||
do
|
||||
printf "%s\n" "$issue"
|
||||
printf "%s\n" "${issue}"
|
||||
done
|
||||
}
|
||||
|
||||
@ -103,20 +105,21 @@ list_labels_for_issue()
|
||||
{
|
||||
local issue="${1:-}"
|
||||
|
||||
[ -z "$issue" ] && die "need issue number"
|
||||
[[ -z "${issue}" ]] && die "need issue number"
|
||||
|
||||
local labels=$(gh issue view ${issue} --repo kata-containers/kata-containers --json labels)
|
||||
local labels
|
||||
labels=$(gh issue view "${issue}" --repo kata-containers/kata-containers --json labels)
|
||||
|
||||
[ -z "$labels" ] && die "cannot determine labels for issue $issue"
|
||||
[[ -z "${labels}" ]] && die "cannot determine labels for issue ${issue}"
|
||||
|
||||
printf "$labels"
|
||||
echo "${labels}"
|
||||
}
|
||||
|
||||
setup()
|
||||
{
|
||||
for cmd in gh jq
|
||||
do
|
||||
command -v "$cmd" &>/dev/null || die "need command: $cmd"
|
||||
command -v "${cmd}" &>/dev/null || die "need command: ${cmd}"
|
||||
done
|
||||
}
|
||||
|
||||
@ -124,29 +127,28 @@ handle_args()
|
||||
{
|
||||
setup
|
||||
|
||||
local show_all="false"
|
||||
local opt
|
||||
|
||||
while getopts "ahr:" opt "$@"
|
||||
while getopts "hr:" opt "$@"
|
||||
do
|
||||
case "$opt" in
|
||||
a) show_all="true" ;;
|
||||
case "${opt}" in
|
||||
h) usage && exit 0 ;;
|
||||
r) repo="${OPTARG}" ;;
|
||||
*) echo "use '-h' to get list of supprted aruments" && exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $(($OPTIND - 1))
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
local repo="${repo:-kata-containers/kata-containers}"
|
||||
local cmd="${1:-}"
|
||||
|
||||
case "$cmd" in
|
||||
case "${cmd}" in
|
||||
list-issues-for-pr) ;;
|
||||
list-labels-for-issue) ;;
|
||||
|
||||
"") usage && exit 0 ;;
|
||||
*) die "invalid command: '$cmd'" ;;
|
||||
*) die "invalid command: '${cmd}'" ;;
|
||||
esac
|
||||
|
||||
# Consume the command name
|
||||
@ -155,20 +157,20 @@ handle_args()
|
||||
local issue=""
|
||||
local pr=""
|
||||
|
||||
case "$cmd" in
|
||||
case "${cmd}" in
|
||||
list-issues-for-pr)
|
||||
pr="${1:-}"
|
||||
|
||||
list_issues_for_pr "$pr" "${repo}"
|
||||
list_issues_for_pr "${pr}" "${repo}"
|
||||
;;
|
||||
|
||||
list-labels-for-issue)
|
||||
issue="${1:-}"
|
||||
|
||||
list_labels_for_issue "$issue"
|
||||
list_labels_for_issue "${issue}"
|
||||
;;
|
||||
|
||||
*) die "impossible situation: cmd: '$cmd'" ;;
|
||||
*) die "impossible situation: cmd: '${cmd}'" ;;
|
||||
esac
|
||||
|
||||
exit 0
|
||||
|
@ -8,7 +8,6 @@
|
||||
set -o errexit
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
script_name="$(basename "${BASH_SOURCE[0]}")"
|
||||
|
||||
source "${script_dir}/../tests/common.bash"
|
||||
|
||||
@ -22,11 +21,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
|
||||
|
||||
# Variables for libseccomp
|
||||
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
|
||||
if [ -z "${libseccomp_version}" ]; then
|
||||
if [[ -z "${libseccomp_version}" ]]; then
|
||||
libseccomp_version=$(get_from_kata_deps ".externals.libseccomp.version")
|
||||
fi
|
||||
libseccomp_url="${LIBSECCOMP_URL:-""}"
|
||||
if [ -z "${libseccomp_url}" ]; then
|
||||
if [[ -z "${libseccomp_url}" ]]; then
|
||||
libseccomp_url=$(get_from_kata_deps ".externals.libseccomp.url")
|
||||
fi
|
||||
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
|
||||
@ -35,11 +34,11 @@ cflags="-O2"
|
||||
|
||||
# Variables for gperf
|
||||
gperf_version="${GPERF_VERSION:-""}"
|
||||
if [ -z "${gperf_version}" ]; then
|
||||
if [[ -z "${gperf_version}" ]]; then
|
||||
gperf_version=$(get_from_kata_deps ".externals.gperf.version")
|
||||
fi
|
||||
gperf_url="${GPERF_URL:-""}"
|
||||
if [ -z "${gperf_url}" ]; then
|
||||
if [[ -z "${gperf_url}" ]]; then
|
||||
gperf_url=$(get_from_kata_deps ".externals.gperf.url")
|
||||
fi
|
||||
gperf_tarball="gperf-${gperf_version}.tar.gz"
|
||||
@ -47,7 +46,7 @@ gperf_tarball_url="${gperf_url}/${gperf_tarball}"
|
||||
|
||||
# We need to build the libseccomp library from sources to create a static library for the musl libc.
|
||||
# However, ppc64le and s390x have no musl targets in Rust. Hence, we do not set cflags for the musl libc.
|
||||
if ([ "${arch}" != "ppc64le" ] && [ "${arch}" != "s390x" ]); then
|
||||
if [[ "${arch}" != "ppc64le" ]] && [[ "${arch}" != "s390x" ]]; then
|
||||
# Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2
|
||||
cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2"
|
||||
fi
|
||||
@ -71,10 +70,10 @@ build_and_install_gperf() {
|
||||
tar -xf "${gperf_tarball}"
|
||||
pushd "gperf-${gperf_version}"
|
||||
# Unset $CC for configure, we will always use native for gperf
|
||||
CC= ./configure --prefix="${gperf_install_dir}"
|
||||
CC="" ./configure --prefix="${gperf_install_dir}"
|
||||
make
|
||||
make install
|
||||
export PATH=$PATH:"${gperf_install_dir}"/bin
|
||||
export PATH=${PATH}:"${gperf_install_dir}"/bin
|
||||
popd
|
||||
echo "Gperf installed successfully"
|
||||
}
|
||||
@ -85,7 +84,7 @@ build_and_install_libseccomp() {
|
||||
curl -sLO "${libseccomp_tarball_url}"
|
||||
tar -xf "${libseccomp_tarball}"
|
||||
pushd "libseccomp-${libseccomp_version}"
|
||||
[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
[[ "${arch}" == $(uname -m) ]] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
|
||||
make
|
||||
make install
|
||||
@ -97,11 +96,11 @@ main() {
|
||||
local libseccomp_install_dir="${1:-}"
|
||||
local gperf_install_dir="${2:-}"
|
||||
|
||||
if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then
|
||||
if [[ -z "${libseccomp_install_dir}" ]] || [[ -z "${gperf_install_dir}" ]]; then
|
||||
die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
|
||||
fi
|
||||
|
||||
pushd "$workdir"
|
||||
pushd "${workdir}"
|
||||
# gperf is required for building the libseccomp.
|
||||
build_and_install_gperf
|
||||
build_and_install_libseccomp
|
||||
|
@ -5,20 +5,20 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
[ -n "$DEBUG" ] && set -o xtrace
|
||||
[[ -n "${DEBUG}" ]] && set -o xtrace
|
||||
|
||||
# If we fail for any reason a message will be displayed
|
||||
die() {
|
||||
msg="$*"
|
||||
echo "ERROR: $msg" >&2
|
||||
echo "ERROR: ${msg}" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
function verify_yq_exists() {
|
||||
local yq_path=$1
|
||||
local yq_version=$2
|
||||
local expected="yq (https://github.com/mikefarah/yq/) version $yq_version"
|
||||
if [ -x "${yq_path}" ] && [ "$($yq_path --version)"X == "$expected"X ]; then
|
||||
local expected="yq (https://github.com/mikefarah/yq/) version ${yq_version}"
|
||||
if [[ -x "${yq_path}" ]] && [[ "$(${yq_path} --version)"X == "${expected}"X ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
@ -34,20 +34,20 @@ function install_yq() {
|
||||
local yq_path=""
|
||||
INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
|
||||
|
||||
if [ "${INSTALL_IN_GOPATH}" == "true" ]; then
|
||||
if [[ "${INSTALL_IN_GOPATH}" == "true" ]]; then
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
yq_path="${GOPATH}/bin/yq"
|
||||
else
|
||||
yq_path="/usr/local/bin/yq"
|
||||
fi
|
||||
if verify_yq_exists "$yq_path" "$yq_version"; then
|
||||
if verify_yq_exists "${yq_path}" "${yq_version}"; then
|
||||
echo "yq is already installed in correct version"
|
||||
return
|
||||
fi
|
||||
if [ "${yq_path}" == "/usr/local/bin/yq" ]; then
|
||||
if [[ "${yq_path}" == "/usr/local/bin/yq" ]]; then
|
||||
# Check if we need sudo to install yq
|
||||
if [ ! -w "/usr/local/bin" ]; then
|
||||
if [[ ! -w "/usr/local/bin" ]]; then
|
||||
# Check if we have sudo privileges
|
||||
if ! sudo -n true 2>/dev/null; then
|
||||
die "Please provide sudo privileges to install yq"
|
||||
@ -76,7 +76,7 @@ function install_yq() {
|
||||
# If we're on an apple silicon machine, just assign amd64.
|
||||
# The version of yq we use doesn't have a darwin arm build,
|
||||
# but Rosetta can come to the rescue here.
|
||||
if [ $goos == "Darwin" ]; then
|
||||
if [[ ${goos} == "Darwin" ]]; then
|
||||
goarch=amd64
|
||||
else
|
||||
goarch=arm64
|
||||
@ -107,8 +107,7 @@ function install_yq() {
|
||||
|
||||
## NOTE: ${var,,} => gives lowercase value of var
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
|
||||
${precmd} curl -o "${yq_path}" -LSsf "${yq_url}"
|
||||
[ $? -ne 0 ] && die "Download ${yq_url} failed"
|
||||
${precmd} curl -o "${yq_path}" -LSsf "${yq_url}" || die "Download ${yq_url} failed"
|
||||
${precmd} chmod +x "${yq_path}"
|
||||
|
||||
if ! command -v "${yq_path}" >/dev/null; then
|
||||
|
@ -147,3 +147,11 @@ all images with a single MCP update instead of per-image MCP update.
You can check the bisection progress during/after execution by running
``bisecter log`` from the current directory. Before starting a new
bisection you need to execute ``bisecter reset``.
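
For reference, a minimal sequence (a sketch: only ``bisecter log`` and
``bisecter reset`` come from the text above; the comments are illustrative)::

    # run from the directory where the bisection was started
    bisecter log      # review the progress of the bisection so far
    bisecter reset    # clear the recorded state before starting a new bisection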


Peer pods
=========

It's possible to run similar testing on peer-pods using cloud-api-adaptor.
Our CI configuration to run inside Azure's OCP is in ``peer-pods-azure.sh``
and can be used to replace the ``test.sh`` step in the snippets above.
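
As an illustration only (the exact invocation and any arguments are
assumptions, not taken from this document), the substitution amounts to::

    # where the earlier snippets run the test step as:
    #   ./test.sh
    # a peer-pods run on Azure OCP uses the cloud-api-adaptor variant instead:
    ./peer-pods-azure.sh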
@ -3,25 +3,28 @@
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
if [ "$#" -gt 2 ] || [ "$#" -lt 1 ] ; then
|
||||
if [[ "$#" -gt 2 ]] || [[ "$#" -lt 1 ]] ; then
|
||||
echo "Usage: $0 GOOD [BAD]"
|
||||
echo "Prints list of available kata-deploy-ci tags between GOOD and BAD commits (by default BAD is the latest available tag)"
|
||||
exit 255
|
||||
fi
|
||||
GOOD="$1"
|
||||
[ -n "$2" ] && BAD="$2"
|
||||
[[ -n "$2" ]] && BAD="$2"
|
||||
ARCH=amd64
|
||||
REPO="quay.io/kata-containers/kata-deploy-ci"
|
||||
|
||||
TAGS=$(skopeo list-tags "docker://$REPO")
|
||||
TAGS=$(skopeo list-tags "docker://${REPO}")
|
||||
# For testing
|
||||
#echo "$TAGS" > tags
|
||||
#TAGS=$(cat tags)
|
||||
# Only amd64
|
||||
TAGS=$(echo "$TAGS" | jq '.Tags' | jq "map(select(endswith(\"$ARCH\")))" | jq -r '.[]')
|
||||
TAGS=$(echo "${TAGS}" | jq '.Tags' | jq "map(select(endswith(\"${ARCH}\")))" | jq -r '.[]')
|
||||
# Sort by git
|
||||
SORTED=""
|
||||
[ -n "$BAD" ] && LOG_ARGS="$GOOD~1..$BAD" || LOG_ARGS="$GOOD~1.."
|
||||
for TAG in $(git log --merges --pretty=format:%H --reverse $LOG_ARGS); do
|
||||
[[ "$TAGS" =~ "$TAG" ]] && SORTED+="
|
||||
kata-containers-$TAG-$ARCH"
|
||||
[[ -n "${BAD}" ]] && LOG_ARGS="${GOOD}~1..${BAD}" || LOG_ARGS="${GOOD}~1.."
|
||||
for TAG in $(git log --merges --pretty=format:%H --reverse "${LOG_ARGS}"); do
|
||||
[[ "${TAGS}" =~ ${TAG} ]] && SORTED+="
|
||||
kata-containers-${TAG}-${ARCH}"
|
||||
done
|
||||
# Comma separated tags with repo
|
||||
echo "$SORTED" | tail -n +2 | sed -e "s@^@$REPO:@" | paste -s -d, -
|
||||
echo "${SORTED}" | tail -n +2 | sed -e "s@^@${REPO}:@" | paste -s -d, -
|
||||
|
@ -7,11 +7,14 @@
|
||||
# This script tries to remove most of the resources added by the `test.sh` script
|
||||
# from the cluster.
|
||||
|
||||
scripts_dir=$(dirname $0)
|
||||
scripts_dir=$(dirname "$0")
|
||||
deployments_dir=${scripts_dir}/cluster/deployments
|
||||
configs_dir=${scripts_dir}/configs
|
||||
|
||||
source ${scripts_dir}/lib.sh
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${scripts_dir}/lib.sh"
|
||||
|
||||
# Set your katacontainers repo dir location
|
||||
[[ -z "${katacontainers_repo_dir}" ]] && echo "Please set katacontainers_repo_dir variable to your kata repo"
|
||||
|
||||
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
|
||||
# workers.
|
||||
@ -36,24 +39,23 @@ oc delete -f "${scripts_dir}/smoke/http-server.yaml"
|
||||
|
||||
# Delete test.sh resources
|
||||
oc delete -f "${deployments_dir}/relabel_selinux.yaml"
|
||||
if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
|
||||
if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
|
||||
oc delete -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
|
||||
oc delete -f "${deployments_dir}/workaround-9206-crio.yaml"
|
||||
fi
|
||||
[ ${SELINUX_PERMISSIVE} == "yes" ] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
|
||||
[[ ${SELINUX_PERMISSIVE} == "yes" ]] && oc delete -f "${deployments_dir}/machineconfig_selinux.yaml.in"
|
||||
|
||||
# Delete kata-containers
|
||||
pushd "$katacontainers_repo_dir/tools/packaging/kata-deploy"
|
||||
pushd "${katacontainers_repo_dir}/tools/packaging/kata-deploy" || { echo "Failed to push to ${katacontainers_repo_dir}/tools/packaging/kata-deploy"; exit 125; }
|
||||
oc delete -f kata-deploy/base/kata-deploy.yaml
|
||||
oc -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
|
||||
oc apply -f kata-cleanup/base/kata-cleanup.yaml
|
||||
echo "Wait for all related pods to be gone"
|
||||
( repeats=1; for i in $(seq 1 600); do
|
||||
( repeats=1; for _ in $(seq 1 600); do
|
||||
oc get pods -l name="kubelet-kata-cleanup" --no-headers=true -n kube-system 2>&1 | grep "No resources found" -q && ((repeats++)) || repeats=1
|
||||
[ "$repeats" -gt 5 ] && echo kata-cleanup finished && break
|
||||
[[ "${repeats}" -gt 5 ]] && echo kata-cleanup finished && break
|
||||
sleep 1
|
||||
done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit -1; }
|
||||
done) || { echo "There are still some kata-cleanup related pods after 600 iterations"; oc get all -n kube-system; exit 1; }
|
||||
oc delete -f kata-cleanup/base/kata-cleanup.yaml
|
||||
oc delete -f kata-rbac/base/kata-rbac.yaml
|
||||
oc delete -f runtimeclasses/kata-runtimeClasses.yaml
|
||||
|
||||
|
@ -13,8 +13,9 @@ set -e
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
script_dir="$(realpath $(dirname $0))"
|
||||
script_dir="$(realpath "$(dirname "$0")")"
|
||||
webhook_dir="${script_dir}/../../../tools/testing/kata-webhook"
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${script_dir}/../lib.sh"
|
||||
KATA_RUNTIME=${KATA_RUNTIME:-kata-ci}
|
||||
|
||||
@ -23,14 +24,11 @@ pushd "${webhook_dir}" >/dev/null
|
||||
#
|
||||
info "Builds the kata-webhook"
|
||||
./create-certs.sh
|
||||
info "Override our KATA_RUNTIME ConfigMap"
|
||||
sed -i deploy/webhook.yaml -e "s/runtime_class: .*$/runtime_class: ${KATA_RUNTIME}/g"
|
||||
info "Deploys the kata-webhook"
|
||||
oc apply -f deploy/
|
||||
|
||||
info "Override our KATA_RUNTIME ConfigMap"
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" \
|
||||
envsubst < "${script_dir}/deployments/configmap_kata-webhook.yaml.in" \
|
||||
| oc apply -f -
|
||||
|
||||
# Check the webhook was deployed and is working.
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" ./webhook-check.sh
|
||||
popd >/dev/null
|
||||
|
@ -1,12 +0,0 @@
|
||||
# Copyright (c) 2021 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Apply customizations to the kata-webhook.
|
||||
#
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kata-webhook
|
||||
data:
|
||||
runtime_class: ${RUNTIME_CLASS}
|
@ -7,11 +7,15 @@
|
||||
# This script installs the built kata-containers in the test cluster,
|
||||
# and configure a runtime.
|
||||
|
||||
scripts_dir=$(dirname $0)
|
||||
scripts_dir=$(dirname "$0")
|
||||
deployments_dir=${scripts_dir}/deployments
|
||||
configs_dir=${scripts_dir}/configs
|
||||
|
||||
source ${scripts_dir}/../lib.sh
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${scripts_dir}/../lib.sh"
|
||||
|
||||
# Set your katacontainers repo dir location
|
||||
[[ -z "${katacontainers_repo_dir}" ]] && echo "Please set katacontainers_repo_dir variable to your kata repo"
|
||||
|
||||
# Set to 'yes' if you want to configure SELinux to permissive on the cluster
|
||||
# workers.
|
||||
@ -40,18 +44,18 @@ WORKAROUND_9206_CRIO=${WORKAROUND_9206_CRIO:-no}
|
||||
#
|
||||
apply_kata_deploy() {
|
||||
local deploy_file="tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
|
||||
pushd "$katacontainers_repo_dir"
|
||||
sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "$deploy_file"
|
||||
pushd "${katacontainers_repo_dir}" || die
|
||||
sed -ri "s#(\s+image:) .*#\1 ${KATA_DEPLOY_IMAGE}#" "${deploy_file}"
|
||||
|
||||
info "Applying kata-deploy"
|
||||
oc apply -f tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
|
||||
oc label --overwrite ns kube-system pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
|
||||
oc apply -f "$deploy_file"
|
||||
oc apply -f "${deploy_file}"
|
||||
oc -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
|
||||
|
||||
info "Adding the kata runtime classes"
|
||||
oc apply -f tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml
|
||||
popd
|
||||
popd || die
|
||||
}
|
||||
|
||||
|
||||
@ -64,32 +68,32 @@ wait_for_reboot() {
|
||||
local delta="${1:-900}"
|
||||
local sleep_time=60
|
||||
declare -A BOOTIDS
|
||||
local workers=($(oc get nodes | \
|
||||
awk '{if ($3 == "worker") { print $1 } }'))
|
||||
local workers
|
||||
mapfile -t workers < <(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
|
||||
# Get the boot ID to compared it changed over time.
|
||||
for node in ${workers[@]}; do
|
||||
BOOTIDS[$node]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
|
||||
node/$node)
|
||||
echo "Wait $node reboot"
|
||||
for node in "${workers[@]}"; do
|
||||
BOOTIDS[${node}]=$(oc get -o jsonpath='{.status.nodeInfo.bootID}'\
|
||||
"node/${node}")
|
||||
echo "Wait ${node} reboot"
|
||||
done
|
||||
|
||||
echo "Set timeout to $delta seconds"
|
||||
echo "Set timeout to ${delta} seconds"
|
||||
timer_start=$(date +%s)
|
||||
while [ ${#workers[@]} -gt 0 ]; do
|
||||
sleep $sleep_time
|
||||
while [[ ${#workers[@]} -gt 0 ]]; do
|
||||
sleep "${sleep_time}"
|
||||
now=$(date +%s)
|
||||
if [ $(($timer_start + $delta)) -lt $now ]; then
|
||||
if [[ $((timer_start + delta)) -lt ${now} ]]; then
|
||||
echo "Timeout: not all workers rebooted"
|
||||
return 1
|
||||
fi
|
||||
echo "Checking after $(($now - $timer_start)) seconds"
|
||||
for i in ${!workers[@]}; do
|
||||
echo "Checking after $((now - timer_start)) seconds"
|
||||
for i in "${!workers[@]}"; do
|
||||
current_id=$(oc get \
|
||||
-o jsonpath='{.status.nodeInfo.bootID}' \
|
||||
node/${workers[i]})
|
||||
if [ "$current_id" != ${BOOTIDS[${workers[i]}]} ]; then
|
||||
"node/${workers[i]}")
|
||||
if [[ "${current_id}" != "${BOOTIDS[${workers[i]}]}" ]]; then
|
||||
echo "${workers[i]} rebooted"
|
||||
unset workers[i]
|
||||
unset "workers[i]"
|
||||
fi
|
||||
done
|
||||
done
|
||||
@ -102,32 +106,34 @@ wait_mcp_update() {
|
||||
# and none are degraded.
|
||||
local ready_count=0
|
||||
local degraded_count=0
|
||||
local machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
|
||||
local machine_count
|
||||
machine_count=$(oc get mcp worker -o jsonpath='{.status.machineCount}')
|
||||
|
||||
if [[ -z "$machine_count" && "$machine_count" -lt 1 ]]; then
|
||||
if [[ -z "${machine_count}" && "${machine_count}" -lt 1 ]]; then
|
||||
warn "Unabled to obtain the machine count"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Set timeout to $delta seconds"
|
||||
local deadline=$(($(date +%s) + $delta))
|
||||
echo "Set timeout to ${delta} seconds"
|
||||
local deadline=$(($(date +%s) + delta))
|
||||
local now
|
||||
# The ready count might not have changed yet, so wait a little.
|
||||
while [[ "$ready_count" != "$machine_count" && \
|
||||
"$degraded_count" == 0 ]]; do
|
||||
while [[ "${ready_count}" != "${machine_count}" && \
|
||||
"${degraded_count}" == 0 ]]; do
|
||||
# Let's check it hit the timeout (or not).
|
||||
local now=$(date +%s)
|
||||
if [ $deadline -lt $now ]; then
|
||||
now=$(date +%s)
|
||||
if [[ ${deadline} -lt ${now} ]]; then
|
||||
echo "Timeout: not all workers updated" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep $sleep_time
|
||||
sleep "${sleep_time}"
|
||||
ready_count=$(oc get mcp worker \
|
||||
-o jsonpath='{.status.readyMachineCount}')
|
||||
degraded_count=$(oc get mcp worker \
|
||||
-o jsonpath='{.status.degradedMachineCount}')
|
||||
echo "check machineconfigpool - ready_count: $ready_count degraded_count: $degraded_count"
|
||||
echo "check machineconfigpool - ready_count: ${ready_count} degraded_count: ${degraded_count}"
|
||||
done
|
||||
[ $degraded_count -eq 0 ]
|
||||
[[ ${degraded_count} -eq 0 ]]
|
||||
}
|
||||
|
||||
# Enable the RHCOS extension for the Sandboxed Containers.
|
||||
@ -135,10 +141,10 @@ wait_mcp_update() {
|
||||
enable_sandboxedcontainers_extension() {
|
||||
info "Enabling the RHCOS extension for Sandboxed Containers"
|
||||
local deployment_file="${deployments_dir}/machineconfig_sandboxedcontainers_extension.yaml"
|
||||
oc apply -f ${deployment_file}
|
||||
oc get -f ${deployment_file} || \
|
||||
oc apply -f "${deployment_file}"
|
||||
oc get -f "${deployment_file}" || \
|
||||
die "Sandboxed Containers extension machineconfig not found"
|
||||
wait_mcp_update || die "Failed to update the machineconfigpool"
|
||||
wait_mcp_update 3600 || die "Failed to update the machineconfigpool"
|
||||
}
|
||||
|
||||
# Print useful information for debugging.
|
||||
@ -148,8 +154,8 @@ enable_sandboxedcontainers_extension() {
|
||||
debug_pod() {
|
||||
local pod="$1"
|
||||
info "Debug pod: ${pod}"
|
||||
oc describe pods "$pod"
|
||||
oc logs "$pod"
|
||||
oc describe pods "${pod}"
|
||||
oc logs "${pod}"
|
||||
}
|
||||
|
||||
# Wait for all pods of the app label to contain expected message
|
||||
@ -166,31 +172,32 @@ wait_for_app_pods_message() {
|
||||
local message="$3"
|
||||
local timeout="$4"
|
||||
local namespace="$5"
|
||||
[ -z "$pod_count" ] && pod_count=1
|
||||
[ -z "$timeout" ] && timeout=60
|
||||
[ -n "$namespace" ] && namespace=" -n $namespace "
|
||||
[[ -z "${pod_count}" ]] && pod_count=1
|
||||
[[ -z "${timeout}" ]] && timeout=60
|
||||
[[ -n "${namespace}" ]] && namespace=" -n ${namespace} "
|
||||
local pod
|
||||
local pods
|
||||
local i
|
||||
SECONDS=0
|
||||
while :; do
|
||||
pods=($(oc get pods -l app="$app" --no-headers=true $namespace | awk '{print $1}'))
|
||||
[ "${#pods}" -ge "$pod_count" ] && break
|
||||
if [ "$SECONDS" -gt "$timeout" ]; then
|
||||
echo "Unable to find ${pod_count} pods for '-l app=\"$app\"' in ${SECONDS}s (${pods[@]})"
|
||||
return -1
|
||||
mapfile -t pods < <(oc get pods -l app="${app}" --no-headers=true "${namespace}" | awk '{print $1}')
|
||||
[[ "${#pods}" -ge "${pod_count}" ]] && break
|
||||
if [[ "${SECONDS}" -gt "${timeout}" ]]; then
|
||||
printf "Unable to find ${pod_count} pods for '-l app=\"${app}\"' in ${SECONDS}s (%s)" "${pods[@]}"
|
||||
return 1
|
||||
fi
|
||||
done
|
||||
local log
|
||||
for pod in "${pods[@]}"; do
|
||||
while :; do
|
||||
local log=$(oc logs $namespace "$pod")
|
||||
echo "$log" | grep "$message" -q && echo "Found $(echo "$log" | grep "$message") in $pod's log ($SECONDS)" && break;
|
||||
if [ "$SECONDS" -gt "$timeout" ]; then
|
||||
echo -n "Message '$message' not present in '${pod}' pod of the '-l app=\"$app\"' "
|
||||
echo "pods after ${SECONDS}s (${pods[@]})"
|
||||
echo "Pod $pod's output so far:"
|
||||
echo "$log"
|
||||
return -1
|
||||
log=$(oc logs "${namespace}" "${pod}")
|
||||
echo "${log}" | grep "${message}" -q && echo "Found $(echo "${log}" | grep "${message}") in ${pod}'s log (${SECONDS})" && break;
|
||||
if [[ "${SECONDS}" -gt "${timeout}" ]]; then
|
||||
echo -n "Message '${message}' not present in '${pod}' pod of the '-l app=\"${app}\"' "
|
||||
printf "pods after ${SECONDS}s :(%s)\n" "${pods[@]}"
|
||||
echo "Pod ${pod}'s output so far:"
|
||||
echo "${log}"
|
||||
return 1
|
||||
fi
|
||||
sleep 1;
|
||||
done
|
||||
@ -200,46 +207,45 @@ wait_for_app_pods_message() {
|
||||
oc config set-context --current --namespace=default
|
||||
|
||||
worker_nodes=$(oc get nodes | awk '{if ($3 == "worker") { print $1 } }')
|
||||
num_nodes=$(echo $worker_nodes | wc -w)
|
||||
[ $num_nodes -ne 0 ] || \
|
||||
num_nodes=$(echo "${worker_nodes}" | wc -w)
|
||||
[[ ${num_nodes} -ne 0 ]] || \
|
||||
die "No worker nodes detected. Something is wrong with the cluster"
|
||||
|
||||
if [ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]; then
|
||||
if [[ "${KATA_WITH_SYSTEM_QEMU}" == "yes" ]]; then
|
||||
# QEMU is deployed on the workers via RCHOS extension.
|
||||
enable_sandboxedcontainers_extension
|
||||
oc apply -f ${deployments_dir}/configmap_installer_qemu.yaml
|
||||
oc apply -f "${deployments_dir}/configmap_installer_qemu.yaml"
|
||||
fi
|
||||
|
||||
if [ "${KATA_WITH_HOST_KERNEL}" == "yes" ]; then
|
||||
oc apply -f ${deployments_dir}/configmap_installer_kernel.yaml
|
||||
if [[ "${KATA_WITH_HOST_KERNEL}" == "yes" ]]; then
|
||||
oc apply -f "${deployments_dir}/configmap_installer_kernel.yaml"
|
||||
fi
|
||||
|
||||
apply_kata_deploy
|
||||
|
||||
# Set SELinux to permissive mode
|
||||
if [ ${SELINUX_PERMISSIVE} == "yes" ]; then
|
||||
if [[ ${SELINUX_PERMISSIVE} == "yes" ]]; then
|
||||
info "Configuring SELinux"
|
||||
if [ -z "$SELINUX_CONF_BASE64" ]; then
|
||||
export SELINUX_CONF_BASE64=$(echo \
|
||||
$(cat $configs_dir/selinux.conf|base64) | \
|
||||
sed -e 's/\s//g')
|
||||
if [[ -z "${SELINUX_CONF_BASE64}" ]]; then
|
||||
SELINUX_CONF_BASE64=$(base64 -w0 < "${configs_dir}/selinux.conf")
|
||||
export SELINUX_CONF_BASE64
|
||||
fi
|
||||
envsubst < ${deployments_dir}/machineconfig_selinux.yaml.in | \
|
||||
envsubst < "${deployments_dir}"/machineconfig_selinux.yaml.in | \
|
||||
oc apply -f -
|
||||
oc get machineconfig/51-kata-selinux || \
|
||||
die "SELinux machineconfig not found"
|
||||
# The new SELinux configuration will trigger another reboot.
|
||||
wait_for_reboot
|
||||
wait_for_reboot 900
|
||||
fi
|
||||
|
||||
if [[ "$WORKAROUND_9206_CRIO" == "yes" ]]; then
|
||||
if [[ "${WORKAROUND_9206_CRIO}" == "yes" ]]; then
|
||||
info "Applying workaround to enable skip_mount_home in crio on OCP 4.13"
|
||||
oc apply -f "${deployments_dir}/workaround-9206-crio.yaml"
|
||||
oc apply -f "${deployments_dir}/workaround-9206-crio-ds.yaml"
|
||||
wait_for_app_pods_message workaround-9206-crio-ds "$num_nodes" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
|
||||
wait_for_app_pods_message workaround-9206-crio-ds "${num_nodes}" "Config file present" 1200 || echo "Failed to apply the workaround, proceeding anyway..."
|
||||
fi
|
||||
|
||||
# FIXME: Remove when https://github.com/kata-containers/kata-containers/pull/8417 is resolved
|
||||
# Selinux context is currently not handled by kata-deploy
|
||||
oc apply -f ${deployments_dir}/relabel_selinux.yaml
|
||||
wait_for_app_pods_message restorecon "$num_nodes" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
|
||||
oc apply -f "${deployments_dir}/relabel_selinux.yaml"
|
||||
wait_for_app_pods_message restorecon "${num_nodes}" "NSENTER_FINISHED_WITH:" 120 "kube-system" || echo "Failed to treat selinux, proceeding anyway..."
|
||||
|
@ -10,11 +10,12 @@ if command -v go > /dev/null; then
|
||||
export GOPATH=${GOPATH:-$(go env GOPATH)}
|
||||
else
|
||||
# if go isn't installed, set default location for GOPATH
|
||||
export GOPATH="${GOPATH:-$HOME/go}"
|
||||
export GOPATH="${GOPATH:-${HOME}/go}"
|
||||
fi
|
||||
|
||||
lib_dir=$(dirname "${BASH_SOURCE[0]}")
|
||||
source "$lib_dir/../../tests/common.bash"
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${lib_dir}/../../tests/common.bash"
|
||||
|
||||
export katacontainers_repo=${katacontainers_repo:="github.com/kata-containers/kata-containers"}
|
||||
export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
|
||||
|
ci/openshift-ci/peer-pods-azure.sh (new file, 217 lines)
@ -0,0 +1,217 @@
|
||||
#!/bin/bash -e
|
||||
# Setup peer-pods using cloud-api-adaptor on azure
|
||||
#
|
||||
# WARNING: When running outside "eastus" region this script creates a new
|
||||
# resource group in "eastus" region and peers the network. You
|
||||
# have to remove these manually (or use temporary accounts)
|
||||
|
||||
###############################
|
||||
# Disable security to allow e2e
|
||||
###############################
|
||||
|
||||
# Disable security
|
||||
oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts
|
||||
oc adm policy add-scc-to-group anyuid system:authenticated system:serviceaccounts
|
||||
oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/warn=baseline pod-security.kubernetes.io/audit=baseline
|
||||
|
||||
|
||||
####################################
|
||||
# Get basic credentials from cluster
|
||||
####################################
|
||||
|
||||
oc -n kube-system get secret azure-credentials -o json > azure_credentials.json
|
||||
AZURE_CLIENT_ID="$(jq -r .data.azure_client_id azure_credentials.json|base64 -d)"
|
||||
AZURE_CLIENT_SECRET="$(jq -r .data.azure_client_secret azure_credentials.json|base64 -d)"
|
||||
AZURE_TENANT_ID="$(jq -r .data.azure_tenant_id azure_credentials.json|base64 -d)"
|
||||
AZURE_SUBSCRIPTION_ID="$(jq -r .data.azure_subscription_id azure_credentials.json|base64 -d)"
|
||||
rm -f azure_credentials.json
|
||||
AZURE_RESOURCE_GROUP=$(oc get infrastructure/cluster -o jsonpath='{.status.platformStatus.azure.resourceGroupName}')
|
||||
az login --service-principal -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}"
|
||||
|
||||
AZURE_VNET_NAME=$(az network vnet list --resource-group "${AZURE_RESOURCE_GROUP}" --query "[].{Name:name}" --output tsv)
|
||||
AZURE_SUBNET_NAME=$(az network vnet subnet list --resource-group "${AZURE_RESOURCE_GROUP}" --vnet-name "${AZURE_VNET_NAME}" --query "[].{Id:name} | [? contains(Id, 'worker')]" --output tsv)
|
||||
AZURE_SUBNET_ID=$(az network vnet subnet list --resource-group "${AZURE_RESOURCE_GROUP}" --vnet-name "${AZURE_VNET_NAME}" --query "[].{Id:id} | [? contains(Id, 'worker')]" --output tsv)
|
||||
AZURE_REGION=$(az group show --resource-group "${AZURE_RESOURCE_GROUP}" --query "{Location:location}" --output tsv)
|
||||
|
||||
# Create workload identity
|
||||
AZURE_WORKLOAD_IDENTITY_NAME="caa-${AZURE_CLIENT_ID}"
|
||||
az identity create --name "${AZURE_WORKLOAD_IDENTITY_NAME}" --resource-group "${AZURE_RESOURCE_GROUP}" --location "${AZURE_REGION}"
|
||||
USER_ASSIGNED_CLIENT_ID="$(az identity show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_WORKLOAD_IDENTITY_NAME}" --query 'clientId' -otsv)"
|
||||
|
||||
|
||||
#############################
|
||||
# Ensure we can run in eastus
|
||||
#############################
|
||||
|
||||
PP_REGION=eastus
|
||||
if [[ "${AZURE_REGION}" == "${PP_REGION}" ]]; then
|
||||
echo "Using the current region ${AZURE_REGION}"
|
||||
PP_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP}"
|
||||
PP_VNET_NAME="${AZURE_VNET_NAME}"
|
||||
PP_SUBNET_NAME="${AZURE_SUBNET_NAME}"
|
||||
PP_SUBNET_ID="${AZURE_SUBNET_ID}"
|
||||
else
|
||||
echo "Creating peering between ${AZURE_REGION} and ${PP_REGION}"
|
||||
PP_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP}-eastus"
|
||||
PP_VNET_NAME="${AZURE_VNET_NAME}-eastus"
|
||||
PP_SUBNET_NAME="${AZURE_SUBNET_NAME}-eastus"
|
||||
PP_NSG_NAME="${AZURE_VNET_NAME}-nsg-eastus"
|
||||
az group create --name "${PP_RESOURCE_GROUP}" --location "${PP_REGION}"
|
||||
az network vnet create --resource-group "${PP_RESOURCE_GROUP}" --name "${PP_VNET_NAME}" --location "${PP_REGION}" --address-prefixes 10.2.0.0/16 --subnet-name "${PP_SUBNET_NAME}" --subnet-prefixes 10.2.1.0/24
|
||||
az network nsg create --resource-group "${PP_RESOURCE_GROUP}" --name "${PP_NSG_NAME}" --location "${PP_REGION}"
|
||||
az network vnet subnet update --resource-group "${PP_RESOURCE_GROUP}" --vnet-name "${PP_VNET_NAME}" --name "${PP_SUBNET_NAME}" --network-security-group "${PP_NSG_NAME}"
|
||||
AZURE_VNET_ID=$(az network vnet show --resource-group "${AZURE_RESOURCE_GROUP}" --name "${AZURE_VNET_NAME}" --query id --output tsv)
|
||||
PP_VNET_ID=$(az network vnet show --resource-group "${PP_RESOURCE_GROUP}" --name "${PP_VNET_NAME}" --query id --output tsv)
|
||||
az network vnet peering create --name westus-to-eastus --resource-group "${AZURE_RESOURCE_GROUP}" --vnet-name "${AZURE_VNET_NAME}" --remote-vnet "${PP_VNET_ID}" --allow-vnet-access
|
||||
az network vnet peering create --name eastus-to-westus --resource-group "${PP_RESOURCE_GROUP}" --vnet-name "${PP_VNET_NAME}" --remote-vnet "${AZURE_VNET_ID}" --allow-vnet-access
|
||||
PP_SUBNET_ID=$(az network vnet subnet list --resource-group "${PP_RESOURCE_GROUP}" --vnet-name "${PP_VNET_NAME}" --query "[].{Id:id} | [? contains(Id, 'worker')]" --output tsv)
|
||||
fi
|
||||
|
||||
# Peer-pod requires gateway
|
||||
az network public-ip create \
|
||||
--resource-group "${PP_RESOURCE_GROUP}" \
|
||||
--name MyPublicIP \
|
||||
--sku Standard \
|
||||
--allocation-method Static
|
||||
az network nat gateway create \
|
||||
--resource-group "${PP_RESOURCE_GROUP}" \
|
||||
--name MyNatGateway \
|
||||
--public-ip-addresses MyPublicIP \
|
||||
--idle-timeout 10
|
||||
az network vnet subnet update \
|
||||
--resource-group "${PP_RESOURCE_GROUP}" \
|
||||
--vnet-name "${PP_VNET_NAME}" \
|
||||
--name "${PP_SUBNET_NAME}" \
|
||||
--nat-gateway MyNatGateway
|
||||
|
||||
|
||||
##########################################
|
||||
# Setup CAA
|
||||
#########################################
|
||||
|
||||
# Label the nodes
|
||||
for NODE_NAME in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}'); do [[ "${NODE_NAME}" =~ 'worker' ]] && kubectl label node "${NODE_NAME}" node.kubernetes.io/worker=; done
|
||||
|
||||
# CAA artifacts
|
||||
CAA_IMAGE="quay.io/confidential-containers/cloud-api-adaptor"
|
||||
TAGS="$(curl https://quay.io/api/v1/repository/confidential-containers/cloud-api-adaptor/tag/?onlyActiveTags=true)"
|
||||
DIGEST=$(echo "${TAGS}" | jq -r '.tags[] | select(.name | contains("latest-amd64")) | .manifest_digest')
|
||||
CAA_TAG="$(echo "${TAGS}" | jq -r '.tags[] | select(.manifest_digest | contains("'"${DIGEST}"'")) | .name' | grep -v "latest")"
|
||||
|
||||
# Get latest PP image
|
||||
SUCCESS_TIME=$(curl -s \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"https://api.github.com/repos/confidential-containers/cloud-api-adaptor/actions/workflows/azure-nightly-build.yml/runs?status=success" \
|
||||
| jq -r '.workflow_runs[0].updated_at')
|
||||
PP_IMAGE_ID="/CommunityGalleries/cocopodvm-d0e4f35f-5530-4b9c-8596-112487cdea85/Images/podvm_image0/Versions/$(date -u -jf "%Y-%m-%dT%H:%M:%SZ" "${SUCCESS_TIME}" "+%Y.%m.%d" 2>/dev/null || date -d "${SUCCESS_TIME}" +%Y.%m.%d)"
|
||||
|
||||
echo "AZURE_REGION: \"${AZURE_REGION}\""
|
||||
echo "PP_REGION: \"${PP_REGION}\""
|
||||
echo "AZURE_RESOURCE_GROUP: \"${AZURE_RESOURCE_GROUP}\""
|
||||
echo "PP_RESOURCE_GROUP: \"${PP_RESOURCE_GROUP}\""
|
||||
echo "PP_SUBNET_ID: \"${PP_SUBNET_ID}\""
|
||||
echo "CAA_TAG: \"${CAA_TAG}\""
|
||||
echo "PP_IMAGE_ID: \"${PP_IMAGE_ID}\""
|
||||
|
||||
# Clone and configure caa
|
||||
git clone --depth 1 --no-checkout https://github.com/confidential-containers/cloud-api-adaptor.git
|
||||
pushd cloud-api-adaptor
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set src/cloud-api-adaptor/install/
|
||||
git checkout
|
||||
echo "CAA_GIT_SHA: \"$(git rev-parse HEAD)\""
|
||||
pushd src/cloud-api-adaptor
|
||||
cat <<EOF > install/overlays/azure/workload-identity.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: cloud-api-adaptor-daemonset
|
||||
namespace: confidential-containers-system
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
azure.workload.identity/use: "true"
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cloud-api-adaptor
|
||||
namespace: confidential-containers-system
|
||||
annotations:
|
||||
azure.workload.identity/client-id: "${USER_ASSIGNED_CLIENT_ID}"
|
||||
EOF
|
||||
PP_INSTANCE_SIZE="Standard_D2as_v5"
|
||||
DISABLECVM="true"
|
||||
cat <<EOF > install/overlays/azure/kustomization.yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
bases:
|
||||
- ../../yamls
|
||||
images:
|
||||
- name: cloud-api-adaptor
|
||||
newName: "${CAA_IMAGE}"
|
||||
newTag: "${CAA_TAG}"
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
configMapGenerator:
|
||||
- name: peer-pods-cm
|
||||
namespace: confidential-containers-system
|
||||
literals:
|
||||
- CLOUD_PROVIDER="azure"
|
||||
- AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID}"
|
||||
- AZURE_REGION="${PP_REGION}"
|
||||
- AZURE_INSTANCE_SIZE="${PP_INSTANCE_SIZE}"
|
||||
- AZURE_RESOURCE_GROUP="${PP_RESOURCE_GROUP}"
|
||||
- AZURE_SUBNET_ID="${PP_SUBNET_ID}"
|
||||
- AZURE_IMAGE_ID="${PP_IMAGE_ID}"
|
||||
- DISABLECVM="${DISABLECVM}"
|
||||
- PEERPODS_LIMIT_PER_NODE="50"
|
||||
secretGenerator:
|
||||
- name: peer-pods-secret
|
||||
namespace: confidential-containers-system
|
||||
envs:
|
||||
- service-principal.env
|
||||
- name: ssh-key-secret
|
||||
namespace: confidential-containers-system
|
||||
files:
|
||||
- id_rsa.pub
|
||||
patchesStrategicMerge:
|
||||
- workload-identity.yaml
|
||||
EOF
|
||||
ssh-keygen -t rsa -f install/overlays/azure/id_rsa -N ''
|
||||
echo "AZURE_CLIENT_ID=${AZURE_CLIENT_ID}" > install/overlays/azure/service-principal.env
|
||||
echo "AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}" >> install/overlays/azure/service-principal.env
|
||||
echo "AZURE_TENANT_ID=${AZURE_TENANT_ID}" >> install/overlays/azure/service-principal.env
|
||||
|
||||
# Deploy Operator
|
||||
git clone --depth 1 --no-checkout https://github.com/confidential-containers/operator
|
||||
pushd operator
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set "config/"
|
||||
git checkout
|
||||
echo "OPERATOR_SHA: \"$(git rev-parse HEAD)\""
|
||||
oc apply -k "config/release"
|
||||
oc apply -k "config/samples/ccruntime/peer-pods"
|
||||
popd
|
||||
|
||||
# Deploy CAA
|
||||
kubectl apply -k "install/overlays/azure"
|
||||
popd
|
||||
popd
|
||||
|
||||
# Wait for runtimeclass
|
||||
SECONDS=0
|
||||
( while [[ "${SECONDS}" -lt 360 ]]; do
|
||||
kubectl get runtimeclass | grep -q kata-remote && exit 0
|
||||
done; exit 1 ) || { echo "kata-remote runtimeclass not initialized in 60s"; kubectl -n confidential-containers-system get all; echo; echo CAA; kubectl -n confidential-containers-system logs daemonset.apps/cloud-api-adaptor-daemonset; echo pre-install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-pre-install-daemon; echo install; kubectl -n confidential-containers-system logs daemonset.apps/cc-operator-daemon-install; exit 1; }
|
||||
|
||||
|
||||
################
|
||||
# Deploy webhook
|
||||
################
|
||||
pushd ci/openshift-ci/cluster/
|
||||
kubectl create ns default || true
|
||||
kubectl config set-context --current --namespace=default
|
||||
KATA_RUNTIME=kata-remote ./deploy_webhook.sh
|
||||
popd
|
@ -7,15 +7,16 @@
|
||||
# Run a smoke test.
|
||||
#
|
||||
|
||||
script_dir=$(dirname $0)
|
||||
source ${script_dir}/lib.sh
|
||||
script_dir=$(dirname "$0")
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${script_dir}/lib.sh"
|
||||
|
||||
pod='http-server'
|
||||
|
||||
# Create a pod.
|
||||
#
|
||||
info "Creating the ${pod} pod"
|
||||
[ -z "$KATA_RUNTIME" ] && die "Please set the KATA_RUNTIME first"
|
||||
[[ -z "${KATA_RUNTIME}" ]] && die "Please set the KATA_RUNTIME first"
|
||||
envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
|
||||
oc apply -f - || \
|
||||
die "failed to create ${pod} pod"
|
||||
@ -27,10 +28,10 @@ sleep_time=5
|
||||
cmd="oc get pod/${pod} -o jsonpath='{.status.containerStatuses[0].state}' | \
|
||||
grep running > /dev/null"
|
||||
info "Wait until the pod gets running"
|
||||
waitForProcess $wait_time $sleep_time "$cmd" || timed_out=$?
|
||||
if [ -n "$timed_out" ]; then
|
||||
oc describe pod/${pod}
|
||||
oc delete pod/${pod}
|
||||
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}" || timed_out=$?
|
||||
if [[ -n "${timed_out}" ]]; then
|
||||
oc describe "pod/${pod}"
|
||||
oc delete "pod/${pod}"
|
||||
die "${pod} not running"
|
||||
fi
|
||||
info "${pod} is running"
|
||||
@ -39,13 +40,13 @@ info "${pod} is running"
|
||||
#
|
||||
hello_file=/tmp/hello
|
||||
hello_msg='Hello World'
|
||||
oc exec ${pod} -- sh -c "echo $hello_msg > $hello_file"
|
||||
oc exec "${pod}" -- sh -c "echo ${hello_msg} > ${hello_file}"
|
||||
|
||||
info "Creating the service and route"
|
||||
if oc apply -f ${script_dir}/smoke/service.yaml; then
|
||||
if oc apply -f "${script_dir}/smoke/service.yaml"; then
|
||||
# Likely on OCP, use service
|
||||
is_ocp=1
|
||||
host=$(oc get route/http-server-route -o jsonpath={.spec.host})
|
||||
host=$(oc get route/http-server-route -o jsonpath="{.spec.host}")
|
||||
port=80
|
||||
else
|
||||
# Likely on plain kubernetes, test using another container
|
||||
@ -54,13 +55,13 @@ else
|
||||
oc apply -f "${script_dir}/smoke/service_kubernetes.yaml"
|
||||
# For some reason kcli's cluster lists external IP as internal IP, try both
|
||||
host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')
|
||||
[ -z "$host"] && host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
|
||||
[[ -z "${host}" ]] && host=$(oc get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
|
||||
port=$(oc get service/http-server-service -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
fi
|
||||
|
||||
info "Wait for the HTTP server to respond"
|
||||
tempfile=$(mktemp)
|
||||
check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '$tempfile' | grep -q '$hello_msg'"
|
||||
check_cmd="curl -vvv '${host}:${port}${hello_file}' 2>&1 | tee -a '${tempfile}' | grep -q '${hello_msg}'"
|
||||
if waitForProcess 60 1 "${check_cmd}"; then
|
||||
test_status=0
|
||||
info "HTTP server is working"
|
||||
@ -78,17 +79,17 @@ else
|
||||
echo "::endgroup::"
|
||||
info "HTTP server is unreachable"
|
||||
fi
|
||||
rm -f "$tempfile"
|
||||
rm -f "${tempfile}"
|
||||
|
||||
# Delete the resources.
|
||||
#
|
||||
info "Deleting the service/route"
|
||||
if [ "$is_ocp" -eq 0 ]; then
|
||||
oc delete -f ${script_dir}/smoke/service_kubernetes.yaml
|
||||
if [[ "${is_ocp}" -eq 0 ]]; then
|
||||
oc delete -f "${script_dir}/smoke/service_kubernetes.yaml"
|
||||
else
|
||||
oc delete -f ${script_dir}/smoke/service.yaml
|
||||
oc delete -f "${script_dir}/smoke/service.yaml"
|
||||
fi
|
||||
info "Deleting the ${pod} pod"
|
||||
oc delete pod/${pod} || test_status=$?
|
||||
oc delete "pod/${pod}" || test_status=$?
|
||||
|
||||
exit $test_status
|
||||
exit "${test_status}"
|
||||
|
@ -7,7 +7,7 @@
|
||||
# afterwards OCP cluster using kata-containers primarily created for use
|
||||
# with https://github.com/ldoktor/bisecter
|
||||
|
||||
[ "$#" -ne 1 ] && echo "Provide image as the first and only argument" && exit 255
|
||||
[[ "$#" -ne 1 ]] && echo "Provide image as the first and only argument" && exit 255
|
||||
export KATA_DEPLOY_IMAGE="$1"
|
||||
OCP_DIR="${OCP_DIR:-/path/to/your/openshift/release/}"
|
||||
E2E_TEST="${E2E_TEST:-'"[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]"'}"
|
||||
@ -17,12 +17,12 @@ export KATA_RUNTIME="${KATA_RUNTIME:-kata-qemu}"
|
||||
## SETUP
|
||||
# Deploy kata
|
||||
SETUP=0
|
||||
pushd "$KATA_CI_DIR" || { echo "Failed to cd to '$KATA_CI_DIR'"; exit 255; }
|
||||
pushd "${KATA_CI_DIR}" || { echo "Failed to cd to '${KATA_CI_DIR}'"; exit 255; }
|
||||
./test.sh || SETUP=125
|
||||
cluster/deploy_webhook.sh || SETUP=125
|
||||
if [ $SETUP != 0 ]; then
|
||||
if [[ ${SETUP} != 0 ]]; then
|
||||
./cleanup.sh
|
||||
exit "$SETUP"
|
||||
exit "${SETUP}"
|
||||
fi
|
||||
popd || true
|
||||
# Disable security
|
||||
@ -32,19 +32,19 @@ oc label --overwrite ns default pod-security.kubernetes.io/enforce=privileged po
|
||||
|
||||
## TEST EXECUTION
|
||||
# Run the testing
|
||||
pushd "$OCP_DIR" || { echo "Failed to cd to '$OCP_DIR'"; exit 255; }
|
||||
echo "$E2E_TEST" > /tmp/tsts
|
||||
pushd "${OCP_DIR}" || { echo "Failed to cd to '${OCP_DIR}'"; exit 255; }
|
||||
echo "${E2E_TEST}" > /tmp/tsts
|
||||
# Remove previously-existing temporarily files as well as previous results
|
||||
OUT=RESULTS/tmp
|
||||
rm -Rf /tmp/*test* /tmp/e2e-*
|
||||
rm -R $OUT
|
||||
mkdir -p $OUT
|
||||
rm -R "${OUT}"
|
||||
mkdir -p "${OUT}"
|
||||
# Run the tests ignoring the monitor health checks
|
||||
./openshift-tests run --provider azure -o "$OUT/job.log" --junit-dir "$OUT" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
|
||||
./openshift-tests run --provider azure -o "${OUT}/job.log" --junit-dir "${OUT}" --file /tmp/tsts --max-parallel-tests 5 --cluster-stability Disruptive
|
||||
RET=$?
|
||||
popd || true
|
||||
|
||||
## CLEANUP
|
||||
./cleanup.sh
|
||||
exit "$RET"
|
||||
exit "${RET}"
|
||||
|
||||
|
@ -8,25 +8,29 @@
|
||||
# The kata shim to be used
|
||||
export KATA_RUNTIME=${KATA_RUNTIME:-kata-qemu}
|
||||
|
||||
script_dir=$(dirname $0)
|
||||
source ${script_dir}/lib.sh
|
||||
script_dir=$(dirname "$0")
|
||||
# shellcheck disable=SC1091 # import based on variable
|
||||
source "${script_dir}/lib.sh"
|
||||
|
||||
suite=$1
|
||||
if [ -z "$1" ]; then
|
||||
if [[ -z "$1" ]]; then
|
||||
suite='smoke'
|
||||
fi
|
||||
|
||||
# Make oc and kubectl visible
|
||||
export PATH=/tmp/shared:$PATH
|
||||
export PATH=/tmp/shared:${PATH}
|
||||
|
||||
oc version || die "Test cluster is unreachable"
|
||||
|
||||
info "Install and configure kata into the test cluster"
|
||||
export SELINUX_PERMISSIVE="no"
|
||||
${script_dir}/cluster/install_kata.sh || die "Failed to install kata-containers"
|
||||
"${script_dir}/cluster/install_kata.sh" || die "Failed to install kata-containers"
|
||||
|
||||
info "Run test suite: $suite"
|
||||
info "Overriding KATA_RUNTIME cpu resources"
|
||||
oc patch "runtimeclass/${KATA_RUNTIME}" -p '{"overhead": {"podFixed": {"cpu": "50m"}}}'
|
||||
|
||||
info "Run test suite: ${suite}"
|
||||
test_status='PASS'
|
||||
${script_dir}/run_${suite}_test.sh || test_status='FAIL'
|
||||
info "Test suite: $suite: $test_status"
|
||||
[ "$test_status" == "PASS" ]
|
||||
"${script_dir}/run_${suite}_test.sh" || test_status='FAIL'
|
||||
info "Test suite: ${suite}: ${test_status}"
|
||||
[[ "${test_status}" == "PASS" ]]
|
||||
|
@ -47,3 +47,4 @@
|
||||
- [How to run Kata Containers with kinds of Block Volumes](how-to-run-kata-containers-with-kinds-of-Block-Volumes.md)
|
||||
- [How to use the Kata Agent Policy](how-to-use-the-kata-agent-policy.md)
|
||||
- [How to pull images in the guest](how-to-pull-images-in-guest-with-kata.md)
|
||||
- [How to use mem-agent to decrease the memory usage of Kata container](how-to-use-memory-agent.md)
|
||||
|
@ -2,6 +2,8 @@
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes how to set up a swap device with runtime-golang. See [How to use mem-agent to decrease the memory usage of Kata container](how-to-use-memory-agent.md#setup-guest-swap) for how to set up and use guest swap with runtime-rs.
|
||||
|
||||
Setting up a swap device in the guest kernel can help increase memory capacity, handle some memory issues, and sometimes improve file access speed.
|
||||
Kata Containers can insert a raw file to the guest as the swap device.
|
||||
|
||||
|
docs/how-to/how-to-use-memory-agent.md (new file, 351 lines)
@ -0,0 +1,351 @@
|
||||
# How to use mem-agent to decrease the memory usage of Kata container
|
||||
## Introduction
|
||||
mem-agent is a component designed for managing memory in Linux environments.<br>
|
||||
The mem-agent has been integrated into the kata-agent to reduce memory usage in Kata containers.
|
||||
|
||||
## Enable mem-agent in the configuration
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#mem_agent_enable.*$/mem_agent_enable = true/g' $config_file
|
||||
```
|
||||
|
||||
## Enable reclaim_guest_freed_memory in the configuration
|
||||
Enabling this will result in the VM balloon device having f_reporting=on set.<br>
|
||||
Then the hypervisor will use it to reclaim guest freed memory.
|
||||
|
||||
When the mem-agent reclaims memory in the guest, this function allows the hypervisor to reclaim the freed memory on the host.
|
||||
|
||||
**To use the mem-agent, you must enable reclaim_guest_freed_memory in the configuration.**
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#reclaim_guest_freed_memory.*$/reclaim_guest_freed_memory = true/g' $config_file
|
||||
```
|
||||
|
||||
## Sub-feature psi
|
||||
During memory reclamation and compaction, mem-agent monitors system pressure using Pressure Stall Information (PSI).<br>
|
||||
If the system pressure becomes too high, memory reclamation or compaction will automatically stop.
|
||||
|
||||
This feature helps the mem-agent reduce its overhead on system performance.
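As a rough sanity check, you can read the kernel's PSI interface inside the guest to see the kind of pressure figures the mem-agent reacts to (this assumes the guest kernel is built with PSI support; the thresholds the mem-agent applies are governed by the *_psi_percent_limit options described below):

```bash
# Memory and IO pressure stall information (PSI).
# avg10/avg60/avg300 are the percentage of wall time tasks were stalled
# over the last 10/60/300 seconds; total is cumulative stall time in usec.
$ cat /proc/pressure/memory
$ cat /proc/pressure/io
```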
|
||||
|
||||
## Sub-feature memcg
|
||||
Use the Linux kernel MgLRU feature to monitor each cgroup's memory usage and periodically reclaim cold memory.
|
||||
|
||||
During each run period, memcg calls the run_aging function of MgLRU for each cgroup to mark the hot and cold states of the pages within it.<br>
|
||||
Then, it calls the run_eviction function of MgLRU for each cgroup to reclaim a portion of the cold pages that have not been accessed for three periods.
|
||||
|
||||
After the run period, the memcg will enter a sleep period. Once the sleep period is over, it will transition into the next run period, and this cycle will continue.
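The memcg sub-feature depends on the kernel's Multi-Gen LRU (MgLRU). A quick way to confirm that the guest kernel provides it and that it is enabled (the sysfs path is the standard upstream one, but whether it exists depends on how the guest kernel was built):

```bash
# A non-zero value (for example 0x0007) means MgLRU is enabled; 0 means disabled.
$ cat /sys/kernel/mm/lru_gen/enabled
# The guest kernel config should contain CONFIG_LRU_GEN=y.
$ zcat /proc/config.gz 2>/dev/null | grep LRU_GEN || grep LRU_GEN "/boot/config-$(uname -r)"
```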
|
||||
|
||||
**The following are the configurations of the sub-feature memcg:**
|
||||
|
||||
### memcg_disable
|
||||
Control whether the mem-agent memcg function is enabled or disabled.<br>
|
||||
Default to false.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_disable.*$/memcg_disable = true/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"disabled":true}'
|
||||
```
|
||||
|
||||
### memcg_swap
|
||||
If this feature is disabled, the mem-agent will only track and reclaim file cache pages. If this feature is enabled, the mem-agent will handle both file cache pages and anonymous pages.<br>
|
||||
Default to false.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_swap.*$/memcg_swap = true/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"swap":true}'
|
||||
```
|
||||
|
||||
#### setup guest swap
|
||||
memcg_swap should be used together with the guest swap function.<br>
|
||||
The guest swap function will create a separate swap task that will create and insert swap files into the guest as needed.<br>
|
||||
Only Dragonball and Cloud Hypervisor support guest swap.
|
||||
|
||||
Use the following configuration to enable guest swap.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' $config_file
|
||||
```
|
||||
|
||||
By default, swap files are created in the /run/kata-containers/swap directory. You can use the following configuration to create swap files in a different directory.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#guest_swap_path.*$/guest_swap_path = \"\/run\/kata-containers\/swap\"/g' $config_file
|
||||
```
|
||||
|
||||
By default, the inserted swap file will match the current memory size, which is set to 100%. You can modify the percentage of the swap size relative to the current memory size using the configuration below.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#guest_swap_size_percent.*$/guest_swap_size_percent = 100/g' $config_file
|
||||
```
|
||||
|
||||
The swap task will wait for 60 seconds before determining the memory size and creating swap files. This approach helps prevent interference with the startup performance of the kata container during its initial creation and avoids frequent insertion of swap files when the guest memory size is adjusted frequently. You can configure the waiting time using the option below.
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#guest_swap_create_threshold_secs.*$/guest_swap_create_threshold_secs = 60/g' $config_file
|
||||
```
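Once guest swap is enabled, you can confirm that the swap task actually inserted a swap file by looking inside the guest (for example through the Kata debug console); the directory below is only the default mentioned above:

```bash
# Inside the guest: list the active swap areas. With the defaults, the
# mem-agent swap task creates files under /run/kata-containers/swap.
$ cat /proc/swaps
$ swapon --show
```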
|
||||
|
||||
### memcg_swappiness_max
|
||||
The usage of this value is similar to the swappiness in the Linux kernel, applying a ratio of swappiness_max/200 when utilized.<br>
|
||||
At the beginning of the eviction memory process for a cgroup in each run period, the coldest anonymous pages are assigned a maximum eviction value based on swappiness_max/200.<br>
|
||||
When the run_eviction function of MgLRU is actually called, if the comparison ratio between the current coldest anonymous pages and file cache pages exceeds this value, then this value will be used as the swappiness.<br>
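For example, with the default memcg_swappiness_max of 50 the effective ratio is 50/200 = 25%, so at most roughly a quarter of the pages targeted by an eviction pass can be anonymous pages, with the remainder coming from the file cache (the exact split still follows the MgLRU swappiness semantics described above).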
|
||||
Default to 50.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_swappiness_max.*$/memcg_swappiness_max = 50/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"swappiness_max":50}'
|
||||
```
|
||||
|
||||
### memcg_period_secs
|
||||
Control the mem-agent memcg function wait period seconds.<br>
|
||||
Default to 600.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_period_secs.*$/memcg_period_secs = 600/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"period_secs":600}'
|
||||
```
|
||||
|
||||
### memcg_period_psi_percent_limit
|
||||
Control the mem-agent memcg wait period PSI percent limit.<br>
|
||||
If the percentage of memory and IO PSI stall time within the memcg waiting period for a cgroup exceeds this value, then the memcg run period for this cgroup will not be executed after this waiting period.<br>
|
||||
Default to 1.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_period_psi_percent_limit.*$/memcg_period_psi_percent_limit = 1/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"period_psi_percent_limit":1}'
|
||||
```
|
||||
|
||||
### memcg_eviction_psi_percent_limit
|
||||
Control the mem-agent memcg eviction PSI percent limit.<br>
|
||||
If the percentage of memory and IO PSI stall time for a cgroup exceeds this value during an eviction cycle, the eviction for this cgroup will immediately stop and will not resume until the next memcg waiting period.<br>
|
||||
Default to 1.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_eviction_psi_percent_limit.*$/memcg_eviction_psi_percent_limit = 1/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"eviction_psi_percent_limit":1}'
|
||||
```
|
||||
|
||||
### memcg_eviction_run_aging_count_min
|
||||
Control the minimum number of memcg aging cycles required before the mem-agent performs eviction.<br>
|
||||
A cgroup will only perform eviction when the number of aging cycles in memcg is greater than or equal to memcg_eviction_run_aging_count_min.<br>
|
||||
Default to 3.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#memcg_eviction_run_aging_count_min.*$/memcg_eviction_run_aging_count_min = 3/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentMemcgSet json://{"eviction_run_aging_count_min":3}'
|
||||
```
|
||||
|
||||
## Sub-feature compact
|
||||
The memory control group (memcg) functionality may release a significant number of small pages, but the VM balloon free page reporting feature used by reclaim_guest_freed_memory requires at least a contiguous block of order 10 pages (a page block) before memory can be released to the host.<br>
|
||||
The sub-feature compact is designed to address the issue of fragmented pages.<br>
|
||||
|
||||
During each run period, compact checks the continuity of free pages within the system. If necessary, it invokes the Linux compaction feature to reorganize fragmented pages.<br>
|
||||
After the run period, the compact will enter a sleep period. Once the sleep period is over, it will transition into the next run period, and this cycle will continue.
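To get a feel for how fragmented the guest's free memory currently is, and therefore whether compaction has anything to gain, you can inspect the buddy allocator statistics manually; this is only an illustration, the mem-agent performs its own checks using /proc/pagetypeinfo as described under compact_threshold:

```bash
# Each column is the number of free blocks of the given order
# (order 0 = one base page); the right-most columns show how many
# order-9 / order-10 blocks are available for free page reporting.
$ cat /proc/buddyinfo
# Optionally trigger a full kernel compaction to compare before/after.
$ echo 1 | sudo tee /proc/sys/vm/compact_memory
```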
|
||||
|
||||
*The VM balloon free page reporting feature on arm64 with a 64KB base page size reports order 5 pages. The following is the comment from the Linux kernel.*
|
||||
```
|
||||
/*
|
||||
* The default page reporting order is @pageblock_order, which
|
||||
* corresponds to 512MB in size on ARM64 when 64KB base page
|
||||
* size is used. The page reporting won't be triggered if the
|
||||
* freeing page can't come up with a free area like that huge.
|
||||
* So we specify the page reporting order to 5, corresponding
|
||||
* to 2MB. It helps to avoid THP splitting if 4KB base page
|
||||
* size is used by host.
|
||||
*
|
||||
* Ideally, the page reporting order is selected based on the
|
||||
* host's base page size. However, it needs more work to report
|
||||
* that value. The hard-coded order would be fine currently.
|
||||
*/
|
||||
```
|
||||
|
||||
**The following are the configurations of the sub-feature compact:**
|
||||
|
||||
### compact_disable
|
||||
Control whether the mem-agent compact function is enabled or disabled.<br>
|
||||
Default to false.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_disable.*$/compact_disable = true/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"disabled":false}'
|
||||
```
|
||||
|
||||
### compact_period_secs
|
||||
Control the mem-agent compaction function wait period seconds.<br>
|
||||
Default to 600.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_period_secs.*$/compact_period_secs = 600/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"period_secs":600}'
|
||||
```
|
||||
|
||||
### compact_period_psi_percent_limit
|
||||
Control the mem-agent compaction function wait period PSI percent limit.<br>
|
||||
If the percentage of memory and IO PSI stall time within the compaction waiting period exceeds this value, then the compaction will not be executed after this waiting period.<br>
|
||||
Default to 1.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_period_psi_percent_limit.*$/compact_period_psi_percent_limit = 1/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"period_psi_percent_limit":1}'
|
||||
```
|
||||
|
||||
### compact_psi_percent_limit
|
||||
Control the mem-agent compaction function compact PSI percent limit.<br>
|
||||
During compaction, the percentage of memory and IO PSI stall time is checked every second. If this percentage exceeds compact_psi_percent_limit, the compaction process will stop.<br>
|
||||
Default to 5.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_psi_percent_limit.*$/compact_psi_percent_limit = 5/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_psi_percent_limit":5}'
|
||||
```
|
||||
|
||||
### compact_sec_max
|
||||
Control the maximum number of seconds allowed for each compaction run of the mem-agent compact function.<br>
|
||||
If a compaction run exceeds compact_sec_max seconds during a compact run period, compaction stops immediately.
|
||||
|
||||
Default to 180.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_sec_max.*$/compact_sec_max = 180/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_sec_max":180}'
|
||||
```
|
||||
|
||||
### compact_order
|
||||
compact_order is used together with compact_threshold.<br>
|
||||
The compact_order parameter determines the order of contiguous pages that the mem-agent's compaction functionality aims to achieve.<br>
|
||||
For example, if compact_order is set to 10 in a Kata container guest environment, the compaction function will target acquiring more contiguous pages of order 10, which will allow reclaim_guest_freed_memory to release additional pages.<br>
|
||||
If the goal is to have more free pages of order 9 in the system to ensure a higher likelihood of obtaining transparent huge pages during memory allocation, then setting compact_order to 9 would be appropriate.
|
||||
Default to 9.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_order.*$/compact_order = 9/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_order":9}'
|
||||
```
|
||||
|
||||
### compact_threshold
|
||||
Control the mem-agent compaction function compact threshold.<br>
|
||||
compact_threshold is a number of pages.<br>
|
||||
When examining /proc/pagetypeinfo, if the number of movable free pages of orders smaller than compact_order has increased by more than compact_threshold pages since the previous compaction period, or the number of free pages has decreased by more than compact_threshold since the previous compaction, the current compact run period will not perform compaction, because there are not enough fragmented pages to make compaction worthwhile.<br>
|
||||
This design aims to minimize the impact of unnecessary compaction calls on system performance.<br>
|
||||
Default to 1024.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_threshold.*$/compact_threshold = 1024/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_threshold":1024}'
|
||||
```
|
||||
|
||||
### compact_force_times
|
||||
Control after how many run periods without compaction the mem-agent compact function forces a compaction.<br>
|
||||
After one compaction during a run period, if there are consecutive instances of compact_force_times run periods where no compaction occurs, a compaction will be forced regardless of the system's memory state.<br>
|
||||
If compact_force_times is set to 0, a forced compaction is performed every run period.<br>
|
||||
If compact_force_times is set to 18446744073709551615 (u64::MAX), forced compaction is never performed.<br>
|
||||
Default to 18446744073709551615.
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/runtime-rs/configuration.toml"
|
||||
$ sudo sed -i -e 's/^#compact_force_times.*$/compact_force_times = 18446744073709551615/g' $config_file
|
||||
```
|
||||
|
||||
For a running Kata container, this configuration can be dynamically modified using the kata-agent-ctl command.
|
||||
```bash
|
||||
$ PODID="12345"
|
||||
$ kata-agent-ctl connect --server-address "unix:///var/run/kata/$PODID/root/kata.hvsock" --hybrid-vsock \
|
||||
--cmd 'MemAgentCompactSet json://{"compact_force_times":18446744073709551615}'
|
||||
```
|
rust-toolchain.toml (new file, 3 lines)
@ -0,0 +1,3 @@
|
||||
[toolchain]
|
||||
# Keep in sync with versions.yaml
|
||||
channel = "1.80.0"
|
shellcheckrc (new file, 17 lines)
@ -0,0 +1,17 @@
|
||||
# Allow opening any 'source'd file, even if not specified as input
|
||||
external-sources=true
|
||||
|
||||
# Turn on warnings for unquoted variables with safe values
|
||||
enable=quote-safe-variables
|
||||
|
||||
# Turn on warnings for unassigned uppercase variables
|
||||
enable=check-unassigned-uppercase
|
||||
|
||||
# Enforces braces around variable expansions to avoid ambiguity or confusion.
|
||||
# e.g. ${filename} rather than $filename
|
||||
enable=require-variable-braces
|
||||
|
||||
# Requires double-bracket syntax [[ expr ]] for safer, more consistent tests.
|
||||
# NO: if [ "$var" = "value" ]
|
||||
# YES: if [[ $var == "value" ]]
|
||||
enable=require-double-brackets
|
src/agent/Cargo.lock (generated, 2016 lines changed; diff suppressed because it is too large)
@ -1,37 +1,31 @@
|
||||
[package]
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
[workspace]
|
||||
members = ["rustjail", "policy", "vsock-exporter"]
|
||||
|
||||
[workspace.package]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
rust-version = "1.80.0"
|
||||
|
||||
[dependencies]
|
||||
runtime-spec = { path = "../libs/runtime-spec" }
|
||||
mem-agent = { path = "../mem-agent" }
|
||||
[workspace.dependencies]
|
||||
oci-spec = { version = "0.6.8", features = ["runtime"] }
|
||||
rustjail = { path = "rustjail" }
|
||||
protocols = { path = "../libs/protocols", features = ["async", "with-serde"] }
|
||||
lazy_static = "1.3.0"
|
||||
ttrpc = { version = "0.8.4", features = ["async"], default-features = false }
|
||||
protobuf = "3.2.0"
|
||||
libc = "0.2.58"
|
||||
protobuf = "=3.7.1"
|
||||
libc = "0.2.94"
|
||||
nix = "0.24.2"
|
||||
capctl = "0.2.0"
|
||||
serde_json = "1.0.39"
|
||||
scan_fmt = "0.2.3"
|
||||
scan_fmt = "0.2.6"
|
||||
scopeguard = "1.0.0"
|
||||
thiserror = "1.0.26"
|
||||
regex = "1.10.5"
|
||||
serial_test = "0.5.1"
|
||||
url = "2.5.0"
|
||||
derivative = "2.2.0"
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types" }
|
||||
safe-path = { path = "../libs/safe-path" }
|
||||
const_format = "0.2.30"
|
||||
|
||||
# Async helpers
|
||||
async-trait = "0.1.42"
|
||||
async-trait = "0.1.50"
|
||||
async-recursion = "0.3.2"
|
||||
futures = "0.3.30"
|
||||
|
||||
@ -40,13 +34,12 @@ tokio = { version = "1.39.0", features = ["full"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
|
||||
rtnetlink = "0.8.0"
|
||||
netlink-packet-utils = "0.4.1"
|
||||
rtnetlink = "0.14.0"
|
||||
netlink-packet-route = "0.19.0"
|
||||
netlink-packet-core = "0.7.0"
|
||||
ipnetwork = "0.17.0"
|
||||
|
||||
# Note: this crate sets the slog 'max_*' features which allows the log level
|
||||
# to be modified at runtime.
|
||||
logging = { path = "../libs/logging" }
|
||||
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.1.2"
|
||||
slog-term = "2.9.0"
|
||||
@ -68,47 +61,149 @@ tracing = "0.1.26"
|
||||
tracing-subscriber = "0.2.18"
|
||||
tracing-opentelemetry = "0.13.0"
|
||||
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"] }
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
# Configuration
|
||||
serde = { version = "1.0.129", features = ["derive"] }
|
||||
serde_json = "1.0.39"
|
||||
toml = "0.5.8"
|
||||
clap = { version = "3.0.1", features = ["derive"] }
|
||||
strum = "0.26.2"
|
||||
strum_macros = "0.26.2"
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "514c561d933cb11a0f1628621a0b930157af76cd", default-features = false, optional = true }
|
||||
|
||||
# Agent Policy
|
||||
regorus = { version = "0.2.6", default-features = false, features = [
|
||||
"arc",
|
||||
"regex",
|
||||
"std",
|
||||
], optional = true }
|
||||
cdi = { git = "https://github.com/cncf-tags/container-device-interface-rs", rev = "fba5677a8e7cc962fc6e495fcec98d7d765e332a" }
|
||||
json-patch = "2.0.0"
|
||||
kata-agent-policy = { path = "policy" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
test-utils = { path = "../libs/test-utils" }
|
||||
which = "4.3.0"
|
||||
rstest = "0.18.0"
|
||||
async-std = { version = "1.12.0", features = ["attributes"] }
|
||||
|
||||
[workspace]
|
||||
members = ["rustjail", "policy"]
|
||||
# Local dependencies
|
||||
kata-agent-policy = { path = "policy" }
|
||||
rustjail = { path = "rustjail" }
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
mem-agent = { path = "../mem-agent" }
|
||||
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types" }
|
||||
# Note: this crate sets the slog 'max_*' features which allows the log level
|
||||
# to be modified at runtime.
|
||||
logging = { path = "../libs/logging" }
|
||||
protocols = { path = "../libs/protocols" }
|
||||
runtime-spec = { path = "../libs/runtime-spec" }
|
||||
safe-path = { path = "../libs/safe-path" }
|
||||
test-utils = { path = "../libs/test-utils" }
|
||||
|
||||
|
||||
[package]
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
oci-spec.workspace = true
|
||||
lazy_static.workspace = true
|
||||
ttrpc.workspace = true
|
||||
protobuf.workspace = true
|
||||
libc.workspace = true
|
||||
nix.workspace = true
|
||||
capctl.workspace = true
|
||||
serde_json.workspace = true
|
||||
scan_fmt.workspace = true
|
||||
scopeguard.workspace = true
|
||||
thiserror.workspace = true
|
||||
regex.workspace = true
|
||||
serial_test.workspace = true
|
||||
url.workspace = true
|
||||
derivative.workspace = true
|
||||
const_format.workspace = true
|
||||
|
||||
# Async helpers
|
||||
async-trait.workspace = true
|
||||
async-recursion.workspace = true
|
||||
futures.workspace = true
|
||||
|
||||
# Async runtime
|
||||
tokio.workspace = true
|
||||
tokio-vsock.workspace = true
|
||||
|
||||
netlink-sys.workspace = true
|
||||
rtnetlink.workspace = true
|
||||
netlink-packet-route.workspace = true
|
||||
netlink-packet-core.workspace = true
|
||||
ipnetwork.workspace = true
|
||||
|
||||
slog.workspace = true
|
||||
slog-scope.workspace = true
|
||||
slog-term.workspace = true
|
||||
|
||||
# Redirect ttrpc log calls
|
||||
slog-stdlog.workspace = true
|
||||
log.workspace = true
|
||||
|
||||
cfg-if.workspace = true
|
||||
prometheus.workspace = true
|
||||
procfs.workspace = true
|
||||
|
||||
anyhow.workspace = true
|
||||
|
||||
cgroups.workspace = true
|
||||
|
||||
# Tracing
|
||||
tracing.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
opentelemetry.workspace = true
|
||||
|
||||
# Configuration
|
||||
serde.workspace = true
|
||||
toml.workspace = true
|
||||
clap.workspace = true
|
||||
strum.workspace = true
|
||||
strum_macros.workspace = true
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "1191f8257eb65f42892ab0328cec02e58d40de84", default-features = false, optional = true }
|
||||
|
||||
# Agent Policy
|
||||
cdi = { git = "https://github.com/cncf-tags/container-device-interface-rs", rev = "fba5677a8e7cc962fc6e495fcec98d7d765e332a" }
|
||||
|
||||
# Local dependencies
|
||||
kata-agent-policy = { workspace = true, optional = true }
|
||||
mem-agent.workspace = true
|
||||
rustjail.workspace = true
|
||||
protocols = { workspace = true, features = ["async", "with-serde"] }
|
||||
kata-sys-util.workspace = true
|
||||
kata-types.workspace = true
|
||||
runtime-spec.workspace = true
|
||||
safe-path.workspace = true
|
||||
# Note: this crate sets the slog 'max_*' features, which allow the log level
|
||||
# to be modified at runtime.
|
||||
logging.workspace = true
|
||||
vsock-exporter.workspace = true
|
||||
|
||||
# Initdata
|
||||
base64 = "0.22"
|
||||
sha2 = "0.10.8"
|
||||
async-compression = { version = "0.4.22", features = ["tokio", "gzip"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile.workspace = true
|
||||
which.workspace = true
|
||||
rstest.workspace = true
|
||||
async-std.workspace = true
|
||||
|
||||
test-utils.workspace = true
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
||||
[features]
|
||||
# The default-pull feature supports all pull types, including sharing images by virtio-fs and pulling images in the guest
|
||||
default-pull = ["guest-pull"]
|
||||
# The default-pull feature only supports sharing images by virtio-fs; for guest pull, build with the guest-pull feature
|
||||
default-pull = []
|
||||
seccomp = ["rustjail/seccomp"]
|
||||
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
|
||||
agent-policy = ["regorus"]
|
||||
agent-policy = ["kata-agent-policy"]
|
||||
guest-pull = ["image-rs/kata-cc-rustls-tls"]
|
||||
|
||||
[[bin]]
|
||||
|
@ -53,10 +53,6 @@ endif
|
||||
|
||||
include ../../utils.mk
|
||||
|
||||
ifeq ($(ARCH), ppc64le)
|
||||
override ARCH = powerpc64le
|
||||
endif
|
||||
|
||||
##VAR STANDARD_OCI_RUNTIME=yes|no defines whether the agent enables the standard OCI runtime feature
|
||||
STANDARD_OCI_RUNTIME := no
|
||||
|
||||
|
@ -1,20 +1,19 @@
|
||||
[package]
|
||||
name = "kata-agent-policy"
|
||||
version = "0.1.0"
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Async runtime
|
||||
tokio = { version = "1.39.0", features = ["full"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
tokio.workspace = true
|
||||
|
||||
anyhow = "1"
|
||||
anyhow.workspace = true
|
||||
|
||||
# Configuration
|
||||
serde = { version = "1.0.129", features = ["derive"] }
|
||||
serde_json = "1.0.39"
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
# Agent Policy
|
||||
regorus = { version = "0.2.8", default-features = false, features = [
|
||||
@ -27,7 +26,5 @@ json-patch = "2.0.0"
|
||||
|
||||
# Note: this crate sets the slog 'max_*' features, which allow the log level
|
||||
# to be modified at runtime.
|
||||
logging = { path = "../../libs/logging" }
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.1.2"
|
||||
slog-term = "2.9.0"
|
||||
slog.workspace = true
|
||||
slog-scope.workspace = true
|
||||
|
@ -1,51 +1,62 @@
|
||||
[package]
|
||||
name = "rustjail"
|
||||
version = "0.1.0"
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
awaitgroup = "0.6.0"
|
||||
serde = "1.0.91"
|
||||
serde_json = "1.0.39"
|
||||
serde_derive = "1.0.91"
|
||||
runtime-spec = { path = "../../libs/runtime-spec" }
|
||||
oci-spec = { version = "0.6.8", features = ["runtime"] }
|
||||
protocols = { path ="../../libs/protocols" }
|
||||
kata-sys-util = { path = "../../libs/kata-sys-util" }
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
oci-spec.workspace = true
|
||||
caps = "0.5.0"
|
||||
nix = "0.24.2"
|
||||
scopeguard = "1.0.0"
|
||||
capctl = "0.2.0"
|
||||
lazy_static = "1.3.0"
|
||||
libc = "0.2.58"
|
||||
protobuf = "3.2.0"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.1.2"
|
||||
scan_fmt = "0.2.6"
|
||||
regex = "1.5.6"
|
||||
nix.workspace = true
|
||||
scopeguard.workspace = true
|
||||
capctl.workspace = true
|
||||
lazy_static.workspace = true
|
||||
libc.workspace = true
|
||||
protobuf.workspace = true
|
||||
slog.workspace = true
|
||||
slog-scope.workspace = true
|
||||
scan_fmt.workspace = true
|
||||
regex.workspace = true
|
||||
path-absolutize = "1.2.0"
|
||||
anyhow = "1.0.32"
|
||||
cgroups = { package = "cgroups-rs", version = "0.3.3" }
|
||||
cgroups.workspace = true
|
||||
rlimit = "0.5.3"
|
||||
cfg-if = "0.1.0"
|
||||
cfg-if.workspace = true
|
||||
|
||||
tokio = { version = "1.38.0", features = ["sync", "io-util", "process", "time", "macros", "rt", "fs"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
futures = "0.3.17"
|
||||
async-trait = "0.1.31"
|
||||
tokio = { workspace = true, features = [
|
||||
"sync",
|
||||
"io-util",
|
||||
"process",
|
||||
"time",
|
||||
"macros",
|
||||
"rt",
|
||||
"fs",
|
||||
] }
|
||||
tokio-vsock.workspace = true
|
||||
futures.workspace = true
|
||||
async-trait.workspace = true
|
||||
inotify = "0.9.2"
|
||||
libseccomp = { version = "0.3.0", optional = true }
|
||||
zbus = "3.12.0"
|
||||
bit-vec= "0.6.3"
|
||||
bit-vec = "0.6.3"
|
||||
xattr = "0.2.3"
|
||||
|
||||
# Local dependencies
|
||||
protocols.workspace = true
|
||||
kata-sys-util.workspace = true
|
||||
runtime-spec.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.5.0"
|
||||
tempfile = "3.1.0"
|
||||
test-utils = { path = "../../libs/test-utils" }
|
||||
protocols = { path ="../../libs/protocols" }
|
||||
serial_test.workspace = true
|
||||
tempfile.workspace = true
|
||||
|
||||
# Local dependencies
|
||||
test-utils.workspace = true
|
||||
|
||||
[features]
|
||||
seccomp = ["libseccomp"]
|
||||
|
@ -33,6 +33,7 @@ use protocols::agent::{
|
||||
BlkioStats, BlkioStatsEntry, CgroupStats, CpuStats, CpuUsage, HugetlbStats, MemoryData,
|
||||
MemoryStats, PidsStats, ThrottlingData,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
|
@ -12,6 +12,7 @@ use cgroups::freezer::FreezerState;
|
||||
use libc::{self, pid_t};
|
||||
use oci::{LinuxResources, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::string::String;
|
||||
|
@ -77,9 +77,17 @@ async fn register_memory_event_v2(
|
||||
let mut inotify = Inotify::init().context("Failed to initialize inotify")?;
|
||||
|
||||
// watching oom kill
|
||||
let ev_wd = inotify.add_watch(&event_control_path, WatchMask::MODIFY)?;
|
||||
let ev_wd = inotify
|
||||
.add_watch(&event_control_path, WatchMask::MODIFY)
|
||||
.context(format!("failed to add watch for {:?}", &event_control_path))?;
|
||||
|
||||
// Because the cgroup file system emits no `unix.IN_DELETE|unix.IN_DELETE_SELF` events, watch for all processes having exited instead
|
||||
let cg_wd = inotify.add_watch(&cgroup_event_control_path, WatchMask::MODIFY)?;
|
||||
let cg_wd = inotify
|
||||
.add_watch(&cgroup_event_control_path, WatchMask::MODIFY)
|
||||
.context(format!(
|
||||
"failed to add watch for {:?}",
|
||||
&cgroup_event_control_path
|
||||
))?;
|
||||
|
||||
info!(sl(), "ev_wd: {:?}", ev_wd);
|
||||
info!(sl(), "cg_wd: {:?}", cg_wd);
|
||||
|
@ -6,6 +6,7 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
|
||||
use super::common::{DEFAULT_SLICE, SCOPE_SUFFIX, SLICE_SUFFIX};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::string::String;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
|
@ -3,6 +3,8 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub const DEFAULT_SLICE: &str = "system.slice";
|
||||
pub const SLICE_SUFFIX: &str = ".slice";
|
||||
pub const SCOPE_SUFFIX: &str = ".scope";
|
||||
|
@ -3,6 +3,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::vec;
|
||||
|
||||
use super::common::{
|
||||
|
@ -10,6 +10,7 @@ use cgroups::freezer::FreezerState;
|
||||
use libc::{self, pid_t};
|
||||
use oci::LinuxResources;
|
||||
use oci_spec::runtime as oci;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
|
@ -8,6 +8,7 @@ use libc::pid_t;
|
||||
use oci::{Linux, LinuxDevice, LinuxIdMapping, LinuxNamespace, LinuxResources, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use runtime_spec as spec;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use spec::{ContainerState, State as OCIState};
|
||||
use std::clone::Clone;
|
||||
use std::ffi::CString;
|
||||
|
@ -7,12 +7,10 @@
|
||||
#[macro_use]
|
||||
#[cfg(test)]
|
||||
extern crate serial_test;
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate caps;
|
||||
extern crate protocols;
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
extern crate scopeguard;
|
||||
extern crate capctl;
|
||||
|
@ -18,12 +18,13 @@ use std::collections::{HashMap, HashSet};
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::mem::MaybeUninit;
|
||||
use std::os::unix;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::{Component, Path, PathBuf};
|
||||
|
||||
use path_absolutize::*;
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
use std::io::{BufRead, BufReader, ErrorKind};
|
||||
|
||||
use crate::container::DEFAULT_DEVICES;
|
||||
use crate::selinux;
|
||||
@ -1010,16 +1011,29 @@ lazy_static! {
|
||||
};
|
||||
}
|
||||
|
||||
fn permissions_from_path(path: &Path) -> Result<u32> {
|
||||
match fs::metadata(path) {
|
||||
Ok(metadata) => Ok(metadata.permissions().mode()),
|
||||
Err(e) if e.kind() == ErrorKind::NotFound => Ok(0),
|
||||
Err(e) => Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
fn mknod_dev(dev: &LinuxDevice, relpath: &Path) -> Result<()> {
|
||||
let f = match LINUXDEVICETYPE.get(dev.typ().as_str()) {
|
||||
Some(v) => v,
|
||||
None => return Err(anyhow!("invalid spec".to_string())),
|
||||
};
|
||||
|
||||
let file_mode = match dev.file_mode().unwrap_or(0) {
|
||||
0 => permissions_from_path(Path::new(dev.path()))?,
|
||||
x => x,
|
||||
};
|
||||
|
||||
stat::mknod(
|
||||
relpath,
|
||||
*f,
|
||||
Mode::from_bits_truncate(dev.file_mode().unwrap_or(0)),
|
||||
Mode::from_bits_truncate(file_mode),
|
||||
nix::sys::stat::makedev(dev.major() as u64, dev.minor() as u64),
|
||||
)?;
|
||||
|
||||
|
@ -4,6 +4,7 @@
|
||||
//
|
||||
|
||||
use oci_spec::runtime::Spec;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
|
||||
pub struct CreateOpts {
|
||||
|
@ -15,7 +15,7 @@ use anyhow::anyhow;
|
||||
// - <xxxx> is the device number (0000-ffff; leading zeroes can be omitted,
|
||||
// e.g. 3 instead of 0003).
|
||||
// [1] https://www.ibm.com/docs/en/linuxonibm/pdf/lku4dd04.pdf
|
||||
// [2] https://qemu.readthedocs.io/en/latest/system/s390x/css.html
|
||||
// [2] https://qemu.readthedocs.io/en/master/system/s390x/css.html
|
||||
|
||||
// Maximum subchannel set ID
|
||||
const SUBCHANNEL_SET_MAX: u8 = 3;
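As an aside on the bus ID format described in the comment above, the standalone sketch below (illustrative only; it does not use the crate's own `ccw::Device` type, the function name is hypothetical, and the fixed leading `0` field is assumed from the test devpaths later in this file) parses a CCW bus ID of the form `0.<n>.<xxxx>` into its subchannel set ID and hexadecimal device number, enforcing the 0-3 and 0000-ffff ranges mentioned here.

// Illustrative sketch only: parse a CCW bus ID such as "0.3.00ab" into
// (subchannel set ID, device number) under the constraints described above.
fn parse_ccw_bus_id(id: &str) -> Result<(u8, u16), String> {
    const SUBCHANNEL_SET_MAX: u8 = 3;

    let parts: Vec<&str> = id.split('.').collect();
    if parts.len() != 3 || parts[0] != "0" {
        return Err(format!("invalid CCW bus ID: {}", id));
    }

    let ssid: u8 = parts[1]
        .parse()
        .map_err(|_| format!("invalid subchannel set ID: {}", parts[1]))?;
    if ssid > SUBCHANNEL_SET_MAX {
        return Err(format!("subchannel set ID {} out of range", ssid));
    }

    // The device number is hexadecimal (0000-ffff); leading zeroes may be omitted.
    let devno = u16::from_str_radix(parts[2], 16)
        .map_err(|_| format!("invalid device number: {}", parts[2]))?;

    Ok((ssid, devno))
}

fn main() {
    // e.g. subchannel set 3, device number 0x00ab
    assert_eq!(parse_ccw_bus_id("0.3.00ab"), Ok((3, 0x00ab)));
}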
|
||||
|
@ -184,6 +184,7 @@ pub async fn unseal_file(path: &str) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(feature = "guest-pull")]
|
||||
pub async fn secure_mount(
|
||||
volume_type: &str,
|
||||
options: &std::collections::HashMap<String, String>,
|
||||
|
@ -9,6 +9,7 @@ use self::nvdimm_device_handler::VirtioNvdimmDeviceHandler;
|
||||
use self::scsi_device_handler::ScsiDeviceHandler;
|
||||
use self::vfio_device_handler::{VfioApDeviceHandler, VfioPciDeviceHandler};
|
||||
use crate::pci;
|
||||
use crate::sandbox::PciHostGuestMapping;
|
||||
use crate::sandbox::Sandbox;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use cdi::annotations::parse_annotations;
|
||||
@ -180,6 +181,7 @@ lazy_static! {
|
||||
|
||||
#[instrument]
|
||||
pub async fn add_devices(
|
||||
cid: &String,
|
||||
logger: &Logger,
|
||||
devices: &[Device],
|
||||
spec: &mut Spec,
|
||||
@ -211,8 +213,9 @@ pub async fn add_devices(
|
||||
}
|
||||
|
||||
let mut sb = sandbox.lock().await;
|
||||
let mut host_guest: PciHostGuestMapping = HashMap::new();
|
||||
for (host, guest) in update.pci {
|
||||
if let Some(other_guest) = sb.pcimap.insert(host, guest) {
|
||||
if let Some(other_guest) = host_guest.insert(host, guest) {
|
||||
return Err(anyhow!(
|
||||
"Conflicting guest address for host device {} ({} versus {})",
|
||||
host,
|
||||
@ -221,6 +224,9 @@ pub async fn add_devices(
|
||||
));
|
||||
}
|
||||
}
|
||||
// Save all the host -> guest mappings per container; upon
|
||||
// removal of the container, the mappings will be removed
|
||||
sb.pcimap.insert(cid.clone(), host_guest);
|
||||
}
|
||||
Err(e) => {
|
||||
error!(logger, "failed to add devices, error: {e:?}");
|
||||
@ -238,7 +244,7 @@ pub async fn add_devices(
|
||||
if let Some(process) = spec.process_mut() {
|
||||
let env_vec: &mut Vec<String> =
|
||||
&mut process.env_mut().get_or_insert_with(Vec::new).to_vec();
|
||||
update_env_pci(env_vec, &sandbox.lock().await.pcimap)?
|
||||
update_env_pci(cid, env_vec, &sandbox.lock().await.pcimap)?
|
||||
}
|
||||
update_spec_devices(logger, spec, dev_updates)
|
||||
}
|
||||
@ -391,8 +397,9 @@ pub fn insert_devices_cgroup_rule(
|
||||
// given a map of (host address => guest address)
|
||||
#[instrument]
|
||||
pub fn update_env_pci(
|
||||
cid: &String,
|
||||
env: &mut [String],
|
||||
pcimap: &HashMap<pci::Address, pci::Address>,
|
||||
pcimap: &HashMap<String, PciHostGuestMapping>,
|
||||
) -> Result<()> {
|
||||
// SR-IOV device plugin may add two environment variables for one resource:
|
||||
// - PCIDEVICE_<prefix>_<resource-name>: a list of PCI device ids separated by comma
|
||||
@ -418,7 +425,10 @@ pub fn update_env_pci(
|
||||
for host_addr_str in val.split(',') {
|
||||
let host_addr = pci::Address::from_str(host_addr_str)
|
||||
.with_context(|| format!("Can't parse {} environment variable", name))?;
|
||||
let guest_addr = pcimap
|
||||
let host_guest = pcimap
|
||||
.get(cid)
|
||||
.ok_or_else(|| anyhow!("No PCI mapping found for container {}", cid))?;
|
||||
let guest_addr = host_guest
|
||||
.get(&host_addr)
|
||||
.ok_or_else(|| anyhow!("Unable to translate host PCI address {}", host_addr))?;
|
||||
|
||||
@ -1052,7 +1062,7 @@ mod tests {
|
||||
"NOTAPCIDEVICE_blah=abcd:ef:01.0".to_string(),
|
||||
];
|
||||
|
||||
let pci_fixups = example_map
|
||||
let _pci_fixups = example_map
|
||||
.iter()
|
||||
.map(|(h, g)| {
|
||||
(
|
||||
@ -1062,7 +1072,11 @@ mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let res = update_env_pci(&mut env, &pci_fixups);
|
||||
let cid = "0".to_string();
|
||||
let mut pci_fixups: HashMap<String, HashMap<pci::Address, pci::Address>> = HashMap::new();
|
||||
pci_fixups.insert(cid.clone(), _pci_fixups);
|
||||
|
||||
let res = update_env_pci(&cid, &mut env, &pci_fixups);
|
||||
assert!(res.is_ok(), "error: {}", res.err().unwrap());
|
||||
|
||||
assert_eq!(env[0], "PCIDEVICE_x=0000:01:01.0,0000:01:02.0");
|
||||
|
@ -3,35 +3,23 @@
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
use crate::device::pcipath_to_sysfs;
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::ccw;
|
||||
use crate::linux_abi::*;
|
||||
use crate::pci;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use crate::{device::pcipath_to_sysfs, pci};
|
||||
use anyhow::{anyhow, Result};
|
||||
use regex::Regex;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
pub async fn wait_for_net_interface(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
pcipath: &pci::Path,
|
||||
) -> Result<()> {
|
||||
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
|
||||
let sysfs_rel_path = pcipath_to_sysfs(&root_bus_sysfs, pcipath)?;
|
||||
|
||||
let matcher = NetPciMatcher::new(&sysfs_rel_path);
|
||||
|
||||
fn check_existing(re: Regex) -> Result<bool> {
|
||||
// Check if the interface is already added in case network is cold-plugged
|
||||
// or the uevent loop is started before network is added.
|
||||
// We check for the PCI device in the sysfs directory for network devices.
|
||||
let pattern = format!(
|
||||
r"[./]+{}/[a-z0-9/]*net/[a-z0-9/]*",
|
||||
matcher.devpath.as_str()
|
||||
);
|
||||
let re = Regex::new(&pattern).expect("BUG: Failed to compile regex for NetPciMatcher");
|
||||
|
||||
// We check for the device in the sysfs directory for network devices.
|
||||
for entry in fs::read_dir(SYSFS_NET_PATH)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
@ -41,19 +29,41 @@ pub async fn wait_for_net_interface(
|
||||
.ok_or_else(|| anyhow!("Expected symlink in dir {}", SYSFS_NET_PATH))?;
|
||||
|
||||
if re.is_match(target_path_str) {
|
||||
return Ok(());
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
pub async fn wait_for_pci_net_interface(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
pcipath: &pci::Path,
|
||||
) -> Result<()> {
|
||||
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
|
||||
let sysfs_rel_path = pcipath_to_sysfs(&root_bus_sysfs, pcipath)?;
|
||||
let matcher = NetPciMatcher::new(&sysfs_rel_path);
|
||||
let pattern = format!(
|
||||
r"[./]+{}/[a-z0-9/]*net/[a-z0-9/]*",
|
||||
matcher.devpath.as_str()
|
||||
);
|
||||
let re = Regex::new(&pattern).expect("BUG: Failed to compile regex for NetPciMatcher");
|
||||
if check_existing(re)? {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let _uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
#[derive(Debug)]
|
||||
pub struct NetPciMatcher {
|
||||
devpath: String,
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
impl NetPciMatcher {
|
||||
pub fn new(relpath: &str) -> NetPciMatcher {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
@ -64,6 +74,7 @@ impl NetPciMatcher {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
impl UeventMatcher for NetPciMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.devpath.starts_with(self.devpath.as_str())
|
||||
@ -73,10 +84,53 @@ impl UeventMatcher for NetPciMatcher {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
pub async fn wait_for_ccw_net_interface(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
device: &ccw::Device,
|
||||
) -> Result<()> {
|
||||
let matcher = NetCcwMatcher::new(CCW_ROOT_BUS_PATH, device);
|
||||
if check_existing(matcher.re.clone())? {
|
||||
return Ok(());
|
||||
}
|
||||
let _uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[derive(Debug)]
|
||||
struct NetCcwMatcher {
|
||||
re: Regex,
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
impl NetCcwMatcher {
|
||||
pub fn new(root_bus_path: &str, device: &ccw::Device) -> Self {
|
||||
let re = format!(
|
||||
r"{}/0\.[0-3]\.[0-9a-f]{{1,4}}/{}/virtio[0-9]+/net/",
|
||||
root_bus_path, device
|
||||
);
|
||||
NetCcwMatcher {
|
||||
re: Regex::new(&re).expect("BUG: failed to compile NetCCWMatcher regex"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
impl UeventMatcher for NetCcwMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
self.re.is_match(&uev.devpath)
|
||||
&& uev.subsystem == "net"
|
||||
&& !uev.interface.is_empty()
|
||||
&& uev.action == "add"
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_net_pci_matcher() {
|
||||
@ -111,4 +165,34 @@ mod tests {
|
||||
assert!(!matcher_a.is_match(&uev_c));
|
||||
assert!(!matcher_b.is_match(&uev_c));
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[tokio::test]
|
||||
async fn test_net_ccw_matcher() {
|
||||
let dev_a = ccw::Device::new(0, 1).unwrap();
|
||||
let dev_b = ccw::Device::new(1, 2).unwrap();
|
||||
|
||||
let mut uev_a = crate::uevent::Uevent::default();
|
||||
uev_a.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev_a.subsystem = String::from("net");
|
||||
uev_a.interface = String::from("eth0");
|
||||
uev_a.devpath = format!(
|
||||
"{}/0.0.0001/{}/virtio1/{}/{}",
|
||||
CCW_ROOT_BUS_PATH, dev_a, uev_a.subsystem, uev_a.interface
|
||||
);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
uev_b.devpath = format!(
|
||||
"{}/0.0.0001/{}/virtio1/{}/{}",
|
||||
CCW_ROOT_BUS_PATH, dev_b, uev_b.subsystem, uev_b.interface
|
||||
);
|
||||
|
||||
let matcher_a = NetCcwMatcher::new(CCW_ROOT_BUS_PATH, &dev_a);
|
||||
let matcher_b = NetCcwMatcher::new(CCW_ROOT_BUS_PATH, &dev_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
assert!(!matcher_b.is_match(&uev_a));
|
||||
assert!(!matcher_a.is_match(&uev_b));
|
||||
}
|
||||
}
|
||||
|
src/agent/src/initdata.rs (new file, 191 lines)
@ -0,0 +1,191 @@
|
||||
//! # Initdata Module
|
||||
//!
|
||||
//! If a proper initdata device with initdata exists, this module does the following:
|
||||
//! 1. Parse the initdata block device and extract the config files to [`INITDATA_PATH`].
|
||||
//! 2. Return the initdata and the policy (if any).
|
||||
|
||||
// Copyright (c) 2025 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::{os::unix::fs::FileTypeExt, path::Path};
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use async_compression::tokio::bufread::GzipDecoder;
|
||||
use base64::{engine::general_purpose::STANDARD, Engine};
|
||||
use const_format::concatcp;
|
||||
use serde::Deserialize;
|
||||
use sha2::{Digest, Sha256, Sha384, Sha512};
|
||||
use slog::Logger;
|
||||
use tokio::io::{AsyncReadExt, AsyncSeekExt};
|
||||
|
||||
/// This is the target directory to store the extracted initdata.
|
||||
pub const INITDATA_PATH: &str = "/run/confidential-containers/initdata";
|
||||
|
||||
/// The path of AA's config file
|
||||
pub const AA_CONFIG_PATH: &str = concatcp!(INITDATA_PATH, "/aa.toml");
|
||||
|
||||
/// The path of CDH's config file
|
||||
pub const CDH_CONFIG_PATH: &str = concatcp!(INITDATA_PATH, "/cdh.toml");
|
||||
|
||||
/// Magic number of initdata device
|
||||
pub const INITDATA_MAGIC_NUMBER: &[u8] = b"initdata";
|
||||
|
||||
/// Currently, only initdata version `0.1.0` is defined.
|
||||
const INITDATA_VERSION: &str = "0.1.0";
|
||||
|
||||
/// Initdata defined in
|
||||
/// <https://github.com/confidential-containers/trustee/blob/47d7a2338e0be76308ac19be5c0c172c592780aa/kbs/docs/initdata.md>
|
||||
#[derive(Deserialize)]
|
||||
pub struct Initdata {
|
||||
version: String,
|
||||
algorithm: String,
|
||||
data: DefinedFields,
|
||||
}
|
||||
|
||||
/// Well-defined keys for initdata of kata/CoCo
|
||||
#[derive(Deserialize, Default)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct DefinedFields {
|
||||
#[serde(rename = "aa.toml")]
|
||||
aa_config: Option<String>,
|
||||
#[serde(rename = "cdh.toml")]
|
||||
cdh_config: Option<String>,
|
||||
#[serde(rename = "policy.rego")]
|
||||
policy: Option<String>,
|
||||
}
|
||||
|
||||
async fn detect_initdata_device(logger: &Logger) -> Result<Option<String>> {
|
||||
let dev_dir = Path::new("/dev");
|
||||
let mut read_dir = tokio::fs::read_dir(dev_dir).await?;
|
||||
while let Some(entry) = read_dir.next_entry().await? {
|
||||
let filename = entry.file_name();
|
||||
let filename = filename.to_string_lossy();
|
||||
debug!(logger, "Initdata check device `{filename}`");
|
||||
if !filename.starts_with("vd") {
|
||||
continue;
|
||||
}
|
||||
let path = entry.path();
|
||||
|
||||
debug!(logger, "Initdata find potential device: `{path:?}`");
|
||||
let metadata = std::fs::metadata(path.clone())?;
|
||||
if !metadata.file_type().is_block_device() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut file = tokio::fs::File::open(&path).await?;
|
||||
let mut magic = [0; 8];
|
||||
match file.read_exact(&mut magic).await {
|
||||
Ok(_) => {
|
||||
debug!(
|
||||
logger,
|
||||
"Initdata read device `{filename}` first 8 bytes: {magic:?}"
|
||||
);
|
||||
if magic == INITDATA_MAGIC_NUMBER {
|
||||
let path = path.as_path().to_string_lossy().to_string();
|
||||
debug!(logger, "Found initdata device {path}");
|
||||
return Ok(Some(path));
|
||||
}
|
||||
}
|
||||
Err(e) => debug!(logger, "Initdata read device `{filename}` failed: {e:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
pub async fn read_initdata(device_path: &str) -> Result<Vec<u8>> {
|
||||
let initdata_devfile = tokio::fs::File::open(device_path).await?;
|
||||
let mut buf_reader = tokio::io::BufReader::new(initdata_devfile);
|
||||
// skip the magic number "initdata"
|
||||
buf_reader.seek(std::io::SeekFrom::Start(8)).await?;
|
||||
|
||||
let mut len_buf = [0u8; 8];
|
||||
buf_reader.read_exact(&mut len_buf).await?;
|
||||
let length = u64::from_le_bytes(len_buf) as usize;
|
||||
|
||||
let mut buf = vec![0; length];
|
||||
buf_reader.read_exact(&mut buf).await?;
|
||||
let mut gzip_decoder = GzipDecoder::new(&buf[..]);
|
||||
|
||||
let mut initdata = Vec::new();
|
||||
let _ = gzip_decoder.read_to_end(&mut initdata).await?;
|
||||
Ok(initdata)
|
||||
}
|
||||
|
||||
pub struct InitdataReturnValue {
|
||||
pub digest: Vec<u8>,
|
||||
pub _policy: Option<String>,
|
||||
}
|
||||
|
||||
pub async fn initialize_initdata(logger: &Logger) -> Result<Option<InitdataReturnValue>> {
|
||||
let logger = logger.new(o!("subsystem" => "initdata"));
|
||||
let Some(initdata_device) = detect_initdata_device(&logger).await? else {
|
||||
info!(
|
||||
logger,
|
||||
"Initdata device not found, skip initdata initialization"
|
||||
);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
tokio::fs::create_dir_all(INITDATA_PATH)
|
||||
.await
|
||||
.inspect_err(|e| error!(logger, "Failed to create initdata dir: {e:?}"))?;
|
||||
|
||||
let initdata_content = read_initdata(&initdata_device)
|
||||
.await
|
||||
.inspect_err(|e| error!(logger, "Failed to read initdata: {e:?}"))?;
|
||||
|
||||
let initdata: Initdata =
|
||||
toml::from_slice(&initdata_content).context("parse initdata failed")?;
|
||||
info!(logger, "Initdata version: {}", initdata.version);
|
||||
|
||||
if initdata.version != INITDATA_VERSION {
|
||||
bail!("Unsupported initdata version");
|
||||
}
|
||||
|
||||
let digest = match &initdata.algorithm[..] {
|
||||
"sha256" => Sha256::digest(&initdata_content).to_vec(),
|
||||
"sha384" => Sha384::digest(&initdata_content).to_vec(),
|
||||
"sha512" => Sha512::digest(&initdata_content).to_vec(),
|
||||
others => bail!("Unsupported hash algorithm {others}"),
|
||||
};
|
||||
|
||||
if let Some(config) = initdata.data.aa_config {
|
||||
tokio::fs::write(AA_CONFIG_PATH, config)
|
||||
.await
|
||||
.context("write aa config failed")?;
|
||||
info!(logger, "write AA config from initdata");
|
||||
}
|
||||
|
||||
if let Some(config) = initdata.data.cdh_config {
|
||||
tokio::fs::write(CDH_CONFIG_PATH, config)
|
||||
.await
|
||||
.context("write cdh config failed")?;
|
||||
info!(logger, "write CDH config from initdata");
|
||||
}
|
||||
|
||||
debug!(logger, "Initdata digest: {}", STANDARD.encode(&digest));
|
||||
|
||||
let res = InitdataReturnValue {
|
||||
digest,
|
||||
_policy: initdata.data.policy,
|
||||
};
|
||||
|
||||
Ok(Some(res))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::initdata::read_initdata;
|
||||
|
||||
const INITDATA_IMG_PATH: &str = "testdata/initdata.img";
|
||||
const INITDATA_PLAINTEXT: &[u8] = b"some content";
|
||||
|
||||
#[tokio::test]
|
||||
async fn parse_initdata() {
|
||||
let initdata = read_initdata(INITDATA_IMG_PATH).await.unwrap();
|
||||
assert_eq!(initdata, INITDATA_PLAINTEXT);
|
||||
}
|
||||
}
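For local experimentation with the format handled by `read_initdata` above (the 8-byte `initdata` magic, an 8-byte little-endian payload length, then a gzip-compressed TOML document), the following is a minimal sketch of building such an image. It assumes the `flate2` crate for gzip encoding; the entries under `[data]` mirror the serde renames in this module, and the TOML contents and output path are purely illustrative, not taken from this change.

use std::io::Write;

use flate2::{write::GzEncoder, Compression};

// Build an initdata image matching the layout read by `read_initdata`:
// magic number, little-endian payload length, gzip-compressed TOML.
fn build_initdata_image() -> std::io::Result<Vec<u8>> {
    // Illustrative TOML following the `Initdata` / `DefinedFields` layout.
    let toml = r#"
version = "0.1.0"
algorithm = "sha384"

[data]
"aa.toml" = "example attestation-agent configuration"
"cdh.toml" = "example confidential-data-hub configuration"
"policy.rego" = "package agent_policy"
"#;

    // Gzip-compress the TOML payload.
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(toml.as_bytes())?;
    let compressed = encoder.finish()?;

    // 8-byte magic, 8-byte little-endian length, then the payload.
    let mut image = Vec::new();
    image.extend_from_slice(b"initdata");
    image.extend_from_slice(&(compressed.len() as u64).to_le_bytes());
    image.extend_from_slice(&compressed);
    Ok(image)
}

fn main() -> std::io::Result<()> {
    // Hypothetical output path; point it wherever your test expects the image.
    std::fs::write("initdata.img", build_initdata_image()?)?;
    Ok(())
}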
|
@ -18,10 +18,12 @@ extern crate scopeguard;
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use base64::Engine;
|
||||
use cfg_if::cfg_if;
|
||||
use clap::{AppSettings, Parser};
|
||||
use const_format::{concatcp, formatcp};
|
||||
use initdata::{InitdataReturnValue, AA_CONFIG_PATH, CDH_CONFIG_PATH};
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::reboot::{reboot, RebootMode};
|
||||
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType, VsockAddr};
|
||||
@ -33,7 +35,6 @@ use std::os::unix::fs::{self as unixfs, FileTypeExt};
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
use std::process::exit;
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use tracing::{instrument, span};
|
||||
|
||||
@ -42,6 +43,7 @@ mod config;
|
||||
mod console;
|
||||
mod device;
|
||||
mod features;
|
||||
mod initdata;
|
||||
mod linux_abi;
|
||||
mod metrics;
|
||||
mod mount;
|
||||
@ -419,6 +421,8 @@ async fn start_sandbox(
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
sandbox.lock().await.sender = Some(tx);
|
||||
|
||||
let initdata_return_value = initdata::initialize_initdata(logger).await?;
|
||||
|
||||
let gc_procs = config.guest_components_procs;
|
||||
if !attestation_binaries_available(logger, &gc_procs) {
|
||||
warn!(
|
||||
@ -426,7 +430,21 @@ async fn start_sandbox(
|
||||
"attestation binaries requested for launch not available"
|
||||
);
|
||||
} else {
|
||||
init_attestation_components(logger, config).await?;
|
||||
init_attestation_components(logger, config, &initdata_return_value).await?;
|
||||
}
|
||||
|
||||
// If a policy is given via initdata, use it
|
||||
#[cfg(feature = "agent-policy")]
|
||||
if let Some(initdata_return_value) = initdata_return_value {
|
||||
if let Some(policy) = &initdata_return_value._policy {
|
||||
info!(logger, "using policy from initdata");
|
||||
AGENT_POLICY
|
||||
.lock()
|
||||
.await
|
||||
.set_policy(policy)
|
||||
.await
|
||||
.context("Failed to set policy from initdata")?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut oma = None;
|
||||
@ -472,19 +490,34 @@ fn attestation_binaries_available(logger: &Logger, procs: &GuestComponentsProcs)
|
||||
true
|
||||
}
|
||||
|
||||
async fn launch_guest_component_procs(logger: &Logger, config: &AgentConfig) -> Result<()> {
|
||||
async fn launch_guest_component_procs(
|
||||
logger: &Logger,
|
||||
config: &AgentConfig,
|
||||
initdata_return_value: &Option<InitdataReturnValue>,
|
||||
) -> Result<()> {
|
||||
if config.guest_components_procs == GuestComponentsProcs::None {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
debug!(logger, "spawning attestation-agent process {}", AA_PATH);
|
||||
let mut aa_args = vec!["--attestation_sock", AA_ATTESTATION_URI];
|
||||
let initdata_parameter;
|
||||
if let Some(initdata_return_value) = initdata_return_value {
|
||||
initdata_parameter =
|
||||
base64::engine::general_purpose::STANDARD.encode(&initdata_return_value.digest);
|
||||
aa_args.push("--initdata");
|
||||
aa_args.push(&initdata_parameter);
|
||||
}
|
||||
|
||||
launch_process(
|
||||
logger,
|
||||
AA_PATH,
|
||||
&vec!["--attestation_sock", AA_ATTESTATION_URI],
|
||||
aa_args,
|
||||
Some(AA_CONFIG_PATH),
|
||||
AA_ATTESTATION_SOCKET,
|
||||
DEFAULT_LAUNCH_PROCESS_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", AA_PATH, e))?;
|
||||
|
||||
// skip launch of confidential-data-hub and api-server-rest
|
||||
@ -500,10 +533,12 @@ async fn launch_guest_component_procs(logger: &Logger, config: &AgentConfig) ->
|
||||
launch_process(
|
||||
logger,
|
||||
CDH_PATH,
|
||||
&vec![],
|
||||
vec![],
|
||||
Some(CDH_CONFIG_PATH),
|
||||
CDH_SOCKET,
|
||||
DEFAULT_LAUNCH_PROCESS_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", CDH_PATH, e))?;
|
||||
|
||||
// skip launch of api-server-rest
|
||||
@ -519,10 +554,12 @@ async fn launch_guest_component_procs(logger: &Logger, config: &AgentConfig) ->
|
||||
launch_process(
|
||||
logger,
|
||||
API_SERVER_PATH,
|
||||
&vec!["--features", &features.to_string()],
|
||||
vec!["--features", &features.to_string()],
|
||||
None,
|
||||
"",
|
||||
0,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", API_SERVER_PATH, e))?;
|
||||
|
||||
Ok(())
|
||||
@ -532,8 +569,12 @@ async fn launch_guest_component_procs(logger: &Logger, config: &AgentConfig) ->
|
||||
// and the corresponding procs are enabled in the agent configuration. The process will be
|
||||
// launched in the background and the function will return immediately.
|
||||
// If the CDH is started, a CDH client will be instantiated and returned.
|
||||
async fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<()> {
|
||||
launch_guest_component_procs(logger, config).await?;
|
||||
async fn init_attestation_components(
|
||||
logger: &Logger,
|
||||
config: &AgentConfig,
|
||||
initdata_return_value: &Option<InitdataReturnValue>,
|
||||
) -> Result<()> {
|
||||
launch_guest_component_procs(logger, config, initdata_return_value).await?;
|
||||
|
||||
// If a CDH socket exists, initialize the CDH client and enable ocicrypt
|
||||
match tokio::fs::metadata(CDH_SOCKET).await {
|
||||
@ -555,11 +596,11 @@ async fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> R
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn wait_for_path_to_exist(logger: &Logger, path: &str, timeout_secs: i32) -> Result<()> {
|
||||
async fn wait_for_path_to_exist(logger: &Logger, path: &str, timeout_secs: i32) -> Result<()> {
|
||||
let p = Path::new(path);
|
||||
let mut attempts = 0;
|
||||
loop {
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
if p.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
@ -576,22 +617,32 @@ fn wait_for_path_to_exist(logger: &Logger, path: &str, timeout_secs: i32) -> Res
|
||||
Err(anyhow!("wait for {} to exist timeout.", path))
|
||||
}
|
||||
|
||||
fn launch_process(
|
||||
async fn launch_process(
|
||||
logger: &Logger,
|
||||
path: &str,
|
||||
args: &Vec<&str>,
|
||||
mut args: Vec<&str>,
|
||||
config: Option<&str>,
|
||||
unix_socket_path: &str,
|
||||
timeout_secs: i32,
|
||||
) -> Result<()> {
|
||||
if !Path::new(path).exists() {
|
||||
return Err(anyhow!("path {} does not exist.", path));
|
||||
bail!("path {} does not exist.", path);
|
||||
}
|
||||
|
||||
if let Some(config_path) = config {
|
||||
if Path::new(config_path).exists() {
|
||||
args.push("-c");
|
||||
args.push(config_path);
|
||||
}
|
||||
}
|
||||
|
||||
if !unix_socket_path.is_empty() && Path::new(unix_socket_path).exists() {
|
||||
fs::remove_file(unix_socket_path)?;
|
||||
tokio::fs::remove_file(unix_socket_path).await?;
|
||||
}
|
||||
Command::new(path).args(args).spawn()?;
|
||||
|
||||
tokio::process::Command::new(path).args(args).spawn()?;
|
||||
if !unix_socket_path.is_empty() && timeout_secs > 0 {
|
||||
wait_for_path_to_exist(logger, unix_socket_path, timeout_secs)?;
|
||||
wait_for_path_to_exist(logger, unix_socket_path, timeout_secs).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -6,11 +6,21 @@
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use futures::{future, StreamExt, TryStreamExt};
|
||||
use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network};
|
||||
use netlink_packet_route::address::{AddressAttribute, AddressMessage};
|
||||
use netlink_packet_route::link::{LinkAttribute, LinkMessage};
|
||||
use netlink_packet_route::neighbour::{self, NeighbourFlag};
|
||||
use netlink_packet_route::route::{RouteFlag, RouteHeader, RouteProtocol, RouteScope, RouteType};
|
||||
use netlink_packet_route::{
|
||||
neighbour::{NeighbourAddress, NeighbourAttribute, NeighbourState},
|
||||
route::{RouteAddress, RouteAttribute, RouteMessage},
|
||||
AddressFamily,
|
||||
};
|
||||
use nix::errno::Errno;
|
||||
use protocols::types::{ARPNeighbor, IPAddress, IPFamily, Interface, Route};
|
||||
use rtnetlink::{new_connection, packet, IpVersion};
|
||||
use rtnetlink::{new_connection, IpVersion};
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
use std::ops::Deref;
|
||||
use std::str::{self, FromStr};
|
||||
@ -35,6 +45,36 @@ impl fmt::Display for LinkFilter<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
const ALL_RULE_FLAGS: [NeighbourFlag; 8] = [
|
||||
NeighbourFlag::Use,
|
||||
NeighbourFlag::Own,
|
||||
NeighbourFlag::Controller,
|
||||
NeighbourFlag::Proxy,
|
||||
NeighbourFlag::ExtLearned,
|
||||
NeighbourFlag::Offloaded,
|
||||
NeighbourFlag::Sticky,
|
||||
NeighbourFlag::Router,
|
||||
];
|
||||
|
||||
const ALL_ROUTE_FLAGS: [RouteFlag; 16] = [
|
||||
RouteFlag::Dead,
|
||||
RouteFlag::Pervasive,
|
||||
RouteFlag::Onlink,
|
||||
RouteFlag::Offload,
|
||||
RouteFlag::Linkdown,
|
||||
RouteFlag::Unresolved,
|
||||
RouteFlag::Trap,
|
||||
RouteFlag::Notify,
|
||||
RouteFlag::Cloned,
|
||||
RouteFlag::Equalize,
|
||||
RouteFlag::Prefix,
|
||||
RouteFlag::LookupTable,
|
||||
RouteFlag::FibMatch,
|
||||
RouteFlag::RtOffload,
|
||||
RouteFlag::RtTrap,
|
||||
RouteFlag::OffloadFailed,
|
||||
];
|
||||
|
||||
/// A filter to query addresses.
|
||||
pub enum AddressFilter {
|
||||
/// Return addresses that belong to the given interface.
|
||||
@ -74,18 +114,30 @@ impl Handle {
|
||||
self.enable_link(link.index(), false).await?;
|
||||
}
|
||||
|
||||
// Delete all addresses associated with the link
|
||||
let addresses = self
|
||||
.list_addresses(AddressFilter::LinkIndex(link.index()))
|
||||
.await?;
|
||||
self.delete_addresses(addresses).await?;
|
||||
// Get whether the network stack has ipv6 enabled or disabled.
|
||||
let supports_ipv6_all = fs::read_to_string("/proc/sys/net/ipv6/conf/all/disable_ipv6")
|
||||
.map(|s| s.trim() == "0")
|
||||
.unwrap_or(false);
|
||||
let supports_ipv6_default =
|
||||
fs::read_to_string("/proc/sys/net/ipv6/conf/default/disable_ipv6")
|
||||
.map(|s| s.trim() == "0")
|
||||
.unwrap_or(false);
|
||||
let supports_ipv6 = supports_ipv6_default || supports_ipv6_all;
|
||||
|
||||
// Add new ip addresses from request
|
||||
for ip_address in &iface.IPAddresses {
|
||||
let ip = IpAddr::from_str(ip_address.address())?;
|
||||
let mask = ip_address.mask().parse::<u8>()?;
|
||||
|
||||
self.add_addresses(link.index(), std::iter::once(IpNetwork::new(ip, mask)?))
|
||||
let net = IpNetwork::new(ip, mask)?;
|
||||
if !net.is_ipv4() && !supports_ipv6 {
|
||||
// If we're dealing with an ipv6 address, but the stack does not
|
||||
// support ipv6, skip adding it, otherwise it will lead to an
|
||||
// error at the "CreatePodSandbox" time.
|
||||
continue;
|
||||
}
|
||||
|
||||
self.add_addresses(link.index(), std::iter::once(net))
|
||||
.await?;
|
||||
}
|
||||
|
||||
@ -125,6 +177,7 @@ impl Handle {
|
||||
}
|
||||
|
||||
// Update link
|
||||
let link = self.find_link(LinkFilter::Address(&iface.hwAddr)).await?;
|
||||
let mut request = self.handle.link().set(link.index());
|
||||
request.message_mut().header = link.header.clone();
|
||||
|
||||
@ -172,26 +225,6 @@ impl Handle {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn update_routes<I>(&mut self, list: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = Route>,
|
||||
{
|
||||
let old_routes = self
|
||||
.query_routes(None)
|
||||
.await
|
||||
.with_context(|| "Failed to query old routes")?;
|
||||
|
||||
self.delete_routes(old_routes)
|
||||
.await
|
||||
.with_context(|| "Failed to delete old routes")?;
|
||||
|
||||
self.add_routes(list)
|
||||
.await
|
||||
.with_context(|| "Failed to add new routes")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retrieve available network interfaces.
|
||||
pub async fn list_interfaces(&self) -> Result<Vec<Interface>> {
|
||||
let mut list = Vec::new();
|
||||
@ -225,7 +258,7 @@ impl Handle {
|
||||
let request = self.handle.link().get();
|
||||
|
||||
let filtered = match filter {
|
||||
LinkFilter::Name(name) => request.set_name_filter(name.to_owned()),
|
||||
LinkFilter::Name(name) => request.match_name(name.to_owned()),
|
||||
LinkFilter::Index(index) => request.match_index(index),
|
||||
_ => request, // Post filters
|
||||
};
|
||||
@ -233,7 +266,7 @@ impl Handle {
|
||||
let mut stream = filtered.execute();
|
||||
|
||||
let next = if let LinkFilter::Address(addr) = filter {
|
||||
use packet::link::nlas::Nla;
|
||||
use LinkAttribute as Nla;
|
||||
|
||||
let mac_addr = parse_mac_address(addr)
|
||||
.with_context(|| format!("Failed to parse MAC address: {}", addr))?;
|
||||
@ -242,7 +275,7 @@ impl Handle {
|
||||
// we may have to dump link list and then find the target link.
|
||||
stream
|
||||
.try_filter(|f| {
|
||||
let result = f.nlas.iter().any(|n| match n {
|
||||
let result = f.attributes.iter().any(|n| match n {
|
||||
Nla::Address(data) => data.eq(&mac_addr),
|
||||
_ => false,
|
||||
});
|
||||
@ -278,10 +311,7 @@ impl Handle {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn query_routes(
|
||||
&self,
|
||||
ip_version: Option<IpVersion>,
|
||||
) -> Result<Vec<packet::RouteMessage>> {
|
||||
async fn query_routes(&self, ip_version: Option<IpVersion>) -> Result<Vec<RouteMessage>> {
|
||||
let list = if let Some(ip_version) = ip_version {
|
||||
self.handle
|
||||
.route()
|
||||
@ -321,36 +351,46 @@ impl Handle {
|
||||
|
||||
for msg in self.query_routes(None).await? {
|
||||
// Ignore non-main tables
|
||||
if msg.header.table != packet::constants::RT_TABLE_MAIN {
|
||||
if msg.header.table != RouteHeader::RT_TABLE_MAIN {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut route = Route {
|
||||
scope: msg.header.scope as _,
|
||||
scope: u8::from(msg.header.scope) as u32,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
if let Some((ip, mask)) = msg.destination_prefix() {
|
||||
route.dest = format!("{}/{}", ip, mask);
|
||||
}
|
||||
|
||||
if let Some((ip, mask)) = msg.source_prefix() {
|
||||
route.source = format!("{}/{}", ip, mask);
|
||||
}
|
||||
|
||||
if let Some(addr) = msg.gateway() {
|
||||
route.gateway = addr.to_string();
|
||||
|
||||
// For gateway, destination is 0.0.0.0
|
||||
route.dest = if addr.is_ipv4() {
|
||||
String::from("0.0.0.0")
|
||||
} else {
|
||||
String::from("::1")
|
||||
for attribute in &msg.attributes {
|
||||
if let RouteAttribute::Destination(dest) = attribute {
|
||||
if let Ok(dest) = parse_route_addr(dest) {
|
||||
route.dest = format!("{}/{}", dest, msg.header.destination_prefix_length);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(index) = msg.output_interface() {
|
||||
route.device = self.find_link(LinkFilter::Index(index)).await?.name();
|
||||
if let RouteAttribute::Source(src) = attribute {
|
||||
if let Ok(src) = parse_route_addr(src) {
|
||||
route.source = format!("{}/{}", src, msg.header.source_prefix_length)
|
||||
}
|
||||
}
|
||||
|
||||
if let RouteAttribute::Gateway(g) = attribute {
|
||||
if let Ok(addr) = parse_route_addr(g) {
|
||||
// For gateway, destination is 0.0.0.0
|
||||
if addr.is_ipv4() {
|
||||
route.dest = String::from("0.0.0.0");
|
||||
} else {
|
||||
route.dest = String::from("::1");
|
||||
}
|
||||
}
|
||||
|
||||
route.gateway = parse_route_addr(g)
|
||||
.map(|g| g.to_string())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
|
||||
if let RouteAttribute::Oif(index) = attribute {
|
||||
route.device = self.find_link(LinkFilter::Index(*index)).await?.name();
|
||||
}
|
||||
}
|
||||
|
||||
if !route.dest.is_empty() {
|
||||
@ -361,10 +401,11 @@ impl Handle {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Adds a list of routes from iterable object `I`.
|
||||
/// Add a list of routes from iterable object `I`.
|
||||
/// If a route already exists, replace it with the latest one.
|
||||
/// It can accept both a collection of routes or a single item (via `iter::once()`).
|
||||
/// It'll also take care of proper order when adding routes (gateways first, everything else after).
|
||||
async fn add_routes<I>(&mut self, list: I) -> Result<()>
|
||||
pub async fn update_routes<I>(&mut self, list: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = Route>,
|
||||
{
|
||||
@ -377,24 +418,41 @@ impl Handle {
|
||||
for route in list {
|
||||
let link = self.find_link(LinkFilter::Name(&route.device)).await?;
|
||||
|
||||
const MAIN_TABLE: u8 = packet::constants::RT_TABLE_MAIN;
|
||||
const UNICAST: u8 = packet::constants::RTN_UNICAST;
|
||||
const BOOT_PROT: u8 = packet::constants::RTPROT_BOOT;
|
||||
const MAIN_TABLE: u32 = libc::RT_TABLE_MAIN as u32;
|
||||
let uni_cast: RouteType = RouteType::from(libc::RTN_UNICAST);
|
||||
let boot_prot: RouteProtocol = RouteProtocol::from(libc::RTPROT_BOOT);
|
||||
|
||||
let scope = route.scope as u8;
|
||||
let scope = RouteScope::from(route.scope as u8);
|
||||
|
||||
use packet::nlas::route::Nla;
|
||||
use RouteAttribute as Nla;
|
||||
|
||||
// Build a common indeterminate ip request
|
||||
let request = self
|
||||
let mut request = self
|
||||
.handle
|
||||
.route()
|
||||
.add()
|
||||
.table(MAIN_TABLE)
|
||||
.kind(UNICAST)
|
||||
.protocol(BOOT_PROT)
|
||||
.table_id(MAIN_TABLE)
|
||||
.kind(uni_cast)
|
||||
.protocol(boot_prot)
|
||||
.scope(scope);
|
||||
|
||||
let message = request.message_mut();
|
||||
|
||||
// calculate the Flag vec from the u32 flags
|
||||
let mut got: u32 = 0;
|
||||
let mut flags = Vec::new();
|
||||
for flag in ALL_ROUTE_FLAGS {
|
||||
if (route.flags & (u32::from(flag))) > 0 {
|
||||
flags.push(flag);
|
||||
got += u32::from(flag);
|
||||
}
|
||||
}
|
||||
if got != route.flags {
|
||||
flags.push(RouteFlag::Other(route.flags - got));
|
||||
}
|
||||
|
||||
message.header.flags = flags;
|
||||
|
||||
// `rtnetlink` offers separate request builders for different IP versions (IPv4 and IPv6).
|
||||
// This if branch is a bit clumsy because both arms do almost the same thing.
|
||||
if route.family() == IPFamily::v6 {
|
||||
@ -408,7 +466,8 @@ impl Handle {
|
||||
let mut request = request
|
||||
.v6()
|
||||
.destination_prefix(dest_addr.ip(), dest_addr.prefix())
|
||||
.output_interface(link.index());
|
||||
.output_interface(link.index())
|
||||
.replace();
|
||||
|
||||
if !route.source.is_empty() {
|
||||
let network = Ipv6Network::from_str(&route.source)?;
|
||||
@ -417,8 +476,8 @@ impl Handle {
|
||||
} else {
|
||||
request
|
||||
.message_mut()
|
||||
.nlas
|
||||
.push(Nla::PrefSource(network.ip().octets().to_vec()));
|
||||
.attributes
|
||||
.push(Nla::PrefSource(RouteAddress::from(network.ip())));
|
||||
}
|
||||
}
|
||||
|
||||
@ -428,14 +487,16 @@ impl Handle {
|
||||
}
|
||||
|
||||
if let Err(rtnetlink::Error::NetlinkError(message)) = request.execute().await {
|
||||
if Errno::from_i32(message.code.abs()) != Errno::EEXIST {
|
||||
return Err(anyhow!(
|
||||
"Failed to add IP v6 route (src: {}, dst: {}, gtw: {},Err: {})",
|
||||
route.source(),
|
||||
route.dest(),
|
||||
route.gateway(),
|
||||
message
|
||||
));
|
||||
if let Some(code) = message.code {
|
||||
if Errno::from_i32(code.get()) != Errno::EEXIST {
|
||||
return Err(anyhow!(
|
||||
"Failed to add IP v6 route (src: {}, dst: {}, gtw: {},Err: {})",
|
||||
route.source(),
|
||||
route.dest(),
|
||||
route.gateway(),
|
||||
message
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -449,7 +510,8 @@ impl Handle {
|
||||
let mut request = request
|
||||
.v4()
|
||||
.destination_prefix(dest_addr.ip(), dest_addr.prefix())
|
||||
.output_interface(link.index());
|
||||
.output_interface(link.index())
|
||||
.replace();
|
||||
|
||||
if !route.source.is_empty() {
|
||||
let network = Ipv4Network::from_str(&route.source)?;
|
||||
@ -458,8 +520,8 @@ impl Handle {
|
||||
} else {
|
||||
request
|
||||
.message_mut()
|
||||
.nlas
|
||||
.push(Nla::PrefSource(network.ip().octets().to_vec()));
|
||||
.attributes
|
||||
.push(RouteAttribute::PrefSource(RouteAddress::from(network.ip())));
|
||||
}
|
||||
}
|
||||
|
||||
@ -469,14 +531,16 @@ impl Handle {
|
||||
}
|
||||
|
||||
if let Err(rtnetlink::Error::NetlinkError(message)) = request.execute().await {
|
||||
if Errno::from_i32(message.code.abs()) != Errno::EEXIST {
|
||||
return Err(anyhow!(
|
||||
"Failed to add IP v4 route (src: {}, dst: {}, gtw: {},Err: {})",
|
||||
route.source(),
|
||||
route.dest(),
|
||||
route.gateway(),
|
||||
message
|
||||
));
|
||||
if let Some(code) = message.code {
|
||||
if Errno::from_i32(code.get()) != Errno::EEXIST {
|
||||
return Err(anyhow!(
|
||||
"Failed to add IP v4 route (src: {}, dst: {}, gtw: {},Err: {})",
|
||||
route.source(),
|
||||
route.dest(),
|
||||
route.gateway(),
|
||||
message
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -485,34 +549,6 @@ impl Handle {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_routes<I>(&mut self, routes: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = packet::RouteMessage>,
|
||||
{
|
||||
for route in routes.into_iter() {
|
||||
if route.header.protocol == packet::constants::RTPROT_KERNEL {
|
||||
continue;
|
||||
}
|
||||
|
||||
let index = if let Some(index) = route.output_interface() {
|
||||
index
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let link = self.find_link(LinkFilter::Index(index)).await?;
|
||||
|
||||
let name = link.name();
|
||||
if name.contains("lo") || name.contains("::1") {
|
||||
continue;
|
||||
}
|
||||
|
||||
self.handle.route().del(route).execute().await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_addresses<F>(&self, filter: F) -> Result<Vec<Address>>
|
||||
where
|
||||
F: Into<Option<AddressFilter>>,
|
||||
@ -534,6 +570,8 @@ impl Handle {
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
// Add the addresses to the specified interface; if an address already exists,
|
||||
// replace it with the latest one.
|
||||
async fn add_addresses<I>(&mut self, index: u32, list: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = IpNetwork>,
|
||||
@ -542,6 +580,7 @@ impl Handle {
|
||||
self.handle
|
||||
.address()
|
||||
.add(index, net.ip(), net.prefix())
|
||||
.replace()
|
||||
.execute()
|
||||
.await
|
||||
.map_err(|err| anyhow!("Failed to add address {}: {:?}", net.ip(), err))?;
|
||||
@ -550,17 +589,6 @@ impl Handle {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_addresses<I>(&mut self, list: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = Address>,
|
||||
{
|
||||
for addr in list.into_iter() {
|
||||
self.handle.address().del(addr.0).execute().await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn add_arp_neighbors<I>(&mut self, list: I) -> Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = ARPNeighbor>,
|
||||
@ -592,52 +620,57 @@ impl Handle {
|
||||
.map_err(|e| anyhow!("Failed to parse IP {}: {:?}", ip_address, e))?;
|
||||
|
||||
// Import rtnetlink objects that make sense only for this function
|
||||
use packet::constants::{
|
||||
NDA_UNSPEC, NLM_F_ACK, NLM_F_CREATE, NLM_F_REPLACE, NLM_F_REQUEST,
|
||||
};
|
||||
use packet::neighbour::{NeighbourHeader, NeighbourMessage};
|
||||
use packet::nlas::neighbour::Nla;
|
||||
use packet::{NetlinkMessage, NetlinkPayload, RtnlMessage};
|
||||
use libc::{NDA_UNSPEC, NLM_F_ACK, NLM_F_CREATE, NLM_F_REPLACE, NLM_F_REQUEST};
|
||||
use neighbour::{NeighbourHeader, NeighbourMessage};
|
||||
use netlink_packet_core::{NetlinkMessage, NetlinkPayload};
|
||||
use netlink_packet_route::RouteNetlinkMessage as RtnlMessage;
|
||||
use rtnetlink::Error;
|
||||
|
||||
const IFA_F_PERMANENT: u16 = 0x80; // See https://github.com/little-dude/netlink/blob/0185b2952505e271805902bf175fee6ea86c42b8/netlink-packet-route/src/rtnl/constants.rs#L770
|
||||
let state = if neigh.state != 0 {
|
||||
neigh.state as u16
|
||||
} else {
|
||||
IFA_F_PERMANENT
|
||||
};
|
||||
|
||||
let link = self.find_link(LinkFilter::Name(&neigh.device)).await?;
|
||||
|
||||
let message = NeighbourMessage {
|
||||
header: NeighbourHeader {
|
||||
family: match ip {
|
||||
IpAddr::V4(_) => packet::AF_INET,
|
||||
IpAddr::V6(_) => packet::AF_INET6,
|
||||
} as u8,
|
||||
ifindex: link.index(),
|
||||
state: if neigh.state != 0 {
|
||||
neigh.state as u16
|
||||
} else {
|
||||
IFA_F_PERMANENT
|
||||
},
|
||||
flags: neigh.flags as u8,
|
||||
ntype: NDA_UNSPEC as u8,
|
||||
},
|
||||
nlas: {
|
||||
let mut nlas = vec![Nla::Destination(match ip {
|
||||
IpAddr::V4(v4) => v4.octets().to_vec(),
|
||||
IpAddr::V6(v6) => v6.octets().to_vec(),
|
||||
})];
|
||||
let mut flags = Vec::new();
|
||||
for flag in ALL_RULE_FLAGS {
|
||||
if (neigh.flags as u8 & (u8::from(flag))) > 0 {
|
||||
flags.push(flag);
|
||||
}
|
||||
}
|
||||
|
||||
if !neigh.lladdr.is_empty() {
|
||||
nlas.push(Nla::LinkLocalAddress(
|
||||
parse_mac_address(&neigh.lladdr)?.to_vec(),
|
||||
));
|
||||
}
|
||||
let mut message = NeighbourMessage::default();
|
||||
|
||||
nlas
|
||||
message.header = NeighbourHeader {
|
||||
family: match ip {
|
||||
IpAddr::V4(_) => AddressFamily::Inet,
|
||||
IpAddr::V6(_) => AddressFamily::Inet6,
|
||||
},
|
||||
ifindex: link.index(),
|
||||
state: NeighbourState::from(state),
|
||||
flags,
|
||||
kind: RouteType::from(NDA_UNSPEC as u8),
|
||||
};
|
||||
|
||||
let mut nlas = vec![NeighbourAttribute::Destination(match ip {
|
||||
IpAddr::V4(ipv4_addr) => NeighbourAddress::from(ipv4_addr),
|
||||
IpAddr::V6(ipv6_addr) => NeighbourAddress::from(ipv6_addr),
|
||||
})];
|
||||
|
||||
if !neigh.lladdr.is_empty() {
|
||||
nlas.push(NeighbourAttribute::LinkLocalAddress(
|
||||
parse_mac_address(&neigh.lladdr)?.to_vec(),
|
||||
));
|
||||
}
|
||||
|
||||
message.attributes = nlas;
|
||||
|
||||
// Send request and ACK
|
||||
let mut req = NetlinkMessage::from(RtnlMessage::NewNeighbour(message));
|
||||
req.header.flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_REPLACE;
|
||||
req.header.flags = (NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_REPLACE) as u16;
|
||||
|
||||
let mut response = self.handle.request(req)?;
|
||||
while let Some(message) = response.next().await {
|
||||
@ -700,13 +733,13 @@ fn parse_mac_address(addr: &str) -> Result<[u8; 6]> {
|
||||
}
|
||||
|
||||
/// Wraps external type with the local one, so we can implement various extensions and type conversions.
|
||||
struct Link(packet::LinkMessage);
|
||||
struct Link(LinkMessage);
|
||||
|
||||
impl Link {
|
||||
/// If name.
|
||||
fn name(&self) -> String {
|
||||
use packet::nlas::link::Nla;
|
||||
self.nlas
|
||||
use LinkAttribute as Nla;
|
||||
self.attributes
|
||||
.iter()
|
||||
.find_map(|n| {
|
||||
if let Nla::IfName(name) = n {
|
||||
@ -720,8 +753,8 @@ impl Link {
|
||||
|
||||
/// Extract Mac address.
|
||||
fn address(&self) -> String {
|
||||
use packet::nlas::link::Nla;
|
||||
self.nlas
|
||||
use LinkAttribute as Nla;
|
||||
self.attributes
|
||||
.iter()
|
||||
.find_map(|n| {
|
||||
if let Nla::Address(data) = n {
|
||||
@ -735,7 +768,12 @@ impl Link {
|
||||
|
||||
/// Returns whether the link is UP
|
||||
fn is_up(&self) -> bool {
|
||||
self.header.flags & packet::rtnl::constants::IFF_UP > 0
|
||||
let mut flags: u32 = 0;
|
||||
for flag in &self.header.flags {
|
||||
flags += u32::from(*flag);
|
||||
}
|
||||
|
||||
flags as i32 & libc::IFF_UP > 0
|
||||
}

fn index(&self) -> u32 {
@@ -743,8 +781,8 @@ impl Link {
}

fn mtu(&self) -> Option<u64> {
use packet::nlas::link::Nla;
self.nlas.iter().find_map(|n| {
use LinkAttribute as Nla;
self.attributes.iter().find_map(|n| {
if let Nla::Mtu(mtu) = n {
Some(*mtu as u64)
} else {
@@ -754,21 +792,21 @@ impl Link {
}
}

impl From<packet::LinkMessage> for Link {
fn from(msg: packet::LinkMessage) -> Self {
impl From<LinkMessage> for Link {
fn from(msg: LinkMessage) -> Self {
Link(msg)
}
}

impl Deref for Link {
type Target = packet::LinkMessage;
type Target = LinkMessage;

fn deref(&self) -> &Self::Target {
&self.0
}
}

struct Address(packet::AddressMessage);
struct Address(AddressMessage);

impl TryFrom<Address> for IPAddress {
type Error = anyhow::Error;
@@ -798,7 +836,7 @@ impl TryFrom<Address> for IPAddress {

impl Address {
fn is_ipv6(&self) -> bool {
self.0.header.family == packet::constants::AF_INET6 as u8
u8::from(self.0.header.family) == libc::AF_INET6 as u8
}

#[allow(dead_code)]
@@ -807,13 +845,13 @@ impl Address {
}

fn address(&self) -> String {
use packet::nlas::address::Nla;
use AddressAttribute as Nla;
self.0
.nlas
.attributes
.iter()
.find_map(|n| {
if let Nla::Address(data) = n {
format_address(data).ok()
Some(data.to_string())
} else {
None
}
@@ -822,13 +860,13 @@ impl Address {
}

fn local(&self) -> String {
use packet::nlas::address::Nla;
use AddressAttribute as Nla;
self.0
.nlas
.attributes
.iter()
.find_map(|n| {
if let Nla::Local(data) = n {
format_address(data).ok()
Some(data.to_string())
} else {
None
}
@@ -837,10 +875,21 @@ impl Address {
}
}

fn parse_route_addr(ra: &RouteAddress) -> Result<IpAddr> {
let ipaddr = match ra {
RouteAddress::Inet6(ipv6_addr) => ipv6_addr.to_canonical(),
RouteAddress::Inet(ipv4_addr) => IpAddr::from(*ipv4_addr),
_ => return Err(anyhow!("got invalid route address")),
};

Ok(ipaddr)
}
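parse_route_addr() above canonicalizes IPv6 route addresses, so an IPv4-mapped IPv6 address comes back as the plain IPv4 address it embeds. A small std-only illustration of what that call does (needs a recent Rust toolchain with Ipv6Addr::to_canonical stabilized):

    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

    fn main() {
        // "::ffff:192.0.2.1" is an IPv4-mapped IPv6 address; canonicalizing it
        // yields the embedded IPv4 address.
        let mapped: Ipv6Addr = "::ffff:192.0.2.1".parse().unwrap();
        assert_eq!(mapped.to_canonical(), IpAddr::V4(Ipv4Addr::new(192, 0, 2, 1)));
    }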

#[cfg(test)]
mod tests {
use super::*;
use rtnetlink::packet;
use netlink_packet_route::address::AddressHeader;
use netlink_packet_route::link::LinkHeader;
use std::iter;
use std::process::Command;
use test_utils::skip_if_not_root;
@@ -853,7 +902,7 @@ mod tests {
.await
.expect("Loopback not found");

assert_ne!(message.header, packet::LinkHeader::default());
assert_ne!(message.header, LinkHeader::default());
assert_eq!(message.name(), "lo");
}

@@ -928,7 +977,7 @@ mod tests {

assert_ne!(list.len(), 0);
for addr in &list {
assert_ne!(addr.0.header, packet::AddressHeader::default());
assert_ne!(addr.0.header, AddressHeader::default());
}
}

@@ -952,7 +1001,7 @@ mod tests {
}

#[tokio::test]
async fn add_delete_addresses() {
async fn add_update_addresses() {
skip_if_not_root!();

let list = vec![
@@ -981,9 +1030,9 @@ mod tests {

assert!(result.is_some());

// Delete it
// Update it
handle
.delete_addresses(iter::once(result.unwrap()))
.add_addresses(lo.index(), iter::once(network))
.await
.expect("Failed to delete address");
}

@@ -28,9 +28,9 @@ use oci::{Hooks, LinuxNamespace, Spec};
use oci_spec::runtime as oci;
use protobuf::MessageField;
use protocols::agent::{
AddSwapRequest, AgentDetails, CopyFileRequest, GetIPTablesRequest, GetIPTablesResponse,
GuestDetailsResponse, Interfaces, Metrics, OOMEvent, ReadStreamResponse, Routes,
SetIPTablesRequest, SetIPTablesResponse, StatsContainerResponse, VolumeStatsRequest,
AddSwapPathRequest, AddSwapRequest, AgentDetails, CopyFileRequest, GetIPTablesRequest,
GetIPTablesResponse, GuestDetailsResponse, Interfaces, Metrics, OOMEvent, ReadStreamResponse,
Routes, SetIPTablesRequest, SetIPTablesResponse, StatsContainerResponse, VolumeStatsRequest,
WaitProcessResponse, WriteStreamResponse,
};
use protocols::csi::{
@@ -55,11 +55,17 @@ use nix::sys::{stat, statfs};
use nix::unistd::{self, Pid};
use rustjail::process::ProcessOperations;

#[cfg(target_arch = "s390x")]
use crate::ccw;
use crate::cdh;
use crate::device::block_device_handler::get_virtio_blk_pci_device_name;
use crate::device::network_device_handler::wait_for_net_interface;
#[cfg(target_arch = "s390x")]
use crate::device::network_device_handler::wait_for_ccw_net_interface;
#[cfg(not(target_arch = "s390x"))]
use crate::device::network_device_handler::wait_for_pci_net_interface;
use crate::device::{add_devices, handle_cdi_devices, update_env_pci};
use crate::features::get_build_features;
#[cfg(feature = "guest-pull")]
use crate::image::KATA_IMAGE_WORK_DIR;
use crate::linux_abi::*;
use crate::metrics::get_metrics;
@@ -106,6 +112,7 @@ use kata_types::k8s;

pub const CONTAINER_BASE: &str = "/run/kata-containers";
const MODPROBE_PATH: &str = "/sbin/modprobe";
#[cfg(feature = "guest-pull")]
const TRUSTED_IMAGE_STORAGE_DEVICE: &str = "/dev/trusted_store";
/// the iptables seriers binaries could appear either in /sbin
/// or /usr/sbin, we need to check both of them
@@ -223,7 +230,7 @@ impl AgentService {
// updates the devices listed in the OCI spec, so that they actually
// match real devices inside the VM. This step is necessary since we
// cannot predict everything from the caller.
add_devices(&sl(), &req.devices, &mut oci, &self.sandbox).await?;
add_devices(&cid, &sl(), &req.devices, &mut oci, &self.sandbox).await?;

// In guest-kernel mode some devices need extra handling. Taking the
// GPU as an example the shim will inject CDI annotations that will
@@ -234,7 +241,11 @@ impl AgentService {
// readonly
handle_cdi_devices(&sl(), &mut oci, "/var/run/cdi", AGENT_CONFIG.cdi_timeout).await?;

cdh_handler(&mut oci).await?;
// Handle trusted storage configuration before mounting any storage
#[cfg(feature = "guest-pull")]
cdh_handler_trusted_storage(&mut oci)
.await
.map_err(|e| anyhow!("failed to handle trusted storage: {}", e))?;

// Both rootfs and volumes (invoked with --volume for instance) will
// be processed the same way. The idea is to always mount any provided
@@ -251,6 +262,11 @@ impl AgentService {
)
.await?;

// Handle sealed secrets after storage is mounted
cdh_handler_sealed_secrets(&mut oci)
.await
.map_err(|e| anyhow!("failed to handle sealed secrets: {}", e))?;

let mut s = self.sandbox.lock().await;
s.container_mounts.insert(cid.clone(), m);

@@ -342,24 +358,25 @@ impl AgentService {
async fn do_start_container(&self, req: protocols::agent::StartContainerRequest) -> Result<()> {
let mut s = self.sandbox.lock().await;
let sid = s.id.clone();
let cid = req.container_id;
let cid = req.container_id.clone();

let ctr = s
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;
ctr.exec().await?;

if sid == cid {
return Ok(());
if sid != cid {
// start oom event loop
if let Ok(cg_path) = ctr.cgroup_manager.as_ref().get_cgroup_path("memory") {
let rx = notifier::notify_oom(cid.as_str(), cg_path.to_string()).await?;
s.run_oom_event_monitor(rx, cid.clone()).await;
}
}

// start oom event loop
if let Ok(cg_path) = ctr.cgroup_manager.as_ref().get_cgroup_path("memory") {
let rx = notifier::notify_oom(cid.as_str(), cg_path.to_string()).await?;
s.run_oom_event_monitor(rx, cid).await;
}
let ctr = s
.get_container(&cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;

Ok(())
ctr.exec().await
}

#[instrument]
@@ -369,6 +386,9 @@ impl AgentService {
) -> Result<()> {
let cid = req.container_id;

// Drop the host guest mapping for this container so we can reuse the
// PCI slots for the next containers

if req.timeout == 0 {
let mut sandbox = self.sandbox.lock().await;
sandbox.bind_watcher.remove_container(&cid).await;
@@ -424,7 +444,7 @@ impl AgentService {
.ok_or_else(|| anyhow!("Unable to parse process from ExecProcessRequest"))?;

// Apply any necessary corrections for PCI addresses
update_env_pci(&mut process.Env, &sandbox.pcimap)?;
update_env_pci(&cid, &mut process.Env, &sandbox.pcimap)?;

let pipe_size = AGENT_CONFIG.container_pipe_size;
let ocip = process.into();
@@ -647,11 +667,11 @@ impl AgentService {

async fn do_read_stream(
&self,
req: protocols::agent::ReadStreamRequest,
req: &protocols::agent::ReadStreamRequest,
stdout: bool,
) -> Result<protocols::agent::ReadStreamResponse> {
let cid = req.container_id;
let eid = req.exec_id;
let cid = &req.container_id;
let eid = &req.exec_id;

let term_exit_notifier;
let reader = {
@@ -898,8 +918,12 @@ impl agent_ttrpc::AgentService for AgentService {
_ctx: &TtrpcContext,
req: protocols::agent::ReadStreamRequest,
) -> ttrpc::Result<ReadStreamResponse> {
is_allowed(&req).await?;
self.do_read_stream(req, true).await.map_ttrpc_err(same)
let mut response = self.do_read_stream(&req, true).await.map_ttrpc_err(same)?;
if is_allowed(&req).await.is_err() {
// Policy does not allow reading logs, so we redact the log messages.
response.clear_data();
}
Ok(response)
}

async fn read_stderr(
@@ -907,8 +931,12 @@ impl agent_ttrpc::AgentService for AgentService {
_ctx: &TtrpcContext,
req: protocols::agent::ReadStreamRequest,
) -> ttrpc::Result<ReadStreamResponse> {
is_allowed(&req).await?;
self.do_read_stream(req, false).await.map_ttrpc_err(same)
let mut response = self.do_read_stream(&req, false).await.map_ttrpc_err(same)?;
if is_allowed(&req).await.is_err() {
// Policy does not allow reading logs, so we redact the log messages.
response.clear_data();
}
Ok(response)
}

async fn close_stdin(
@@ -989,15 +1017,27 @@ impl agent_ttrpc::AgentService for AgentService {
"empty update interface request",
)?;

// For network devices passed on the pci bus, check for the network interface
// For network devices passed, check for the network interface
// to be available first.
if !interface.pciPath.is_empty() {
let pcipath = pci::Path::from_str(&interface.pciPath)
.map_ttrpc_err(|e| format!("Unexpected pci-path for network interface: {:?}", e))?;

wait_for_net_interface(&self.sandbox, &pcipath)
.await
.map_ttrpc_err(|e| format!("interface not available: {:?}", e))?;
if !interface.devicePath.is_empty() {
#[cfg(not(target_arch = "s390x"))]
{
let pcipath = pci::Path::from_str(&interface.devicePath).map_ttrpc_err(|e| {
format!("Unexpected pci-path for network interface: {:?}", e)
})?;
wait_for_pci_net_interface(&self.sandbox, &pcipath)
.await
.map_ttrpc_err(|e| format!("interface not available: {:?}", e))?;
}
#[cfg(target_arch = "s390x")]
{
let ccw_dev = ccw::Device::from_str(&interface.devicePath).map_ttrpc_err(|e| {
format!("Unexpected CCW path for network interface: {:?}", e)
})?;
wait_for_ccw_net_interface(&self.sandbox, &ccw_dev)
.await
.map_ttrpc_err(|e| format!("interface not available: {:?}", e))?;
}
}
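The wait above is selected per target architecture: on most targets the interface's device path is parsed as a PCI path and the code waits for the PCI network device, while on s390x it is parsed as a CCW device identifier instead. Stripped to a self-contained sketch, the cfg gating looks like this (function name and messages are illustrative):

    /// Illustrative only: compile exactly one wait strategy per target architecture.
    fn wait_for_net_device(device_path: &str) {
        #[cfg(not(target_arch = "s390x"))]
        {
            println!("waiting for PCI network device at {device_path}");
        }

        #[cfg(target_arch = "s390x")]
        {
            println!("waiting for CCW network device at {device_path}");
        }
    }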

self.sandbox
@@ -1534,6 +1574,19 @@ impl agent_ttrpc::AgentService for AgentService {
Ok(Empty::new())
}

async fn add_swap_path(
&self,
ctx: &TtrpcContext,
req: protocols::agent::AddSwapPathRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "add_swap_path", req);
is_allowed(&req).await?;

do_add_swap_path(&req).await.map_ttrpc_err(same)?;

Ok(Empty::new())
}

#[cfg(feature = "agent-policy")]
async fn set_policy(
&self,
@@ -1850,6 +1903,8 @@ async fn remove_container_resources(sandbox: &mut Sandbox, cid: &str) -> Result<

sandbox.container_mounts.remove(cid);
sandbox.containers.remove(cid);
// Remove any host -> guest mappings for this container
sandbox.pcimap.remove(cid);
Ok(())
}

@@ -2059,6 +2114,19 @@ async fn do_add_swap(sandbox: &Arc<Mutex<Sandbox>>, req: &AddSwapRequest) -> Res
Ok(())
}

async fn do_add_swap_path(req: &AddSwapPathRequest) -> Result<()> {
let c_str = CString::new(req.path.clone())?;
let ret = unsafe { libc::swapon(c_str.as_ptr() as *const c_char, 0) };
if ret != 0 {
return Err(anyhow!(
"libc::swapon get error {}",
io::Error::last_os_error()
));
}

Ok(())
}
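do_add_swap_path() above activates an additional swap file or device inside the guest via swapon(2). One way to confirm the result from userspace is to look for the path in the kernel's swap table; a small std-only sketch (the helper name is illustrative):

    use std::{fs, io};

    /// Illustrative only: report whether `path` is listed as an active swap area.
    fn is_swap_active(path: &str) -> io::Result<bool> {
        let swaps = fs::read_to_string("/proc/swaps")?;
        // Skip the header line; the first column is the swap file or device path.
        Ok(swaps
            .lines()
            .skip(1)
            .any(|line| line.split_whitespace().next() == Some(path)))
    }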

// Setup container bundle under CONTAINER_BASE, which is cleaned up
// before removing a container.
// - bundle path is /<CONTAINER_BASE>/<cid>/
@@ -2175,7 +2243,42 @@ fn is_sealed_secret_path(source_path: &str) -> bool {
.any(|suffix| source_path.ends_with(suffix))
}

async fn cdh_handler(oci: &mut Spec) -> Result<()> {
#[cfg(feature = "guest-pull")]
async fn cdh_handler_trusted_storage(oci: &mut Spec) -> Result<()> {
if !cdh::is_cdh_client_initialized().await {
return Ok(());
}
let linux = oci
.linux()
.as_ref()
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;

if let Some(devices) = linux.devices() {
for specdev in devices.iter() {
if specdev.path().as_path().to_str() == Some(TRUSTED_IMAGE_STORAGE_DEVICE) {
let dev_major_minor = format!("{}:{}", specdev.major(), specdev.minor());
let secure_storage_integrity = AGENT_CONFIG.secure_storage_integrity.to_string();
info!(
sl(),
"trusted_store device major:min {}, enable data integrity {}",
dev_major_minor,
secure_storage_integrity
);

let options = std::collections::HashMap::from([
("deviceId".to_string(), dev_major_minor),
("encryptType".to_string(), "LUKS".to_string()),
("dataIntegrity".to_string(), secure_storage_integrity),
]);
cdh::secure_mount("BlockDevice", &options, vec![], KATA_IMAGE_WORK_DIR).await?;
break;
}
}
}
Ok(())
}

async fn cdh_handler_sealed_secrets(oci: &mut Spec) -> Result<()> {
if !cdh::is_cdh_client_initialized().await {
return Ok(());
}
@@ -2232,33 +2335,6 @@ async fn cdh_handler(oci: &mut Spec) -> Result<()> {
}
}

let linux = oci
.linux()
.as_ref()
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;

if let Some(devices) = linux.devices() {
for specdev in devices.iter() {
if specdev.path().as_path().to_str() == Some(TRUSTED_IMAGE_STORAGE_DEVICE) {
let dev_major_minor = format!("{}:{}", specdev.major(), specdev.minor());
let secure_storage_integrity = AGENT_CONFIG.secure_storage_integrity.to_string();
info!(
sl(),
"trusted_store device major:min {}, enable data integrity {}",
dev_major_minor,
secure_storage_integrity
);

let options = std::collections::HashMap::from([
("deviceId".to_string(), dev_major_minor),
("encryptType".to_string(), "LUKS".to_string()),
("dataIntegrity".to_string(), secure_storage_integrity),
]);
cdh::secure_mount("BlockDevice", &options, vec![], KATA_IMAGE_WORK_DIR).await?;
break;
}
}
}
Ok(())
}
@@ -95,6 +95,8 @@ impl StorageState {
}
}

pub type PciHostGuestMapping = HashMap<pci::Address, pci::Address>;

#[derive(Debug)]
pub struct Sandbox {
pub logger: Logger,
@@ -118,7 +120,7 @@ pub struct Sandbox {
pub event_rx: Arc<Mutex<Receiver<String>>>,
pub event_tx: Option<Sender<String>>,
pub bind_watcher: BindWatcher,
pub pcimap: HashMap<pci::Address, pci::Address>,
pub pcimap: HashMap<String, PciHostGuestMapping>,
pub devcg_info: Arc<RwLock<DevicesCgroupInfo>>,
}
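With the change above, the sandbox keeps one host-to-guest PCI address map per container instead of a single shared map, which is what lets remove_container_resources() drop a container's mappings in one call. A std-only sketch of that shape (String stands in for pci::Address to keep the example self-contained):

    use std::collections::HashMap;

    type Address = String; // stand-in for pci::Address
    type PciHostGuestMapping = HashMap<Address, Address>;

    fn main() {
        let mut pcimap: HashMap<String, PciHostGuestMapping> = HashMap::new();

        // Record a host -> guest mapping for container "c1".
        pcimap
            .entry("c1".to_string())
            .or_default()
            .insert("0000:00:02.0".into(), "0000:01:01.0".into());

        // When the container is removed, its whole mapping goes with it.
        pcimap.remove("c1");
        assert!(pcimap.get("c1").is_none());
    }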

BIN src/agent/testdata/initdata.img (vendored, new file): binary file not shown.

@@ -2,20 +2,20 @@
name = "vsock-exporter"
version = "0.1.0"
authors = ["James O. D. Hunt <james.o.hunt@intel.com>"]
edition = "2018"
license = "Apache-2.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
edition.workspace = true
license.workspace = true

[dependencies]
nix = "0.24.2"
libc = "0.2.94"
thiserror = "1.0.26"
opentelemetry = { version = "0.14.0", features=["serialize"] }
serde = { version = "1.0.126", features = ["derive"] }
tokio-vsock = "0.3.1"
libc.workspace = true
thiserror.workspace = true
opentelemetry = { workspace = true, features = ["serialize"] }
tokio-vsock.workspace = true
bincode = "1.3.3"
byteorder = "1.4.3"
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug"] }
async-trait = "0.1.50"
tokio = "1.28.1"
slog = { workspace = true, features = [
"dynamic-keys",
"max_level_trace",
"release_max_level_debug",
] }
async-trait.workspace = true
tokio.workspace = true

src/dragonball/Cargo.lock (generated, 53 lines changed)
@@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
version = 4

[[package]]
name = "addr2line"
@@ -238,22 +238,18 @@ dependencies = [

[[package]]
name = "crossbeam-channel"
version = "0.5.8"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
"cfg-if",
"crossbeam-utils",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.16"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
dependencies = [
"cfg-if",
]
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"

[[package]]
name = "crypto-common"
@@ -265,6 +261,13 @@ dependencies = [
"typenum",
]

[[package]]
name = "dbs-acpi"
version = "0.1.0"
dependencies = [
"vm-memory",
]

[[package]]
name = "dbs-address-space"
version = "0.3.0"
@@ -303,6 +306,8 @@ name = "dbs-boot"
version = "0.4.0"
dependencies = [
"dbs-arch",
"dbs-device",
"device_tree",
"kvm-bindings",
"kvm-ioctls",
"lazy_static",
@@ -350,6 +355,7 @@ version = "0.1.0"
dependencies = [
"byteorder",
"dbs-allocator",
"dbs-arch",
"dbs-boot",
"dbs-device",
"dbs-interrupt",
@@ -364,6 +370,16 @@ dependencies = [
"vm-memory",
]

[[package]]
name = "dbs-tdx"
version = "0.1.0"
dependencies = [
"kvm-bindings",
"serde_json",
"thiserror",
"vmm-sys-util",
]

[[package]]
name = "dbs-upcall"
version = "0.3.0"
@@ -385,6 +401,7 @@ dependencies = [
"libc",
"log",
"serde",
"serde_json",
"thiserror",
"timerfd",
"vmm-sys-util",
@@ -446,6 +463,12 @@ dependencies = [
"syn 1.0.109",
]

[[package]]
name = "device_tree"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f18f717c5c7c2e3483feb64cccebd077245ad6d19007c2db0fd341d38595353c"

[[package]]
name = "digest"
version = "0.10.7"
@@ -984,9 +1007,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.147"
version = "0.2.172"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"

[[package]]
name = "libz-sys"
@@ -1040,9 +1063,9 @@ dependencies = [

[[package]]
name = "log"
version = "0.4.20"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"

[[package]]
name = "lz4"
@@ -1096,9 +1119,9 @@ dependencies = [

[[package]]
name = "mio"
version = "0.8.8"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
dependencies = [
"libc",
"log",

@@ -9,34 +9,86 @@ repository = "https://github.com/kata-containers/kata-containers.git"
license = "Apache-2.0"
edition = "2018"

[workspace]
members = [
"dbs_acpi",
"dbs_address_space",
"dbs_allocator",
"dbs_arch",
"dbs_boot",
"dbs_device",
"dbs_interrupt",
"dbs_legacy_devices",
"dbs_pci",
"dbs_tdx",
"dbs_upcall",
"dbs_utils",
"dbs_virtio_devices",
]
resolver = "2"

[workspace.dependencies]
# Rust-VMM crates
event-manager = "0.2.1"
kvm-bindings = "0.6.0"
kvm-ioctls = "0.12.0"
linux-loader = "0.8.0"
seccompiler = "0.2.0"
vfio-bindings = "0.3.0"
vfio-ioctls = "0.1.0"
virtio-bindings = "0.1.0"
virtio-queue = "0.7.0"
vm-fdt = "0.2.0"
vm-memory = "0.10.0"
vm-superio = "0.5.0"
vmm-sys-util = "0.11.0"

# Local dependencies from Dragonball Sandbox crates
dbs-acpi = { path = "dbs_acpi" }
dbs-address-space = { path = "dbs_address_space" }
dbs-allocator = { path = "dbs_allocator" }
dbs-arch = { path = "dbs_arch" }
dbs-boot = { path = "dbs_boot" }
dbs-device = { path = "dbs_device" }
dbs-interrupt = { path = "dbs_interrupt" }
dbs-legacy-devices = { path = "dbs_legacy_devices" }
dbs-pci = { path = "dbs_pci" }
dbs-tdx = { path = "dbs_tdx" }
dbs-upcall = { path = "dbs_upcall" }
dbs-utils = { path = "dbs_utils" }
dbs-virtio-devices = { path = "dbs_virtio_devices" }

# Local dependencies from `src/lib`
test-utils = { path = "../libs/test-utils" }

[dependencies]
anyhow = "1.0.32"
arc-swap = "1.5.0"
bytes = "1.1.0"
dbs-address-space = { path = "./src/dbs_address_space" }
dbs-allocator = { path = "./src/dbs_allocator" }
dbs-arch = { path = "./src/dbs_arch" }
dbs-boot = { path = "./src/dbs_boot" }
dbs-device = { path = "./src/dbs_device" }
dbs-interrupt = { path = "./src/dbs_interrupt", features = ["kvm-irq"] }
dbs-legacy-devices = { path = "./src/dbs_legacy_devices" }
dbs-upcall = { path = "./src/dbs_upcall", optional = true }
dbs-utils = { path = "./src/dbs_utils" }
dbs-virtio-devices = { path = "./src/dbs_virtio_devices", optional = true, features = [
"virtio-mmio",
dbs-address-space = { workspace = true }
dbs-allocator = { workspace = true }
dbs-arch = { workspace = true }
dbs-boot = { workspace = true }
dbs-device = { workspace = true }
dbs-interrupt = { workspace = true, features = ["kvm-irq"] }
dbs-legacy-devices = { workspace = true }
dbs-upcall = { workspace = true, optional = true }
dbs-utils = { workspace = true }
dbs-virtio-devices = { workspace = true, optional = true, features = [
"virtio-mmio",
] }
dbs-pci = { path = "./src/dbs_pci", optional = true }
dbs-pci = { workspace = true, optional = true }
derivative = "2.2.0"
kvm-bindings = "0.6.0"
kvm-ioctls = "0.12.0"
kvm-bindings = { workspace = true }
kvm-ioctls = { workspace = true }
lazy_static = "1.2"
libc = "0.2.39"
linux-loader = "0.8.0"
linux-loader = {workspace = true}
log = "0.4.14"
nix = "0.24.2"
procfs = "0.12.0"
prometheus = { version = "0.13.0", features = ["process"] }
seccompiler = "0.2.0"
seccompiler = {workspace = true}
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.9"
@@ -44,18 +96,18 @@ slog = "2.5.2"
slog-scope = "4.4.0"
thiserror = "1"
tracing = "0.1.37"
vmm-sys-util = "0.11.0"
virtio-queue = { version = "0.7.0", optional = true }
vm-memory = { version = "0.10.0", features = ["backend-mmap"] }
vmm-sys-util = {workspace = true}
virtio-queue = { workspace = true, optional = true }
vm-memory = { workspace = true, features = ["backend-mmap"] }
crossbeam-channel = "0.5.6"
fuse-backend-rs = "0.10.5"
vfio-bindings = { version = "0.3.0", optional = true }
vfio-ioctls = { version = "0.1.0", optional = true }
vfio-bindings = { workspace = true, optional = true }
vfio-ioctls = { workspace = true, optional = true }

[dev-dependencies]
slog-async = "2.7.0"
slog-term = "2.9.0"
test-utils = { path = "../libs/test-utils" }
test-utils = { workspace = true }

[features]
acpi = []

@@ -35,7 +35,7 @@ vendor:
format:
@echo "INFO: rust fmt..."
# This is kinda dirty step here simply because cargo fmt --all will apply fmt to all dependencies of dragonball which will include /src/libs/protocols with some file generated during compilation time and could not be formatted when you use cargo fmt --all before building the whole project. In order to avoid this problem, we do fmt check in this following way.
rustfmt --edition 2018 ./src/dbs_address_space/src/lib.rs ./src/dbs_allocator/src/lib.rs ./src/dbs_arch/src/lib.rs ./src/dbs_boot/src/lib.rs ./src/dbs_device/src/lib.rs ./src/dbs_interrupt/src/lib.rs ./src/dbs_legacy_devices/src/lib.rs ./src/dbs_pci/src/lib.rs ./src/dbs_upcall/src/lib.rs ./src/dbs_utils/src/lib.rs ./src/dbs_virtio_devices/src/lib.rs ./src/lib.rs --check
rustfmt --edition 2018 ./dbs_address_space/src/lib.rs ./dbs_allocator/src/lib.rs ./dbs_arch/src/lib.rs ./dbs_boot/src/lib.rs ./dbs_device/src/lib.rs ./dbs_interrupt/src/lib.rs ./dbs_legacy_devices/src/lib.rs ./dbs_pci/src/lib.rs ./dbs_upcall/src/lib.rs ./dbs_utils/src/lib.rs ./dbs_virtio_devices/src/lib.rs ./src/lib.rs --check

clean:
cargo clean

@@ -20,19 +20,19 @@ and configuration process.
- vCPU: [vCPU Document](docs/vcpu.md)
- API: [API Document](docs/api.md)
- `Upcall`: [`Upcall` Document](docs/upcall.md)
- `dbs_acpi`: [`dbs_acpi` Document](src/dbs_acpi/README.md)
- `dbs_address_space`: [`dbs_address_space` Document](src/dbs_address_space/README.md)
- `dbs_allocator`: [`dbs_allocator` Document](src/dbs_allocator/README.md)
- `dbs_arch`: [`dbs_arch` Document](src/dbs_arch/README.md)
- `dbs_boot`: [`dbs_boot` Document](src/dbs_boot/README.md)
- `dbs_device`: [`dbs_device` Document](src/dbs_device/README.md)
- `dbs_interrupt`: [`dbs_interrput` Document](src/dbs_interrupt/README.md)
- `dbs_legacy_devices`: [`dbs_legacy_devices` Document](src/dbs_legacy_devices/README.md)
- `dbs_tdx`: [`dbs_tdx` Document](src/dbs_tdx/README.md)
- `dbs_upcall`: [`dbs_upcall` Document](src/dbs_upcall/README.md)
- `dbs_utils`: [`dbs_utils` Document](src/dbs_utils/README.md)
- `dbs_virtio_devices`: [`dbs_virtio_devices` Document](src/dbs_virtio_devices/README.md)
- `dbs_pci`: [`dbc_pci` Document](src/dbs_pci/README.md)
- `dbs_acpi`: [`dbs_acpi` Document](dbs_acpi/README.md)
- `dbs_address_space`: [`dbs_address_space` Document](dbs_address_space/README.md)
- `dbs_allocator`: [`dbs_allocator` Document](dbs_allocator/README.md)
- `dbs_arch`: [`dbs_arch` Document](dbs_arch/README.md)
- `dbs_boot`: [`dbs_boot` Document](dbs_boot/README.md)
- `dbs_device`: [`dbs_device` Document](dbs_device/README.md)
- `dbs_interrupt`: [`dbs_interrput` Document](dbs_interrupt/README.md)
- `dbs_legacy_devices`: [`dbs_legacy_devices` Document](dbs_legacy_devices/README.md)
- `dbs_tdx`: [`dbs_tdx` Document](dbs_tdx/README.md)
- `dbs_upcall`: [`dbs_upcall` Document](dbs_upcall/README.md)
- `dbs_utils`: [`dbs_utils` Document](dbs_utils/README.md)
- `dbs_virtio_devices`: [`dbs_virtio_devices` Document](dbs_virtio_devices/README.md)
- `dbs_pci`: [`dbc_pci` Document](dbs_pci/README.md)

Currently, the documents are still actively adding.
You could see the [official documentation](docs/) page for more details.

@@ -11,4 +11,4 @@ keywords = ["dragonball", "acpi", "vmm", "secure-sandbox"]
readme = "README.md"

[dependencies]
vm-memory = "0.9.0"
vm-memory = {workspace = true}

Some files were not shown because too many files have changed in this diff.