mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-02-22 14:54:23 +00:00
Compare commits
385 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ef11ce13ea | ||
|
|
ea3f9b22a2 | ||
|
|
86ad7e486c | ||
|
|
624ff41318 | ||
|
|
6bb3f44100 | ||
|
|
4d4aba2e64 | ||
|
|
5f4f8ff337 | ||
|
|
f0d6316004 | ||
|
|
4e868ad981 | ||
|
|
a24ff2b51c | ||
|
|
1c70ef544f | ||
|
|
e5df408f64 | ||
|
|
985b9fa479 | ||
|
|
6d5e47bab1 | ||
|
|
514af3624b | ||
|
|
a6e3fb6514 | ||
|
|
55bdd1fcf4 | ||
|
|
6586f3b725 | ||
|
|
f5adc4c114 | ||
|
|
a67bdc369a | ||
|
|
67be55834d | ||
|
|
abfff68de6 | ||
|
|
0466ee04da | ||
|
|
6b223194a9 | ||
|
|
fb01d51573 | ||
|
|
144be14547 | ||
|
|
017c7cf249 | ||
|
|
52c6b0737c | ||
|
|
e7bdeb49b9 | ||
|
|
c0ca9f9a90 | ||
|
|
81f389903a | ||
|
|
179a98d678 | ||
|
|
e3efcfd40f | ||
|
|
5a92333f4b | ||
|
|
ec0424e153 | ||
|
|
b26e94ffba | ||
|
|
f6f4023508 | ||
|
|
814e7d7285 | ||
|
|
92d1197f10 | ||
|
|
a2484d0088 | ||
|
|
9e2cbe8ea1 | ||
|
|
fc676f76de | ||
|
|
ac9f838e33 | ||
|
|
9ea851ee53 | ||
|
|
2c1b957642 | ||
|
|
dfe5ef36b4 | ||
|
|
8a374af6b7 | ||
|
|
50aa89fa05 | ||
|
|
57aa746d0d | ||
|
|
ce2798b688 | ||
|
|
b7208b3c6c | ||
|
|
7e4dc08b0e | ||
|
|
a649d33a45 | ||
|
|
c628ecf298 | ||
|
|
d87076eea5 | ||
|
|
2dd859bfce | ||
|
|
4c9af982e6 | ||
|
|
06f964843a | ||
|
|
c27c3c40dd | ||
|
|
476467115f | ||
|
|
73645d1742 | ||
|
|
c7db337f10 | ||
|
|
72af86f686 | ||
|
|
95b2cad095 | ||
|
|
506f4f2adc | ||
|
|
a3e35e7e92 | ||
|
|
fdf69ab84c | ||
|
|
56b94e200c | ||
|
|
0533bee222 | ||
|
|
2114576be5 | ||
|
|
bcd8fd538d | ||
|
|
6fe3f331c9 | ||
|
|
3f3a2533a3 | ||
|
|
fc72d392b7 | ||
|
|
ef4ebfba48 | ||
|
|
336b80626c | ||
|
|
dd3c5fc617 | ||
|
|
93bd2e4716 | ||
|
|
7eb882a797 | ||
|
|
a60cf37879 | ||
|
|
ca6438728d | ||
|
|
32feb10331 | ||
|
|
3c618a61d6 | ||
|
|
7c888b34be | ||
|
|
234d53b6df | ||
|
|
cf81d400d8 | ||
|
|
79ed33adb5 | ||
|
|
f1cea9a022 | ||
|
|
4f802cc993 | ||
|
|
dda4279a2b | ||
|
|
5888971e18 | ||
|
|
ca28ca422c | ||
|
|
50ad323a21 | ||
|
|
f8314bedb0 | ||
|
|
99d9a24a51 | ||
|
|
0091b89184 | ||
|
|
9da2707202 | ||
|
|
2a0ff0bec3 | ||
|
|
fa581d334f | ||
|
|
a3967e9a59 | ||
|
|
272d39bc87 | ||
|
|
7a86c2eedd | ||
|
|
5096bd6a11 | ||
|
|
3fe59a99ff | ||
|
|
61fa4a3c75 | ||
|
|
856af1a886 | ||
|
|
74b587431f | ||
|
|
3df65f4f3a | ||
|
|
c5a6354718 | ||
|
|
867d8bc9b4 | ||
|
|
cfe9470ff1 | ||
|
|
9820459a0f | ||
|
|
4e141a96ed | ||
|
|
c8028da3c6 | ||
|
|
0aa68ccfef | ||
|
|
e4cea92ad3 | ||
|
|
0590fedd98 | ||
|
|
6b6668998f | ||
|
|
4f7f25d1a1 | ||
|
|
216eb29e04 | ||
|
|
65ae12710d | ||
|
|
9bc6fe6c83 | ||
|
|
349d496f7f | ||
|
|
6005026416 | ||
|
|
91b43a9964 | ||
|
|
2478b8f400 | ||
|
|
499aa24d38 | ||
|
|
1edb7fe7da | ||
|
|
607a892f2e | ||
|
|
26f176e2d9 | ||
|
|
3306195f66 | ||
|
|
a7568b520c | ||
|
|
e6d68349fa | ||
|
|
1f943bd6bf | ||
|
|
9a41d09f39 | ||
|
|
8fdb85e062 | ||
|
|
49516ef6f2 | ||
|
|
21fad464e8 | ||
|
|
b745e5ff02 | ||
|
|
40316f688a | ||
|
|
35b619ff58 | ||
|
|
662e8db5dd | ||
|
|
9117dd409e | ||
|
|
fce14f3697 | ||
|
|
0fd70f7ec3 | ||
|
|
4727a9c3e4 | ||
|
|
7ab8f62d43 | ||
|
|
7e92833bd4 | ||
|
|
14b18b55be | ||
|
|
1dde0de1d7 | ||
|
|
d4c1b768a6 | ||
|
|
3c36ce8139 | ||
|
|
c9d4e2c4b0 | ||
|
|
5fadc5fcb4 | ||
|
|
7cc7fd6888 | ||
|
|
5f8875064b | ||
|
|
3b925d6ad1 | ||
|
|
7526ee9350 | ||
|
|
c46a6244ba | ||
|
|
21ed9dc23f | ||
|
|
5f1520bdee | ||
|
|
e30bd6733b | ||
|
|
78df4a0c3f | ||
|
|
7daf9cffb1 | ||
|
|
293be9d0ad | ||
|
|
84e1a34f8f | ||
|
|
cf56307edb | ||
|
|
359f76d209 | ||
|
|
ca8f1399ca | ||
|
|
0bb559a438 | ||
|
|
4ca4412f64 | ||
|
|
e2424b9eb1 | ||
|
|
3d80c84869 | ||
|
|
f0fdc8e17c | ||
|
|
e53645ec85 | ||
|
|
aa295c91f2 | ||
|
|
6648c8c7fc | ||
|
|
49776f76bf | ||
|
|
dbfe85e705 | ||
|
|
0c3b6a94b3 | ||
|
|
f751c98da3 | ||
|
|
08361c5948 | ||
|
|
da9bfb27ed | ||
|
|
7347d43cf9 | ||
|
|
c7bb1e2790 | ||
|
|
e6f7ddd9a2 | ||
|
|
46cfed5025 | ||
|
|
81fb2c9980 | ||
|
|
0c432153df | ||
|
|
6511ffe89d | ||
|
|
ee59378232 | ||
|
|
ef11213a4e | ||
|
|
1fb6730984 | ||
|
|
05e9fe0591 | ||
|
|
d658129695 | ||
|
|
ae2d89e95e | ||
|
|
095d4ad08d | ||
|
|
bd816dfcec | ||
|
|
d413bf7d44 | ||
|
|
76408c0f13 | ||
|
|
6e4da19fa5 | ||
|
|
8f8061da08 | ||
|
|
64e4b2fa83 | ||
|
|
7c0d68f7f7 | ||
|
|
82ed34aee1 | ||
|
|
9def624c05 | ||
|
|
6926914683 | ||
|
|
e733c13cf7 | ||
|
|
ba069f9baa | ||
|
|
cc8ec7b0e9 | ||
|
|
8a364d2145 | ||
|
|
0cc6297716 | ||
|
|
b6059f3566 | ||
|
|
c6afad2a06 | ||
|
|
451608fb28 | ||
|
|
8328136575 | ||
|
|
a92a63031d | ||
|
|
997f7c4433 | ||
|
|
74d4065197 | ||
|
|
73bb3fdbee | ||
|
|
5a587ba506 | ||
|
|
29f5dec38f | ||
|
|
d71f9e1155 | ||
|
|
28c386c51f | ||
|
|
c2a186b18c | ||
|
|
8cd094cf06 | ||
|
|
b5f2a1e8c4 | ||
|
|
2d65b3bfd8 | ||
|
|
fe5e1cf2e1 | ||
|
|
3f7bcf54f0 | ||
|
|
80144fc415 | ||
|
|
2f5f35608a | ||
|
|
2faafbdd3a | ||
|
|
9e5ed41511 | ||
|
|
b33d4fe708 | ||
|
|
183823398d | ||
|
|
bfbbe8ba6b | ||
|
|
5c21ec278c | ||
|
|
9bb0d48d56 | ||
|
|
64a2ef62e0 | ||
|
|
a441f21c40 | ||
|
|
ce54090f25 | ||
|
|
e884fef483 | ||
|
|
9c16643c12 | ||
|
|
4978c9092c | ||
|
|
a7ba362f92 | ||
|
|
230a9833f8 | ||
|
|
a6d9fd4118 | ||
|
|
8f0cb2f1ea | ||
|
|
cbdae44992 | ||
|
|
97acaa8124 | ||
|
|
23246662b2 | ||
|
|
ebe5ad1386 | ||
|
|
c9497c88e4 | ||
|
|
d5d9928f97 | ||
|
|
f70892a5bb | ||
|
|
ab64780a0b | ||
|
|
9e064ba192 | ||
|
|
42c48f54ed | ||
|
|
d3a36fa06f | ||
|
|
fa546600ff | ||
|
|
efddcb4ab8 | ||
|
|
7bb3e562bc | ||
|
|
7b53041bad | ||
|
|
38212ba6d8 | ||
|
|
fb7e9b4f32 | ||
|
|
0cfcbf79b8 | ||
|
|
997f1f6cd0 | ||
|
|
f60f43af6b | ||
|
|
1789527d61 | ||
|
|
999f67d573 | ||
|
|
cb2255f199 | ||
|
|
2a6c9eec74 | ||
|
|
eaff5de37a | ||
|
|
4f1d23b651 | ||
|
|
6d80df9831 | ||
|
|
a116ce0b75 | ||
|
|
4dc3bc0020 | ||
|
|
8f7a4842c2 | ||
|
|
ce54e5dd57 | ||
|
|
9adb7b7c28 | ||
|
|
73ab9b1d6d | ||
|
|
4db3f9e226 | ||
|
|
19cb657299 | ||
|
|
86bc151787 | ||
|
|
8d8adb6887 | ||
|
|
76298c12b7 | ||
|
|
7d303ec2d0 | ||
|
|
e0b79eb57f | ||
|
|
8ed61b1bb9 | ||
|
|
cc4f02e2b6 | ||
|
|
ace6f1e66e | ||
|
|
47cfeaaf18 | ||
|
|
63c475786f | ||
|
|
059b89cd03 | ||
|
|
4ff3ed5101 | ||
|
|
de8dcb1549 | ||
|
|
c488cc48a2 | ||
|
|
e5acb1257f | ||
|
|
1bddde729b | ||
|
|
9517b0a933 | ||
|
|
f5a7175f92 | ||
|
|
9b969bb7da | ||
|
|
fb2f3cfce2 | ||
|
|
f32a741c76 | ||
|
|
512e79f61a | ||
|
|
aa70080423 | ||
|
|
34015bae12 | ||
|
|
93b60a8327 | ||
|
|
aa9951f2cd | ||
|
|
9d8c72998b | ||
|
|
033ed13202 | ||
|
|
c058d04b94 | ||
|
|
9d2bb0c452 | ||
|
|
627d062fb2 | ||
|
|
96afe62576 | ||
|
|
d946016eb7 | ||
|
|
37f1a77a6a | ||
|
|
450a81cc54 | ||
|
|
c09f02e6f6 | ||
|
|
58c7469110 | ||
|
|
c36ea0968d | ||
|
|
ba197302e2 | ||
|
|
725ad067c1 | ||
|
|
9858c23c59 | ||
|
|
fc8f1ff03c | ||
|
|
f7b4f76082 | ||
|
|
4fd66fa689 | ||
|
|
e6ff42b8ad | ||
|
|
6710d87c6a | ||
|
|
178b79f122 | ||
|
|
bc545c6549 | ||
|
|
585481990a | ||
|
|
0057f86cfa | ||
|
|
fa0401793f | ||
|
|
60b7265961 | ||
|
|
57b53dbae8 | ||
|
|
ddf1a545d1 | ||
|
|
cbdf6400ae | ||
|
|
ceeecf9c66 | ||
|
|
7c53baea8a | ||
|
|
b549d354bf | ||
|
|
9f3113e1f6 | ||
|
|
ef94742320 | ||
|
|
d71764985d | ||
|
|
0fc04a269d | ||
|
|
8d7ac5f01c | ||
|
|
612acbe319 | ||
|
|
f3a487cd41 | ||
|
|
3a559521d1 | ||
|
|
567daf5a42 | ||
|
|
c7d913f436 | ||
|
|
7bd410c725 | ||
|
|
7fbc789855 | ||
|
|
7fc41a771a | ||
|
|
a31d82fec2 | ||
|
|
9ef4c80340 | ||
|
|
6a4e413758 | ||
|
|
678d4d189d | ||
|
|
718f718764 | ||
|
|
d860ded3f0 | ||
|
|
a141da8a20 | ||
|
|
aaaaee7a4b | ||
|
|
21efaf1fca | ||
|
|
2056623e13 | ||
|
|
34126ee704 | ||
|
|
980a338454 | ||
|
|
e14f766895 | ||
|
|
2e0731f479 | ||
|
|
addf62087c | ||
|
|
c24b68dc4f | ||
|
|
24677d7484 | ||
|
|
9e74c28158 | ||
|
|
b7aae33cc1 | ||
|
|
6d9d58278e | ||
|
|
1bc6fbda8c | ||
|
|
d39f5a85e6 | ||
|
|
d90a0eefbe | ||
|
|
2618c014a0 | ||
|
|
5c4878f37e | ||
|
|
bd6b169e98 | ||
|
|
5770336572 | ||
|
|
45daec7b37 | ||
|
|
ed5a7dc022 | ||
|
|
6fc7c77721 |
2
.github/workflows/commit-message-check.yaml
vendored
2
.github/workflows/commit-message-check.yaml
vendored
@@ -10,7 +10,7 @@ env:
|
||||
error_msg: |+
|
||||
See the document below for help on formatting commits for the project.
|
||||
|
||||
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-format
|
||||
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-forma
|
||||
|
||||
jobs:
|
||||
commit-message-check:
|
||||
|
||||
18
.github/workflows/gather-artifacts.sh
vendored
Executable file
18
.github/workflows/gather-artifacts.sh
vendored
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
pushd kata-artifacts >>/dev/null
|
||||
for c in ./*.tar.gz
|
||||
do
|
||||
echo "untarring tarball $c"
|
||||
tar -xvf $c
|
||||
done
|
||||
|
||||
tar cvfJ ../kata-static.tar.xz ./opt
|
||||
popd >>/dev/null
|
||||
36
.github/workflows/generate-artifact-tarball.sh
vendored
Executable file
36
.github/workflows/generate-artifact-tarball.sh
vendored
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
|
||||
main() {
|
||||
artifact_stage=${1:-}
|
||||
artifact=$(echo ${artifact_stage} | sed -n -e 's/^install_//p' | sed -r 's/_/-/g')
|
||||
if [ -z "${artifact}" ]; then
|
||||
"Scripts needs artifact name to build"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
export GOPATH=$HOME/go
|
||||
|
||||
go get github.com/kata-containers/packaging || true
|
||||
pushd $GOPATH/src/github.com/kata-containers/packaging/release >>/dev/null
|
||||
git checkout $tag
|
||||
pushd ../obs-packaging
|
||||
./gen_versions_txt.sh $tag
|
||||
popd
|
||||
|
||||
source ./kata-deploy-binaries.sh
|
||||
${artifact_stage} $tag
|
||||
popd
|
||||
|
||||
mv $HOME/go/src/github.com/kata-containers/packaging/release/kata-static-${artifact}.tar.gz .
|
||||
}
|
||||
|
||||
main $@
|
||||
34
.github/workflows/generate-local-artifact-tarball.sh
vendored
Executable file
34
.github/workflows/generate-local-artifact-tarball.sh
vendored
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
# Copyright (c) 2020 Ant Group
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
|
||||
main() {
|
||||
artifact_stage=${1:-}
|
||||
artifact=$(echo ${artifact_stage} | sed -n -e 's/^install_//p' | sed -r 's/_/-/g')
|
||||
if [ -z "${artifact}" ]; then
|
||||
"Scripts needs artifact name to build"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
pushd $GITHUB_WORKSPACE/tools/packaging
|
||||
git checkout $tag
|
||||
./scripts/gen_versions_txt.sh $tag
|
||||
popd
|
||||
|
||||
pushd $GITHUB_WORKSPACE/tools/packaging/release
|
||||
source ./kata-deploy-binaries.sh
|
||||
${artifact_stage} $tag
|
||||
popd
|
||||
|
||||
mv $GITHUB_WORKSPACE/tools/packaging/release/kata-static-${artifact}.tar.gz .
|
||||
}
|
||||
|
||||
main $@
|
||||
58
.github/workflows/kata-deploy-push.yaml
vendored
58
.github/workflows/kata-deploy-push.yaml
vendored
@@ -1,58 +0,0 @@
|
||||
name: kata-deploy-build
|
||||
|
||||
on: push
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- kernel
|
||||
- shim-v2
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install docker
|
||||
run: |
|
||||
curl -fsSL https://test.docker.com -o test-docker.sh
|
||||
sh test-docker.sh
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r --preserve=all "${build_dir}" "kata-build"
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
path: kata-static.tar.xz
|
||||
4
.github/workflows/kata-deploy-test.yaml
vendored
4
.github/workflows/kata-deploy-test.yaml
vendored
@@ -46,11 +46,9 @@ jobs:
|
||||
VERSION="2.0.0"
|
||||
ARTIFACT_URL="https://github.com/kata-containers/kata-containers/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
|
||||
wget "${ARTIFACT_URL}" -O tools/packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} -t quay.io/kata-containers/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$PR_SHA
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA
|
||||
echo "##[set-output name=pr-sha;]${PR_SHA}"
|
||||
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
|
||||
18
.github/workflows/main.yaml
vendored
18
.github/workflows/main.yaml
vendored
@@ -43,7 +43,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -71,7 +71,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -97,7 +97,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -151,7 +151,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -178,7 +178,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -205,7 +205,7 @@ jobs:
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -247,11 +247,9 @@ jobs:
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv release-candidate/kata-static.tar.xz ./packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./packaging/kata-deploy/action
|
||||
|
||||
272
.github/workflows/release.yaml
vendored
272
.github/workflows/release.yaml
vendored
@@ -5,45 +5,213 @@ on:
|
||||
- '2.*'
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
get-artifact-list:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- qemu
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
- shim-v2
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install docker
|
||||
- name: get the list
|
||||
run: |
|
||||
curl -fsSL https://test.docker.com -o test-docker.sh
|
||||
sh test-docker.sh
|
||||
pushd $GITHUB_WORKSPACE
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
git checkout $tag
|
||||
popd
|
||||
$GITHUB_WORKSPACE/tools/packaging/artifact-list.sh > artifact-list.txt
|
||||
- name: save-artifact-list
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
path: artifact-list.txt
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
build-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-kernel
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
if-no-files-found: error
|
||||
path: kata-static-kernel.tar.gz
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
build-experimental-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_experimental_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-experimental-kernel
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-experimental-kernel.tar.gz
|
||||
|
||||
build-qemu:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_qemu"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-qemu
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-qemu.tar.gz
|
||||
|
||||
build-image:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_image"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-image
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-image.tar.gz
|
||||
|
||||
build-firecracker:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_firecracker"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-firecracker
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-firecracker.tar.gz
|
||||
|
||||
|
||||
build-clh:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_clh"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-clh
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-clh.tar.gz
|
||||
|
||||
build-kata-components:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kata_components"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-kata-components
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kata-components.tar.gz
|
||||
|
||||
gather-artifacts:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: [build-experimental-kernel, build-kernel, build-qemu, build-image, build-firecracker, build-kata-components, build-clh]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
@@ -51,24 +219,24 @@ jobs:
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
- name: colate-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
name: release-candidate
|
||||
path: kata-static.tar.xz
|
||||
|
||||
kata-deploy:
|
||||
needs: create-kata-tarball
|
||||
needs: gather-artifacts
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-kata-tarball
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
name: release-candidate
|
||||
- name: build-and-push-kata-deploy-ci
|
||||
id: build-and-push-kata-deploy-ci
|
||||
run: |
|
||||
@@ -78,11 +246,9 @@ jobs:
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
|
||||
mkdir -p packaging/kata-deploy
|
||||
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
@@ -100,14 +266,8 @@ jobs:
|
||||
run: |
|
||||
# tag the container image we created and push to DockerHub
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
tags=($tag)
|
||||
tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
|
||||
for tag in ${tags[@]}; do \
|
||||
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag} && \
|
||||
docker tag quay.io/kata-containers/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} quay.io/kata-containers/kata-deploy:${tag} && \
|
||||
docker push katadocker/kata-deploy:${tag} && \
|
||||
docker push quay.io/kata-containers/kata-deploy:${tag}; \
|
||||
done
|
||||
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
|
||||
docker push katadocker/kata-deploy:${tag}
|
||||
|
||||
upload-static-tarball:
|
||||
needs: kata-deploy
|
||||
@@ -117,7 +277,7 @@ jobs:
|
||||
- name: download-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
name: release-candidate
|
||||
- name: install hub
|
||||
run: |
|
||||
HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
|
||||
@@ -131,21 +291,3 @@ jobs:
|
||||
pushd $GITHUB_WORKSPACE
|
||||
echo "uploading asset '${tarball}' for tag: ${tag}"
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
|
||||
popd
|
||||
|
||||
upload-cargo-vendored-tarball:
|
||||
needs: upload-static-tarball
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: generate-and-upload-tarball
|
||||
run: |
|
||||
pushd $GITHUB_WORKSPACE/src/agent
|
||||
cargo vendor >> .cargo/config
|
||||
popd
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
tarball="kata-containers-$tag-vendor.tar.gz"
|
||||
pushd $GITHUB_WORKSPACE
|
||||
tar -cvzf "${tarball}" src/agent/.cargo/config src/agent/vendor
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
|
||||
popd
|
||||
|
||||
@@ -12,9 +12,6 @@ on:
|
||||
- reopened
|
||||
- labeled
|
||||
- unlabeled
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
check-pr-porting-labels:
|
||||
|
||||
6
.github/workflows/snap-release.yaml
vendored
6
.github/workflows/snap-release.yaml
vendored
@@ -9,8 +9,6 @@ jobs:
|
||||
steps:
|
||||
- name: Check out Git repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Snapcraft
|
||||
uses: samuelmeuli/action-snapcraft@v1
|
||||
@@ -21,7 +19,7 @@ jobs:
|
||||
run: |
|
||||
sudo apt-get install -y git git-extras
|
||||
kata_url="https://github.com/kata-containers/kata-containers"
|
||||
latest_version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
|
||||
latest_version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r -u | head -1)
|
||||
current_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
|
||||
# Check semantic versioning format (x.y.z) and if the current tag is the latest tag
|
||||
if echo "${current_version}" | grep -q "^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+$" && echo -e "$latest_version\n$current_version" | sort -C -V; then
|
||||
@@ -35,5 +33,5 @@ jobs:
|
||||
snap_file="kata-containers_${snap_version}_amd64.snap"
|
||||
# Upload the snap if it exists
|
||||
if [ -f ${snap_file} ]; then
|
||||
snapcraft upload --release=stable ${snap_file}
|
||||
snapcraft upload --release=candidate ${snap_file}
|
||||
fi
|
||||
|
||||
2
.github/workflows/snap.yaml
vendored
2
.github/workflows/snap.yaml
vendored
@@ -6,8 +6,6 @@ jobs:
|
||||
steps:
|
||||
- name: Check out
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Snapcraft
|
||||
uses: samuelmeuli/action-snapcraft@v1
|
||||
|
||||
86
.github/workflows/static-checks.yaml
vendored
86
.github/workflows/static-checks.yaml
vendored
@@ -1,86 +0,0 @@
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
name: Static checks
|
||||
jobs:
|
||||
test:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.15.x, 1.16.x]
|
||||
os: [ubuntu-20.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
|
||||
TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
|
||||
RUST_BACKTRACE: "1"
|
||||
target_branch: ${{ github.base_ref }}
|
||||
steps:
|
||||
- name: Install Go
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
|
||||
echo "TRAVIS: ${TRAVIS}"
|
||||
- name: Set env
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- name: Checkout code
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Setup travis references
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
|
||||
target_branch=${TRAVIS_BRANCH}
|
||||
- name: Setup
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Building rust
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
rustup component add rustfmt clippy
|
||||
# Check whether the vendored code is up-to-date & working as the first thing
|
||||
- name: Check vendored code
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make vendor
|
||||
- name: Static Checks
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make static-checks
|
||||
- name: Run Compiler Checks
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make check
|
||||
- name: Run Unit Tests
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make test
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,10 +1,8 @@
|
||||
**/*.bk
|
||||
**/*~
|
||||
**/*.orig
|
||||
**/*.rej
|
||||
**/target
|
||||
**/.vscode
|
||||
pkg/logging/Cargo.lock
|
||||
src/agent/src/version.rs
|
||||
src/agent/kata-agent.service
|
||||
src/agent/protocols/src/*.rs
|
||||
|
||||
62
.travis.yml
Normal file
62
.travis.yml
Normal file
@@ -0,0 +1,62 @@
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
dist: bionic
|
||||
os: linux
|
||||
|
||||
# set cache directories manually, because
|
||||
# we are using a non-standard directory struct
|
||||
# cargo root is in srs/agent
|
||||
#
|
||||
# If needed, caches can be cleared
|
||||
# by ways documented in
|
||||
# https://docs.travis-ci.com/user/caching#clearing-caches
|
||||
language: rust
|
||||
rust:
|
||||
- 1.44.1
|
||||
cache:
|
||||
cargo: true
|
||||
directories:
|
||||
- src/agent/target
|
||||
|
||||
before_install:
|
||||
- git remote set-branches --add origin "${TRAVIS_BRANCH}"
|
||||
- git fetch
|
||||
- export RUST_BACKTRACE=1
|
||||
- export target_branch=$TRAVIS_BRANCH
|
||||
- "ci/setup.sh"
|
||||
|
||||
# we use install to run check agent
|
||||
# so that it is easy to skip for non-amd64 platform
|
||||
install:
|
||||
- export PATH=$PATH:"$HOME/.cargo/bin"
|
||||
- export RUST_AGENT=yes
|
||||
- rustup target add x86_64-unknown-linux-musl
|
||||
- sudo ln -sf /usr/bin/g++ /bin/musl-g++
|
||||
- rustup component add rustfmt
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
|
||||
before_script:
|
||||
- "ci/install_go.sh"
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
|
||||
script:
|
||||
- "ci/static-checks.sh"
|
||||
|
||||
jobs:
|
||||
include:
|
||||
- name: x86_64 test
|
||||
os: linux
|
||||
- name: ppc64le test
|
||||
os: linux-ppc64le
|
||||
install: skip
|
||||
script: skip
|
||||
allow_failures:
|
||||
- name: ppc64le test
|
||||
fast_finish: true
|
||||
94
Glossary.md
94
Glossary.md
@@ -1,94 +0,0 @@
|
||||
# Glossary
|
||||
|
||||
[A](#a), [B](#b), [C](#c), [D](#d), [E](#e), [F](#f), [G](#g), [H](#h), [I](#i), [J](#j), [K](#k), [L](#l), [M](#m), [N](#n), [O](#o), [P](#p), [Q](#q), [R](#r), [S](#s), [T](#t), [U](#u), [V](#v), [W](#w), [X](#x), [Y](#y), [Z](#z)
|
||||
|
||||
## A
|
||||
|
||||
### Auto Scaling
|
||||
a method used in cloud computing, whereby the amount of computational resources in a server farm, typically measured in terms of the number of active servers, which vary automatically based on the load on the farm.
|
||||
|
||||
## B
|
||||
|
||||
## C
|
||||
|
||||
### Container Security Solutions
|
||||
The process of implementing security tools and policies that will give you the assurance that everything in your container is running as intended, and only as intended.
|
||||
|
||||
### Container Software
|
||||
A standard unit of software that packages up code and all its dependencies so the application runs quickly and reliably from one computing environment to another.
|
||||
|
||||
### Container Runtime Interface
|
||||
A plugin interface which enables Kubelet to use a wide variety of container runtimes, without the need to recompile.
|
||||
|
||||
### Container Virtualization
|
||||
A container is a virtual runtime environment that runs on top of a single operating system (OS) kernel and emulates an operating system rather than the underlying hardware.
|
||||
|
||||
## D
|
||||
|
||||
## E
|
||||
|
||||
## F
|
||||
|
||||
## G
|
||||
|
||||
## H
|
||||
|
||||
## I
|
||||
|
||||
### Infrastructure Architecture
|
||||
A structured and modern approach for supporting an organization and facilitating innovation within an enterprise.
|
||||
|
||||
## J
|
||||
|
||||
## K
|
||||
|
||||
### Kata Containers
|
||||
Kata containers is an open source project delivering increased container security and Workload isolation through an implementation of lightweight virtual machines.
|
||||
|
||||
## L
|
||||
|
||||
## M
|
||||
|
||||
## N
|
||||
|
||||
## O
|
||||
|
||||
## P
|
||||
|
||||
### Pod Containers
|
||||
A Group of one or more containers , with shared storage/network, and a specification for how to run the containers.
|
||||
|
||||
### Private Cloud
|
||||
A computing model that offers a proprietary environment dedicated to a single business entity.
|
||||
|
||||
### Public Cloud
|
||||
Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them.
|
||||
|
||||
## Q
|
||||
|
||||
## R
|
||||
|
||||
## S
|
||||
|
||||
### Serverless Containers
|
||||
An architecture in which code is executed on-demand. Serverless workloads are typically in the cloud, but on-premises serverless platforms exist, too.
|
||||
|
||||
## T
|
||||
|
||||
## U
|
||||
|
||||
## V
|
||||
|
||||
### Virtual Machine Monitor
|
||||
Computer software, firmware or hardware that creates and runs virtual machines.
|
||||
|
||||
### Virtual Machine Software
|
||||
A software program or operating system that not only exhibits the behavior of a separate computer, but is also capable of performing tasks such as running applications and programs like a separate computer.
|
||||
|
||||
## W
|
||||
|
||||
## X
|
||||
|
||||
## Y
|
||||
|
||||
## Z
|
||||
14
Makefile
14
Makefile
@@ -15,7 +15,7 @@ TOOLS =
|
||||
|
||||
TOOLS += agent-ctl
|
||||
|
||||
STANDARD_TARGETS = build check clean install test vendor
|
||||
STANDARD_TARGETS = build check clean install test
|
||||
|
||||
include utils.mk
|
||||
|
||||
@@ -29,14 +29,4 @@ $(eval $(call create_all_rules,$(COMPONENTS),$(TOOLS),$(STANDARD_TARGETS)))
|
||||
generate-protocols:
|
||||
make -C src/agent generate-protocols
|
||||
|
||||
# Some static checks rely on generated source files of components.
|
||||
static-checks: build
|
||||
bash ci/static-checks.sh
|
||||
|
||||
binary-tarball:
|
||||
make -f ./tools/packaging/kata-deploy/local-build/Makefile
|
||||
|
||||
install-binary-tarball:
|
||||
make -f ./tools/packaging/kata-deploy/local-build/Makefile install
|
||||
|
||||
.PHONY: all default static-checks binary-tarball install-binary-tarball
|
||||
.PHONY: all default
|
||||
|
||||
208
README.md
208
README.md
@@ -2,90 +2,130 @@
|
||||
|
||||
# Kata Containers
|
||||
|
||||
Welcome to Kata Containers!
|
||||
* [Raising issues](#raising-issues)
|
||||
* [Kata Containers repositories](#kata-containers-repositories)
|
||||
* [Code Repositories](#code-repositories)
|
||||
* [Kata Containers-developed components](#kata-containers-developed-components)
|
||||
* [Agent](#agent)
|
||||
* [KSM throttler](#ksm-throttler)
|
||||
* [Runtime](#runtime)
|
||||
* [Trace forwarder](#trace-forwarder)
|
||||
* [Additional](#additional)
|
||||
* [Kernel](#kernel)
|
||||
* [CI](#ci)
|
||||
* [Community](#community)
|
||||
* [Documentation](#documentation)
|
||||
* [Packaging](#packaging)
|
||||
* [Test code](#test-code)
|
||||
* [Utilities](#utilities)
|
||||
* [OS builder](#os-builder)
|
||||
* [Web content](#web-content)
|
||||
|
||||
This repository is the home of the Kata Containers code for the 2.0 and newer
|
||||
releases.
|
||||
|
||||
If you want to learn about Kata Containers, visit the main
|
||||
[Kata Containers website](https://katacontainers.io).
|
||||
|
||||
## Introduction
|
||||
|
||||
Kata Containers is an open source project and community working to build a
|
||||
standard implementation of lightweight Virtual Machines (VMs) that feel and
|
||||
perform like containers, but provide the workload isolation and security
|
||||
advantages of VMs.
|
||||
|
||||
## Getting started
|
||||
|
||||
See the [installation documentation](docs/install).
|
||||
|
||||
## Documentation
|
||||
|
||||
See the [official documentation](docs)
|
||||
(including [installation guides](docs/install),
|
||||
[the developer guide](docs/Developer-Guide.md),
|
||||
[design documents](docs/design) and more).
|
||||
|
||||
## Community
|
||||
|
||||
To learn more about the project, its community and governance, see the
|
||||
[community repository](https://github.com/kata-containers/community). This is
|
||||
the first place to go if you wish to contribute to the project.
|
||||
|
||||
## Getting help
|
||||
|
||||
See the [community](#community) section for ways to contact us.
|
||||
|
||||
### Raising issues
|
||||
|
||||
Please raise an issue
|
||||
[in this repository](https://github.com/kata-containers/kata-containers/issues).
|
||||
|
||||
> **Note:**
|
||||
> If you are reporting a security issue, please follow the [vulnerability reporting process](https://github.com/kata-containers/community#vulnerability-handling)
|
||||
|
||||
## Developers
|
||||
|
||||
### Components
|
||||
|
||||
### Main components
|
||||
|
||||
The table below lists the core parts of the project:
|
||||
|
||||
| Component | Type | Description |
|
||||
|-|-|-|
|
||||
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
|
||||
| [agent](src/agent) | core | Management process running inside the virtual machine / POD that sets up the container environment. |
|
||||
| [documentation](docs) | documentation | Documentation common to all components (such as design and install documentation). |
|
||||
| [tests](https://github.com/kata-containers/tests) | tests | Excludes unit tests which live with the main code. |
|
||||
|
||||
### Additional components
|
||||
|
||||
The table below lists the remaining parts of the project:
|
||||
|
||||
| Component | Type | Description |
|
||||
|-|-|-|
|
||||
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
|
||||
| [kernel](https://www.kernel.org) | kernel | Linux kernel used by the hypervisor to boot the guest image. Patches are stored [here](tools/packaging/kernel). |
|
||||
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images and kernel for the hypervisor. |
|
||||
| [`agent-ctl`](tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
|
||||
| [`trace-forwarder`](src/trace-forwarder) | utility | Agent tracing helper. |
|
||||
| [`ci`](https://github.com/kata-containers/ci) | CI | Continuous Integration configuration files and scripts. |
|
||||
| [`katacontainers.io`](https://github.com/kata-containers/www.katacontainers.io) | Source for the [`katacontainers.io`](https://www.katacontainers.io) site. |
|
||||
|
||||
### Packaging and releases
|
||||
|
||||
Kata Containers is now
|
||||
[available natively for most distributions](docs/install/README.md#packaged-installation-methods).
|
||||
However, packaging scripts and metadata are still used to generate snap and GitHub releases. See
|
||||
the [components](#components) section for further details.
|
||||
|
||||
## Glossary of Terms
|
||||
|
||||
See the [glossary of terms](Glossary.md) related to Kata Containers.
|
||||
---
|
||||
|
||||
[kernel]: https://www.kernel.org
|
||||
[github-katacontainers.io]: https://github.com/kata-containers/www.katacontainers.io
|
||||
Welcome to Kata Containers!
|
||||
|
||||
The purpose of this repository is to act as a "top level" site for the project. Specifically it is used:
|
||||
|
||||
- To provide a list of the various *other* [Kata Containers repositories](#kata-containers-repositories),
|
||||
along with a brief explanation of their purpose.
|
||||
|
||||
- To provide a general area for [Raising Issues](#raising-issues).
|
||||
|
||||
## Raising issues
|
||||
|
||||
This repository is used for [raising
|
||||
issues](https://github.com/kata-containers/kata-containers/issues/new):
|
||||
|
||||
- That might affect multiple code repositories.
|
||||
|
||||
- Where the raiser is unsure which repositories are affected.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - If an issue affects only a single component, it should be raised in that
|
||||
> components repository.
|
||||
|
||||
## Kata Containers repositories
|
||||
|
||||
### CI
|
||||
|
||||
The [CI](https://github.com/kata-containers/ci) repository stores the Continuous
|
||||
Integration (CI) system configuration information.
|
||||
|
||||
### Community
|
||||
|
||||
The [Community](https://github.com/kata-containers/community) repository is
|
||||
the first place to go if you want to use or contribute to the project.
|
||||
|
||||
### Code Repositories
|
||||
|
||||
#### Kata Containers-developed components
|
||||
|
||||
##### Agent
|
||||
|
||||
The [`kata-agent`](src/agent/README.md) runs inside the
|
||||
virtual machine and sets up the container environment.
|
||||
|
||||
##### KSM throttler
|
||||
|
||||
The [`kata-ksm-throttler`](https://github.com/kata-containers/ksm-throttler)
|
||||
is an optional utility that monitors containers and deduplicates memory to
|
||||
maximize container density on a host.
|
||||
|
||||
##### Runtime
|
||||
|
||||
The [`kata-runtime`](src/runtime/README.md) is usually
|
||||
invoked by a container manager and provides high-level verbs to manage
|
||||
containers.
|
||||
|
||||
##### Trace forwarder
|
||||
|
||||
The [`kata-trace-forwarder`](src/trace-forwarder) is a component only used
|
||||
when tracing the [agent](#agent) process.
|
||||
|
||||
#### Additional
|
||||
|
||||
##### Kernel
|
||||
|
||||
The hypervisor uses a [Linux\* kernel](https://github.com/kata-containers/linux) to boot the guest image.
|
||||
|
||||
### Documentation
|
||||
|
||||
The [docs](docs/README.md) directory holds documentation common to all code components.
|
||||
|
||||
### Packaging
|
||||
|
||||
We use the [packaging](tools/packaging/README.md) to create packages for the [system
|
||||
components](#kata-containers-developed-components) including
|
||||
[rootfs](#os-builder) and [kernel](#kernel) images.
|
||||
|
||||
### Test code
|
||||
|
||||
The [tests](https://github.com/kata-containers/tests) repository hosts all
|
||||
test code except the unit testing code (which is kept in the same repository
|
||||
as the component it tests).
|
||||
|
||||
### Utilities
|
||||
|
||||
#### OS builder
|
||||
|
||||
The [osbuilder](tools/osbuilder/README.md) tool can create
|
||||
a rootfs and a "mini O/S" image. This image is used by the hypervisor to setup
|
||||
the environment before switching to the workload.
|
||||
|
||||
#### `kata-agent-ctl`
|
||||
|
||||
[`kata-agent-ctl`](tools/agent-ctl) is a low-level test tool for
|
||||
interacting with the agent.
|
||||
|
||||
### Web content
|
||||
|
||||
The
|
||||
[www.katacontainers.io](https://github.com/kata-containers/www.katacontainers.io)
|
||||
repository contains all sources for the https://www.katacontainers.io site.
|
||||
|
||||
## Credits
|
||||
|
||||
Kata Containers uses [packagecloud](https://packagecloud.io) for package
|
||||
hosting.
|
||||
|
||||
@@ -12,11 +12,10 @@ install_aarch64_musl() {
|
||||
local musl_tar="${arch}-linux-musl-native.tgz"
|
||||
local musl_dir="${arch}-linux-musl-native"
|
||||
pushd /tmp
|
||||
if curl -sLO --fail https://musl.cc/${musl_tar}; then
|
||||
tar -zxf ${musl_tar}
|
||||
mkdir -p /usr/local/musl/
|
||||
cp -r ${musl_dir}/* /usr/local/musl/
|
||||
fi
|
||||
curl -sLO https://musl.cc/${musl_tar}
|
||||
tar -zxf ${musl_tar}
|
||||
mkdir -p /usr/local/musl/
|
||||
cp -r ${musl_dir}/* /usr/local/musl/
|
||||
popd
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -15,18 +15,10 @@ die() {
|
||||
# Install the yq yaml query package from the mikefarah github repo
|
||||
# Install via binary download, as we may not have golang installed at this point
|
||||
function install_yq() {
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
local yq_path="${GOPATH}/bin/yq"
|
||||
local yq_pkg="github.com/mikefarah/yq"
|
||||
local yq_version=3.4.1
|
||||
INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
|
||||
|
||||
if [ "${INSTALL_IN_GOPATH}" == "true" ];then
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
local yq_path="${GOPATH}/bin/yq"
|
||||
else
|
||||
yq_path="/usr/local/bin/yq"
|
||||
fi
|
||||
[ -x "${yq_path}" ] && [ "`${yq_path} --version`"X == "yq version ${yq_version}"X ] && return
|
||||
[ -x "${GOPATH}/bin/yq" ] && return
|
||||
|
||||
read -r -a sysInfo <<< "$(uname -sm)"
|
||||
|
||||
@@ -57,12 +49,15 @@ function install_yq() {
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
|
||||
# Check curl
|
||||
if ! command -v "curl" >/dev/null; then
|
||||
die "Please install curl"
|
||||
fi
|
||||
|
||||
local yq_version=3.1.0
|
||||
|
||||
## NOTE: ${var,,} => gives lowercase value of var
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos,,}_${goarch}"
|
||||
curl -o "${yq_path}" -LSsf "${yq_url}"
|
||||
|
||||
27
ci/lib.sh
27
ci/lib.sh
@@ -3,31 +3,20 @@
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -o nounset
|
||||
|
||||
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
|
||||
export tests_repo_dir="$GOPATH/src/$tests_repo"
|
||||
export branch="${target_branch:-main}"
|
||||
export branch="${branch:-$TRAVIS_BRANCH}"
|
||||
|
||||
# Clones the tests repository and checkout to the branch pointed out by
|
||||
# the global $branch variable.
|
||||
# If the clone exists and `CI` is exported then it does nothing. Otherwise
|
||||
# it will clone the repository or `git pull` the latest code.
|
||||
#
|
||||
clone_tests_repo()
|
||||
{
|
||||
if [ -d "$tests_repo_dir" ]; then
|
||||
[ -n "${CI:-}" ] && return
|
||||
pushd "${tests_repo_dir}"
|
||||
git checkout "${branch}"
|
||||
git pull
|
||||
popd
|
||||
else
|
||||
git clone -q "https://${tests_repo}" "$tests_repo_dir"
|
||||
pushd "${tests_repo_dir}"
|
||||
git checkout "${branch}"
|
||||
popd
|
||||
if [ -d "$tests_repo_dir" -a -n "$CI" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
|
||||
go get -d -u "$tests_repo" || true
|
||||
|
||||
pushd "${tests_repo_dir}" && git checkout "${branch}" && popd
|
||||
}
|
||||
|
||||
run_static_checks()
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
# Copyright (c) 2021 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This is the build root image for Kata Containers on OpenShift CI.
|
||||
#
|
||||
FROM centos:8
|
||||
|
||||
RUN yum -y update && yum -y install git sudo wget
|
||||
@@ -8,14 +8,9 @@
|
||||
set -e
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
export CI_JOB="${CI_JOB:-}"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd ${tests_repo_dir}
|
||||
.ci/run.sh
|
||||
# temporary fix, see https://github.com/kata-containers/tests/issues/3878
|
||||
if [ "$(uname -m)" != "s390x" ] && [ "$CI_JOB" == "CRI_CONTAINERD_K8S_MINIMAL" ]; then
|
||||
tracing/test-agent-shutdown.sh
|
||||
fi
|
||||
popd
|
||||
|
||||
@@ -1,3 +1,55 @@
|
||||
- [Warning](#warning)
|
||||
- [Assumptions](#assumptions)
|
||||
- [Initial setup](#initial-setup)
|
||||
- [Requirements to build individual components](#requirements-to-build-individual-components)
|
||||
- [Build and install the Kata Containers runtime](#build-and-install-the-kata-containers-runtime)
|
||||
- [Check hardware requirements](#check-hardware-requirements)
|
||||
- [Configure to use initrd or rootfs image](#configure-to-use-initrd-or-rootfs-image)
|
||||
- [Enable full debug](#enable-full-debug)
|
||||
- [debug logs and shimv2](#debug-logs-and-shimv2)
|
||||
- [Enabling full `containerd` debug](#enabling-full-containerd-debug)
|
||||
- [Enabling just `containerd shim` debug](#enabling-just-containerd-shim-debug)
|
||||
- [Enabling `CRI-O` and `shimv2` debug](#enabling-cri-o-and-shimv2-debug)
|
||||
- [journald rate limiting](#journald-rate-limiting)
|
||||
- [`systemd-journald` suppressing messages](#systemd-journald-suppressing-messages)
|
||||
- [Disabling `systemd-journald` rate limiting](#disabling-systemd-journald-rate-limiting)
|
||||
- [Create and install rootfs and initrd image](#create-and-install-rootfs-and-initrd-image)
|
||||
- [Build a custom Kata agent - OPTIONAL](#build-a-custom-kata-agent---optional)
|
||||
- [Get the osbuilder](#get-the-osbuilder)
|
||||
- [Create a rootfs image](#create-a-rootfs-image)
|
||||
- [Create a local rootfs](#create-a-local-rootfs)
|
||||
- [Add a custom agent to the image - OPTIONAL](#add-a-custom-agent-to-the-image---optional)
|
||||
- [Build a rootfs image](#build-a-rootfs-image)
|
||||
- [Install the rootfs image](#install-the-rootfs-image)
|
||||
- [Create an initrd image - OPTIONAL](#create-an-initrd-image---optional)
|
||||
- [Create a local rootfs for initrd image](#create-a-local-rootfs-for-initrd-image)
|
||||
- [Build an initrd image](#build-an-initrd-image)
|
||||
- [Install the initrd image](#install-the-initrd-image)
|
||||
- [Install guest kernel images](#install-guest-kernel-images)
|
||||
- [Install a hypervisor](#install-a-hypervisor)
|
||||
- [Build a custom QEMU](#build-a-custom-qemu)
|
||||
- [Build a custom QEMU for aarch64/arm64 - REQUIRED](#build-a-custom-qemu-for-aarch64arm64---required)
|
||||
- [Run Kata Containers with Containerd](#run-kata-containers-with-containerd)
|
||||
- [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
|
||||
- [Troubleshoot Kata Containers](#troubleshoot-kata-containers)
|
||||
- [Appendices](#appendices)
|
||||
- [Checking Docker default runtime](#checking-docker-default-runtime)
|
||||
- [Set up a debug console](#set-up-a-debug-console)
|
||||
- [Simple debug console setup](#simple-debug-console-setup)
|
||||
- [Enable agent debug console](#enable-agent-debug-console)
|
||||
- [Connect to debug console](#connect-to-debug-console)
|
||||
- [Traditional debug console setup](#traditional-debug-console-setup)
|
||||
- [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
|
||||
- [Build the debug image](#build-the-debug-image)
|
||||
- [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
|
||||
- [Create a container](#create-a-container)
|
||||
- [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
|
||||
- [Enabling debug console for QEMU](#enabling-debug-console-for-qemu)
|
||||
- [Enabling debug console for cloud-hypervisor / firecracker](#enabling-debug-console-for-cloud-hypervisor--firecracker)
|
||||
- [Connecting to the debug console](#connecting-to-the-debug-console)
|
||||
- [Obtain details of the image](#obtain-details-of-the-image)
|
||||
- [Capturing kernel boot logs](#capturing-kernel-boot-logs)
|
||||
|
||||
# Warning
|
||||
|
||||
This document is written **specifically for developers**: it is not intended for end users.
|
||||
@@ -51,7 +103,7 @@ The build will create the following:
|
||||
You can check if your system is capable of creating a Kata Container by running the following:
|
||||
|
||||
```
|
||||
$ sudo kata-runtime check
|
||||
$ sudo kata-runtime kata-check
|
||||
```
|
||||
|
||||
If your system is *not* able to run Kata Containers, the previous command will error out and explain why.
|
||||
@@ -252,7 +304,7 @@ You MUST choose one of `alpine`, `centos`, `clearlinux`, `debian`, `euleros`, `f
|
||||
> - You should only do this step if you are testing with the latest version of the agent.
|
||||
|
||||
```
|
||||
$ sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/usr/bin ../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent
|
||||
$ sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/bin ../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent
|
||||
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
```
|
||||
@@ -301,13 +353,9 @@ You MUST choose one of `alpine`, `centos`, `clearlinux`, `euleros`, and `fedora`
|
||||
>
|
||||
> - Check the [compatibility matrix](../tools/osbuilder/README.md#platform-distro-compatibility-matrix) before creating rootfs.
|
||||
|
||||
Optionally, add your custom agent binary to the rootfs with the following commands. The default `$LIBC` used
|
||||
is `musl`, but on ppc64le and s390x, `gnu` should be used. Also, Rust refers to ppc64le as `powerpc64le`:
|
||||
Optionally, add your custom agent binary to the rootfs with the following:
|
||||
```
|
||||
$ export ARCH=$(uname -m)
|
||||
$ [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
|
||||
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
|
||||
$ sudo install -o root -g root -m 0550 -T ../../../src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent ${ROOTFS_DIR}/sbin/init
|
||||
$ sudo install -o root -g root -m 0550 -T ../../agent/kata-agent ${ROOTFS_DIR}/sbin/init
|
||||
```
|
||||
|
||||
### Build an initrd image
|
||||
@@ -342,40 +390,14 @@ You may choose to manually build your VMM/hypervisor.
|
||||
Kata Containers makes use of upstream QEMU branch. The exact version
|
||||
and repository utilized can be found by looking at the [versions file](../versions.yaml).
|
||||
|
||||
Find the correct version of QEMU from the versions file:
|
||||
```
|
||||
$ source ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/scripts/lib.sh
|
||||
$ qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu.version")
|
||||
$ echo ${qemu_version}
|
||||
```
|
||||
Get source from the matching branch of QEMU:
|
||||
```
|
||||
$ go get -d github.com/qemu/qemu
|
||||
$ cd ${GOPATH}/src/github.com/qemu/qemu
|
||||
$ git checkout ${qemu_version}
|
||||
$ your_qemu_directory=${GOPATH}/src/github.com/qemu/qemu
|
||||
```
|
||||
|
||||
There are scripts to manage the build and packaging of QEMU. For the examples below, set your
|
||||
environment as:
|
||||
```
|
||||
$ go get -d github.com/kata-containers/kata-containers
|
||||
$ packaging_dir="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging"
|
||||
```
|
||||
|
||||
Kata often utilizes patches for not-yet-upstream and/or backported fixes for components,
|
||||
including QEMU. These can be found in the [packaging/QEMU directory](../tools/packaging/qemu/patches),
|
||||
and it's *recommended* that you apply them. For example, suppose that you are going to build QEMU
|
||||
version 5.2.0, do:
|
||||
```
|
||||
$ cd $your_qemu_directory
|
||||
$ $packaging_dir/scripts/apply_patches.sh $packaging_dir/qemu/patches/5.2.x/
|
||||
```
|
||||
Kata often utilizes patches for not-yet-upstream fixes for components,
|
||||
including QEMU. These can be found in the [packaging/QEMU directory](../tools/packaging/qemu/patches)
|
||||
|
||||
To build utilizing the same options as Kata, you should make use of the `configure-hypervisor.sh` script. For example:
|
||||
```
|
||||
$ go get -d github.com/kata-containers/kata-containers/tools/packaging
|
||||
$ cd $your_qemu_directory
|
||||
$ $packaging_dir/scripts/configure-hypervisor.sh kata-qemu > kata.cfg
|
||||
$ ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/scripts/configure-hypervisor.sh qemu > kata.cfg
|
||||
$ eval ./configure "$(cat kata.cfg)"
|
||||
$ make -j $(nproc)
|
||||
$ sudo -E make install
|
||||
@@ -417,7 +439,7 @@ script and paste its output directly into a
|
||||
> [runtime](../src/runtime) repository.
|
||||
|
||||
To perform analysis on Kata logs, use the
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/main/cmd/log-parser)
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/master/cmd/log-parser)
|
||||
tool, which can convert the logs into formats (e.g. JSON, TOML, XML, and YAML).
|
||||
|
||||
See [Set up a debug console](#set-up-a-debug-console).
|
||||
@@ -450,9 +472,9 @@ debug_console_enabled = true
|
||||
|
||||
This will pass `agent.debug_console agent.debug_console_vport=1026` to agent as kernel parameters, and sandboxes created using this parameters will start a shell in guest if new connection is accept from VSOCK.
|
||||
|
||||
#### Start `kata-monitor` - ONLY NEEDED FOR 2.0.x
|
||||
#### Start `kata-monitor`
|
||||
|
||||
For Kata Containers `2.0.x` releases, the `kata-runtime exec` command depends on the`kata-monitor` running, in order to get the sandbox's `vsock` address to connect to. Thus, first start the `kata-monitor` process.
|
||||
The `kata-runtime exec` command needs `kata-monitor` to get the sandbox's `vsock` address to connect to, first start `kata-monitor`.
|
||||
|
||||
```
|
||||
$ sudo kata-monitor
|
||||
@@ -460,6 +482,7 @@ $ sudo kata-monitor
|
||||
|
||||
`kata-monitor` will serve at `localhost:8090` by default.
|
||||
|
||||
|
||||
#### Connect to debug console
|
||||
|
||||
Command `kata-runtime exec` is used to connect to the debug console.
|
||||
@@ -474,10 +497,6 @@ bash-4.2# exit
|
||||
exit
|
||||
```
|
||||
|
||||
`kata-runtime exec` has a command-line option `runtime-namespace`, which is used to specify under which [runtime namespace](https://github.com/containerd/containerd/blob/master/docs/namespaces.md) the particular pod was created. By default, it is set to `k8s.io` and works for containerd when configured
|
||||
with Kubernetes. For CRI-O, the namespace should set to `default` explicitly. This should not be confused with [Kubernetes namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/).
|
||||
For other CRI-runtimes and configurations, you may need to set the namespace utilizing the `runtime-namespace` option.
|
||||
|
||||
If you want to access guest OS through a traditional way, see [Traditional debug console setup)](#traditional-debug-console-setup).
|
||||
|
||||
### Traditional debug console setup
|
||||
@@ -604,7 +623,7 @@ VMM solution.
|
||||
|
||||
In case of cloud-hypervisor, connect to the `vsock` as shown:
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/vm/${sandbox_id}/root/ && socat stdin unix-connect:clh.sock'
|
||||
$ sudo su -c 'cd /var/run/vc/vm/{sandbox_id}/root/ && socat stdin unix-connect:clh.sock'
|
||||
CONNECT 1026
|
||||
```
|
||||
|
||||
@@ -612,7 +631,7 @@ CONNECT 1026
|
||||
|
||||
For firecracker, connect to the `hvsock` as shown:
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/firecracker/${sandbox_id}/root/ && socat stdin unix-connect:kata.hvsock'
|
||||
$ sudo su -c 'cd /var/run/vc/firecracker/{sandbox_id}/root/ && socat stdin unix-connect:kata.hvsock'
|
||||
CONNECT 1026
|
||||
```
|
||||
|
||||
@@ -621,7 +640,7 @@ CONNECT 1026
|
||||
|
||||
For QEMU, connect to the `vsock` as shown:
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/vm/${sandbox_id} && socat "stdin,raw,echo=0,escape=0x11" "unix-connect:console.sock"'
|
||||
$ sudo su -c 'cd /var/run/vc/vm/{sandbox_id} && socat "stdin,raw,echo=0,escape=0x11" "unix-connect:console.sock"
|
||||
```
|
||||
|
||||
To disconnect from the virtual machine, type `CONTROL+q` (hold down the
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
* [Introduction](#introduction)
|
||||
* [General requirements](#general-requirements)
|
||||
* [Linking advice](#linking-advice)
|
||||
* [Notes](#notes)
|
||||
* [Warnings and other admonitions](#warnings-and-other-admonitions)
|
||||
* [Files and command names](#files-and-command-names)
|
||||
* [Code blocks](#code-blocks)
|
||||
* [Images](#images)
|
||||
* [Spelling](#spelling)
|
||||
* [Names](#names)
|
||||
* [Version numbers](#version-numbers)
|
||||
* [The apostrophe](#the-apostrophe)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document outlines the requirements for all documentation in the [Kata
|
||||
@@ -10,6 +23,10 @@ All documents must:
|
||||
- Be written in simple English.
|
||||
- Be written in [GitHub Flavored Markdown](https://github.github.com/gfm) format.
|
||||
- Have a `.md` file extension.
|
||||
- Include a TOC (table of contents) at the top of the document with links to
|
||||
all heading sections. We recommend using the
|
||||
[`kata-check-markdown`](https://github.com/kata-containers/tests/tree/master/cmd/check-markdown)
|
||||
tool to generate the TOC.
|
||||
- Be linked to from another document in the same repository.
|
||||
|
||||
Although GitHub allows navigation of the entire repository, it should be
|
||||
@@ -26,10 +43,6 @@ All documents must:
|
||||
which can then execute the commands specified to ensure the instructions are
|
||||
correct. This avoids documents becoming out of date over time.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> Do not add a table of contents (TOC) since GitHub will auto-generate one.
|
||||
|
||||
# Linking advice
|
||||
|
||||
Linking between documents is strongly encouraged to help users and developers
|
||||
@@ -105,7 +118,7 @@ This section lists requirements for displaying commands and command output.
|
||||
The requirements must be adhered to since documentation containing code blocks
|
||||
is validated by the CI system, which executes the command blocks with the help
|
||||
of the
|
||||
[doc-to-script](https://github.com/kata-containers/tests/tree/main/.ci/kata-doc-to-script.sh)
|
||||
[doc-to-script](https://github.com/kata-containers/tests/tree/master/.ci/kata-doc-to-script.sh)
|
||||
utility.
|
||||
|
||||
- If a document includes commands the user should run, they **MUST** be shown
|
||||
@@ -189,7 +202,7 @@ and compare them with standard tools (e.g. `diff(1)`).
|
||||
|
||||
Since this project uses a number of terms not found in conventional
|
||||
dictionaries, we have a
|
||||
[spell checking tool](https://github.com/kata-containers/tests/tree/main/cmd/check-spelling)
|
||||
[spell checking tool](https://github.com/kata-containers/tests/tree/master/cmd/check-spelling)
|
||||
that checks both dictionary words and the additional terms we use.
|
||||
|
||||
Run the spell checking tool on your document before raising a PR to ensure it
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
# Licensing strategy
|
||||
|
||||
* [Project License](#project-license)
|
||||
* [License file](#license-file)
|
||||
* [License for individual files](#license-for-individual-files)
|
||||
|
||||
## Project License
|
||||
|
||||
The license for the [Kata Containers](https://github.com/kata-containers)
|
||||
@@ -18,4 +22,4 @@ licensing and allows automated tooling to check the license of individual
|
||||
files.
|
||||
|
||||
This SPDX licence identifier requirement is enforced by the
|
||||
[CI (Continuous Integration) system](https://github.com/kata-containers/tests/blob/main/.ci/static-checks.sh).
|
||||
[CI (Continuous Integration) system](https://github.com/kata-containers/tests/blob/master/.ci/static-checks.sh).
|
||||
|
||||
@@ -1,3 +1,33 @@
|
||||
* [Overview](#overview)
|
||||
* [Definition of a limitation](#definition-of-a-limitation)
|
||||
* [Scope](#scope)
|
||||
* [Contributing](#contributing)
|
||||
* [Pending items](#pending-items)
|
||||
* [Runtime commands](#runtime-commands)
|
||||
* [checkpoint and restore](#checkpoint-and-restore)
|
||||
* [events command](#events-command)
|
||||
* [update command](#update-command)
|
||||
* [Networking](#networking)
|
||||
* [Docker swarm and compose support](#docker-swarm-and-compose-support)
|
||||
* [Resource management](#resource-management)
|
||||
* [docker run and shared memory](#docker-run-and-shared-memory)
|
||||
* [docker run and sysctl](#docker-run-and-sysctl)
|
||||
* [Docker daemon features](#docker-daemon-features)
|
||||
* [SELinux support](#selinux-support)
|
||||
* [Architectural limitations](#architectural-limitations)
|
||||
* [Networking limitations](#networking-limitations)
|
||||
* [Support for joining an existing VM network](#support-for-joining-an-existing-vm-network)
|
||||
* [docker --net=host](#docker---nethost)
|
||||
* [docker run --link](#docker-run---link)
|
||||
* [Host resource sharing](#host-resource-sharing)
|
||||
* [docker run --privileged](#docker-run---privileged)
|
||||
* [Miscellaneous](#miscellaneous)
|
||||
* [Docker --security-opt option partially supported](#docker---security-opt-option-partially-supported)
|
||||
* [Appendices](#appendices)
|
||||
* [The constraints challenge](#the-constraints-challenge)
|
||||
|
||||
---
|
||||
|
||||
# Overview
|
||||
|
||||
A [Kata Container](https://github.com/kata-containers) utilizes a Virtual Machine (VM) to enhance security and
|
||||
@@ -62,9 +92,7 @@ This section lists items that might be possible to fix.
|
||||
### checkpoint and restore
|
||||
|
||||
The runtime does not provide `checkpoint` and `restore` commands. There
|
||||
are discussions about using VM save and restore to give us a
|
||||
`[criu](https://github.com/checkpoint-restore/criu)`-like functionality,
|
||||
which might provide a solution.
|
||||
are discussions about using VM save and restore to give [`criu`](https://github.com/checkpoint-restore/criu)-like functionality, which might provide a solution.
|
||||
|
||||
Note that the OCI standard does not specify `checkpoint` and `restore`
|
||||
commands.
|
||||
@@ -188,17 +216,6 @@ Equivalent functionality can be achieved with the newer docker networking comman
|
||||
See more documentation at
|
||||
[docs.docker.com](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/).
|
||||
|
||||
## Storage limitations
|
||||
|
||||
### Kubernetes `volumeMounts.subPaths`
|
||||
|
||||
Kubernetes `volumeMount.subPath` is not supported by Kata Containers at the
|
||||
moment.
|
||||
|
||||
See [this issue](https://github.com/kata-containers/runtime/issues/2812) for more details.
|
||||
[Another issue](https://github.com/kata-containers/kata-containers/issues/1728) focuses on the case of `emptyDir`.
|
||||
|
||||
|
||||
## Host resource sharing
|
||||
|
||||
### docker run --privileged
|
||||
@@ -207,7 +224,7 @@ Privileged support in Kata is essentially different from `runc` containers.
|
||||
Kata does support `docker run --privileged` command, but in this case full access
|
||||
to the guest VM is provided in addition to some host access.
|
||||
|
||||
The container runs with elevated capabilities within the guest and is granted
|
||||
The container runs with elevated capabilities within the guest and is granted
|
||||
access to guest devices instead of the host devices.
|
||||
This is also true with using `securityContext privileged=true` with Kubernetes.
|
||||
|
||||
|
||||
@@ -1,5 +1,16 @@
|
||||
# Documentation
|
||||
|
||||
* [Getting Started](#getting-started)
|
||||
* [More User Guides](#more-user-guides)
|
||||
* [Kata Use-Cases](#kata-use-cases)
|
||||
* [Developer Guide](#developer-guide)
|
||||
* [Design and Implementations](#design-and-implementations)
|
||||
* [How to Contribute](#how-to-contribute)
|
||||
* [Code Licensing](#code-licensing)
|
||||
* [The Release Process](#the-release-process)
|
||||
* [Help Improving the Documents](#help-improving-the-documents)
|
||||
* [Website Changes](#website-changes)
|
||||
|
||||
The [Kata Containers](https://github.com/kata-containers)
|
||||
documentation repository hosts overall system documentation, with information
|
||||
common to multiple components.
|
||||
@@ -29,7 +40,6 @@ See the [howto documentation](how-to).
|
||||
* [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md)
|
||||
* [VPP with Kata](./use-cases/using-vpp-and-kata.md)
|
||||
* [SPDK vhost-user with Kata](./use-cases/using-SPDK-vhostuser-and-kata.md)
|
||||
* [Intel SGX with Kata](./use-cases/using-Intel-SGX-and-kata.md)
|
||||
|
||||
## Developer Guide
|
||||
|
||||
|
||||
@@ -1,10 +1,25 @@
|
||||
|
||||
# How to do a Kata Containers Release
|
||||
This document lists the tasks required to create a Kata Release.
|
||||
|
||||
<!-- TOC START min:1 max:3 link:true asterisk:false update:true -->
|
||||
- [How to do a Kata Containers Release](#how-to-do-a-kata-containers-release)
|
||||
- [Requirements](#requirements)
|
||||
- [Release Process](#release-process)
|
||||
- [Bump all Kata repositories](#bump-all-kata-repositories)
|
||||
- [Merge all bump version Pull requests](#merge-all-bump-version-pull-requests)
|
||||
- [Tag all Kata repositories](#tag-all-kata-repositories)
|
||||
- [Check Git-hub Actions](#check-git-hub-actions)
|
||||
- [Create release notes](#create-release-notes)
|
||||
- [Announce the release](#announce-the-release)
|
||||
<!-- TOC END -->
|
||||
|
||||
|
||||
## Requirements
|
||||
|
||||
- [hub](https://github.com/github/hub)
|
||||
* Using an [application token](https://github.com/settings/tokens) is required for hub.
|
||||
|
||||
- OBS account with permissions on [`/home:katacontainers`](https://build.opensuse.org/project/subprojects/home:katacontainers)
|
||||
|
||||
- GitHub permissions to push tags and create releases in Kata repositories.
|
||||
|
||||
@@ -15,12 +30,16 @@
|
||||
|
||||
## Release Process
|
||||
|
||||
|
||||
### Bump all Kata repositories
|
||||
|
||||
Bump the repositories using a script in the Kata packaging repo, where:
|
||||
- `BRANCH=<the-branch-you-want-to-bump>`
|
||||
- `NEW_VERSION=<the-new-kata-version>`
|
||||
- We have set up a Jenkins job to bump the version in the `VERSION` file in all Kata repositories. Go to the [Jenkins bump-job page](http://jenkins.katacontainers.io/job/release/build) to trigger a new job.
|
||||
- Start a new job with variables for the job passed as:
|
||||
- `BRANCH=<the-branch-you-want-to-bump>`
|
||||
- `NEW_VERSION=<the-new-kata-version>`
|
||||
|
||||
For example, in the case where you want to make a patch release `1.10.2`, the variable `NEW_VERSION` should be `1.10.2` and `BRANCH` should point to `stable-1.10`. In case of an alpha or release candidate release, `BRANCH` should point to `master` branch.
|
||||
|
||||
Alternatively, you can also bump the repositories using a script in the Kata packaging repo
|
||||
```
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/release
|
||||
$ export NEW_VERSION=<the-new-kata-version>
|
||||
@@ -28,23 +47,6 @@
|
||||
$ ./update-repository-version.sh -p "$NEW_VERSION" "$BRANCH"
|
||||
```
|
||||
|
||||
### Point tests repository to stable branch
|
||||
|
||||
If you create a new stable branch, i.e. if your release changes a major or minor version number (not a patch release), then
|
||||
you should modify the `tests` repository to point to that newly created stable branch and not the `main` branch.
|
||||
The objective is that changes in the CI on the main branch will not impact the stable branch.
|
||||
|
||||
In the test directory, change references the main branch in:
|
||||
* `README.md`
|
||||
* `versions.yaml`
|
||||
* `cmd/github-labels/labels.yaml.in`
|
||||
* `cmd/pmemctl/pmemctl.sh`
|
||||
* `.ci/lib.sh`
|
||||
* `.ci/static-checks.sh`
|
||||
|
||||
See the commits in [the corresponding PR for stable-2.1](https://github.com/kata-containers/tests/pull/3504) for an example of the changes.
|
||||
|
||||
|
||||
### Merge all bump version Pull requests
|
||||
|
||||
- The above step will create a GitHub pull request in the Kata projects. Trigger the CI using `/test` command on each bump Pull request.
|
||||
@@ -54,7 +56,7 @@
|
||||
### Tag all Kata repositories
|
||||
|
||||
Once all the pull requests to bump versions in all Kata repositories are merged,
|
||||
tag all the repositories as shown below.
|
||||
tag all the repositories as shown below.
|
||||
```
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/release
|
||||
$ git checkout <kata-branch-to-release>
|
||||
@@ -64,7 +66,7 @@
|
||||
|
||||
### Check Git-hub Actions
|
||||
|
||||
We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/main.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.
|
||||
We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/master/.github/workflows/main.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-conatiners` repository.
|
||||
|
||||
Check the [actions status page](https://github.com/kata-containers/kata-containers/actions) to verify all steps in the actions workflow have completed successfully. On success, a static tarball containing Kata release artifacts will be uploaded to the [Release page](https://github.com/kata-containers/kata-containers/releases).
|
||||
|
||||
|
||||
@@ -32,16 +32,16 @@ provides additional information regarding release `99.123.77` in the previous ex
|
||||
changing the existing behavior*.
|
||||
|
||||
- When `MAJOR` increases, the new release adds **new features, bug fixes, or
|
||||
both** and which **changes the behavior from the previous release** (incompatible with previous releases).
|
||||
both** and which *changes the behavior from the previous release* (incompatible with previous releases).
|
||||
|
||||
A major release will also likely require a change of the container manager version used,
|
||||
for example Containerd or CRI-O. Please refer to the release notes for further details.
|
||||
for example Docker\*. Please refer to the release notes for further details.
|
||||
|
||||
## Release Strategy
|
||||
|
||||
Any new features added since the last release will be available in the next minor
|
||||
release. These will include bug fixes as well. To facilitate a stable user environment,
|
||||
Kata provides stable branch-based releases and a main branch release.
|
||||
Kata provides stable branch-based releases and a master branch release.
|
||||
|
||||
## Stable branch patch criteria
|
||||
|
||||
@@ -49,10 +49,9 @@ No new features should be introduced to stable branches. This is intended to li
|
||||
providing only bug and security fixes.
|
||||
|
||||
## Branch Management
|
||||
Kata Containers will maintain **one** stable release branch, in addition to the main branch, for
|
||||
each active major release.
|
||||
Once a new MAJOR or MINOR release is created from main, a new stable branch is created for
|
||||
the prior MAJOR or MINOR release and the previous stable branch is no longer maintained. End of
|
||||
Kata Containers will maintain two stable release branches in addition to the master branch.
|
||||
Once a new MAJOR or MINOR release is created from master, a new stable branch is created for
|
||||
the prior MAJOR or MINOR release and the older stable branch is no longer maintained. End of
|
||||
maintenance for a branch is announced on the Kata Containers mailing list. Users can determine
|
||||
the version currently installed by running `kata-runtime kata-env`. It is recommended to use the
|
||||
latest stable branch available.
|
||||
@@ -62,59 +61,59 @@ A couple of examples follow to help clarify this process.
|
||||
### New bug fix introduced
|
||||
|
||||
A bug fix is submitted against the runtime which does not introduce new inter-component dependencies.
|
||||
This fix is applied to both the main and stable branches, and there is no need to create a new
|
||||
This fix is applied to both the master and stable branches, and there is no need to create a new
|
||||
stable branch.
|
||||
|
||||
| Branch | Original version | New version |
|
||||
|--|--|--|
|
||||
| `main` | `2.3.0-rc0` | `2.3.0-rc1` |
|
||||
| `stable-2.2` | `2.2.0` | `2.2.1` |
|
||||
| `stable-2.1` | (unmaintained) | (unmaintained) |
|
||||
| `master` | `1.3.0-rc0` | `1.3.0-rc1` |
|
||||
| `stable-1.2` | `1.2.0` | `1.2.1` |
|
||||
| `stable-1.1` | `1.1.2` | `1.1.3` |
|
||||
|
||||
|
||||
### New release made feature or change adding new inter-component dependency
|
||||
|
||||
A new feature is introduced, which adds a new inter-component dependency. In this case a new stable
|
||||
branch is created (stable-2.3) starting from main and the previous stable branch (stable-2.2)
|
||||
branch is created (stable-1.3) starting from master and the older stable branch (stable-1.1)
|
||||
is dropped from maintenance.
|
||||
|
||||
|
||||
| Branch | Original version | New version |
|
||||
|--|--|--|
|
||||
| `main` | `2.3.0-rc1` | `2.3.0` |
|
||||
| `stable-2.3` | N/A| `2.3.0` |
|
||||
| `stable-2.2` | `2.2.1` | (unmaintained) |
|
||||
| `stable-2.1` | (unmaintained) | (unmaintained) |
|
||||
| `master` | `1.3.0-rc1` | `1.3.0` |
|
||||
| `stable-1.3` | N/A| `1.3.0` |
|
||||
| `stable-1.2` | `1.2.1` | `1.2.2` |
|
||||
| `stable-1.1` | `1.1.3` | (unmaintained) |
|
||||
|
||||
Note, the stable-2.2 branch will still exist with tag 2.2.1, but under current plans it is
|
||||
not maintained further. The next tag applied to main will be 2.4.0-alpha0. We would then
|
||||
Note, the stable-1.1 branch will still exist with tag 1.1.3, but under current plans it is
|
||||
not maintained further. The next tag applied to master will be 1.4.0-alpha0. We would then
|
||||
create a couple of alpha releases gathering features targeted for that particular release (in
|
||||
this case 2.4.0), followed by a release candidate. The release candidate marks a feature freeze.
|
||||
this case 1.4.0), followed by a release candidate. The release candidate marks a feature freeze.
|
||||
A new stable branch is created for the release candidate. Only bug fixes and any security issues
|
||||
are added to the branch going forward until release 2.4.0 is made.
|
||||
are added to the branch going forward until release 1.4.0 is made.
|
||||
|
||||
## Backporting Process
|
||||
|
||||
Development that occurs against the main branch and applicable code commits should also be submitted
|
||||
Development that occurs against the master branch and applicable code commits should also be submitted
|
||||
against the stable branches. Some guidelines for this process follow::
|
||||
1. Only bug and security fixes which do not introduce inter-component dependencies are
|
||||
candidates for stable branches. These PRs should be marked with "bug" in GitHub.
|
||||
2. Once a PR is created against main which meets requirement of (1), a comparable one
|
||||
2. Once a PR is created against master which meets requirement of (1), a comparable one
|
||||
should also be submitted against the stable branches. It is the responsibility of the submitter
|
||||
to apply their pull request against stable, and it is the responsibility of the
|
||||
reviewers to help identify stable-candidate pull requests.
|
||||
|
||||
## Continuous Integration Testing
|
||||
|
||||
The test repository is forked to create stable branches from main. Full CI
|
||||
runs on each stable and main PR using its respective tests repository branch.
|
||||
The test repository is forked to create stable branches from master. Full CI
|
||||
runs on each stable and master PR using its respective tests repository branch.
|
||||
|
||||
### An alternative method for CI testing:
|
||||
|
||||
Ideally, the continuous integration infrastructure will run the same test suite on both main
|
||||
Ideally, the continuous integration infrastructure will run the same test suite on both master
|
||||
and the stable branches. When tests are modified or new feature tests are introduced, explicit
|
||||
logic should exist within the testing CI to make sure only applicable tests are executed against
|
||||
stable and main. While this is not in place currently, it should be considered in the long term.
|
||||
stable and master. While this is not in place currently, it should be considered in the long term.
|
||||
|
||||
## Release Management
|
||||
|
||||
@@ -122,7 +121,7 @@ stable and main. While this is not in place currently, it should be considered i
|
||||
|
||||
Releases are made every three weeks, which include a GitHub release as
|
||||
well as binary packages. These patch releases are made for both stable branches, and a "release candidate"
|
||||
for the next `MAJOR` or `MINOR` is created from main. If there are no changes across all the repositories, no
|
||||
for the next `MAJOR` or `MINOR` is created from master. If there are no changes across all the repositories, no
|
||||
release is created and an announcement is made on the developer mailing list to highlight this.
|
||||
If a release is being made, each repository is tagged for this release, regardless
|
||||
of whether changes are introduced. The release schedule can be seen on the
|
||||
@@ -143,10 +142,10 @@ maturity, we have increased the cadence from six weeks to twelve weeks. The rele
|
||||
### Compatibility
|
||||
Kata guarantees compatibility between components that are within one minor release of each other.
|
||||
|
||||
This is critical for dependencies which cross between host (shimv2 runtime) and
|
||||
This is critical for dependencies which cross between host (runtime, shim, proxy) and
|
||||
the guest (hypervisor, rootfs and agent). For example, consider a cluster with a long-running
|
||||
deployment, workload-never-dies, all on Kata version 2.1.3 components. If the operator updates
|
||||
the Kata components to the next new minor release (i.e. 2.2.0), we need to guarantee that the 2.2.0
|
||||
shimv2 runtime still communicates with 2.1.3 agent within workload-never-dies.
|
||||
deployment, workload-never-dies, all on Kata version 1.1.3 components. If the operator updates
|
||||
the Kata components to the next new minor release (i.e. 1.2.0), we need to guarantee that the 1.2.0
|
||||
runtime still communicates with 1.1.3 agent within workload-never-dies.
|
||||
|
||||
Handling live-update is out of the scope of this document. See this [`kata-runtime` issue](https://github.com/kata-containers/runtime/issues/492) for details.
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
* [Introduction](#introduction)
|
||||
* [Maintenance warning](#maintenance-warning)
|
||||
* [Determine current version](#determine-current-version)
|
||||
* [Determine latest version](#determine-latest-version)
|
||||
* [Configuration changes](#configuration-changes)
|
||||
* [Upgrade Kata Containers](#upgrade-kata-containers)
|
||||
* [Upgrade native distribution packaged version](#upgrade-native-distribution-packaged-version)
|
||||
* [Static installation](#static-installation)
|
||||
* [Determine if you are using a static installation](#determine-if-you-are-using-a-static-installation)
|
||||
* [Remove a static installation](#remove-a-static-installation)
|
||||
* [Upgrade a static installation](#upgrade-a-static-installation)
|
||||
* [Custom assets](#custom-assets)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document outlines the options for upgrading from a
|
||||
@@ -35,10 +48,10 @@ Alternatively, if you are using Kata Containers version 1.12.0 or newer, you
|
||||
can check for newer releases using the command line:
|
||||
|
||||
```bash
|
||||
$ kata-runtime check --check-version-only
|
||||
$ kata-runtime kata-check --check-version-only
|
||||
```
|
||||
|
||||
There are various other related options. Run `kata-runtime check --help`
|
||||
There are various other related options. Run `kata-runtime kata-check --help`
|
||||
for further details.
|
||||
|
||||
# Configuration changes
|
||||
|
||||
@@ -8,9 +8,4 @@ Kata Containers design documents:
|
||||
- [VSocks](VSocks.md)
|
||||
- [VCPU handling](vcpu-handling.md)
|
||||
- [Host cgroups](host-cgroups.md)
|
||||
- [`Inotify` support](inotify.md)
|
||||
- [Metrics(Kata 2.0)](kata-2-0-metrics.md)
|
||||
|
||||
---
|
||||
|
||||
- [Design proposals](proposals)
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
# Kata Containers and VSOCKs
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [VSOCK communication diagram](#vsock-communication-diagram)
|
||||
- [System requirements](#system-requirements)
|
||||
- [Advantages of using VSOCKs](#advantages-of-using-vsocks)
|
||||
- [High density](#high-density)
|
||||
- [Reliability](#reliability)
|
||||
|
||||
## Introduction
|
||||
|
||||
There are two different ways processes in the virtual machine can communicate
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 101 KiB |
@@ -1,5 +1,26 @@
|
||||
# Kata Containers Architecture
|
||||
|
||||
|
||||
- [Kata Containers Architecture](#kata-containers-architecture)
|
||||
- [Overview](#overview)
|
||||
- [Virtualization](#virtualization)
|
||||
- [Guest assets](#guest-assets)
|
||||
- [Guest kernel](#guest-kernel)
|
||||
- [Guest image](#guest-image)
|
||||
- [Root filesystem image](#root-filesystem-image)
|
||||
- [Initrd image](#initrd-image)
|
||||
- [Agent](#agent)
|
||||
- [Runtime](#runtime)
|
||||
- [Configuration](#configuration)
|
||||
- [Networking](#networking)
|
||||
- [Network Hotplug](#network-hotplug)
|
||||
- [Storage](#storage)
|
||||
- [Kubernetes support](#kubernetes-support)
|
||||
- [OCI annotations](#oci-annotations)
|
||||
- [Mixing VM based and namespace based runtimes](#mixing-vm-based-and-namespace-based-runtimes)
|
||||
- [Appendices](#appendices)
|
||||
- [DAX](#dax)
|
||||
|
||||
## Overview
|
||||
|
||||
This is an architectural overview of Kata Containers, based on the 2.0 release.
|
||||
@@ -37,7 +58,7 @@ to go through the VSOCK interface exported by QEMU.
|
||||
|
||||
The container workload, that is, the actual OCI bundle rootfs, is exported from the
|
||||
host to the virtual machine. In the case where a block-based graph driver is
|
||||
configured, `virtio-scsi` will be used. In all other cases a `virtio-fs` VIRTIO mount point
|
||||
configured, `virtio-scsi` will be used. In all other cases a 9pfs VIRTIO mount point
|
||||
will be used. `kata-agent` uses this mount point as the root filesystem for the
|
||||
container processes.
|
||||
|
||||
@@ -116,7 +137,7 @@ The runtime uses a TOML format configuration file called `configuration.toml`. B
|
||||
|
||||
The actual configuration file paths can be determined by running:
|
||||
```
|
||||
$ kata-runtime --show-default-config-paths
|
||||
$ kata-runtime --kata-show-default-config-paths
|
||||
```
|
||||
Most users will not need to modify the configuration file.
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# Kata Containers E2E Flow
|
||||
|
||||
|
||||

|
||||
|
||||
@@ -1,3 +1,18 @@
|
||||
- [Host cgroup management](#host-cgroup-management)
|
||||
- [Introduction](#introduction)
|
||||
- [`SandboxCgroupOnly` enabled](#sandboxcgrouponly-enabled)
|
||||
- [What does Kata do in this configuration?](#what-does-kata-do-in-this-configuration)
|
||||
- [Why create a Kata-cgroup under the parent cgroup?](#why-create-a-kata-cgroup-under-the-parent-cgroup)
|
||||
- [Improvements](#improvements)
|
||||
- [`SandboxCgroupOnly` disabled (default, legacy)](#sandboxcgrouponly-disabled-default-legacy)
|
||||
- [What does this method do?](#what-does-this-method-do)
|
||||
- [Impact](#impact)
|
||||
- [Supported cgroups](#supported-cgroups)
|
||||
- [Cgroups V1](#cgroups-v1)
|
||||
- [Cgroups V2](#cgroups-v2)
|
||||
- [Distro Support](#distro-support)
|
||||
- [Summary](#summary)
|
||||
|
||||
# Host cgroup management
|
||||
|
||||
## Introduction
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# Kata Containers support for `inotify`
|
||||
|
||||
## Background on `inotify` usage
|
||||
|
||||
A common pattern in Kubernetes is to watch for changes to files/directories passed in as `ConfigMaps`
|
||||
or `Secrets`. Sidecars normally use `inotify` to watch for changes and then signal the primary container to reload
|
||||
the updated configuration. Kata Containers typically will pass these host files into the guest using `virtiofs`, which
|
||||
does not support `inotify` today. While we work to enable this use case in `virtiofs`, we introduced a workaround in Kata Containers.
|
||||
This document describes how Kata Containers implements this workaround.
|
||||
|
||||
### Detecting a `watchable` mount
|
||||
|
||||
Kubernetes creates `secrets` and `ConfigMap` mounts at very specific locations on the host filesystem. For container mounts,
|
||||
the `Kata Containers` runtime will check the source of the mount to identify these special cases. For these use cases, only a single file
|
||||
or very few would typically need to be watched. To avoid excessive overheads in making a mount watchable,
|
||||
we enforce a limit of eight files per mount. If a `secret` or `ConfigMap` mount contains more than 8 files, it will not be
|
||||
considered watchable. We similarly enforce a limit of 1 MB per mount to be considered watchable. Non-watchable mounts will
|
||||
continue to propagate changes from the mount on the host to the container workload, but these updates will not trigger an
|
||||
`inotify` event.
|
||||
|
||||
If at any point a mount grows beyond the eight file or 1MB limit, it will no longer be `watchable`.
|
||||
|
||||
### Presenting a `watchable` mount to the workload
|
||||
|
||||
For mounts that are considered `watchable`, inside the guest, the `kata-agent` will poll the mount presented from
|
||||
the host through `virtiofs` and copy any changed files to a `tmpfs` mount that is presented to the container. In this way,
|
||||
for `watchable` mounts, Kata will do the polling on behalf of the workload and existing workloads needn't change their usage
|
||||
of `inotify`.
|
||||
|
||||

|
||||
@@ -1,5 +1,20 @@
|
||||
# Kata 2.0 Metrics Design
|
||||
|
||||
* [Limitations of Kata 1.x and the target of Kata 2.0](#limitations-of-kata-1x-and-the-target-of-kata-20)
|
||||
* [Metrics architecture](#metrics-architecture)
|
||||
* [Kata monitor](#kata-monitor)
|
||||
* [Kata runtime](#kata-runtime)
|
||||
* [Kata agent](#kata-agent)
|
||||
* [Performance and overhead](#performance-and-overhead)
|
||||
* [Metrics list](#metrics-list)
|
||||
* [Metric types](#metric-types)
|
||||
* [Kata agent metrics](#kata-agent-metrics)
|
||||
* [Firecracker metrics](#firecracker-metrics)
|
||||
* [Kata guest OS metrics](#kata-guest-os-metrics)
|
||||
* [Hypervisor metrics](#hypervisor-metrics)
|
||||
* [Kata monitor metrics](#kata-monitor-metrics)
|
||||
* [Kata containerd shim v2 metrics](#kata-containerd-shim-v2-metrics)
|
||||
|
||||
Kata implements CRI's API and supports the [`ContainerStats`](https://github.com/kubernetes/kubernetes/blob/release-1.18/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto#L101) and [`ListContainerStats`](https://github.com/kubernetes/kubernetes/blob/release-1.18/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto#L103) interfaces to expose container metrics. Users can use these interfaces to get basic metrics about containers.
|
||||
|
||||
But unlike `runc`, Kata is a VM-based runtime and has a different architecture.
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# Kata API Design
|
||||
|
||||
To fulfill the [Kata design requirements](kata-design-requirements.md), and based on the discussion on [Virtcontainers API extensions](https://docs.google.com/presentation/d/1dbGrD1h9cpuqAPooiEgtiwWDGCYhVPdatq7owsKHDEQ), the Kata runtime library features the following APIs:
|
||||
- Sandbox based top API
|
||||
- Storage and network hotplug API
|
||||
- Plugin frameworks for external proprietary Kata runtime extensions
|
||||
- Built-in shim and proxy types and capabilities
|
||||
|
||||
## Sandbox Based API
|
||||
### Sandbox Management API
|
||||
@@ -57,7 +57,7 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`sandbox.GetOOMEvent()`| Monitor the OOM events that occur in the sandbox.|
|
||||
|`sandbox.UpdateRuntimeMetrics()`| Update the `shim/hypervisor` metrics of the running sandbox.|
|
||||
|`sandbox.UpdateRuntimeMetrics()`| Update the shim/`hypervisor`'s metrics of the running sandbox.|
|
||||
|`sandbox.GetAgentMetrics()`| Get metrics of the agent and the guest in the running sandbox.|
|
||||
|
||||
## Plugin framework for external proprietary Kata runtime extensions
|
||||
@@ -99,3 +99,32 @@ Built-in implementations include:
|
||||
|
||||
### Sandbox Connection Plugin Workflow
|
||||

|
||||
|
||||
## Built-in Shim and Proxy Types and Capabilities
|
||||
### Built-in shim/proxy sandbox configurations
|
||||
- Supported shim configurations:
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`noopshim`|Do not start any shim process.|
|
||||
|`ccshim`| Start the cc-shim binary.|
|
||||
|`katashim`| Start the `kata-shim` binary.|
|
||||
|`katashimbuiltin`|No standalone shim process but shim functionality APIs are exported.|
|
||||
- Supported proxy configurations:
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`noopProxy`| a dummy proxy implementation of the proxy interface, only used for testing purpose.|
|
||||
|`noProxy`|generic implementation for any case where no actual proxy is needed.|
|
||||
|`ccProxy`|run `ccProxy` to proxy between runtime and agent.|
|
||||
|`kataProxy`|run `kata-proxy` to translate Yamux connections between runtime and Kata agent. |
|
||||
|`kataProxyBuiltin`| no standalone proxy process and connect to Kata agent with internal Yamux translation.|
|
||||
|
||||
### Built-in Shim Capability
|
||||
Built-in shim capability is implemented by removing standalone shim process, and
|
||||
supporting the shim related APIs.
|
||||
|
||||
### Built-in Proxy Capability
|
||||
Built-in proxy capability is achieved by removing standalone proxy process, and
|
||||
connecting to Kata agent with a custom gRPC dialer that is internal Yamux translation.
|
||||
The behavior is enabled when proxy is configured as `kataProxyBuiltin`.
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
# Design proposals
|
||||
|
||||
Kata Containers design proposal documents:
|
||||
|
||||
- [Kata Containers tracing](tracing-proposals.md)
|
||||
@@ -1,213 +0,0 @@
|
||||
# Kata Tracing proposals
|
||||
|
||||
## Overview
|
||||
|
||||
This document summarises a set of proposals triggered by the
|
||||
[tracing documentation PR][tracing-doc-pr].
|
||||
|
||||
## Required context
|
||||
|
||||
This section explains some terminology required to understand the proposals.
|
||||
Further details can be found in the
|
||||
[tracing documentation PR][tracing-doc-pr].
|
||||
|
||||
### Agent trace mode terminology
|
||||
|
||||
| Trace mode | Description | Use-case |
|
||||
|-|-|-|
|
||||
| Static | Trace agent from startup to shutdown | Entire lifespan |
|
||||
| Dynamic | Toggle tracing on/off as desired | On-demand "snapshot" |
|
||||
|
||||
### Agent trace type terminology
|
||||
|
||||
| Trace type | Description | Use-case |
|
||||
|-|-|-|
|
||||
| isolated | traces all relate to single component | Observing lifespan |
|
||||
| collated | traces "grouped" (runtime+agent) | Understanding component interaction |
|
||||
|
||||
### Container lifespan
|
||||
|
||||
| Lifespan | trace mode | trace type |
|
||||
|-|-|-|
|
||||
| short-lived | static | collated if possible, else isolated? |
|
||||
| long-running | dynamic | collated? (to see interactions) |
|
||||
|
||||
## Original plan for agent
|
||||
|
||||
- Implement all trace types and trace modes for agent.
|
||||
|
||||
- Why?
|
||||
- Maximum flexibility.
|
||||
|
||||
> **Counterargument:**
|
||||
>
|
||||
> Due to the intrusive nature of adding tracing, we have
|
||||
> learnt that landing small incremental changes is simpler and quicker!
|
||||
|
||||
- Compatibility with [Kata 1.x tracing][kata-1x-tracing].
|
||||
|
||||
> **Counterargument:**
|
||||
>
|
||||
> Agent tracing in Kata 1.x was extremely awkward to setup (to the extent
|
||||
> that it's unclear how many users actually used it!)
|
||||
>
|
||||
> This point, coupled with the new architecture for Kata 2.x, suggests
|
||||
> that we may not need to supply the same set of tracing features (in fact
|
||||
> they may not make sense).
|
||||
|
||||
## Agent tracing proposals
|
||||
|
||||
### Agent tracing proposal 1: Don't implement dynamic trace mode
|
||||
|
||||
- All tracing will be static.
|
||||
|
||||
- Why?
|
||||
- Because dynamic tracing will always be "partial"
|
||||
|
||||
> In fact, not only would it be only a "snapshot" of activity, it may not
|
||||
> even be possible to create a complete "trace transaction". If this is
|
||||
> true, the trace output would be partial and would appear "unstructured".
|
||||
|
||||
### Agent tracing proposal 2: Simplify handling of trace type
|
||||
|
||||
- Agent tracing will be "isolated" by default.
|
||||
- Agent tracing will be "collated" if runtime tracing is also enabled.
|
||||
|
||||
- Why?
|
||||
- Offers a graceful fallback for agent tracing if runtime tracing disabled.
|
||||
- Simpler code!
|
||||
|
||||
## Questions to ask yourself (part 1)
|
||||
|
||||
- Are your containers long-running or short-lived?
|
||||
|
||||
- Would you ever need to turn on tracing "briefly"?
|
||||
- If "yes", is a "partial trace" useful or useless?
|
||||
|
||||
> Likely to be considered useless as it is a partial snapshot.
|
||||
> Alternative tracing methods may be more appropriate than dynamic
|
||||
> OpenTelemetry tracing.
|
||||
|
||||
## Questions to ask yourself (part 2)
|
||||
|
||||
- Are you happy to stop a container to enable tracing?
|
||||
If "no", dynamic tracing may be required.
|
||||
|
||||
- Would you ever want to trace the agent and the runtime "in isolation" at the
|
||||
same time?
|
||||
- If "yes", we need to fully implement `trace_mode=isolated`
|
||||
|
||||
> This seems unlikely though.
|
||||
|
||||
## Trace collection
|
||||
|
||||
The second set of proposals affect the way traces are collected.
|
||||
|
||||
### Motivation
|
||||
|
||||
Currently:
|
||||
|
||||
- The runtime sends trace spans to Jaeger directly.
|
||||
- The agent will send trace spans to the [`trace-forwarder`][trace-forwarder] component.
|
||||
- The trace forwarder will send trace spans to Jaeger.
|
||||
|
||||
Kata agent tracing overview:
|
||||
|
||||
```
|
||||
+-------------------------------------------+
|
||||
| Host |
|
||||
| |
|
||||
| +-----------+ |
|
||||
| | Trace | |
|
||||
| | Collector | |
|
||||
| +-----+-----+ |
|
||||
| ^ +--------------+ |
|
||||
| | spans | Kata VM | |
|
||||
| +-----+-----+ | | |
|
||||
| | Kata | spans | +-----+ | |
|
||||
| | Trace |<-----------------|Kata | | |
|
||||
| | Forwarder | VSOCK | |Agent| | |
|
||||
| +-----------+ Channel | +-----+ | |
|
||||
| +--------------+ |
|
||||
+-------------------------------------------+
|
||||
```
|
||||
|
||||
Currently:
|
||||
|
||||
- If agent tracing is enabled but the trace forwarder is not running,
|
||||
the agent will error.
|
||||
|
||||
- If the trace forwarder is started but Jaeger is not running,
|
||||
the trace forwarder will error.
|
||||
|
||||
### Goals
|
||||
|
||||
- The runtime and agent should:
|
||||
- Use the same trace collection implementation.
|
||||
- Use the most common configuration items.
|
||||
|
||||
- Kata should support more trace collection software or `SaaS`
|
||||
(for example `Zipkin`, `datadog`).
|
||||
|
||||
- Trace collection should not block normal runtime/agent operations
|
||||
(for example if `vsock-exporter`/Jaeger is not running, Kata Containers should work normally).
|
||||
|
||||
### Trace collection proposals
|
||||
|
||||
#### Trace collection proposal 1: Send all spans to the trace forwarder as a span proxy
|
||||
|
||||
Kata runtime/agent all send spans to trace forwarder, and the trace forwarder,
|
||||
acting as a tracing proxy, sends all spans to a tracing back-end, such as Jaeger or `datadog`.
|
||||
|
||||
**Pros:**
|
||||
|
||||
- Runtime/agent will be simple.
|
||||
- Could update trace collection target while Kata Containers are running.
|
||||
|
||||
**Cons:**
|
||||
|
||||
- Requires the trace forwarder component to be running (which adds operational overhead).
|
||||
|
||||
#### Trace collection proposal 2: Send spans to collector directly from runtime/agent
|
||||
|
||||
Send spans to the collector directly from the runtime/agent; this proposal requires
|
||||
network access to the collector.
|
||||
|
||||
**Pros:**
|
||||
|
||||
- No additional trace forwarder component needed.
|
||||
|
||||
**Cons:**
|
||||
|
||||
- Need more code/configuration to support all trace collectors.
|
||||
|
||||
## Future work
|
||||
|
||||
- We could add dynamic and fully isolated tracing at a later stage,
|
||||
if required.
|
||||
|
||||
## Further details
|
||||
|
||||
- See the new [GitHub project](https://github.com/orgs/kata-containers/projects/28).
|
||||
- [kata-containers-tracing-status](https://gist.github.com/jodh-intel/0ee54d41d2a803ba761e166136b42277) gist.
|
||||
- [tracing documentation PR][tracing-doc-pr].
|
||||
|
||||
## Summary
|
||||
|
||||
### Time line
|
||||
|
||||
- 2021-07-01: A summary of the discussion was
|
||||
[posted to the mail list](http://lists.katacontainers.io/pipermail/kata-dev/2021-July/001996.html).
|
||||
- 2021-06-22: These proposals were
|
||||
[discussed in the Kata Architecture Committee meeting](https://etherpad.opendev.org/p/Kata_Containers_2021_Architecture_Committee_Mtgs).
|
||||
- 2021-06-18: These proposals were
|
||||
[announced on the mailing list](http://lists.katacontainers.io/pipermail/kata-dev/2021-June/001980.html).
|
||||
|
||||
### Outcome
|
||||
|
||||
- Nobody opposed the agent proposals, so they are being implemented.
|
||||
- The trace collection proposals are still being considered.
|
||||
|
||||
[kata-1x-tracing]: https://github.com/kata-containers/agent/blob/master/TRACING.md
|
||||
[trace-forwarder]: /src/trace-forwarder
|
||||
[tracing-doc-pr]: https://github.com/kata-containers/kata-containers/pull/1937
|
||||
@@ -1,3 +1,11 @@
|
||||
- [Virtual machine vCPU sizing in Kata Containers](#virtual-machine-vcpu-sizing-in-kata-containers)
|
||||
* [Default number of virtual CPUs](#default-number-of-virtual-cpus)
|
||||
* [Virtual CPUs and Kubernetes pods](#virtual-cpus-and-kubernetes-pods)
|
||||
* [Container lifecycle](#container-lifecycle)
|
||||
* [Container without CPU constraint](#container-without-cpu-constraint)
|
||||
* [Container with CPU constraint](#container-with-cpu-constraint)
|
||||
* [Do not waste resources](#do-not-waste-resources)
|
||||
|
||||
# Virtual machine vCPU sizing in Kata Containers
|
||||
|
||||
## Default number of virtual CPUs
|
||||
|
||||
@@ -1,5 +1,16 @@
|
||||
# Virtualization in Kata Containers
|
||||
|
||||
- [Virtualization in Kata Containers](#virtualization-in-kata-containers)
|
||||
- [Mapping container concepts to virtual machine technologies](#mapping-container-concepts-to-virtual-machine-technologies)
|
||||
- [Kata Containers Hypervisor and VMM support](#kata-containers-hypervisor-and-vmm-support)
|
||||
- [QEMU/KVM](#qemukvm)
|
||||
- [Machine accelerators](#machine-accelerators)
|
||||
- [Hotplug devices](#hotplug-devices)
|
||||
- [Firecracker/KVM](#firecrackerkvm)
|
||||
- [Cloud Hypervisor/KVM](#cloud-hypervisorkvm)
|
||||
- [Summary](#summary)
|
||||
|
||||
|
||||
Kata Containers, a second layer of isolation is created on top of those provided by traditional namespace-containers. The
|
||||
hardware virtualization interface is the basis of this additional layer. Kata will launch a lightweight virtual machine,
|
||||
and use the guest’s Linux kernel to create a container workload, or workloads in the case of multi-container pods. In Kubernetes
|
||||
@@ -11,10 +22,10 @@ the multiple hypervisors and virtual machine monitors that Kata supports.
|
||||
## Mapping container concepts to virtual machine technologies
|
||||
|
||||
A typical deployment of Kata Containers will be in Kubernetes by way of a Container Runtime Interface (CRI) implementation. On every node,
|
||||
Kubelet will interact with a CRI implementer (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
|
||||
Kubelet will interact with a CRI implementor (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
|
||||
|
||||
The CRI API, as defined at the [Kubernetes CRI-API repo](https://github.com/kubernetes/cri-api/), implies a few constructs being supported by the
|
||||
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementer, Kata must provide the following constructs:
|
||||
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementor, Kata must provide the following constructs:
|
||||
|
||||

|
||||
|
||||
@@ -30,9 +41,14 @@ Each hypervisor or VMM varies on how or if it handles each of these.
|
||||
|
||||
## Kata Containers Hypervisor and VMM support
|
||||
|
||||
Kata Containers [supports multiple hypervisors](../hypervisors.md).
|
||||
Kata Containers is designed to support multiple virtual machine monitors (VMMs) and hypervisors.
|
||||
Kata Containers supports:
|
||||
- [ACRN hypervisor](https://projectacrn.org/)
|
||||
- [Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor)/[KVM](https://www.linux-kvm.org/page/Main_Page)
|
||||
- [Firecracker](https://github.com/firecracker-microvm/firecracker)/KVM
|
||||
- [QEMU](http://www.qemu-project.org/)/KVM
|
||||
|
||||
Details of each solution and a summary are provided below.
|
||||
Which configuration to use will depend on the end user's requirements. Details of each solution and a summary are provided below.
|
||||
|
||||
### QEMU/KVM
|
||||
|
||||
@@ -46,7 +62,7 @@ be changed by editing the runtime [`configuration`](./architecture.md/#configura
|
||||
Devices and features used:
|
||||
- virtio VSOCK or virtio serial
|
||||
- virtio block or virtio SCSI
|
||||
- [virtio net](https://www.redhat.com/en/virtio-networking-series)
|
||||
- virtio net
|
||||
- virtio fs or virtio 9p (recommend: virtio fs)
|
||||
- VFIO
|
||||
- hotplug
|
||||
@@ -89,34 +105,25 @@ Devices used:
|
||||
|
||||
### Cloud Hypervisor/KVM
|
||||
|
||||
[Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor), based
|
||||
on [rust-vmm](https://github.com/rust-vmm), is designed to have a
|
||||
lighter footprint and smaller attack surface for running modern cloud
|
||||
workloads. Kata Containers with Cloud
|
||||
Hypervisor provides mostly complete compatibility with Kubernetes
|
||||
comparable to the QEMU configuration. As of the 1.12 and 2.0.0 release
|
||||
of Kata Containers, the Cloud Hypervisor configuration supports both CPU
|
||||
and memory resize, device hotplug (disk and VFIO), file-system sharing through virtio-fs,
|
||||
block-based volumes, booting from VM images backed by pmem device, and
|
||||
fine-grained seccomp filters for each VMM threads (e.g. all virtio
|
||||
device worker threads). Please check [this GitHub Project](https://github.com/orgs/kata-containers/projects/21)
|
||||
for details of ongoing integration efforts.
|
||||
Cloud Hypervisor, based on [rust-VMM](https://github.com/rust-vmm), is designed to have a lighter footprint and attack surface. For Kata Containers,
|
||||
relative to Firecracker, the Cloud Hypervisor configuration provides better compatibility at the expense of exposing additional devices: file system
|
||||
sharing and direct device assignment. As of the 1.10 release of Kata Containers, Cloud Hypervisor does not support device hotplug, and as a result
|
||||
does not support updating container resources after boot, or utilizing block based volumes. While Cloud Hypervisor does support VFIO, Kata is still adding
|
||||
this support. As of 1.10, Kata does not support block based volumes or direct device assignment. See [Cloud Hypervisor device support documentation](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/docs/device_model.md)
|
||||
for more details on Cloud Hypervisor.
|
||||
|
||||
Devices and features used:
|
||||
- virtio VSOCK or virtio serial
|
||||
Devices used:
|
||||
- virtio VSOCK
|
||||
- virtio block
|
||||
- virtio net
|
||||
- virtio fs
|
||||
- virtio pmem
|
||||
- VFIO
|
||||
- hotplug
|
||||
- seccomp filters
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
|
||||
### Summary
|
||||
|
||||
| Solution | release introduced | brief summary |
|
||||
|-|-|-|
|
||||
| Cloud Hypervisor | 1.10 | upstream Cloud Hypervisor with rich feature support, e.g. hotplug, VFIO and FS sharing|
|
||||
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
|
||||
| QEMU | 1.0 | upstream QEMU, with support for hotplug and filesystem sharing |
|
||||
| NEMU | 1.4 | Deprecated, removed as of 1.10 release. Slimmed down fork of QEMU, with experimental support of virtio-fs |
|
||||
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
|
||||
| QEMU-virtio-fs | 1.7 | upstream QEMU with support for virtio-fs. Will be removed once virtio-fs lands in upstream QEMU |
|
||||
| Cloud Hypervisor | 1.10 | rust-VMM based, includes VFIO and FS sharing through virtio-fs, no hotplug |
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
# Howto Guides
|
||||
|
||||
## Kubernetes Integration
|
||||
* [Howto Guides](#howto-guides)
|
||||
* [Kubernetes Integration](#kubernetes-integration)
|
||||
* [Hypervisors Integration](#hypervisors-integration)
|
||||
* [Advanced Topics](#advanced-topics)
|
||||
|
||||
## Kubernetes Integration
|
||||
- [Run Kata containers with `crictl`](run-kata-with-crictl.md)
|
||||
- [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
|
||||
- [How to use Kata Containers and Containerd](containerd-kata.md)
|
||||
@@ -24,7 +28,6 @@
|
||||
- [Kata Containers with ACRN Hypervisor](how-to-use-kata-containers-with-acrn.md)
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
- [How to use Kata Containers with virtio-fs](how-to-use-virtio-fs-with-kata.md)
|
||||
- [Setting Sysctls with Kata](how-to-use-sysctls-with-kata.md)
|
||||
- [What Is VMCache and How To Enable It](what-is-vm-cache-and-how-do-I-use-it.md)
|
||||
@@ -34,4 +37,3 @@
|
||||
- [How to use Kata Containers with `virtio-mem`](how-to-use-virtio-mem-with-kata.md)
|
||||
- [How to set sandbox Kata Containers configurations with pod annotations](how-to-set-sandbox-config-kata.md)
|
||||
- [How to monitor Kata Containers in K8s](how-to-set-prometheus-in-k8s.md)
|
||||
- [How to use hotplug memory on arm64 in Kata Containers](how-to-hotplug-memory-arm64.md)
|
||||
|
||||
@@ -1,5 +1,23 @@
|
||||
# How to use Kata Containers and Containerd
|
||||
|
||||
- [Concepts](#concepts)
|
||||
- [Kubernetes `RuntimeClass`](#kubernetes-runtimeclass)
|
||||
- [Containerd Runtime V2 API: Shim V2 API](#containerd-runtime-v2-api-shim-v2-api)
|
||||
- [Install](#install)
|
||||
- [Install Kata Containers](#install-kata-containers)
|
||||
- [Install containerd with CRI plugin](#install-containerd-with-cri-plugin)
|
||||
- [Install CNI plugins](#install-cni-plugins)
|
||||
- [Install `cri-tools`](#install-cri-tools)
|
||||
- [Configuration](#configuration)
|
||||
- [Configure containerd to use Kata Containers](#configure-containerd-to-use-kata-containers)
|
||||
- [Kata Containers as a `RuntimeClass`](#kata-containers-as-a-runtimeclass)
|
||||
- [Kata Containers as the runtime for untrusted workload](#kata-containers-as-the-runtime-for-untrusted-workload)
|
||||
- [Kata Containers as the default runtime](#kata-containers-as-the-default-runtime)
|
||||
- [Configuration for `cri-tools`](#configuration-for-cri-tools)
|
||||
- [Run](#run)
|
||||
- [Launch containers with `ctr` command line](#launch-containers-with-ctr-command-line)
|
||||
- [Launch Pods with `crictl` command line](#launch-pods-with-crictl-command-line)
|
||||
|
||||
This document covers the installation and configuration of [containerd](https://containerd.io/)
|
||||
and [Kata Containers](https://katacontainers.io). The containerd provides not only the `ctr`
|
||||
command line tool, but also the [CRI](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/)
|
||||
|
||||
@@ -26,7 +26,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kata-monitor
|
||||
image: quay.io/kata-containers/kata-monitor:2.0.0
|
||||
image: docker.io/katadocker/kata-monitor:2.0.0
|
||||
args:
|
||||
- -log-level=debug
|
||||
ports:
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# How to use memory hotplug feature in Kata Containers on arm64
|
||||
|
||||
## Introduction
|
||||
|
||||
Memory hotplug is a key feature for containers to allocate memory dynamically in deployment.
|
||||
As Kata Containers is VM-based, this feature needs support from both the VMM and the guest kernel. Luckily, it is fully supported by the current default versions of QEMU and the guest kernel used by Kata on arm64. For other VMMs, e.g., Cloud Hypervisor, the enablement work is underway. Apart from the VMM and guest kernel, memory hotplug also depends on ACPI, which in turn depends on firmware. On x86, you can boot a VM using QEMU with ACPI enabled directly, because it boots up with firmware implicitly. On arm64, however, you need to specify firmware explicitly. That is to say, if you are ready to run a normal Kata Container on arm64, the only extra step is to install the UEFI ROM before using the memory hotplug feature.
|
||||
|
||||
## Install UEFI ROM
|
||||
|
||||
We provide a helper script to install the UEFI ROM. If you have installed Kata normally on your host, you just need to run the script as follows:
|
||||
|
||||
```bash
|
||||
$ pushd $GOPATH/src/github.com/kata-containers/tests
|
||||
$ sudo .ci/aarch64/install_rom_aarch64.sh
|
||||
$ popd
|
||||
```
|
||||
|
||||
## Run for test
|
||||
|
||||
Let's test whether memory hotplug is ready for Kata after installing the UEFI ROM. Make sure containerd is ready to run Kata before testing.
|
||||
|
||||
```bash
|
||||
$ sudo ctr image pull docker.io/library/ubuntu:latest
|
||||
$ sudo ctr run --runtime io.containerd.run.kata.v2 -t --rm docker.io/library/ubuntu:latest hello sh -c "free -h"
|
||||
$ sudo ctr run --runtime io.containerd.run.kata.v2 -t --memory-limit 536870912 --rm docker.io/library/ubuntu:latest hello sh -c "free -h"
|
||||
```
|
||||
|
||||
Compare the results of the two tests. If the latter shows 0.5G more memory than the former, memory hotplug is working — congratulations!
|
||||
@@ -1,5 +1,20 @@
|
||||
# Importing Kata Containers logs with Fluentd
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [Overview](#overview)
|
||||
* [Test stack](#test-stack)
|
||||
* [Importing the logs](#importing-the-logs)
|
||||
* [Direct import `logfmt` from `systemd`](#direct-import-logfmt-from-systemd)
|
||||
* [Configuring `minikube`](#configuring-minikube)
|
||||
* [Pull from `systemd`](#pull-from-systemd)
|
||||
* [Systemd Summary](#systemd-summary)
|
||||
* [Directly importing JSON](#directly-importing-json)
|
||||
* [JSON in files](#json-in-files)
|
||||
* [Prefixing all keys](#prefixing-all-keys)
|
||||
* [Kata `shimv2`](#kata-shimv2)
|
||||
* [Caveats](#caveats)
|
||||
* [Summary](#summary)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document describes how to import Kata Containers logs into [Fluentd](https://www.fluentd.org/),
|
||||
@@ -128,7 +143,7 @@ YAML can be found
|
||||
tag kata-containers
|
||||
path /run/log/journal
|
||||
pos_file /run/log/journal/kata-journald.pos
|
||||
filters [{"SYSLOG_IDENTIFIER": "kata-runtime"}, {"SYSLOG_IDENTIFIER": "kata-shim"}]
|
||||
filters [{"SYSLOG_IDENTIFIER": "kata-runtime"}, {"SYSLOG_IDENTIFIER": "kata-proxy"}, {"SYSLOG_IDENTIFIER": "kata-shim"}]
|
||||
read_from_head true
|
||||
</source>
|
||||
```
|
||||
@@ -146,7 +161,7 @@ generate some Kata specific log entries:
|
||||
|
||||
```bash
|
||||
$ minikube addons open efk
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kata-deploy
|
||||
$ cd $GOPATH/src/github.com/kata-containers/packaging/kata-deploy
|
||||
$ kubectl apply -f examples/nginx-deployment-qemu.yaml
|
||||
```
|
||||
|
||||
@@ -163,14 +178,14 @@ sub-filter on, for instance, the `SYSLOG_IDENTIFIER` to differentiate the Kata c
|
||||
on the `PRIORITY` to filter out critical issues etc.
|
||||
|
||||
Kata generates a significant amount of Kata specific information, which can be seen as
|
||||
[`logfmt`](https://github.com/kata-containers/tests/tree/main/cmd/log-parser#logfile-requirements).
|
||||
[`logfmt`](https://github.com/kata-containers/tests/tree/master/cmd/log-parser#logfile-requirements).
|
||||
data contained in the `MESSAGE` field. Imported as-is, there is no easy way to filter on that data
|
||||
in Kibana:
|
||||
|
||||
.
|
||||
|
||||
We can however further sub-parse the Kata entries using the
|
||||
[Fluentd plugins](https://docs.fluentbit.io/manual/pipeline/parsers/logfmt) that will parse
|
||||
[Fluentd plugins](https://docs.fluentbit.io/manual/v/1.3/parser/logfmt) that will parse
|
||||
`logfmt` formatted data. We can utilise these to parse the sub-fields using a Fluentd filter
|
||||
section. At the same time, we will prefix the new fields with `kata_` to make it clear where
|
||||
they have come from:
|
||||
@@ -207,7 +222,7 @@ test to check the parsing works. The resulting output from Fluentd is:
|
||||
"_COMM":"kata-runtime",
|
||||
"_EXE":"/opt/kata/bin/kata-runtime",
|
||||
"SYSLOG_TIMESTAMP":"Feb 21 10:31:27 ",
|
||||
"_CMDLINE":"/opt/kata/bin/kata-runtime --config /opt/kata/share/defaults/kata-containers/configuration-qemu.toml --root /run/runc state 7cdd31660d8705facdadeb8598d2c0bd008e8142c54e3b3069abd392c8d58997",
|
||||
"_CMDLINE":"/opt/kata/bin/kata-runtime --kata-config /opt/kata/share/defaults/kata-containers/configuration-qemu.toml --root /run/runc state 7cdd31660d8705facdadeb8598d2c0bd008e8142c54e3b3069abd392c8d58997",
|
||||
"SYSLOG_PID":"14314",
|
||||
"_PID":"14314",
|
||||
"MESSAGE":"time=\"2020-02-21T10:31:27.810781647Z\" level=info msg=\"release sandbox\" arch=amd64 command=state container=7cdd31660d8705facdadeb8598d2c0bd008e8142c54e3b3069abd392c8d58997 name=kata-runtime pid=14314 sandbox=1c3e77cad66aa2b6d8cc846f818370f79cb0104c0b840f67d0f502fd6562b68c source=virtcontainers subsystem=sandbox",
|
||||
@@ -257,15 +272,16 @@ go directly to a full Kata specific JSON format logfile test.
|
||||
|
||||
Kata runtime has the ability to generate JSON logs directly, rather than its default `logfmt` format. Passing
|
||||
the `--log-format=json` argument to the Kata runtime enables this. The easiest way to pass in this extra
|
||||
parameter from a [Kata deploy](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy) installation
|
||||
is to edit the `/opt/kata/bin/kata-qemu` shell script.
|
||||
parameter from a [Kata deploy](https://github.com/kata-containers/packaging/tree/master/kata-deploy) installation
|
||||
is to edit the `/opt/kata/bin/kata-qemu` shell script (generated by the
|
||||
[Kata packaging release scripts](https://github.com/kata-containers/packaging/blob/master/release/kata-deploy-binaries.sh)).
|
||||
|
||||
At the same time, we will add the `--log=/var/log/kata-runtime.log` argument to store the Kata logs in their
|
||||
own file (rather than into the system journal).
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
/opt/kata/bin/kata-runtime --config "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml" --log-format=json --log=/var/log/kata-runtime.log $@
|
||||
/opt/kata/bin/kata-runtime --kata-config "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml" --log-format=json --log=/var/log/kata-runtime.log $@
|
||||
```
|
||||
|
||||
And then we'll add the Fluentd config section to parse that file. Note, we inform the parser that Kata is
|
||||
|
||||
@@ -56,9 +56,8 @@ There are some limitations with this approach:
|
||||
|
||||
As was mentioned above, not all containers need the same modules, therefore using
|
||||
the configuration file for specifying the list of kernel modules per [POD][3] can
|
||||
be a pain.
|
||||
Unlike the configuration file, [annotations](how-to-set-sandbox-config-kata.md)
|
||||
provide a way to specify custom configurations per POD.
|
||||
be a pain. Unlike the configuration file, annotations provide a way to specify
|
||||
custom configurations per POD.
|
||||
|
||||
The list of kernel modules and parameters can be set using the annotation
|
||||
`io.katacontainers.config.agent.kernel_modules` as a semicolon separated
|
||||
@@ -102,7 +101,7 @@ spec:
|
||||
tty: true
|
||||
```
|
||||
|
||||
> **Note**: To pass annotations to Kata containers, [CRI-O must be configured correctly](how-to-set-sandbox-config-kata.md#cri-o-configuration)
|
||||
> **Note**: To pass annotations to Kata containers, [`CRI` must be configured correctly](how-to-set-sandbox-config-kata.md#cri-configuration)
|
||||
|
||||
[1]: ../../src/runtime
|
||||
[2]: ../../src/agent
|
||||
|
||||
@@ -2,6 +2,14 @@
|
||||
|
||||
This document describes how to run `kata-monitor` in a Kubernetes cluster using Prometheus's service discovery to scrape metrics from `kata-agent`.
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Pre-requisites](#pre-requisites)
|
||||
- [Configure Prometheus](#configure-prometheus)
|
||||
- [Configure `kata-monitor`](#configure-kata-monitor)
|
||||
- [Setup Grafana](#setup-grafana)
|
||||
* [Create `datasource`](#create-datasource)
|
||||
* [Import dashboard](#import-dashboard)
|
||||
|
||||
> **Warning**: This how-to is for evaluation purposes only; you **SHOULD NOT** run it in production using these configurations.
|
||||
|
||||
## Introduction
|
||||
@@ -26,7 +34,7 @@ Also you should ensure that `kubectl` working correctly.
|
||||
Start Prometheus by utilizing our sample manifest:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/docs/how-to/data/prometheus.yml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/prometheus.yml
|
||||
```
|
||||
|
||||
This will create a new namespace, `prometheus`, and create the following resources:
|
||||
@@ -52,7 +60,7 @@ go_gc_duration_seconds{quantile="0.75"} 0.000229911
|
||||
`kata-monitor` can be started on the cluster as follows:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/docs/how-to/data/kata-monitor-daemonset.yml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/kata-monitor-daemonset.yml
|
||||
```
|
||||
|
||||
This will create a new namespace `kata-system` and a `daemonset` in it.
|
||||
@@ -65,7 +73,7 @@ Once the `daemonset` is running, Prometheus should discover `kata-monitor` as a
|
||||
Run this command to run Grafana in Kubernetes:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/docs/how-to/data/grafana.yml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/grafana.yml
|
||||
```
|
||||
|
||||
This will create deployment and service for Grafana under namespace `prometheus`.
|
||||
@@ -91,7 +99,7 @@ You can import this dashboard using Grafana UI, or using `curl` command in conso
|
||||
$ curl -XPOST -i localhost:3000/api/dashboards/import \
|
||||
-u admin:admin \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"dashboard\":$(curl -sL https://raw.githubusercontent.com/kata-containers/kata-containers/main/docs/how-to/data/dashboard.json )}"
|
||||
-d "{\"dashboard\":$(curl -sL https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/dashboard.json )}"
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
@@ -3,11 +3,6 @@
|
||||
Kata Containers gives users freedom to customize at per-pod level, by setting
|
||||
a wide range of Kata specific annotations in the pod specification.
|
||||
|
||||
Some annotations may be [restricted](#restricted-annotations) by the
|
||||
configuration file for security reasons, notably annotations that could lead the
|
||||
runtime to execute programs on the host. Such annotations are marked with _(R)_ in
|
||||
the tables below.
|
||||
|
||||
# Kata Configuration Annotations
|
||||
There are several kinds of Kata configurations and they are listed below.
|
||||
|
||||
@@ -26,13 +21,11 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.runtime.disable_new_netns` | `boolean` | determines if a new netns is created for the hypervisor process |
|
||||
| `io.katacontainers.config.runtime.internetworking_model` | string| determines how the VM should be connected to the container network interface. Valid values are `macvtap`, `tcfilter` and `none` |
|
||||
| `io.katacontainers.config.runtime.sandbox_cgroup_only`| `boolean` | determines if Kata processes are managed only in sandbox cgroup |
|
||||
| `io.katacontainers.config.runtime.enable_pprof` | `boolean` | enables Golang `pprof` for `containerd-shim-kata-v2` process |
|
||||
|
||||
## Agent Options
|
||||
| Key | Value Type | Comments |
|
||||
|-------| ----- | ----- |
|
||||
| `io.katacontainers.config.agent.enable_tracing` | `boolean` | enable tracing for the agent |
|
||||
| `io.katacontainers.config.agent.container_pipe_size` | uint32 | specify the size of the std(in/out) pipes created for containers |
|
||||
| `io.katacontainers.config.agent.kernel_modules` | string | the list of kernel modules and their parameters that will be loaded in the guest kernel. Semicolon separated list of kernel modules and their parameters. These modules will be loaded in the guest kernel using `modprobe`(8). E.g., `e1000e InterruptThrottleRate=3000,3000,3000 EEE=1; i915 enable_ppgtt=0` |
|
||||
| `io.katacontainers.config.agent.trace_mode` | string | the trace mode for the agent |
|
||||
| `io.katacontainers.config.agent.trace_type` | string | the trace type for the agent |
|
||||
@@ -45,24 +38,17 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_noflush` | `boolean` | Denotes whether flush requests for the device are ignored |
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_set` | `boolean` | cache-related options will be set to block devices or not |
|
||||
| `io.katacontainers.config.hypervisor.block_device_driver` | string | the driver to be used for block device, valid values are `virtio-blk`, `virtio-scsi`, `nvdimm`|
|
||||
| `io.katacontainers.config.hypervisor.cpu_features` | `string` | Comma-separated list of CPU features to pass to the CPU (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.ctlpath` (R) | `string` | Path to the `acrnctl` binary for the ACRN hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` |
|
||||
| `io.katacontainers.config.hypervisor.default_vcpus` | uint32| the default vCPUs assigned for a VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.disable_block_device_use` | `boolean` | disallow a block device from being used |
|
||||
| `io.katacontainers.config.hypervisor.disable_image_nvdimm` | `boolean` | specify if a `nvdimm` device should be used as rootfs for the guest (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.disable_vhost_net` | `boolean` | specify if `vhost-net` is not available on the host |
|
||||
| `io.katacontainers.config.hypervisor.enable_hugepages` | `boolean` | if the memory should be `pre-allocated` from huge pages |
|
||||
| `io.katacontainers.config.hypervisor.enable_iommu_platform` | `boolean` | enable `iommu` on CCW devices (QEMU s390x) |
|
||||
| `io.katacontainers.config.hypervisor.enable_iommu` | `boolean` | enable `iommu` on Q35 (QEMU x86_64) |
|
||||
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
|
||||
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.enable_swap` | `boolean` | enable swap of VM memory |
|
||||
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.entropy_source` (R) | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
|
||||
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
|
||||
| `io.katacontainers.config.hypervisor.entropy_source` | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
|
||||
| `io.katacontainers.config.hypervisor.file_mem_backend` | string | file based memory backend root directory |
|
||||
| `io.katacontainers.config.hypervisor.firmware_hash` | string | container firmware SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.firmware` | string | the guest firmware that will run the container VM |
|
||||
| `io.katacontainers.config.hypervisor.guest_hook_path` | string | the path within the VM that will be used for drop in hooks |
|
||||
@@ -73,26 +59,24 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.initrd_hash` | string | container guest initrd SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.initrd` | string | the guest initrd image that will run in the container VM |
|
||||
| `io.katacontainers.config.hypervisor.jailer_hash` | string | container jailer SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.jailer_path` (R) | string | the jailer that will constrain the container VM |
|
||||
| `io.katacontainers.config.hypervisor.jailer_path` | string | the jailer that will constrain the container VM |
|
||||
| `io.katacontainers.config.hypervisor.kernel_hash` | string | container kernel image SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.kernel_params` | string | additional guest kernel parameters |
|
||||
| `io.katacontainers.config.hypervisor.kernel` | string | the kernel used to boot the container VM |
|
||||
| `io.katacontainers.config.hypervisor.machine_accelerators` | string | machine specific accelerators for the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.machine_type` | string | the type of machine being emulated by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.memory_offset` | uint64| the memory space used for `nvdimm` device by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.memory_offset` | uint32| the memory space used for `nvdimm` device by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.memory_slots` | uint32| the memory slots assigned to the VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.msize_9p` | uint32 | the `msize` for 9p shares |
|
||||
| `io.katacontainers.config.hypervisor.path` | string | the hypervisor that will run the container VM |
|
||||
| `io.katacontainers.config.hypervisor.pcie_root_port` | specify the number of PCIe Root Port devices. The PCIe Root Port device is used to hot-plug a PCIe device (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.shared_fs` | string | the shared file system type, either `virtio-9p` or `virtio-fs` |
|
||||
| `io.katacontainers.config.hypervisor.use_vsock` | `boolean` | specify use of `vsock` for agent communication |
|
||||
| `io.katacontainers.config.hypervisor.vhost_user_store_path` (R) | `string` | specify the directory path where vhost-user devices related folders, sockets and device nodes should be (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache_size` | uint32 | virtio-fs DAX cache size in `MiB` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to `virtiofs` daemon |
|
||||
|
||||
# CRI-O Configuration
|
||||
# CRI Configuration
|
||||
|
||||
In case of CRI-O, all annotations specified in the pod spec are passed down to Kata.
|
||||
|
||||
@@ -117,7 +101,7 @@ $ cat /etc/containerd/config
|
||||
|
||||
```
|
||||
|
||||
Additional documentation on the above configuration can be found in the
|
||||
Additional documentation on the above configuration can be found in the
|
||||
[containerd docs](https://github.com/containerd/cri/blob/8d5a8355d07783ba2f8f451209f6bdcc7c412346/docs/config.md).
|
||||
|
||||
# Example - Using annotations
|
||||
@@ -175,32 +159,3 @@ spec:
|
||||
stdin: true
|
||||
tty: true
|
||||
```
|
||||
|
||||
# Restricted annotations
|
||||
|
||||
Some annotations are _restricted_, meaning that the configuration file specifies
|
||||
the acceptable values. Currently, only hypervisor annotations are restricted,
|
||||
for security reason, with the intent to control which binaries the Kata
|
||||
Containers runtime will launch on your behalf.
|
||||
|
||||
The configuration file validates the annotation _name_ as well as the annotation
|
||||
_value_.
|
||||
|
||||
The acceptable annotation names are defined by the `enable_annotations` entry in
|
||||
the configuration file.
|
||||
|
||||
For restricted annotations, an additional configuration entry provides a list of
|
||||
acceptable values. Since most restricted annotations are intended to control
|
||||
which binaries the runtime can execute, the valid value is generally provided by
|
||||
a shell pattern, as defined by `glob(3)`. The table below provides the name of
|
||||
the configuration entry:
|
||||
|
||||
| Key | Config file entry | Comments |
|
||||
|-------| ----- | ----- |
|
||||
| `ctlpath` | `valid_ctlpaths` | Valid paths for `acrnctl` binary |
|
||||
| `entropy_source` | `valid_entropy_sources` | Valid entropy sources, e.g. `/dev/random` |
|
||||
| `file_mem_backend` | `valid_file_mem_backends` | Valid locations for the file-based memory backend root directory |
|
||||
| `jailer_path` | `valid_jailer_paths`| Valid paths for the jailer constraining the container VM (Firecracker) |
|
||||
| `path` | `valid_hypervisor_paths` | Valid hypervisors to run the container VM |
|
||||
| `vhost_user_store_path` | `valid_vhost_user_store_paths` | Valid paths for vhost-user related files|
|
||||
| `virtio_fs_daemon` | `valid_virtio_fs_daemon_paths` | Valid paths for the `virtiofsd` daemon |
|
||||
|
||||
@@ -1,5 +1,18 @@
|
||||
# How to use Kata Containers and CRI (containerd plugin) with Kubernetes
|
||||
|
||||
* [Requirements](#requirements)
|
||||
* [Install and configure containerd](#install-and-configure-containerd)
|
||||
* [Install and configure Kubernetes](#install-and-configure-kubernetes)
|
||||
* [Install Kubernetes](#install-kubernetes)
|
||||
* [Configure Kubelet to use containerd](#configure-kubelet-to-use-containerd)
|
||||
* [Configure HTTP proxy - OPTIONAL](#configure-http-proxy---optional)
|
||||
* [Start Kubernetes](#start-kubernetes)
|
||||
* [Configure Pod Network](#configure-pod-network)
|
||||
* [Allow pods to run in the master node](#allow-pods-to-run-in-the-master-node)
|
||||
* [Create runtime class for Kata Containers](#create-runtime-class-for-kata-containers)
|
||||
* [Run pod in Kata Containers](#run-pod-in-kata-containers)
|
||||
* [Delete created pod](#delete-created-pod)
|
||||
|
||||
This document describes how to set up a single-machine Kubernetes (k8s) cluster.
|
||||
|
||||
The Kubernetes cluster will use the
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
This document provides an overview on how to run Kata containers with ACRN hypervisor and device model.
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Pre-requisites](#pre-requisites)
|
||||
- [Configure Docker](#configure-docker)
|
||||
- [Configure Kata Containers with ACRN](#configure-kata-containers-with-acrn)
|
||||
|
||||
## Introduction
|
||||
|
||||
ACRN is a flexible, lightweight Type-1 reference hypervisor built with real-time and safety-criticality in mind. ACRN uses an open source platform making it optimized to streamline embedded development.
|
||||
@@ -86,7 +91,7 @@ To configure Kata Containers with ACRN, copy the generated `configuration-acrn.t
|
||||
The following command shows full paths to the `configuration.toml` files that the runtime loads. It will use the first path that exists. (Please make sure the kernel and image paths are set correctly in the `configuration.toml` file)
|
||||
|
||||
```bash
|
||||
$ sudo kata-runtime --show-default-config-paths
|
||||
$ sudo kata-runtime --kata-show-default-config-paths
|
||||
```
|
||||
|
||||
>**Warning:** Please offline CPUs using [this](offline_cpu.sh) script, else VM launches will fail.
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Setting Sysctls with Kata
|
||||
|
||||
## Sysctls
|
||||
|
||||
In Linux, the sysctl interface allows an administrator to modify kernel
|
||||
parameters at runtime. Parameters are available via the `/proc/sys/` virtual
|
||||
process file system.
|
||||
@@ -17,10 +16,11 @@ To get a complete list of kernel parameters, run:
|
||||
$ sudo sysctl -a
|
||||
```
|
||||
|
||||
Kubernetes provides mechanisms for setting namespaced sysctls.
|
||||
Namespaced sysctls can be set per pod in the case of Kubernetes.
|
||||
Both Docker and Kubernetes provide mechanisms for setting namespaced sysctls.
|
||||
Namespaced sysctls can be set per pod in the case of Kubernetes or per container
|
||||
in case of Docker.
|
||||
The following sysctls are known to be namespaced and can be set with
|
||||
Kubernetes:
|
||||
Docker and Kubernetes:
|
||||
|
||||
- `kernel.shm*`
|
||||
- `kernel.msg*`
|
||||
@@ -30,10 +30,31 @@ Kubernetes:
|
||||
|
||||
### Namespaced Sysctls:
|
||||
|
||||
Kata Containers supports setting namespaced sysctls with Kubernetes.
|
||||
Kata Containers supports setting namespaced sysctls with Docker and Kubernetes.
|
||||
All namespaced sysctls can be set in the same way as regular Linux based
|
||||
containers, the difference being, in the case of Kata they are set inside the guest.
|
||||
|
||||
#### Setting Namespaced Sysctls with Docker:
|
||||
|
||||
```
|
||||
$ sudo docker run --runtime=kata-runtime -it alpine cat /proc/sys/fs/mqueue/queues_max
|
||||
256
|
||||
$ sudo docker run --runtime=kata-runtime --sysctl fs.mqueue.queues_max=512 -it alpine cat /proc/sys/fs/mqueue/queues_max
|
||||
512
|
||||
```
|
||||
|
||||
... and:
|
||||
|
||||
```
|
||||
$ sudo docker run --runtime=kata-runtime -it alpine cat /proc/sys/kernel/shmmax
|
||||
18446744073692774399
|
||||
$ sudo docker run --runtime=kata-runtime --sysctl kernel.shmmax=1024 -it alpine cat /proc/sys/kernel/shmmax
|
||||
1024
|
||||
```
|
||||
|
||||
For additional documentation on setting sysctls with Docker please refer to [Docker-sysctl-doc](https://docs.docker.com/engine/reference/commandline/run/#configure-namespaced-kernel-parameters-sysctls-at-runtime).
|
||||
|
||||
|
||||
#### Setting Namespaced Sysctls with Kubernetes:
|
||||
|
||||
Kubernetes considers certain sysctls as safe and others as unsafe. For detailed
|
||||
@@ -79,7 +100,7 @@ spec:
|
||||
|
||||
### Non-Namespaced Sysctls:
|
||||
|
||||
Kubernetes disallows sysctls without a namespace.
|
||||
Docker and Kubernetes disallow sysctls without a namespace.
|
||||
The recommendation is to set them directly on the host or use a privileged
|
||||
container in the case of Kubernetes.
|
||||
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
# Kata Containers with virtio-fs
|
||||
|
||||
- [Kata Containers with virtio-fs](#kata-containers-with-virtio-fs)
|
||||
- [Introduction](#introduction)
|
||||
|
||||
## Introduction
|
||||
|
||||
Container deployments utilize explicit or implicit file sharing between host filesystem and containers. From a trust perspective, avoiding a shared file-system between the trusted host and untrusted container is recommended. This is not always feasible. In Kata Containers, block-based volumes are preferred as they allow usage of either device pass through or `virtio-blk` for access within the virtual machine.
|
||||
|
||||
As of the 2.0 release of Kata Containers, [virtio-fs](https://virtio-fs.gitlab.io/) is the default filesystem sharing mechanism.
|
||||
|
||||
virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy#kubernetes-quick-start).
|
||||
virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](https://github.com/kata-containers/packaging/tree/master/kata-deploy#kubernetes-quick-start).
|
||||
@@ -1,5 +1,9 @@
|
||||
# Kata Containers with `virtio-mem`
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Requisites](#requisites)
|
||||
- [Run a Kata Container utilizing `virtio-mem`](#run-a-kata-container-utilizing-virtio-mem)
|
||||
|
||||
## Introduction
|
||||
|
||||
The basic idea of `virtio-mem` is to provide a flexible, cross-architecture memory hot plug and hot unplug solution that avoids many limitations imposed by existing technologies, architectures, and interfaces.
|
||||
@@ -9,23 +13,26 @@ Kata Containers with `virtio-mem` supports memory resize.
|
||||
|
||||
## Requisites
|
||||
|
||||
Kata Containers just supports `virtio-mem` with QEMU.
|
||||
Install and setup Kata Containers as shown [here](../install/README.md).
|
||||
Kata Containers with `virtio-mem` requires Linux and the QEMU that support `virtio-mem`.
|
||||
The upstream Linux kernel and QEMU do not yet support `virtio-mem`. @davidhildenbrand is working on them.
|
||||
Please use the following unofficial versions of the Linux kernel and QEMU that support `virtio-mem` with Kata Containers.
|
||||
|
||||
### With x86_64
|
||||
The `virtio-mem` config of the x86_64 Kata Linux kernel is open.
|
||||
Enable `virtio-mem` as follows:
|
||||
```
|
||||
$ sudo sed -i -e 's/^#enable_virtio_mem.*$/enable_virtio_mem = true/g' /etc/kata-containers/configuration.toml
|
||||
The Linux kernel is at https://github.com/davidhildenbrand/linux/tree/virtio-mem-rfc-v4.
|
||||
The Linux kernel config that can work with Kata Containers is at https://gist.github.com/teawater/016194ee84748c768745a163d08b0fb9.
|
||||
|
||||
The QEMU is at https://github.com/teawater/qemu/tree/kata-virtio-mem. (The original source is at https://github.com/davidhildenbrand/qemu/tree/virtio-mem. Its base version of QEMU cannot work with Kata Containers. So merge the commit of `virtio-mem` to upstream QEMU.)
|
||||
|
||||
Set Linux and the QEMU that support `virtio-mem` with following line in the Kata Containers QEMU configuration `configuration-qemu.toml`:
|
||||
```toml
|
||||
[hypervisor.qemu]
|
||||
path = "qemu-dir"
|
||||
kernel = "vmlinux-dir"
|
||||
```
|
||||
|
||||
### With other architectures
|
||||
The `virtio-mem` config option is not enabled in the Kata Linux kernel for the other architectures.
|
||||
You can open `virtio-mem` config as follows:
|
||||
Enable `virtio-mem` with following line in the Kata Containers configuration:
|
||||
```toml
|
||||
enable_virtio_mem = true
|
||||
```
|
||||
CONFIG_VIRTIO_MEM=y
|
||||
```
|
||||
Then you can build and install the guest kernel image as shown [here](../../tools/packaging/kernel/README.md#build-kata-containers-kernel).
|
||||
|
||||
## Run a Kata Container utilizing `virtio-mem`
|
||||
|
||||
@@ -34,35 +41,13 @@ Use following command to enable memory overcommitment of a Linux kernel. Becaus
|
||||
$ echo 1 | sudo tee /proc/sys/vm/overcommit_memory
|
||||
```
|
||||
|
||||
Use following command to start a Kata Container.
|
||||
Use the following command to start a Kata Container.
|
||||
```
|
||||
$ pod_yaml=pod.yaml
|
||||
$ container_yaml=container.yaml
|
||||
$ image="quay.io/prometheus/busybox:latest"
|
||||
$ cat << EOF > "${pod_yaml}"
|
||||
metadata:
|
||||
name: busybox-sandbox1
|
||||
EOF
|
||||
$ cat << EOF > "${container_yaml}"
|
||||
metadata:
|
||||
name: busybox-killed-vmm
|
||||
image:
|
||||
image: "$image"
|
||||
command:
|
||||
- top
|
||||
EOF
|
||||
$ sudo crictl pull $image
|
||||
$ podid=$(sudo crictl runp $pod_yaml)
|
||||
$ cid=$(sudo crictl create $podid $container_yaml $pod_yaml)
|
||||
$ sudo crictl start $cid
|
||||
$ docker run --rm -it --runtime=kata --name test busybox
|
||||
```
|
||||
|
||||
Use the following command to set the container memory limit to 2g and the memory size of the VM to its default_memory + 2g.
|
||||
Use the following command to set the memory size of test to default_memory + 512m.
|
||||
```
|
||||
$ sudo crictl update --memory $((2*1024*1024*1024)) $cid
|
||||
$ docker update -m 512m --memory-swap -1 test
|
||||
```
|
||||
|
||||
Use the following command to set the container memory limit to 1g and the memory size of the VM to its default_memory + 1g.
|
||||
```
|
||||
$ sudo crictl update --memory $((1*1024*1024*1024)) $cid
|
||||
```
|
||||
|
||||
@@ -3,6 +3,11 @@
|
||||
Kata Containers supports creation of containers that are "privileged" (i.e. have additional capabilities and access
|
||||
that is not normally granted).
|
||||
|
||||
* [Warnings](#warnings)
|
||||
* [Host Devices](#host-devices)
|
||||
* [Containerd and CRI](#containerd-and-cri)
|
||||
* [CRI-O](#cri-o)
|
||||
|
||||
## Warnings
|
||||
|
||||
**Warning:** Whilst this functionality is supported, it can decrease the security of Kata Containers if not configured
|
||||
|
||||
@@ -1,5 +1,16 @@
|
||||
# Working with `crictl`
|
||||
|
||||
* [What's `cri-tools`](#whats-cri-tools)
|
||||
* [Use `crictl` run Pods in Kata containers](#use-crictl-run-pods-in-kata-containers)
|
||||
* [Run `busybox` Pod](#run-busybox-pod)
|
||||
* [Run pod sandbox with config file](#run-pod-sandbox-with-config-file)
|
||||
* [Create container in the pod sandbox with config file](#create-container-in-the-pod-sandbox-with-config-file)
|
||||
* [Start container](#start-container)
|
||||
* [Run `redis` Pod](#run-redis-pod)
|
||||
* [Create `redis-server` Pod](#create-redis-server-pod)
|
||||
* [Create `redis-client` Pod](#create-redis-client-pod)
|
||||
* [Check `redis` server is working](#check-redis-server-is-working)
|
||||
|
||||
## What's `cri-tools`
|
||||
|
||||
[`cri-tools`](https://github.com/kubernetes-sigs/cri-tools) provides debugging and validation tools for Kubelet Container Runtime Interface (CRI).
|
||||
|
||||
@@ -1,5 +1,18 @@
|
||||
# Run Kata Containers with Kubernetes
|
||||
|
||||
* [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Install a CRI implementation](#install-a-cri-implementation)
|
||||
* [CRI-O](#cri-o)
|
||||
* [Kubernetes Runtime Class (CRI-O v1.12 )](#kubernetes-runtime-class-cri-o-v112)
|
||||
* [Untrusted annotation (until CRI-O v1.12)](#untrusted-annotation-until-cri-o-v112)
|
||||
* [Network namespace management](#network-namespace-management)
|
||||
* [containerd with CRI plugin](#containerd-with-cri-plugin)
|
||||
* [Install Kubernetes](#install-kubernetes)
|
||||
* [Configure for CRI-O](#configure-for-cri-o)
|
||||
* [Configure for containerd](#configure-for-containerd)
|
||||
* [Run a Kubernetes pod with Kata Containers](#run-a-kubernetes-pod-with-kata-containers)
|
||||
|
||||
## Prerequisites
|
||||
This guide requires Kata Containers available on your system, install-able by following [this guide](../install/README.md).
|
||||
|
||||
@@ -158,10 +171,10 @@ $ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart kubelet
|
||||
|
||||
# If using CRI-O
|
||||
$ sudo kubeadm init --ignore-preflight-errors=all --cri-socket /var/run/crio/crio.sock --pod-network-cidr=10.244.0.0/16
|
||||
$ sudo kubeadm init --skip-preflight-checks --cri-socket /var/run/crio/crio.sock --pod-network-cidr=10.244.0.0/16
|
||||
|
||||
# If using CRI-containerd
|
||||
$ sudo kubeadm init --ignore-preflight-errors=all --cri-socket /run/containerd/containerd.sock --pod-network-cidr=10.244.0.0/16
|
||||
$ sudo kubeadm init --skip-preflight-checks --cri-socket /run/containerd/containerd.sock --pod-network-cidr=10.244.0.0/16
|
||||
|
||||
$ export KUBECONFIG=/etc/kubernetes/admin.conf
|
||||
```
|
||||
|
||||
@@ -1,5 +1,21 @@
|
||||
# Kata Containers and service mesh for Kubernetes
|
||||
|
||||
* [Assumptions](#assumptions)
|
||||
* [How they work](#how-they-work)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Kata and Kubernetes](#kata-and-kubernetes)
|
||||
* [Restrictions](#restrictions)
|
||||
* [Install and deploy your service mesh](#install-and-deploy-your-service-mesh)
|
||||
* [Service Mesh Istio](#service-mesh-istio)
|
||||
* [Service Mesh Linkerd](#service-mesh-linkerd)
|
||||
* [Inject your services with sidecars](#inject-your-services-with-sidecars)
|
||||
* [Sidecar Istio](#sidecar-istio)
|
||||
* [Sidecar Linkerd](#sidecar-linkerd)
|
||||
* [Run your services with Kata](#run-your-services-with-kata)
|
||||
* [Lower privileges](#lower-privileges)
|
||||
* [Add annotations](#add-annotations)
|
||||
* [Deploy](#deploy)
|
||||
|
||||
A service mesh is a way to monitor and control the traffic between
|
||||
micro-services running in your Kubernetes cluster. It is a powerful
|
||||
tool that you might want to use in combination with the security
|
||||
@@ -60,16 +76,15 @@ is not able to perform a proper setup of the rules.
|
||||
|
||||
### Service Mesh Istio
|
||||
|
||||
The following is a summary of what you need to install Istio on your system:
|
||||
As a reference, you can follow Istio [instructions](https://istio.io/docs/setup/kubernetes/quick-start/#download-and-prepare-for-the-installation).
|
||||
|
||||
The following is a summary of what you need to install Istio on your system:
|
||||
```
|
||||
$ curl -L https://git.io/getLatestIstio | sh -
|
||||
$ cd istio-*
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
See the [Istio documentation](https://istio.io/docs) for further details.
|
||||
|
||||
Now deploy Istio in the control plane of your cluster with the following:
|
||||
```
|
||||
$ kubectl apply -f install/kubernetes/istio-demo.yaml
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
# What Is VMCache and How To Enable It
|
||||
|
||||
* [What is VMCache](#what-is-vmcache)
|
||||
* [How is this different to VM templating](#how-is-this-different-to-vm-templating)
|
||||
* [How to enable VMCache](#how-to-enable-vmcache)
|
||||
* [Limitations](#limitations)
|
||||
|
||||
### What is VMCache
|
||||
|
||||
VMCache is a new function that creates VMs as caches before using them.
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# What Is VM Templating and How To Enable It
|
||||
|
||||
### What is VM templating
|
||||
|
||||
VM templating is a Kata Containers feature that enables new VM
|
||||
creation using a cloning technique. When enabled, new VMs are created
|
||||
by cloning from a pre-created template VM, and they will share the
|
||||
@@ -9,13 +8,11 @@ same initramfs, kernel and agent memory in readonly mode. It is very
|
||||
much like a process fork done by the kernel but here we *fork* VMs.
|
||||
|
||||
### How is this different from VMCache
|
||||
|
||||
Both [VMCache](../how-to/what-is-vm-cache-and-how-do-I-use-it.md) and VM templating help speed up new container creation.
|
||||
When VMCache is enabled, new VMs are created by the VMCache server. So it is not vulnerable to shared memory CVEs, because the VMs do not share memory.
|
||||
VM templating saves a lot of memory if there are many Kata Containers running on the same host.
|
||||
|
||||
### What are the Pros
|
||||
|
||||
VM templating helps speed up new container creation and saves a lot
|
||||
of memory if there are many Kata Containers running on the same host.
|
||||
If you are running a density workload, or care a lot about container
|
||||
@@ -32,7 +29,6 @@ showed that VM templating speeds up Kata Containers creation by as much as
|
||||
38.68%. See [full results here](https://gist.github.com/bergwolf/06974a3c5981494a40e2c408681c085d).
|
||||
|
||||
### What are the Cons
|
||||
|
||||
One drawback of VM templating is that it cannot avoid cross-VM side-channel
|
||||
attack such as [CVE-2015-2877](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-2877)
|
||||
that originally targeted at the Linux KSM feature.
|
||||
@@ -43,15 +39,13 @@ and can be classified as potentially misunderstood behaviors rather than vulnera
|
||||
**Warning**: If you care about such attack vector, do not use VM templating or KSM.
|
||||
|
||||
### How to enable VM templating
|
||||
|
||||
VM templating can be enabled by changing your Kata Containers config file (`/usr/share/defaults/kata-containers/configuration.toml`,
|
||||
overridden by `/etc/kata-containers/configuration.toml` if provided) such that:
|
||||
|
||||
- `qemu` version `v4.1.0` or above is specified in `hypervisor.qemu`->`path` section
|
||||
- `qemu-lite` is specified in `hypervisor.qemu`->`path` section
|
||||
- `enable_template = true`
|
||||
- `initrd =` is set
|
||||
- `image =` option is commented out or removed
|
||||
- `shared_fs` should not be `virtio-fs`
|
||||
|
||||
Then you can create a VM templating for later usage by calling
|
||||
```
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
# Hypervisors
|
||||
|
||||
## Introduction
|
||||
|
||||
Kata Containers supports multiple hypervisors. This document provides a very
|
||||
high level overview of the available hypervisors, giving suggestions as to
|
||||
which hypervisors you may wish to investigate further.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> This document is not prescriptive or authoritative:
|
||||
>
|
||||
> - It is up to you to decide which hypervisors may be most appropriate for
|
||||
> your use-case.
|
||||
> - Refer to the official documentation for each hypervisor for further details.
|
||||
|
||||
## Types
|
||||
|
||||
Since each hypervisor offers different features and options, Kata Containers
|
||||
provides a separate
|
||||
[configuration file](/src/runtime/README.md#configuration)
|
||||
for each. The configuration files contain comments explaining which options
|
||||
are available, their default values and how each setting can be used.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> The simplest way to switch between hypervisors is to create a symbolic link
|
||||
> to the appropriate hypervisor-specific configuration file.
|
||||
|
||||
| Hypervisor | Written in | Architectures | Type | Configuration file |
|
||||
|-|-|-|-|-|
|
||||
[ACRN] | C | `x86_64` | Type 1 (bare metal) | `configuration-acrn.toml` |
|
||||
[Cloud Hypervisor] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) | `configuration-clh.toml` |
|
||||
[Firecracker] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) | `configuration-fc.toml` |
|
||||
[QEMU] | C | all | Type 2 ([KVM]) | `configuration-qemu.toml` |
|
||||
|
||||
## Determine currently configured hypervisor
|
||||
|
||||
```bash
|
||||
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/' | grep Path
|
||||
```
|
||||
|
||||
## Choose a Hypervisor
|
||||
|
||||
The table below provides a brief summary of some of the differences between
|
||||
the hypervisors:
|
||||
|
||||
|
||||
| Hypervisor | Summary | Features | Limitations | Container Creation speed | Memory density | Use cases | Comment |
|
||||
|-|-|-|-|-|-|-|-|
|
||||
[ACRN] | Safety critical and real-time workloads | | | excellent | excellent | Embedded and IOT systems | For advanced users |
|
||||
[Cloud Hypervisor] | Low latency, small memory footprint, small attack surface | Minimal | | excellent | excellent | High performance modern cloud workloads | |
|
||||
[Firecracker] | Very slimline | Extremely minimal | Doesn't support all device types | excellent | excellent | Serverless / FaaS | |
|
||||
[QEMU] | Lots of features | Lots | | good | good | Good option for most users | | All users |
|
||||
|
||||
For further details, see the [Virtualization in Kata Containers](design/virtualization.md) document and the official documentation for each hypervisor.
|
||||
|
||||
[ACRN]: https://projectacrn.org
|
||||
[Cloud Hypervisor]: https://github.com/cloud-hypervisor/cloud-hypervisor
|
||||
[Firecracker]: https://github.com/firecracker-microvm/firecracker
|
||||
[KVM]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
|
||||
[QEMU]: http://www.qemu-project.org
|
||||
@@ -1,19 +1,39 @@
|
||||
# Kata Containers installation guides
|
||||
# Kata Containers installation user guides
|
||||
|
||||
The following is an overview of the different installation methods available.
|
||||
* [Kata Containers installation user guides](#kata-containers-installation-user-guides)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Legacy installation](#legacy-installation)
|
||||
* [Packaged installation methods](#packaged-installation-methods)
|
||||
* [Official packages](#official-packages)
|
||||
* [Snap Installation](#snap-installation)
|
||||
* [Automatic Installation](#automatic-installation)
|
||||
* [Manual Installation](#manual-installation)
|
||||
* [Build from source installation](#build-from-source-installation)
|
||||
* [Installing on a Cloud Service Platform](#installing-on-a-cloud-service-platform)
|
||||
* [Further information](#further-information)
|
||||
|
||||
The following is an overview of the different installation methods available. All of these methods equally result
|
||||
in a system configured to run Kata Containers.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Kata Containers requires nested virtualization or bare metal. Check
|
||||
[hardware requirements](/src/runtime/README.md#hardware-requirements) to see if your system is capable of running Kata
|
||||
Containers.
|
||||
Kata Containers requires nested virtualization or bare metal.
|
||||
See the
|
||||
[hardware requirements](/src/runtime/README.md#hardware-requirements)
|
||||
to see if your system is capable of running Kata Containers.
|
||||
|
||||
## Legacy installation
|
||||
|
||||
If you wish to install a legacy 1.x version of Kata Containers, see
|
||||
[the Kata Containers 1.x installation documentation](https://github.com/kata-containers/documentation/tree/master/install/).
|
||||
|
||||
## Packaged installation methods
|
||||
|
||||
Packaged installation methods use your distribution's native package format (such as RPM or DEB).
|
||||
|
||||
*Note:* We encourage installation methods that provide automatic updates, as they ensure security updates and bug fixes are
|
||||
easily applied.
|
||||
> **Notes:**
|
||||
>
|
||||
> - Packaged installation methods use your distribution's native package format (such as RPM or DEB).
|
||||
> - You are strongly encouraged to choose an installation method that provides
|
||||
> automatic updates, to ensure you benefit from security updates and bug fixes.
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case |
|
||||
|------------------------------------------------------|---------------------------------------------------------------------|-------------------|----------------------------------------------------------|
|
||||
@@ -30,11 +50,19 @@ Kata packages are provided by official distribution repositories for:
|
||||
| Distribution (link to installation guide) | Minimum versions |
|
||||
|----------------------------------------------------------|--------------------------------------------------------------------------------|
|
||||
| [CentOS](centos-installation-guide.md) | 8 |
|
||||
| [Fedora](fedora-installation-guide.md) | 34 |
|
||||
| [Fedora](fedora-installation-guide.md) | 32, Rawhide |
|
||||
| [openSUSE](opensuse-installation-guide.md) | [Leap 15.1](opensuse-leap-15.1-installation-guide.md)<br>Leap 15.2, Tumbleweed |
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> All users are encouraged to use the official distribution versions of Kata
|
||||
> Containers unless they understand the implications of alternative methods.
|
||||
|
||||
### Snap Installation
|
||||
|
||||
The snap installation is available for all distributions which support `snapd`.
|
||||
> **Note:** The snap installation is available for all distributions which support `snapd`.
|
||||
|
||||
[](https://snapcraft.io/kata-containers)
|
||||
|
||||
[Use snap](snap-installation-guide.md) to install Kata Containers from https://snapcraft.io.
|
||||
|
||||
@@ -48,9 +76,11 @@ Follow the [containerd installation guide](container-manager/containerd/containe
|
||||
|
||||
## Build from source installation
|
||||
|
||||
*Note:* Power users who decide to build from sources should be aware of the
|
||||
implications of using an unpackaged system which will not be automatically
|
||||
updated as new [releases](../Stable-Branch-Strategy.md) are made available.
|
||||
> **Notes:**
|
||||
>
|
||||
> - Power users who decide to build from sources should be aware of the
|
||||
> implications of using an unpackaged system which will not be automatically
|
||||
> updated as new [releases](../Stable-Branch-Strategy.md) are made available.
|
||||
|
||||
[Building from sources](../Developer-Guide.md#initial-setup) allows power users
|
||||
who are comfortable building software from source to use the latest component
|
||||
@@ -66,6 +96,6 @@ versions. This is not recommended for normal users.
|
||||
|
||||
## Further information
|
||||
|
||||
* [upgrading document](../Upgrading.md)
|
||||
* [developer guide](../Developer-Guide.md)
|
||||
* [runtime documentation](../../src/runtime/README.md)
|
||||
* The [upgrading document](../Upgrading.md).
|
||||
* The [developer guide](../Developer-Guide.md).
|
||||
* The [runtime documentation](../../src/runtime/README.md).
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
# Install Kata Containers on Amazon Web Services
|
||||
|
||||
* [Install and Configure AWS CLI](#install-and-configure-aws-cli)
|
||||
* [Create or Import an EC2 SSH key pair](#create-or-import-an-ec2-ssh-key-pair)
|
||||
* [Launch i3.metal instance](#launch-i3metal-instance)
|
||||
* [Install Kata](#install-kata)
|
||||
|
||||
Kata Containers on Amazon Web Services (AWS) makes use of [i3.metal](https://aws.amazon.com/ec2/instance-types/i3/) instances. Most of the installation procedure is identical to that for Kata on your preferred distribution, except that you have to run it on bare metal instances since AWS doesn't support nested virtualization yet. This guide walks you through creating an i3.metal instance.
|
||||
|
||||
## Install and Configure AWS CLI
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
>
|
||||
> - If you decide to proceed and install a Kata Containers release, you can
|
||||
> still check for the latest version of Kata Containers by running
|
||||
> `kata-runtime check --only-list-releases`.
|
||||
> `kata-runtime kata-check --only-list-releases`.
|
||||
>
|
||||
> - These instructions will not work for Fedora 31 and higher since those
|
||||
> distribution versions only support cgroups version 2 by default. However,
|
||||
@@ -98,12 +98,12 @@
|
||||
|
||||
```toml
|
||||
[plugins]
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
default_runtime_name = "kata"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
[plugins.cri]
|
||||
[plugins.cri.containerd]
|
||||
default_runtime_name = "kata"
|
||||
|
||||
[plugins.cri.containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
# Install Kata Containers on Google Compute Engine
|
||||
|
||||
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime check` checks for nested virtualization, but does not fail if support is not found.
|
||||
* [Create an Image with Nested Virtualization Enabled](#create-an-image-with-nested-virtualization-enabled)
|
||||
* [Create the Image](#create-the-image)
|
||||
* [Verify VMX is Available](#verify-vmx-is-available)
|
||||
* [Install Kata](#install-kata)
|
||||
* [Create a Kata-enabled Image](#create-a-kata-enabled-image)
|
||||
|
||||
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime kata-check` checks for nested virtualization, but does not fail if support is not found.
|
||||
|
||||
As a prerequisite this guide assumes an installed and configured instance of the [Google Cloud SDK](https://cloud.google.com/sdk/downloads). For a zero-configuration option, all of the commands below have been tested under [Google Cloud Shell](https://cloud.google.com/shell/) (as of Jun 2018). Verify your `gcloud` installation and configuration:
|
||||
|
||||
|
||||
@@ -1,12 +1,24 @@
|
||||
# Installing Kata Containers in Minikube
|
||||
|
||||
* [Installing Kata Containers in Minikube](#installing-kata-containers-in-minikube)
|
||||
* [Introduction](#introduction)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Setting up Minikube](#setting-up-minikube)
|
||||
* [Checking for nested virtualization](#checking-for-nested-virtualization)
|
||||
* [Check Minikube is running](#check-minikube-is-running)
|
||||
* [Installing Kata Containers](#installing-kata-containers)
|
||||
* [Enabling Kata Containers](#enabling-kata-containers)
|
||||
* [Register the runtime](#register-the-runtime)
|
||||
* [Testing Kata Containers](#testing-kata-containers)
|
||||
* [Wrapping up](#wrapping-up)
|
||||
|
||||
## Introduction
|
||||
|
||||
[Minikube](https://kubernetes.io/docs/setup/minikube/) is an easy way to try out a Kubernetes (k8s)
|
||||
cluster locally. It creates a single node Kubernetes stack in a local VM.
|
||||
|
||||
[Kata Containers](https://github.com/kata-containers) can be installed into a Minikube cluster using
|
||||
[`kata-deploy`](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy).
|
||||
[`kata-deploy`](https://github.com/kata-containers/packaging/tree/master/kata-deploy).
|
||||
|
||||
This document details the pre-requisites, installation steps, and how to check
|
||||
the installation has been successful.
|
||||
@@ -42,7 +54,7 @@ to enable nested virtualization can be found on the
|
||||
[KVM Nested Guests page](https://www.linux-kvm.org/page/Nested_Guests)
|
||||
|
||||
Alternatively, and for other architectures, the Kata Containers built in
|
||||
[`check`](../../src/runtime/README.md#hardware-requirements)
|
||||
[`kata-check`](../../src/runtime/README.md#hardware-requirements)
|
||||
command can be used *inside Minikube* once Kata has been installed, to check for compatibility.
|
||||
|
||||
## Setting up Minikube
|
||||
@@ -123,7 +135,7 @@ $ kubectl apply -f kata-deploy/base/kata-deploy.yaml
|
||||
This installs the Kata Containers components into `/opt/kata` inside the Minikube node. It can take
|
||||
a few minutes for the operation to complete. You can check the installation has worked by checking
|
||||
the status of the `kata-deploy` pod, which will be executing
|
||||
[this script](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy/scripts/kata-deploy.sh),
|
||||
[this script](https://github.com/kata-containers/packaging/blob/master/kata-deploy/scripts/kata-deploy.sh),
|
||||
and will be executing a `sleep infinity` once it has successfully completed its work.
|
||||
You can accomplish this by running the following:
|
||||
|
||||
@@ -154,8 +166,8 @@ $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/node-api/master/
|
||||
Now register the `kata qemu` runtime with that class. This should result in no errors:
|
||||
|
||||
```sh
|
||||
$ cd kata-containers/tools/packaging/kata-deploy/runtimeclasses
|
||||
$ kubectl apply -f kata-runtimeClasses.yaml
|
||||
$ cd kata-containers/tools/packaging/kata-deploy/k8s-1.14
|
||||
$ kubectl apply -f kata-qemu-runtimeClass.yaml
|
||||
```
|
||||
|
||||
The Kata Containers installation process should be complete and enabled in the Minikube cluster.
|
||||
|
||||
10
docs/install/opensuse-installation-guide.md
Normal file
10
docs/install/opensuse-installation-guide.md
Normal file
@@ -0,0 +1,10 @@
|
||||
# Install Kata Containers on openSUSE
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo -E zypper -n install katacontainers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
11
docs/install/opensuse-leap-15.1-installation-guide.md
Normal file
11
docs/install/opensuse-leap-15.1-installation-guide.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Install Kata Containers on openSUSE Leap 15.1
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo -E zypper addrepo --refresh "https://download.opensuse.org/repositories/devel:/kubic/openSUSE_Leap_15.1/devel:kubic.repo"
|
||||
$ sudo -E zypper -n --gpg-auto-import-keys install katacontainers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -1,5 +1,11 @@
|
||||
# Kata Containers snap package
|
||||
|
||||
* [Install Kata Containers](#install-kata-containers)
|
||||
* [Configure Kata Containers](#configure-kata-containers)
|
||||
* [Integration with shim v2 Container Engines](#integration-with-shim-v2-container-engines)
|
||||
* [Remove Kata Containers snap package](#remove-kata-containers-snap-package)
|
||||
|
||||
|
||||
## Install Kata Containers
|
||||
|
||||
Kata Containers can be installed in any Linux distribution that supports
|
||||
@@ -8,7 +14,7 @@ Kata Containers can be installed in any Linux distribution that supports
|
||||
Run the following command to install **Kata Containers**:
|
||||
|
||||
```sh
|
||||
$ sudo snap install kata-containers --stable --classic
|
||||
$ sudo snap install kata-containers --candidate --classic
|
||||
```
|
||||
|
||||
## Configure Kata Containers
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
# Using Intel GPU device with Kata Containers
|
||||
|
||||
- [Using Intel GPU device with Kata Containers](#using-intel-gpu-device-with-kata-containers)
|
||||
- [Hardware Requirements](#hardware-requirements)
|
||||
- [Host Kernel Requirements](#host-kernel-requirements)
|
||||
- [Install and configure Kata Containers](#install-and-configure-kata-containers)
|
||||
- [Build Kata Containers kernel with GPU support](#build-kata-containers-kernel-with-gpu-support)
|
||||
- [GVT-d with Kata Containers](#gvt-d-with-kata-containers)
|
||||
- [GVT-g with Kata Containers](#gvt-g-with-kata-containers)
|
||||
|
||||
An Intel Graphics device can be passed to a Kata Containers container using GPU
|
||||
passthrough (Intel GVT-d) as well as GPU mediated passthrough (Intel GVT-g).
|
||||
|
||||
@@ -57,8 +65,8 @@ configuration in the Kata `configuration.toml` file as shown below.
|
||||
$ sudo sed -i -e 's/^# *\(hotplug_vfio_on_root_bus\).*=.*$/\1 = true/g' /usr/share/defaults/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
Make sure you are using the `q35` machine type by verifying `machine_type = "q35"` is
|
||||
set in the `configuration.toml`. Make sure `pcie_root_port` is set to a positive value.
|
||||
Make sure you are using the `pc` machine type by verifying `machine_type = "pc"` is
|
||||
set in the `configuration.toml`.
|
||||
|
||||
## Build Kata Containers kernel with GPU support
|
||||
|
||||
|
||||
@@ -1,5 +1,17 @@
|
||||
# Using Nvidia GPU device with Kata Containers
|
||||
|
||||
- [Using Nvidia GPU device with Kata Containers](#using-nvidia-gpu-device-with-kata-containers)
|
||||
- [Hardware Requirements](#hardware-requirements)
|
||||
- [Host BIOS Requirements](#host-bios-requirements)
|
||||
- [Host Kernel Requirements](#host-kernel-requirements)
|
||||
- [Install and configure Kata Containers](#install-and-configure-kata-containers)
|
||||
- [Build Kata Containers kernel with GPU support](#build-kata-containers-kernel-with-gpu-support)
|
||||
- [Nvidia GPU pass-through mode with Kata Containers](#nvidia-gpu-pass-through-mode-with-kata-containers)
|
||||
- [Nvidia vGPU mode with Kata Containers](#nvidia-vgpu-mode-with-kata-containers)
|
||||
- [Install Nvidia Driver in Kata Containers](#install-nvidia-driver-in-kata-containers)
|
||||
- [References](#references)
|
||||
|
||||
|
||||
An Nvidia GPU device can be passed to a Kata Containers container using GPU passthrough
|
||||
(Nvidia GPU pass-through mode) as well as GPU mediated passthrough (Nvidia vGPU mode).
|
||||
|
||||
@@ -63,6 +75,13 @@ To use non-large BARs devices (for example, Nvidia Tesla T4), you need Kata vers
|
||||
Follow the [Kata Containers setup instructions](../install/README.md)
|
||||
to install the latest version of Kata.
|
||||
|
||||
The following configuration shown below in the Kata `configuration.toml` file can work:
|
||||
```
|
||||
machine_type = "pc"
|
||||
|
||||
hotplug_vfio_on_root_bus = true
|
||||
```
|
||||
|
||||
To use large BARs devices (for example, Nvidia Tesla P100), you need Kata version 1.11.0 or above.
|
||||
|
||||
The following configuration shown below in the Kata `configuration.toml` file can work:
|
||||
@@ -291,4 +310,4 @@ Tue Mar 3 00:03:49 2020
|
||||
|
||||
- [Configuring a VM for GPU Pass-Through by Using the QEMU Command Line](https://docs.nvidia.com/grid/latest/grid-vgpu-user-guide/index.html#using-gpu-pass-through-red-hat-el-qemu-cli)
|
||||
- https://gitlab.com/nvidia/container-images/driver/-/tree/master
|
||||
- https://github.com/NVIDIA/nvidia-docker/wiki/Driver-containers
|
||||
- https://github.com/NVIDIA/nvidia-docker/wiki/Driver-containers-(Beta)
|
||||
|
||||
@@ -1,34 +1,56 @@
|
||||
# Table of Contents
|
||||
|
||||
* [Table of Contents](#table-of-contents)
|
||||
* [Introduction](#introduction)
|
||||
* [Helpful Links before starting](#helpful-links-before-starting)
|
||||
* [Steps to enable Intel QAT in Kata Containers](#steps-to-enable-intel-qat-in-kata-containers)
|
||||
* [Script variables](#script-variables)
|
||||
* [Set environment variables (Every Reboot)](#set-environment-variables-every-reboot)
|
||||
* [Prepare the Clear Linux Host](#prepare-the-clear-linux-host)
|
||||
* [Identify which PCI Bus the Intel QAT card is on](#identify-which-pci-bus-the-intel-qat-card-is-on)
|
||||
* [Install necessary bundles for Clear Linux](#install-necessary-bundles-for-clear-linux)
|
||||
* [Download Intel QAT drivers](#download-intel-qat-drivers)
|
||||
* [Copy Intel QAT configuration files and enable Virtual Functions](#copy-intel-qat-configuration-files-and-enable-virtual-functions)
|
||||
* [Expose and Bind Intel QAT virtual functions to VFIO-PCI (Every reboot)](#expose-and-bind-intel-qat-virtual-functions-to-vfio-pci-every-reboot)
|
||||
* [Check Intel QAT virtual functions are enabled](#check-intel-qat-virtual-functions-are-enabled)
|
||||
* [Prepare Kata Containers](#prepare-kata-containers)
|
||||
* [Download Kata kernel Source](#download-kata-kernel-source)
|
||||
* [Build Kata kernel](#build-kata-kernel)
|
||||
* [Copy Kata kernel](#copy-kata-kernel)
|
||||
* [Prepare Kata root filesystem](#prepare-kata-root-filesystem)
|
||||
* [Compile Intel QAT drivers for Kata Containers kernel and add to Kata Containers rootfs](#compile-intel-qat-drivers-for-kata-containers-kernel-and-add-to-kata-containers-rootfs)
|
||||
* [Copy Kata rootfs](#copy-kata-rootfs)
|
||||
* [Update Kata configuration to point to custom kernel and rootfs](#update-kata-configuration-to-point-to-custom-kernel-and-rootfs)
|
||||
* [Verify Intel QAT works in a Docker Kata Containers container](#verify-intel-qat-works-in-a-docker-kata-containers-container)
|
||||
* [Build OpenSSL Intel QAT engine container](#build-openssl-intel-qat-engine-container)
|
||||
* [Test Intel QAT in Docker](#test-intel-qat-in-docker)
|
||||
* [Troubleshooting](#troubleshooting)
|
||||
* [Optional Scripts](#optional-scripts)
|
||||
* [Verify Intel QAT card counters are incremented](#verify-intel-qat-card-counters-are-incremented)
|
||||
|
||||
# Introduction
|
||||
|
||||
Intel® QuickAssist Technology (QAT) provides hardware acceleration
|
||||
Intel QuickAssist Technology (Intel QAT) provides hardware acceleration
|
||||
for security (cryptography) and compression. These instructions cover the
|
||||
steps for the latest [Ubuntu LTS release](https://ubuntu.com/download/desktop)
|
||||
which already include the QAT host driver. These instructions can be adapted to
|
||||
any Linux distribution. These instructions guide the user on how to download
|
||||
the kernel sources, compile kernel driver modules against those sources, and
|
||||
load them onto the host as well as preparing a specially built Kata Containers
|
||||
kernel and custom Kata Containers rootfs.
|
||||
|
||||
* Download kernel sources
|
||||
* Compile Kata kernel
|
||||
* Compile kernel driver modules against those sources
|
||||
* Download rootfs
|
||||
* Add driver modules to rootfs
|
||||
* Build rootfs image
|
||||
steps for [Clear Linux](https://clearlinux.org) but can be adapted to any
|
||||
Linux distribution. Your distribution may already have the Intel QAT
|
||||
drivers, but it is likely they do not contain the necessary user space
|
||||
components. These instructions guide the user on how to download the kernel
|
||||
sources, compile kernel driver modules against those sources, and load them
|
||||
onto the host as well as preparing a specially built Kata Containers kernel
|
||||
and custom Kata Containers rootfs.
|
||||
|
||||
## Helpful Links before starting
|
||||
|
||||
[Intel® QuickAssist Technology at `01.org`](https://01.org/intel-quickassist-technology)
|
||||
[Intel QAT Engine](https://github.com/intel/QAT_Engine)
|
||||
|
||||
[Intel® QuickAssist Technology Engine for OpenSSL](https://github.com/intel/QAT_Engine)
|
||||
[Intel QuickAssist Technology at `01.org`](https://01.org/intel-quickassist-technology)
|
||||
|
||||
[Intel Device Plugin for Kubernetes](https://github.com/intel/intel-device-plugins-for-kubernetes)
|
||||
|
||||
[Intel® QuickAssist Technology for Crypto Poll Mode Driver](https://dpdk-docs.readthedocs.io/en/latest/cryptodevs/qat.html)
|
||||
[Intel QuickAssist Crypto Poll Mode Driver](https://dpdk-docs.readthedocs.io/en/latest/cryptodevs/qat.html)
|
||||
|
||||
## Steps to enable Intel® QAT in Kata Containers
|
||||
## Steps to enable Intel QAT in Kata Containers
|
||||
|
||||
There are some steps to complete only once, some steps to complete with every
|
||||
reboot, and some steps to complete when the host kernel changes.
|
||||
@@ -45,95 +67,91 @@ needed to point to updated drivers or different install locations.
|
||||
Make sure to check [`01.org`](https://01.org/intel-quickassist-technology) for
|
||||
the latest driver.
|
||||
|
||||
```bash
|
||||
$ export QAT_DRIVER_VER=qat1.7.l.4.14.0-00031.tar.gz
|
||||
$ export QAT_DRIVER_URL=https://downloadmirror.intel.com/30178/eng/${QAT_DRIVER_VER}
|
||||
```sh
|
||||
$ export QAT_DRIVER_VER=qat1.7.l.4.8.0-00005.tar.gz
|
||||
$ export QAT_DRIVER_URL=https://01.org/sites/default/files/downloads/${QAT_DRIVER_VER}
|
||||
$ export QAT_CONF_LOCATION=~/QAT_conf
|
||||
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/master/demo/openssl-qat-engine/Dockerfile
|
||||
$ export QAT_SRC=~/src/QAT
|
||||
$ export GOPATH=~/src/go
|
||||
$ export OSBUILDER=~/src/osbuilder
|
||||
$ export KATA_KERNEL_LOCATION=~/kata
|
||||
$ export KATA_ROOTFS_LOCATION=~/kata
|
||||
```
|
||||
|
||||
## Prepare the Ubuntu Host
|
||||
## Prepare the Clear Linux Host
|
||||
|
||||
The host could be a bare metal instance or a virtual machine. If using a
|
||||
virtual machine, make sure that KVM nesting is enabled. The following
|
||||
instructions reference an Intel® C62X chipset. Some of the instructions must be
|
||||
modified if using a different Intel® QAT device. The Intel® QAT chipset can be
|
||||
identified by executing the following.
|
||||
instructions reference an Intel QAT. Some of the instructions must be
|
||||
modified if using a different Intel QAT device. You can identify the Intel QAT
|
||||
chipset by executing the following.
|
||||
|
||||
### Identify which PCI Bus the Intel® QAT card is on
|
||||
### Identify which PCI Bus the Intel QAT card is on
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ for i in 0434 0435 37c8 1f18 1f19; do lspci -d 8086:$i; done
|
||||
```
|
||||
|
||||
### Install necessary packages for Ubuntu
|
||||
### Install necessary bundles for Clear Linux
|
||||
|
||||
These packages are necessary to compile the Kata kernel, Intel® QAT driver, and to
|
||||
prepare the rootfs for Kata. [Docker](https://docs.docker.com/engine/install/ubuntu/)
|
||||
also needs to be installed to be able to build the rootfs. To test that
|
||||
everything works a Kubernetes pod is started requesting Intel® QAT resources. For the
|
||||
pass through of the virtual functions the kernel boot parameter needs to have
|
||||
`INTEL_IOMMU=on`.
|
||||
Clear Linux version 30780 (Released August 13, 2019) includes a
|
||||
`linux-firmware-qat` bundle that has the necessary QAT firmware along with a
|
||||
functional QAT host driver that works with Kata Containers.
|
||||
|
||||
```bash
|
||||
$ sudo apt update
|
||||
$ sudo apt install -y golang-go build-essential python pkg-config zlib1g-dev libudev-dev bison libelf-dev flex libtool automake autotools-dev autoconf bc libpixman-1-dev coreutils libssl-dev
|
||||
$ sudo sed -i 's/GRUB_CMDLINE_LINUX_DEFAULT=""/GRUB_CMDLINE_LINUX_DEFAULT="intel_iommu=on"/' /etc/default/grub
|
||||
$ sudo update-grub
|
||||
```sh
|
||||
$ sudo swupd bundle-add network-basic linux-firmware-qat make c-basic go-basic containers-virt dev-utils devpkg-elfutils devpkg-systemd devpkg-ssl
|
||||
$ sudo clr-boot-manager update
|
||||
$ sudo systemctl enable --now docker
|
||||
$ sudo reboot
|
||||
```
|
||||
|
||||
### Download Intel® QAT drivers
|
||||
### Download Intel QAT drivers
|
||||
|
||||
This will download the [Intel® QAT drivers](https://01.org/intel-quickassist-technology).
|
||||
This will download the Intel QAT drivers from [`01.org`](https://01.org/intel-quickassist-technology).
|
||||
Make sure to check the website for the latest version.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ mkdir -p $QAT_SRC
|
||||
$ cd $QAT_SRC
|
||||
$ curl -L $QAT_DRIVER_URL | tar zx
|
||||
```
|
||||
|
||||
### Copy Intel® QAT configuration files and enable virtual functions
|
||||
### Copy Intel QAT configuration files and enable Virtual Functions
|
||||
|
||||
Modify the instructions below as necessary if using a different Intel® QAT hardware
|
||||
Modify the instructions below as necessary if using a different QAT hardware
|
||||
platform. You can learn more about customizing configuration files at the
|
||||
[Intel® QAT Engine repository](https://github.com/intel/QAT_Engine/#copy-the-correct-intel-quickassist-technology-driver-config-files)
|
||||
[Intel QAT Engine repository](https://github.com/intel/QAT_Engine/#copy-the-correct-intel-quickassist-technology-driver-config-files)
|
||||
This section starts from a base config file and changes the `SSL` section to
|
||||
`SHIM` to support the OpenSSL engine. There are more tweaks that you can make
|
||||
depending on the use case and how many Intel® QAT engines should be run. You
|
||||
depending on the use case and how many Intel QAT engines should be run. You
|
||||
can find more information about how to customize in the
|
||||
[Intel® QuickAssist Technology Software for Linux* - Programmer's Guide.](https://01.org/sites/default/files/downloads/336210qatswprogrammersguiderev006.pdf)
|
||||
|
||||
> **Note: This section assumes that a Intel® QAT `c6xx` platform is used.**
|
||||
> **Note: This section assumes that a QAT `c6xx` platform is used.**
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ mkdir -p $QAT_CONF_LOCATION
|
||||
$ cp $QAT_SRC/quickassist/utilities/adf_ctl/conf_files/c6xxvf_dev0.conf.vm $QAT_CONF_LOCATION/c6xxvf_dev0.conf
|
||||
$ sed -i 's/\[SSL\]/\[SHIM\]/g' $QAT_CONF_LOCATION/c6xxvf_dev0.conf
|
||||
```
|
||||
|
||||
### Expose and Bind Intel® QAT virtual functions to VFIO-PCI (Every reboot)
|
||||
### Expose and Bind Intel QAT virtual functions to VFIO-PCI (Every reboot)
|
||||
|
||||
To enable virtual functions, the host OS should have IOMMU groups enabled. In
|
||||
the UEFI Firmware Intel® Virtualization Technology for Directed I/O
|
||||
(Intel® VT-d) must be enabled. Also, the kernel boot parameter should be
|
||||
`intel_iommu=on` or `intel_iommu=ifgx_off`. This should have been set from
|
||||
the instructions above. Check the output of `/proc/cmdline` to confirm. The
|
||||
following commands assume you installed an Intel® QAT card, IOMMU is on, and
|
||||
the UEFI Firmware Intel Virtualization Technology for Directed I/O
|
||||
(Intel VT-d) must be enabled. Also, the kernel boot parameter should be
|
||||
`intel_iommu=on` or `intel_iommu=ifgx_off`. The default in Clear Linux currently
|
||||
is `intel_iommu=igfx_off` which should work with the Intel QAT device. The
|
||||
following commands assume you installed an Intel QAT card, IOMMU is on, and
|
||||
VT-d is enabled. The vendor and device ID add to the `VFIO-PCI` driver so that
|
||||
each exposed virtual function can be bound to the `VFIO-PCI` driver. Once
|
||||
complete, each virtual function passes into a Kata Containers container using
|
||||
the PCIe device passthrough feature. For Kubernetes, the
|
||||
[Intel device plugin](https://github.com/intel/intel-device-plugins-for-kubernetes)
|
||||
for Kubernetes handles the binding of the driver, but the VF’s still must be
|
||||
the PCIe device passthrough feature. For Kubernetes, the Intel device plugin
|
||||
for Kubernetes handles the binding of the driver but the VF’s still must be
|
||||
enabled.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ sudo modprobe vfio-pci
|
||||
$ QAT_PCI_BUS_PF_NUMBERS=$((lspci -d :435 && lspci -d :37c8 && lspci -d :19e2 && lspci -d :6f54) | cut -d ' ' -f 1)
|
||||
$ QAT_PCI_BUS_PF_1=$(echo $QAT_PCI_BUS_PF_NUMBERS | cut -d ' ' -f 1)
|
||||
@@ -142,10 +160,8 @@ $ QAT_PCI_ID_VF=$(cat /sys/bus/pci/devices/0000:${QAT_PCI_BUS_PF_1}/virtfn0/ueve
|
||||
$ QAT_VENDOR_AND_ID_VF=$(echo ${QAT_PCI_ID_VF/PCI_ID=} | sed 's/:/ /')
|
||||
$ echo $QAT_VENDOR_AND_ID_VF | sudo tee --append /sys/bus/pci/drivers/vfio-pci/new_id
|
||||
```
|
||||
|
||||
Loop through all the virtual functions and bind to the VFIO driver
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ for f in /sys/bus/pci/devices/0000:$QAT_PCI_BUS_PF_1/virtfn*
|
||||
do QAT_PCI_BUS_VF=$(basename $(readlink $f))
|
||||
echo $QAT_PCI_BUS_VF | sudo tee --append /sys/bus/pci/drivers/c6xxvf/unbind
|
||||
@@ -153,23 +169,22 @@ $ for f in /sys/bus/pci/devices/0000:$QAT_PCI_BUS_PF_1/virtfn*
|
||||
done
|
||||
```
|
||||
|
||||
### Check Intel® QAT virtual functions are enabled
|
||||
### Check Intel QAT virtual functions are enabled
|
||||
|
||||
If the following command returns empty, then the virtual functions are not
|
||||
properly enabled. This command checks the enumerated device IDs for just the
|
||||
virtual functions. Using the Intel® QAT as an example, the physical device ID
|
||||
virtual functions. Using the Intel QAT as an example, the physical device ID
|
||||
is `37c8` and virtual function device ID is `37c9`. The following command checks
|
||||
if VF's are enabled for any of the currently known Intel® QAT device ID's. The
|
||||
if VF's are enabled for any of the currently known Intel QAT device ID's. The
|
||||
following `ls` command should show the 16 VF's bound to `VFIO-PCI`.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ for i in 0442 0443 37c9 19e3; do lspci -d 8086:$i; done
|
||||
```
|
||||
|
||||
Another way to check is to see what PCI devices that `VFIO-PCI` is mapped to.
|
||||
It should match the device ID's of the VF's.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ ls -la /sys/bus/pci/drivers/vfio-pci
|
||||
```
|
||||
|
||||
@@ -186,16 +201,16 @@ There are some patches that must be installed as well, which the
|
||||
`build-kernel.sh` script should automatically apply. If you are using a
|
||||
different kernel version, then you might need to manually apply them. Since
|
||||
the Kata Containers kernel has a minimal set of kernel flags set, you must
|
||||
create a Intel® QAT kernel fragment with the necessary `CONFIG_CRYPTO_*` options set.
|
||||
create a QAT kernel fragment with the necessary `CONFIG_CRYPTO_*` options set.
|
||||
Update the config to set some of the `CRYPTO` flags to enabled. This might
|
||||
change with different kernel versions. The following instructions were tested
|
||||
with kernel `v5.4.0-64-generic`.
|
||||
change with different kernel versions. We tested the following instructions
|
||||
with kernel `v4.19.28-41`.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ mkdir -p $GOPATH
|
||||
$ cd $GOPATH
|
||||
$ go get -v github.com/kata-containers/kata-containers
|
||||
$ cat << EOF > $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kernel/configs/fragments/common/qat.conf
|
||||
$ go get -v github.com/kata-containers/packaging
|
||||
$ cat << EOF > $GOPATH/src/github.com/kata-containers/packaging/kernel/configs/fragments/common/qat.conf
|
||||
CONFIG_PCIEAER=y
|
||||
CONFIG_UIO=y
|
||||
CONFIG_CRYPTO_HW=y
|
||||
@@ -206,70 +221,61 @@ CONFIG_MODULE_SIG=y
|
||||
CONFIG_CRYPTO_AUTHENC=y
|
||||
CONFIG_CRYPTO_DH=y
|
||||
EOF
|
||||
$ $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kernel/build-kernel.sh setup
|
||||
$ $GOPATH/src/github.com/kata-containers/packaging/kernel/build-kernel.sh setup
|
||||
```
|
||||
|
||||
### Build Kata kernel
|
||||
|
||||
```bash
|
||||
$ cd $GOPATH
|
||||
$ export LINUX_VER=$(ls -d kata-linux-*)
|
||||
```sh
|
||||
$ export LINUX_VER=$(ls -d kata*)
|
||||
$ sed -i 's/EXTRAVERSION =/EXTRAVERSION = .qat.container/' $LINUX_VER/Makefile
|
||||
$ $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kernel/build-kernel.sh build
|
||||
$ $GOPATH/src/github.com/kata-containers/packaging/kernel/build-kernel.sh build
|
||||
```
|
||||
|
||||
|
||||
### Copy Kata kernel
|
||||
|
||||
```bash
|
||||
$ export KATA_KERNEL_NAME=vmlinux-${LINUX_VER}_qat
|
||||
```sh
|
||||
$ mkdir -p $KATA_KERNEL_LOCATION
|
||||
$ cp ${GOPATH}/${LINUX_VER}/vmlinux ${KATA_KERNEL_LOCATION}/${KATA_KERNEL_NAME}
|
||||
$ cp $LINUX_VER/arch/x86/boot/bzImage $KATA_KERNEL_LOCATION/vmlinuz-${LINUX_VER}_qat
|
||||
```
|
||||
|
||||
### Prepare Kata root filesystem
|
||||
|
||||
These instructions build upon the OS builder instructions located in the
|
||||
[Developer Guide](../Developer-Guide.md). At this point it is recommended that
|
||||
[Docker](https://docs.docker.com/engine/install/ubuntu/) is installed first, and
|
||||
then [Kata-deploy](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy)
|
||||
is use to install Kata. This will make sure that the correct `agent` version
|
||||
is installed into the rootfs in the steps below.
|
||||
[Developer Guide](../Developer-Guide.md). The following instructions use Clear
|
||||
Linux (Kata Containers default) as the root filesystem with systemd as the
|
||||
init and will add in the `kmod` binary, which is not a standard binary in a
|
||||
Kata rootfs image. The `kmod` binary is necessary to load the QAT kernel
|
||||
modules when the virtual machine rootfs boots. You should install Docker on
|
||||
your system before running the following commands. If you need to use a custom
|
||||
`kata-agent`, then refer to the previous link on how to add it in.
|
||||
|
||||
The following instructions use Debian as the root filesystem with systemd as
|
||||
the init and will add in the `kmod` binary, which is not a standard binary in
|
||||
a Kata rootfs image. The `kmod` binary is necessary to load the Intel® QAT
|
||||
kernel modules when the virtual machine rootfs boots.
|
||||
|
||||
```bash
|
||||
$ export OSBUILDER=$GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder
|
||||
$ export ROOTFS_DIR=${OSBUILDER}/rootfs-builder/rootfs
|
||||
```sh
|
||||
$ mkdir -p $OSBUILDER
|
||||
$ cd $OSBUILDER
|
||||
$ git clone https://github.com/kata-containers/osbuilder.git
|
||||
$ export ROOTFS_DIR=${OSBUILDER}/osbuilder/rootfs-builder/rootfs
|
||||
$ export EXTRA_PKGS='kmod'
|
||||
```
|
||||
|
||||
Make sure that the `kata-agent` version matches the installed `kata-runtime`
|
||||
version. Also make sure the `kata-runtime` install location is in your `PATH`
|
||||
variable. The following `AGENT_VERSION` can be set manually to match
|
||||
the `kata-runtime` version if the following commands don't work.
|
||||
|
||||
```bash
|
||||
$ export PATH=$PATH:/opt/kata/bin
|
||||
$ cd $GOPATH
|
||||
version.
|
||||
```sh
|
||||
$ export AGENT_VERSION=$(kata-runtime version | head -n 1 | grep -o "[0-9.]\+")
|
||||
$ cd ${OSBUILDER}/rootfs-builder
|
||||
$ cd ${OSBUILDER}/osbuilder/rootfs-builder
|
||||
$ sudo rm -rf ${ROOTFS_DIR}
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SECCOMP=no ./rootfs.sh debian'
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SECCOMP=no ./rootfs.sh clearlinux'
|
||||
```
|
||||
|
||||
### Compile Intel® QAT drivers for Kata Containers kernel and add to Kata Containers rootfs
|
||||
### Compile Intel QAT drivers for Kata Containers kernel and add to Kata Containers rootfs
|
||||
|
||||
After the Kata Containers kernel builds with the proper configuration flags,
|
||||
you must build the Intel® QAT drivers against that Kata Containers kernel
|
||||
you must build the Intel QAT drivers against that Kata Containers kernel
|
||||
version in a similar way they were previously built for the host OS. You must
|
||||
set the `KERNEL_SOURCE_ROOT` variable to the Kata Containers kernel source
|
||||
directory and build the Intel® QAT drivers again. The `make` command will
|
||||
install the Intel® QAT modules into the Kata rootfs.
|
||||
directory and build the Intel QAT drivers again.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ cd $GOPATH
|
||||
$ export LINUX_VER=$(ls -d kata*)
|
||||
$ export KERNEL_MAJOR_VERSION=$(awk '/^VERSION =/{print $NF}' $GOPATH/$LINUX_VER/Makefile)
|
||||
@@ -278,18 +284,16 @@ $ export KERNEL_SUBLEVEL=$(awk '/^SUBLEVEL =/{print $NF}' $GOPATH/$LINUX_VER/Mak
|
||||
$ export KERNEL_EXTRAVERSION=$(awk '/^EXTRAVERSION =/{print $NF}' $GOPATH/$LINUX_VER/Makefile)
|
||||
$ export KERNEL_ROOTFS_DIR=${KERNEL_MAJOR_VERSION}.${KERNEL_PATHLEVEL}.${KERNEL_SUBLEVEL}${KERNEL_EXTRAVERSION}
|
||||
$ cd $QAT_SRC
|
||||
$ KERNEL_SOURCE_ROOT=$GOPATH/$LINUX_VER ./configure --enable-icp-sriov=guest
|
||||
$ KERNEL_SOURCE_ROOT=$GOPATH/$LINUX_VER ./configure --disable-qat-lkcf --enable-icp-sriov=guest
|
||||
$ sudo -E make all -j$(nproc)
|
||||
$ sudo -E make INSTALL_MOD_PATH=$ROOTFS_DIR qat-driver-install -j$(nproc)
|
||||
```
|
||||
|
||||
The `usdm_drv` module also needs to be copied into the rootfs modules path and
|
||||
`depmod` should be run.
|
||||
|
||||
```bash
|
||||
$ sudo cp $QAT_SRC/build/usdm_drv.ko $ROOTFS_DIR/lib/modules/${KERNEL_ROOTFS_DIR}/updates/drivers
|
||||
```sh
|
||||
$ sudo cp $QAT_SRC/build/usdm_drv.ko $ROOTFS_DIR/usr/lib/modules/${KERNEL_ROOTFS_DIR}/updates/drivers
|
||||
$ sudo depmod -a -b ${ROOTFS_DIR} ${KERNEL_ROOTFS_DIR}
|
||||
$ cd ${OSBUILDER}/image-builder
|
||||
$ cd ${OSBUILDER}/osbuilder/image-builder
|
||||
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
|
||||
```
|
||||
|
||||
@@ -298,225 +302,84 @@ $ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
|
||||
|
||||
### Copy Kata rootfs
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ mkdir -p $KATA_ROOTFS_LOCATION
|
||||
$ cp ${OSBUILDER}/image-builder/kata-containers.img $KATA_ROOTFS_LOCATION
|
||||
$ cp ${OSBUILDER}/osbuilder/image-builder/kata-containers.img $KATA_ROOTFS_LOCATION
|
||||
```
|
||||
|
||||
## Verify Intel® QAT works in a container
|
||||
### Update Kata configuration to point to custom kernel and rootfs
|
||||
|
||||
The following instructions uses a OpenSSL Dockerfile that builds the
|
||||
Intel® QAT engine to allow OpenSSL to offload crypto functions. It is a
|
||||
convenient way to test that VFIO device passthrough for the Intel® QAT VF’s are
|
||||
You must update the `configuration.toml` for Kata Containers to point to the
|
||||
custom kernel, custom rootfs, and to specify which modules to load when the
|
||||
virtual machine is booted when a container is run. The following example
|
||||
assumes you installed an Intel QAT, and you need to load those modules.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/kata-containers
|
||||
$ sudo cp /usr/share/defaults/kata-containers/configuration-qemu.toml /etc/kata-containers/configuration.toml
|
||||
$ sudo sed -i "s|kernel_params = \"\"|kernel_params = \"modules-load=usdm_drv,qat_c62xvf\"|g" /etc/kata-containers/configuration.toml
|
||||
$ sudo sed -i "s|\/usr\/share\/kata-containers\/kata-containers.img|${KATA_KERNEL_LOCATION}\/kata-containers.img|g" /etc/kata-containers/configuration.toml
|
||||
$ sudo sed -i "s|\/usr\/share\/kata-containers\/vmlinuz.container|${KATA_ROOTFS_LOCATION}\/vmlinuz-${LINUX_VER}_qat|g" /etc/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Verify Intel QAT works in a Docker Kata Containers container
|
||||
|
||||
The following instructions leverage an OpenSSL Dockerfile that builds the
|
||||
Intel QAT engine to allow OpenSSL to offload crypto functions. It is a
|
||||
convenient way to test that VFIO device passthrough for the Intel QAT VF’s are
|
||||
working properly with the Kata Containers VM.
|
||||
|
||||
### Build OpenSSL Intel® QAT engine container
|
||||
## Build OpenSSL Intel QAT engine container
|
||||
|
||||
Use the OpenSSL Intel® QAT [Dockerfile](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/master/demo/openssl-qat-engine)
|
||||
Use the OpenSSL Intel QAT [Dockerfile](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/master/demo/openssl-qat-engine)
|
||||
to build a container image with an optimized OpenSSL engine for
|
||||
Intel® QAT. Using `docker build` with the Kata Containers runtime can sometimes
|
||||
have issues. Therefore, make sure that `runc` is the default Docker container
|
||||
runtime.
|
||||
Intel QAT. Using `docker build` with the Kata Containers runtime can sometimes
|
||||
have issues. Therefore, we recommended you change the default runtime to
|
||||
`runc` before doing a build. Instructions for this are below.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ cd $QAT_SRC
|
||||
$ curl -O $QAT_DOCKERFILE
|
||||
$ sudo sed -i 's/kata-runtime/runc/g' /etc/systemd/system/docker.service.d/50-runtime.conf
|
||||
$ sudo systemctl daemon-reload && sudo systemctl restart docker
|
||||
$ sudo docker build -t openssl-qat-engine .
|
||||
```
|
||||
|
||||
> **Note: The Intel® QAT driver version in this container might not match the
|
||||
> Intel® QAT driver compiled and loaded on the host when compiling.**
|
||||
> **Note: The Intel QAT driver version in this container might not match the
|
||||
> Intel QAT driver compiled and loaded on the host when compiling.**
|
||||
|
||||
### Test Intel® QAT with the ctr tool
|
||||
### Test Intel QAT in Docker
|
||||
|
||||
The `ctr` tool can be used to interact with the containerd daemon. It may be
|
||||
more convenient to use this tool to verify the kernel and image instead of
|
||||
setting up a Kubernetes cluster. The correct Kata runtimes need to be added
|
||||
to the containerd `config.toml`. Below is a sample snippet that can be added
|
||||
to allow QEMU and Cloud Hypervisor (CLH) to work with `ctr`.
|
||||
The host should already be setup with 16 virtual functions of the Intel QAT
|
||||
card bound to `VFIO-PCI`. Verify this by looking in `/dev/vfio` for a listing
|
||||
of devices. Replace the number 90 with one of the VF’s exposed in `/dev/vfio`.
|
||||
It might require you to add an `IPC_LOCK` capability to your Docker runtime
|
||||
depending on which rootfs you use.
|
||||
|
||||
```
|
||||
[plugins.cri.containerd.runtimes.kata-qemu]
|
||||
runtime_type = "io.containerd.kata-qemu.v2"
|
||||
privileged_without_host_devices = true
|
||||
pod_annotations = ["io.katacontainers.*"]
|
||||
[plugins.cri.containerd.runtimes.kata-qemu.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
[plugins.cri.containerd.runtimes.kata-clh]
|
||||
runtime_type = "io.containerd.kata-clh.v2"
|
||||
privileged_without_host_devices = true
|
||||
pod_annotations = ["io.katacontainers.*"]
|
||||
[plugins.cri.containerd.runtimes.kata-clh.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-clh.toml"
|
||||
```sh
|
||||
$ sudo docker run -it --runtime=kata-runtime --cap-add=IPC_LOCK --cap-add=SYS_ADMIN --device=/dev/vfio/90 -v /dev:/dev -v ${QAT_CONF_LOCATION}:/etc openssl-qat-engine bash
|
||||
```
|
||||
|
||||
In addition, containerd expects the binary to be in `/usr/local/bin` so add
|
||||
this small script so that it redirects to be able to use either QEMU or
|
||||
Cloud Hypervisor with Kata.
|
||||
|
||||
```bash
|
||||
$ echo '#!/bin/bash' | sudo tee /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ echo 'KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-qemu.toml /opt/kata/bin/containerd-shim-kata-v2 $@' | sudo tee -a /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ sudo chmod +x /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
$ echo '#!/bin/bash' | sudo tee /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
$ echo 'KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-clh.toml /opt/kata/bin/containerd-shim-kata-v2 $@' | sudo tee -a /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
$ sudo chmod +x /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
```
|
||||
|
||||
After the OpenSSL image is built and imported into containerd, a Intel® QAT
|
||||
virtual function exposed in the step above can be added to the `ctr` command.
|
||||
Make sure to change the `/dev/vfio` number to one that actually exists on the
|
||||
host system. When using the `ctr` tool, the`configuration.toml` for Kata needs
|
||||
to point to the custom Kata kernel and rootfs built above and the Intel® QAT
|
||||
modules in the Kata rootfs need to load at boot. The following steps assume that
|
||||
`kata-deploy` was used to install Kata and QEMU is being tested. If using a
|
||||
different hypervisor, different install method for Kata, or a different
|
||||
Intel® QAT chipset then the command will need to be modified.
|
||||
|
||||
> **Note: The following was tested with
|
||||
[containerd v1.4.6](https://github.com/containerd/containerd/releases/tag/v1.4.6).**
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
$ sudo sed -i "/kernel =/c kernel = "\"${KATA_ROOTFS_LOCATION}/${KATA_KERNEL_NAME}\""" $config_file
|
||||
$ sudo sed -i "/image =/c image = "\"${KATA_KERNEL_LOCATION}/kata-containers.img\""" $config_file
|
||||
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 modules-load=usdm_drv,qat_c62xvf"/g' $config_file
|
||||
$ sudo docker save -o openssl-qat-engine.tar openssl-qat-engine:latest
|
||||
$ sudo ctr images import openssl-qat-engine.tar
|
||||
$ sudo ctr run --runtime io.containerd.run.kata-qemu.v2 --privileged -t --rm --device=/dev/vfio/180 --mount type=bind,src=/dev,dst=/dev,options=rbind:rw --mount type=bind,src=${QAT_CONF_LOCATION}/c6xxvf_dev0.conf,dst=/etc/c6xxvf_dev0.conf,options=rbind:rw docker.io/library/openssl-qat-engine:latest bash
|
||||
```
|
||||
|
||||
Below are some commands to run in the container image to verify Intel® QAT is
|
||||
Below are some commands to run in the container image to verify Intel QAT is
|
||||
working
|
||||
|
||||
```sh
|
||||
root@67561dc2757a/ # cat /proc/modules
|
||||
qat_c62xvf 16384 - - Live 0xffffffffc00d9000 (OE)
|
||||
usdm_drv 86016 - - Live 0xffffffffc00e8000 (OE)
|
||||
intel_qat 249856 - - Live 0xffffffffc009b000 (OE)
|
||||
|
||||
root@67561dc2757a/ # adf_ctl restart
|
||||
Restarting all devices.
|
||||
Processing /etc/c6xxvf_dev0.conf
|
||||
|
||||
root@67561dc2757a/ # adf_ctl status
|
||||
Checking status of all devices.
|
||||
There is 1 QAT acceleration device(s) in the system:
|
||||
qat_dev0 - type: c6xxvf, inst_id: 0, node_id: 0, bsf: 0000:01:01.0, #accel: 1 #engines: 1 state: up
|
||||
|
||||
root@67561dc2757a/ # openssl engine -c -t qat-hw
|
||||
(qat-hw) Reference implementation of QAT crypto engine v0.6.1
|
||||
[RSA, DSA, DH, AES-128-CBC-HMAC-SHA1, AES-128-CBC-HMAC-SHA256, AES-256-CBC-HMAC-SHA1, AES-256-CBC-HMAC-SHA256, TLS1-PRF, HKDF, X25519, X448]
|
||||
[ available ]
|
||||
bash-5.0# cat /proc/modules
|
||||
bash-5.0# adf_ctl restart
|
||||
bash-5.0# adf_ctl status
|
||||
bash-5.0# openssl engine -c -t qat
|
||||
```
|
||||
|
||||
### Test Intel® QAT in Kubernetes
|
||||
|
||||
Start a Kubernetes cluster with containerd as the CRI. The host should
|
||||
already be setup with 16 virtual functions of the Intel® QAT card bound to
|
||||
`VFIO-PCI`. Verify this by looking in `/dev/vfio` for a listing of devices.
|
||||
You might need to disable Docker before initializing Kubernetes. Be aware
|
||||
that the OpenSSL container image built above will need to be exported from
|
||||
Docker and imported into containerd.
|
||||
|
||||
If Kata is installed through [`kata-deploy`](https://github.com/kata-containers/kata-containers/blob/stable-2.0/tools/packaging/kata-deploy/README.md)
|
||||
there will be multiple `configuration.toml` files associated with different
|
||||
hypervisors. Rather than add in the custom Kata kernel, Kata rootfs, and
|
||||
kernel modules to each `configuration.toml` as the default, instead use
|
||||
[annotations](https://github.com/kata-containers/kata-containers/blob/stable-2.0/docs/how-to/how-to-load-kernel-modules-with-kata.md)
|
||||
in the Kubernetes YAML file to tell Kata which kernel and rootfs to use. The
|
||||
easy way to do this is to use `kata-deploy` which will install the Kata binaries
|
||||
to `/opt` and properly configure the `/etc/containerd/config.toml` with annotation
|
||||
support. However, the `configuration.toml` needs to enable support for
|
||||
annotations as well. The following configures both QEMU and Cloud Hypervisor
|
||||
`configuration.toml` files that are currently available with Kata Container
|
||||
versions 2.0 and higher.
|
||||
|
||||
```bash
|
||||
$ sudo sed -i 's/enable_annotations\s=\s\[\]/enable_annotations = [".*"]/' /opt/kata/share/defaults/kata-containers/configuration-qemu.toml
|
||||
$ sudo sed -i 's/enable_annotations\s=\s\[\]/enable_annotations = [".*"]/' /opt/kata/share/defaults/kata-containers/configuration-clh.toml
|
||||
```
|
||||
|
||||
Export the OpenSSL image from Docker and import into containerd.
|
||||
|
||||
```bash
|
||||
$ sudo docker save -o openssl-qat-engine.tar openssl-qat-engine:latest
|
||||
$ sudo ctr -n=k8s.io images import openssl-qat-engine.tar
|
||||
```
|
||||
|
||||
The [Intel® QAT Plugin](https://github.com/intel/intel-device-plugins-for-kubernetes/blob/master/cmd/qat_plugin/README.md)
|
||||
needs to be started so that the virtual functions can be discovered and
|
||||
used by Kubernetes.
|
||||
|
||||
The following YAML file can be used to start a Kata container with Intel® QAT
|
||||
support. If Kata is installed with `kata-deploy`, then the containerd
|
||||
`configuration.toml` should have all of the Kata runtime classes already
|
||||
populated and annotations supported. To use a Intel® QAT virtual function, the
|
||||
Intel® QAT plugin needs to be started after the VF's are bound to `VFIO-PCI` as
|
||||
described [above](#expose-and-bind-intel-qat-virtual-functions-to-vfio-pci-every-reboot).
|
||||
Edit the following to point to the correct Kata kernel and rootfs location
|
||||
built with Intel® QAT support.
|
||||
|
||||
```bash
|
||||
$ cat << EOF > kata-openssl-qat.yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kata-openssl-qat
|
||||
labels:
|
||||
app: kata-openssl-qat
|
||||
annotations:
|
||||
io.katacontainers.config.hypervisor.kernel: "$KATA_KERNEL_LOCATION/$KATA_KERNEL_NAME"
|
||||
io.katacontainers.config.hypervisor.image: "$KATA_ROOTFS_LOCATION/kata-containers.img"
|
||||
io.katacontainers.config.hypervisor.kernel_params: "modules-load=usdm_drv,qat_c62xvf"
|
||||
spec:
|
||||
runtimeClassName: kata-qemu
|
||||
containers:
|
||||
- name: kata-openssl-qat
|
||||
image: docker.io/library/openssl-qat-engine:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
qat.intel.com/generic: 1
|
||||
cpu: 1
|
||||
securityContext:
|
||||
capabilities:
|
||||
add: ["IPC_LOCK", "SYS_ADMIN"]
|
||||
volumeMounts:
|
||||
- mountPath: /etc/c6xxvf_dev0.conf
|
||||
name: etc-mount
|
||||
- mountPath: /dev
|
||||
name: dev-mount
|
||||
volumes:
|
||||
- name: dev-mount
|
||||
hostPath:
|
||||
path: /dev
|
||||
- name: etc-mount
|
||||
hostPath:
|
||||
path: $QAT_CONF_LOCATION/c6xxvf_dev0.conf
|
||||
EOF
|
||||
```
|
||||
|
||||
Use `kubectl` to start the pod. Verify that Intel® QAT card acceleration is
|
||||
working with the Intel® QAT engine.
|
||||
```bash
|
||||
$ kubectl apply -f kata-openssl-qat.yaml
|
||||
```
|
||||
Test with Intel QAT card acceleration
|
||||
|
||||
```sh
|
||||
$ kubectl exec -it kata-openssl-qat -- adf_ctl restart
|
||||
Restarting all devices.
|
||||
Processing /etc/c6xxvf_dev0.conf
|
||||
bash-5.0# openssl speed -engine qat -elapsed -async_jobs 72 rsa2048
|
||||
```
|
||||
|
||||
$ kubectl exec -it kata-openssl-qat -- adf_ctl status
|
||||
Checking status of all devices.
|
||||
There is 1 QAT acceleration device(s) in the system:
|
||||
qat_dev0 - type: c6xxvf, inst_id: 0, node_id: 0, bsf: 0000:01:01.0, #accel: 1 #engines: 1 state: up
|
||||
Test with CPU acceleration
|
||||
|
||||
$ kubectl exec -it kata-openssl-qat -- openssl engine -c -t qat-hw
|
||||
(qat-hw) Reference implementation of QAT crypto engine v0.6.1
|
||||
[RSA, DSA, DH, AES-128-CBC-HMAC-SHA1, AES-128-CBC-HMAC-SHA256, AES-256-CBC-HMAC-SHA1, AES-256-CBC-HMAC-SHA256, TLS1-PRF, HKDF, X25519, X448]
|
||||
[ available ]
|
||||
```sh
|
||||
bash-5.0# openssl speed -elapsed rsa2048
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
@@ -549,9 +412,9 @@ c6xxvf_dev10.conf c6xxvf_dev13.conf c6xxvf_dev2.conf c6xxvf_dev5.conf c6xxvf
|
||||
```
|
||||
|
||||
* Check `dmesg` inside the container to see if there are any issues with the
|
||||
Intel® QAT driver.
|
||||
Intel QAT driver.
|
||||
|
||||
* If there are issues building the OpenSSL Intel® QAT container image, then
|
||||
* If there are issues building the OpenSSL Intel QAT container image, then
|
||||
check to make sure that runc is the default runtime for building container.
|
||||
|
||||
```sh
|
||||
@@ -562,16 +425,15 @@ Environment="DOCKER_DEFAULT_RUNTIME=--default-runtime runc"
|
||||
|
||||
## Optional Scripts
|
||||
|
||||
### Verify Intel® QAT card counters are incremented
|
||||
### Verify Intel QAT card counters are incremented
|
||||
|
||||
To check the built in firmware counters, the Intel® QAT driver has to be compiled
|
||||
and installed to the host and can't rely on the built in host driver. The
|
||||
counters will increase when the accelerator is actively being used. To verify
|
||||
Intel® QAT is actively accelerating the containerized application, use the
|
||||
following instructions to check if any of the counters increment. Make
|
||||
sure to change the PCI Device ID to match whats in the system.
|
||||
Use the `lspci` command to figure out which PCI bus the Intel QAT accelerators
|
||||
are on. The counters will increase when the accelerator is actively being
|
||||
used. To verify QAT is actively accelerating the containerized application,
|
||||
use the following instructions to check if any of the counters are
|
||||
incrementing. You will have to change the PCI device ID to match your system.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ for i in 0434 0435 37c8 1f18 1f19; do lspci -d 8086:$i; done
|
||||
$ sudo watch cat /sys/kernel/debug/qat_c6xx_0000\:b1\:00.0/fw_counters
|
||||
$ sudo watch cat /sys/kernel/debug/qat_c6xx_0000\:b3\:00.0/fw_counters
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
# Kata Containers with SGX
|
||||
|
||||
Intel® Software Guard Extensions (SGX) is a set of instructions that increases the security
|
||||
of applications code and data, giving them more protections from disclosure or modification.
|
||||
|
||||
> **Note:** At the time of writing this document, SGX patches have not landed on the Linux kernel
|
||||
> project, so specific versions for guest and host kernels must be installed to enable SGX.
|
||||
|
||||
## Check if SGX is enabled
|
||||
|
||||
Run the following command to check if your host supports SGX.
|
||||
|
||||
```sh
|
||||
$ grep -o sgx /proc/cpuinfo
|
||||
```
|
||||
|
||||
Continue to the following section if the output of the above command is empty,
|
||||
otherwise continue to section [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
|
||||
|
||||
## Install Host kernel with SGX support
|
||||
|
||||
The following commands were tested on Fedora 32, they might work on other distros too.
|
||||
|
||||
```sh
|
||||
$ git clone --depth=1 https://github.com/intel/kvm-sgx
|
||||
$ pushd kvm-sgx
|
||||
$ cp /boot/config-$(uname -r) .config
|
||||
$ yes "" | make oldconfig
|
||||
$ # In the following step, enable: INTEL_SGX and INTEL_SGX_VIRTUALIZATION
|
||||
$ make menuconfig
|
||||
$ make -j$(($(nproc)-1)) bzImage
|
||||
$ make -j$(($(nproc)-1)) modules
|
||||
$ sudo make modules_install
|
||||
$ sudo make install
|
||||
$ popd
|
||||
$ sudo reboot
|
||||
```
|
||||
|
||||
> **Notes:**
|
||||
> * Run: `mokutil --sb-state` to check whether secure boot is enabled, if so, you will need to sign the kernel.
|
||||
> * You'll lose SGX support when a new distro kernel is installed and the system rebooted.
|
||||
|
||||
Once you have restarted your system with the new brand Linux Kernel with SGX support, run
|
||||
the following command to make sure it's enabled. If the output is empty, go to the BIOS
|
||||
setup and enable SGX manually.
|
||||
|
||||
```sh
|
||||
$ grep -o sgx /proc/cpuinfo
|
||||
```
|
||||
|
||||
## Install Guest kernel with SGX support
|
||||
|
||||
Install the guest kernel in the Kata Containers directory, this way it can be used to run
|
||||
Kata Containers.
|
||||
|
||||
```sh
|
||||
$ curl -LOk https://github.com/devimc/kvm-sgx/releases/download/v0.0.1/kata-virtiofs-sgx.tar.gz
|
||||
$ sudo tar -xf kata-virtiofs-sgx.tar.gz -C /usr/share/kata-containers/
|
||||
$ sudo sed -i 's|kernel =|kernel = "/usr/share/kata-containers/vmlinux-virtiofs-sgx.container"|g' \
|
||||
/usr/share/defaults/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Run Kata Containers with SGX enabled
|
||||
|
||||
Before running a Kata Container make sure that your version of `crio` or `containerd`
|
||||
supports annotations.
|
||||
For `containerd` check in `/etc/containerd/config.toml` that the list of `pod_annotations` passed
|
||||
to the `sandbox` are: `["io.katacontainers.*", "sgx.intel.com/epc"]`.
|
||||
|
||||
> `sgx.yaml`
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: sgx
|
||||
annotations:
|
||||
sgx.intel.com/epc: "32Mi"
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
runtimeClassName: kata
|
||||
containers:
|
||||
- name: c1
|
||||
image: busybox
|
||||
command:
|
||||
- sh
|
||||
stdin: true
|
||||
tty: true
|
||||
volumeMounts:
|
||||
- mountPath: /dev/sgx/
|
||||
name: test-volume
|
||||
volumes:
|
||||
- name: test-volume
|
||||
hostPath:
|
||||
path: /dev/sgx/
|
||||
type: Directory
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl apply -f sgx.yaml
|
||||
$ kubectl exec -ti sgx ls /dev/sgx/
|
||||
enclave provision
|
||||
```
|
||||
|
||||
The output of the latest command shouldn't be empty, otherwise check
|
||||
your system environment to make sure SGX is fully supported.
|
||||
|
||||
[1]: github.com/cloud-hypervisor/cloud-hypervisor/
|
||||
@@ -1,6 +1,13 @@
|
||||
# Setup to run SPDK vhost-user devices with Kata Containers and Docker*
|
||||
|
||||
> **Note:** This guide only applies to QEMU, since the vhost-user storage
|
||||
- [SPDK vhost-user target overview](#spdk-vhost-user-target-overview)
|
||||
- [Install and setup SPDK vhost-user target](#install-and-setup-spdk-vhost-user-target)
|
||||
- [Get source code and build SPDK](#get-source-code-and-build-spdk)
|
||||
- [Run SPDK vhost-user target](#run-spdk-vhost-user-target)
|
||||
- [Host setup for vhost-user devices](#host-setup-for-vhost-user-devices)
|
||||
- [Launch a Kata container with SPDK vhost-user block device](#launch-a-kata-container-with-spdk-vhost-user-block-device)
|
||||
|
||||
> **NOTE:** This guide only applies to QEMU, since the vhost-user storage
|
||||
> device is only available for QEMU now. The enablement work on other
|
||||
> hypervisors is still ongoing.
|
||||
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
# Setup to use SR-IOV with Kata Containers and Docker*
|
||||
|
||||
- [Install the SR-IOV Docker\* plugin](#install-the-sr-iov-docker-plugin)
|
||||
- [Host setup for SR-IOV](#host-setup-for-sr-iov)
|
||||
- [Checking your NIC for SR-IOV](#checking-your-nic-for-sr-iov)
|
||||
- [IOMMU Groups and PCIe Access Control Services](#iommu-groups-and-pcie-access-control-services)
|
||||
- [Update the host kernel](#update-the-host-kernel)
|
||||
- [Set up the SR-IOV Device](#set-up-the-sr-iov-device)
|
||||
- [Example: Launch a Kata Containers container using SR-IOV](#example-launch-a-kata-containers-container-using-sr-iov)
|
||||
|
||||
Single Root I/O Virtualization (SR-IOV) enables splitting a physical device into
|
||||
virtual functions (VFs). Virtual functions enable direct passthrough to virtual
|
||||
machines or containers. For Kata Containers, we enabled a Container Network
|
||||
|
||||
@@ -12,7 +12,7 @@ For more information about VPP visit their [wiki](https://wiki.fd.io/view/VPP).
|
||||
|
||||
## Install and configure Kata Containers
|
||||
|
||||
Follow the [Kata Containers setup instructions](../Developer-Guide.md).
|
||||
Follow the [Kata Containers setup instructions](https://github.com/kata-containers/documentation/wiki/Developer-Guide).
|
||||
|
||||
In order to make use of VHOST-USER based interfaces, the container needs to be backed
|
||||
by huge pages. `HugePages` support is required for the large memory pool allocation used for
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
# OpenStack Zun DevStack working with Kata Containers
|
||||
|
||||
## Introduction
|
||||
|
||||
This guide describes how to get Kata Containers to work with OpenStack Zun
|
||||
|
||||
@@ -21,12 +21,7 @@ const LOG_LEVELS: &[(&str, slog::Level)] = &[
|
||||
];
|
||||
|
||||
// XXX: 'writer' param used to make testing possible.
|
||||
pub fn create_logger<W>(
|
||||
name: &str,
|
||||
source: &str,
|
||||
level: slog::Level,
|
||||
writer: W,
|
||||
) -> (slog::Logger, slog_async::AsyncGuard)
|
||||
pub fn create_logger<W>(name: &str, source: &str, level: slog::Level, writer: W) -> slog::Logger
|
||||
where
|
||||
W: Write + Send + Sync + 'static,
|
||||
{
|
||||
@@ -42,21 +37,17 @@ where
|
||||
let filter_drain = RuntimeLevelFilter::new(unique_drain, level).fuse();
|
||||
|
||||
// Ensure the logger is thread-safe
|
||||
let (async_drain, guard) = slog_async::Async::new(filter_drain)
|
||||
.thread_name("slog-async-logger".into())
|
||||
.build_with_guard();
|
||||
let async_drain = slog_async::Async::new(filter_drain).build().fuse();
|
||||
|
||||
// Add some "standard" fields
|
||||
let logger = slog::Logger::root(
|
||||
slog::Logger::root(
|
||||
async_drain.fuse(),
|
||||
o!("version" => env!("CARGO_PKG_VERSION"),
|
||||
"subsystem" => "root",
|
||||
"pid" => process::id().to_string(),
|
||||
"name" => name.to_string(),
|
||||
"source" => source.to_string()),
|
||||
);
|
||||
|
||||
(logger, guard)
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_log_levels() -> Vec<&'static str> {
|
||||
@@ -102,7 +93,9 @@ impl HashSerializer {
|
||||
// Take care to only add the first instance of a key. This matters for loggers (but not
|
||||
// Records) since a child loggers have parents and the loggers are serialised child first
|
||||
// meaning the *newest* fields are serialised first.
|
||||
self.fields.entry(key).or_insert(value);
|
||||
if !self.fields.contains_key(&key) {
|
||||
self.fields.insert(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_field(&mut self, key: &str) {
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
# Kata Containers snap image
|
||||
|
||||
* [Initial setup](#initial-setup)
|
||||
* [Install snap](#install-snap)
|
||||
* [Build and install snap image](#build-and-install-snap-image)
|
||||
* [Configure Kata Containers](#configure-kata-containers)
|
||||
* [Integration with docker and Kubernetes](#integration-with-docker-and-kubernetes)
|
||||
* [Remove snap](#remove-snap)
|
||||
* [Limitations](#limitations)
|
||||
|
||||
This directory contains the resources needed to build the Kata Containers
|
||||
[snap][1] image.
|
||||
|
||||
|
||||
@@ -80,8 +80,6 @@ parts:
|
||||
- uidmap
|
||||
- gnupg2
|
||||
override-build: |
|
||||
[ "$(uname -m)" = "ppc64le" ] || [ "$(uname -m)" = "s390x" ] && sudo apt-get --no-install-recommends install -y protobuf-compiler
|
||||
|
||||
yq=${SNAPCRAFT_STAGE}/yq
|
||||
|
||||
# set GOPATH
|
||||
@@ -90,7 +88,6 @@ parts:
|
||||
|
||||
export GOROOT=${SNAPCRAFT_STAGE}
|
||||
export PATH="${GOROOT}/bin:${PATH}"
|
||||
export GO111MODULE="auto"
|
||||
|
||||
http_proxy=${http_proxy:-""}
|
||||
https_proxy=${https_proxy:-""}
|
||||
@@ -115,17 +112,14 @@ parts:
|
||||
cd ${kata_dir}/tools/osbuilder
|
||||
|
||||
# build image
|
||||
export AGENT_VERSION=$(cat ${kata_dir}/VERSION)
|
||||
export AGENT_INIT=yes
|
||||
export USE_DOCKER=1
|
||||
export DEBUG=1
|
||||
case "$(uname -m)" in
|
||||
aarch64)
|
||||
aarch64|ppc64le|s390x)
|
||||
sudo -E PATH=$PATH make initrd DISTRO=alpine
|
||||
;;
|
||||
ppc64le|s390x)
|
||||
# Cannot use alpine on ppc64le/s390x because it would require a musl agent
|
||||
sudo -E PATH=$PATH make initrd DISTRO=ubuntu
|
||||
;;
|
||||
x86_64)
|
||||
# In some build systems it's impossible to build a rootfs image, try with the initrd image
|
||||
sudo -E PATH=$PATH make image DISTRO=clearlinux || sudo -E PATH=$PATH make initrd DISTRO=alpine
|
||||
@@ -147,7 +141,6 @@ parts:
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
export GOROOT=${SNAPCRAFT_STAGE}
|
||||
export PATH="${GOROOT}/bin:${PATH}"
|
||||
export GO111MODULE="auto"
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
cd ${kata_dir}/src/runtime
|
||||
@@ -169,9 +162,12 @@ parts:
|
||||
SKIP_GO_VERSION_CHECK=1 \
|
||||
QEMUCMD=qemu-system-$arch
|
||||
|
||||
if [ ! -f ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then
|
||||
sed -i -e "s|^image =.*|initrd = \"/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share/kata-containers/kata-containers-initrd.img\"|" \
|
||||
${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
if [ -e ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then
|
||||
# Use rootfs image by default
|
||||
sed -i -e '/^initrd =/d' ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
else
|
||||
# Use initrd by default
|
||||
sed -i -e '/^image =/d' ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
fi
|
||||
|
||||
kernel:
|
||||
@@ -184,29 +180,19 @@ parts:
|
||||
- bison
|
||||
- flex
|
||||
override-build: |
|
||||
yq=${SNAPCRAFT_STAGE}/yq
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
versions_file="${kata_dir}/versions.yaml"
|
||||
kernel_version="$(${yq} r $versions_file assets.kernel.version)"
|
||||
#Remove extra 'v'
|
||||
kernel_version=${kernel_version#v}
|
||||
|
||||
[ "$(uname -m)" = "s390x" ] && sudo apt-get --no-install-recommends install -y libssl-dev
|
||||
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
export GO111MODULE="auto"
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
cd ${kata_dir}/tools/packaging/kernel
|
||||
|
||||
# Setup and build kernel
|
||||
./build-kernel.sh -v ${kernel_version} -d setup
|
||||
./build-kernel.sh -d setup
|
||||
kernel_dir_prefix="kata-linux-"
|
||||
cd ${kernel_dir_prefix}*
|
||||
version=$(basename ${PWD} | sed 's|'"${kernel_dir_prefix}"'||' | cut -d- -f1)
|
||||
make -j $(($(nproc)-1)) EXTRAVERSION=".container"
|
||||
|
||||
kernel_suffix=${kernel_version}.container
|
||||
kernel_suffix=${version}.container
|
||||
kata_kernel_dir=${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers
|
||||
mkdir -p ${kata_kernel_dir}
|
||||
|
||||
@@ -216,10 +202,8 @@ parts:
|
||||
ln -sf ${vmlinuz_name} ${kata_kernel_dir}/vmlinuz.container
|
||||
|
||||
# Install raw kernel
|
||||
vmlinux_path=vmlinux
|
||||
[ "$(uname -m)" = "s390x" ] && vmlinux_path=arch/s390/boot/compressed/vmlinux
|
||||
vmlinux_name=vmlinux-${kernel_suffix}
|
||||
cp ${vmlinux_path} ${kata_kernel_dir}/${vmlinux_name}
|
||||
cp vmlinux ${kata_kernel_dir}/${vmlinux_name}
|
||||
ln -sf ${vmlinux_name} ${kata_kernel_dir}/vmlinux.container
|
||||
|
||||
qemu:
|
||||
@@ -243,38 +227,32 @@ parts:
|
||||
- libblkid-dev
|
||||
- libffi-dev
|
||||
- libmount-dev
|
||||
- libseccomp-dev
|
||||
- libselinux1-dev
|
||||
- ninja-build
|
||||
override-build: |
|
||||
yq=${SNAPCRAFT_STAGE}/yq
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
export GO111MODULE="auto"
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
versions_file="${kata_dir}/versions.yaml"
|
||||
# arch-specific definition
|
||||
case "$(uname -m)" in
|
||||
"aarch64")
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.version)"
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.branch)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.commit)"
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"
|
||||
patches_dir="${kata_dir}/tools/packaging/obs-packaging/qemu-aarch64/patches/"
|
||||
;;
|
||||
|
||||
*)
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.version)"
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.tag)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit=""
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"
|
||||
;;
|
||||
esac
|
||||
|
||||
# download source
|
||||
qemu_dir=${SNAPCRAFT_STAGE}/qemu
|
||||
rm -rf "${qemu_dir}"
|
||||
git clone --branch ${branch} --single-branch ${url} "${qemu_dir}"
|
||||
cd ${qemu_dir}
|
||||
[ -z "${commit}" ] || git checkout ${commit}
|
||||
@@ -282,32 +260,31 @@ parts:
|
||||
[ -n "$(ls -A ui/keycodemapdb)" ] || git clone https://github.com/qemu/keycodemapdb ui/keycodemapdb/
|
||||
[ -n "$(ls -A capstone)" ] || git clone https://github.com/qemu/capstone capstone
|
||||
|
||||
# Apply branch patches
|
||||
[ -d "${patches_version_dir}" ] || mkdir "${patches_version_dir}"
|
||||
${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_dir}"
|
||||
${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_version_dir}"
|
||||
# Apply patches
|
||||
for patch in ${patches_dir}/*.patch; do
|
||||
echo "Applying $(basename "$patch") ..."
|
||||
patch \
|
||||
--batch \
|
||||
--forward \
|
||||
--strip 1 \
|
||||
--input "$patch"
|
||||
done
|
||||
|
||||
# Only x86_64 supports libpmem
|
||||
[ "$(uname -m)" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev
|
||||
[ "$(uname -m)" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev libseccomp-dev
|
||||
|
||||
configure_hypervisor=${kata_dir}/tools/packaging/scripts/configure-hypervisor.sh
|
||||
chmod +x ${configure_hypervisor}
|
||||
# static build. The --prefix, --libdir, --libexecdir, --datadir arguments are
|
||||
# based on PREFIX and set by configure-hypervisor.sh
|
||||
echo "$(PREFIX=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr ${configure_hypervisor} -s kata-qemu) \
|
||||
--disable-rbd " \
|
||||
# static build
|
||||
echo "$(${configure_hypervisor} -s qemu) \
|
||||
--disable-rbd
|
||||
--prefix=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr \
|
||||
--datadir=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share \
|
||||
--libexecdir=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/libexec/qemu" \
|
||||
| xargs ./configure
|
||||
|
||||
# Copy QEMU configurations (Kconfigs)
|
||||
case "${branch}" in
|
||||
"v5.1.0")
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs
|
||||
;;
|
||||
|
||||
*)
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs/devices/
|
||||
;;
|
||||
esac
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs/
|
||||
|
||||
# build and install
|
||||
make -j $(($(nproc)-1))
|
||||
@@ -318,6 +295,7 @@ parts:
|
||||
- -usr/bin/qemu-pr-helper
|
||||
- -usr/bin/virtfs-proxy-helper
|
||||
- -usr/include/
|
||||
- -usr/libexec/
|
||||
- -usr/share/applications/
|
||||
- -usr/share/icons/
|
||||
- -usr/var/
|
||||
@@ -329,8 +307,4 @@ parts:
|
||||
|
||||
apps:
|
||||
runtime:
|
||||
command: usr/bin/kata-runtime
|
||||
shim:
|
||||
command: usr/bin/containerd-shim-kata-v2
|
||||
collect-data:
|
||||
command: usr/bin/kata-collect-data.sh
|
||||
|
||||
2
src/agent/.gitignore
vendored
2
src/agent/.gitignore
vendored
@@ -1,2 +0,0 @@
|
||||
tarpaulin-report.html
|
||||
vendor/
|
||||
1237
src/agent/Cargo.lock
generated
1237
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -9,64 +9,35 @@ oci = { path = "oci" }
|
||||
logging = { path = "../../pkg/logging" }
|
||||
rustjail = { path = "rustjail" }
|
||||
protocols = { path = "protocols" }
|
||||
netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] }
|
||||
lazy_static = "1.3.0"
|
||||
ttrpc = { version = "0.5.0", features = ["async", "protobuf-codec"], default-features = false }
|
||||
ttrpc = "0.3.0"
|
||||
protobuf = "=2.14.0"
|
||||
libc = "0.2.58"
|
||||
nix = "0.21.0"
|
||||
capctl = "0.2.0"
|
||||
nix = "0.17.0"
|
||||
prctl = "1.0.0"
|
||||
serde_json = "1.0.39"
|
||||
signal-hook = "0.1.9"
|
||||
scan_fmt = "0.2.3"
|
||||
scopeguard = "1.0.0"
|
||||
thiserror = "1.0.26"
|
||||
regex = "1"
|
||||
|
||||
# Async helpers
|
||||
async-trait = "0.1.42"
|
||||
async-recursion = "0.3.2"
|
||||
futures = "0.3.12"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-vsock = "0.3.1"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
|
||||
rtnetlink = "0.8.0"
|
||||
netlink-packet-utils = "0.4.1"
|
||||
ipnetwork = "0.17.0"
|
||||
|
||||
# slog:
|
||||
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
|
||||
# - The 'max_*' features allow changing the log level at runtime
|
||||
# (by stopping the compiler from removing log calls).
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
|
||||
slog-scope = "4.1.2"
|
||||
|
||||
# Redirect ttrpc log calls
|
||||
slog-stdlog = "4.0.0"
|
||||
log = "0.4.11"
|
||||
|
||||
# for testing
|
||||
tempfile = "3.1.0"
|
||||
prometheus = { version = "0.9.0", features = ["process"] }
|
||||
procfs = "0.7.9"
|
||||
anyhow = "1.0.32"
|
||||
cgroups = { package = "cgroups-rs", version = "0.2.5" }
|
||||
|
||||
# Tracing
|
||||
tracing = "0.1.26"
|
||||
tracing-subscriber = "0.2.18"
|
||||
tracing-opentelemetry = "0.13.0"
|
||||
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"]}
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
"netlink",
|
||||
"oci",
|
||||
"protocols",
|
||||
"rustjail",
|
||||
]
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
||||
@@ -3,11 +3,6 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# To show variables or targets help on `make help`
|
||||
# Use the following format:
|
||||
# '##VAR VARIABLE_NAME: help about variable'
|
||||
# '##TARGET TARGET_NAME: help about target'
|
||||
|
||||
PROJECT_NAME = Kata Containers
|
||||
PROJECT_URL = https://github.com/kata-containers
|
||||
PROJECT_COMPONENT = kata-agent
|
||||
@@ -21,22 +16,51 @@ SOURCES := \
|
||||
VERSION_FILE := ./VERSION
|
||||
VERSION := $(shell grep -v ^\# $(VERSION_FILE))
|
||||
COMMIT_NO := $(shell git rev-parse HEAD 2>/dev/null || true)
|
||||
COMMIT_NO_SHORT := $(shell git rev-parse --short HEAD 2>/dev/null || true)
|
||||
COMMIT := $(if $(shell git status --porcelain --untracked-files=no 2>/dev/null || true),${COMMIT_NO}-dirty,${COMMIT_NO})
|
||||
COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)
|
||||
|
||||
# Exported to allow cargo to see it
|
||||
export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
|
||||
|
||||
include ../../utils.mk
|
||||
BUILD_TYPE = release
|
||||
|
||||
ARCH = $(shell uname -m)
|
||||
LIBC ?= musl
|
||||
ifneq ($(LIBC),musl)
|
||||
ifeq ($(LIBC),gnu)
|
||||
override LIBC = gnu
|
||||
else
|
||||
$(error "ERROR: A non supported LIBC value was passed. Supported values are musl and gnu")
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH), ppc64le)
|
||||
override ARCH = powerpc64le
|
||||
override LIBC = gnu
|
||||
$(warning "WARNING: powerpc64le-unknown-linux-musl target is unavailable")
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH), s390x)
|
||||
override LIBC = gnu
|
||||
$(warning "WARNING: s390x-unknown-linux-musl target is unavailable")
|
||||
endif
|
||||
|
||||
|
||||
EXTRA_RUSTFLAGS :=
|
||||
ifeq ($(ARCH), aarch64)
|
||||
override EXTRA_RUSTFLAGS = -C link-arg=-lgcc
|
||||
$(warning "WARNING: aarch64-musl needs extra symbols from libgcc")
|
||||
endif
|
||||
|
||||
TRIPLE = $(ARCH)-unknown-linux-$(LIBC)
|
||||
|
||||
TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
|
||||
|
||||
##VAR DESTDIR=<path> is a directory prepended to each installed target file
|
||||
DESTDIR :=
|
||||
##VAR BINDIR=<path> is a directory for installing executable programs
|
||||
BINDIR := /usr/bin
|
||||
|
||||
##VAR INIT=yes|no define if agent will be installed as init
|
||||
# Define if agent will be installed as init
|
||||
INIT := no
|
||||
|
||||
# Path to systemd unit directory if installed as not init.
|
||||
@@ -84,7 +108,6 @@ define INSTALL_FILE
|
||||
install -D -m 644 $1 $(DESTDIR)$2/$1 || exit 1;
|
||||
endef
|
||||
|
||||
##TARGET default: build code
|
||||
default: $(TARGET) show-header
|
||||
|
||||
$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
@@ -92,59 +115,36 @@ $(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
$(TARGET_PATH): $(SOURCES) | show-summary
|
||||
@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
|
||||
|
||||
##TARGET optimize: optimized build
|
||||
optimize: $(SOURCES) | show-summary show-header
|
||||
@RUSTFLAGS="-C link-arg=-s $(EXTRA_RUSTFLAGS) --deny-warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
show-header:
|
||||
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
|
||||
|
||||
##TARGET clippy: run clippy linter
|
||||
clippy: $(GENERATED_CODE)
|
||||
cargo clippy --all-targets --all-features --release \
|
||||
-- \
|
||||
-Aclippy::redundant_allocation \
|
||||
-D warnings
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
|
||||
|
||||
format:
|
||||
cargo fmt -- --check
|
||||
|
||||
|
||||
##TARGET install: install agent
|
||||
install: install-services
|
||||
install: build-service
|
||||
@install -D $(TARGET_PATH) $(DESTDIR)/$(BINDIR)/$(TARGET)
|
||||
|
||||
##TARGET clean: clean build
|
||||
clean:
|
||||
@cargo clean
|
||||
@rm -f $(GENERATED_FILES)
|
||||
@rm -f tarpaulin-report.html
|
||||
|
||||
vendor:
|
||||
@cargo vendor
|
||||
|
||||
|
||||
#TARGET test: run cargo tests
|
||||
test:
|
||||
@cargo test --all --target $(TRIPLE)
|
||||
|
||||
##TARGET check: run test
|
||||
check: clippy format
|
||||
check: test
|
||||
|
||||
##TARGET run: build and run agent
|
||||
run:
|
||||
@cargo run --target $(TRIPLE)
|
||||
|
||||
install-services: $(GENERATED_FILES)
|
||||
build-service: $(GENERATED_FILES)
|
||||
ifeq ($(INIT),no)
|
||||
@echo "Installing systemd unit files..."
|
||||
$(foreach f,$(UNIT_FILES),$(call INSTALL_FILE,$f,$(UNIT_DIR)))
|
||||
endif
|
||||
|
||||
show-header:
|
||||
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
|
||||
|
||||
show-summary: show-header
|
||||
@printf "project:\n"
|
||||
@printf " name: $(PROJECT_NAME)\n"
|
||||
@@ -160,43 +160,13 @@ show-summary: show-header
|
||||
@printf " %s\n" "$(call get_toolchain_version)"
|
||||
@printf "\n"
|
||||
|
||||
## help: Show help comments that start with `##VAR` and `##TARGET`
|
||||
help: Makefile show-summary
|
||||
@echo "==========================Help============================="
|
||||
@echo "Variables:"
|
||||
@sed -n 's/^##VAR//p' $< | sort
|
||||
@echo ""
|
||||
@echo "Targets:"
|
||||
@sed -n 's/^##TARGET//p' $< | sort
|
||||
|
||||
TARPAULIN_ARGS:=-v --workspace
|
||||
install-tarpaulin:
|
||||
cargo install cargo-tarpaulin
|
||||
|
||||
# Check if cargo tarpaulin is installed
|
||||
HAS_TARPAULIN:= $(shell cargo --list | grep tarpaulin 2>/dev/null)
|
||||
check_tarpaulin:
|
||||
ifndef HAS_TARPAULIN
|
||||
$(error "tarpaulin is not available please: run make install-tarpaulin ")
|
||||
else
|
||||
$(info OK: tarpaulin installed)
|
||||
endif
|
||||
|
||||
##TARGET codecov: Generate code coverage report
|
||||
codecov: check_tarpaulin
|
||||
cargo tarpaulin $(TARPAULIN_ARGS)
|
||||
|
||||
##TARGET codecov-html: Generate code coverage html report
|
||||
codecov-html: check_tarpaulin
|
||||
cargo tarpaulin $(TARPAULIN_ARGS) -o Html
|
||||
help: show-summary
|
||||
|
||||
.PHONY: \
|
||||
help \
|
||||
show-header \
|
||||
show-summary \
|
||||
optimize \
|
||||
vendor
|
||||
optimize
|
||||
|
||||
##TARGET generate-protocols: generate/update grpc agent protocols
|
||||
generate-protocols:
|
||||
protocols/hack/update-generated-proto.sh all
|
||||
|
||||
@@ -39,27 +39,11 @@ After that, we drafted the initial code here, and any contributions are welcome.
|
||||
## Getting Started
|
||||
|
||||
### Build from Source
|
||||
The rust-agent needs to be built statically and linked with `musl`
|
||||
|
||||
> **Note:** skip this step for ppc64le, the build scripts explicitly use gnu for ppc64le.
|
||||
|
||||
The rust-agent need to be built with rust newer than 1.37, and static linked with `musl`.
|
||||
```bash
|
||||
$ arch=$(uname -m)
|
||||
$ rustup target add "${arch}-unknown-linux-musl"
|
||||
$ sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
```
|
||||
|
||||
ppc64le-only: Manually install `protoc`, e.g.
|
||||
```bash
|
||||
$ sudo dnf install protobuf-compiler
|
||||
```
|
||||
|
||||
Download the source files in the Kata containers repository and build the agent:
|
||||
```bash
|
||||
$ GOPATH="${GOPATH:-$HOME/go}"
|
||||
$ dir="$GOPATH/src/github.com/kata-containers"
|
||||
$ git -C ${dir} clone --depth 1 https://github.com/kata-containers/kata-containers
|
||||
$ make -C ${dir}/kata-containers/src/agent
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
cargo build --target x86_64-unknown-linux-musl --release
|
||||
```
|
||||
|
||||
## Run Kata CI with rust-agent
|
||||
|
||||
@@ -15,10 +15,8 @@ Wants=kata-containers.target
|
||||
StandardOutput=tty
|
||||
Type=simple
|
||||
ExecStart=@BINDIR@/@AGENT_NAME@
|
||||
LimitNOFILE=1048576
|
||||
LimitNOFILE=infinity
|
||||
# ExecStop is required for static agent tracing; in all other scenarios
|
||||
# the runtime handles shutting down the VM.
|
||||
ExecStop=/bin/sync ; /usr/bin/systemctl --force poweroff
|
||||
FailureAction=poweroff
|
||||
# Discourage OOM-killer from touching the agent
|
||||
OOMScoreAdjust=-997
|
||||
|
||||
20
src/agent/netlink/Cargo.toml
Normal file
20
src/agent/netlink/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "netlink"
|
||||
version = "0.1.0"
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libc = "0.2.58"
|
||||
nix = "0.17.0"
|
||||
|
||||
protobuf = { version = "=2.14.0", optional = true }
|
||||
protocols = { path = "../protocols", optional = true }
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"], optional = true }
|
||||
slog-scope = { version = "4.1.2", optional = true }
|
||||
|
||||
[features]
|
||||
with-log = ["slog", "slog-scope"]
|
||||
with-agent-handler = ["protobuf", "protocols"]
|
||||
572
src/agent/netlink/src/agent_handler.rs
Normal file
572
src/agent/netlink/src/agent_handler.rs
Normal file
@@ -0,0 +1,572 @@
|
||||
// Copyright (c) 2020 Ant Financial
|
||||
// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
//! Dedicated Netlink interfaces for Kata agent protocol handler.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use protobuf::RepeatedField;
|
||||
use protocols::types::{ARPNeighbor, IPAddress, IPFamily, Interface, Route};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[cfg(feature = "with-log")]
|
||||
// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
() => {
|
||||
slog_scope::logger().new(o!("subsystem" => "netlink"))
|
||||
};
|
||||
}
|
||||
|
||||
impl super::RtnlHandle {
|
||||
pub fn update_interface(&mut self, iface: &Interface) -> Result<Interface> {
|
||||
// the reliable way to find link is using hardware address
|
||||
// as filter. However, hardware filter might not be supported
|
||||
// by netlink, we may have to dump link list and the find the
|
||||
// target link. filter using name or family is supported, but
|
||||
// we cannot use that to find target link.
|
||||
// let's try if hardware address filter works. -_-
|
||||
|
||||
let ifinfo = self.find_link_by_hwaddr(iface.hwAddr.as_str())?;
|
||||
|
||||
// bring down interface if it is up
|
||||
if ifinfo.ifi_flags & libc::IFF_UP as u32 != 0 {
|
||||
self.set_link_status(&ifinfo, false)?;
|
||||
}
|
||||
|
||||
// delete all addresses associated with the link
|
||||
let del_addrs: Vec<RtIPAddr> = self.get_link_addresses(&ifinfo)?;
|
||||
self.delete_all_addrs(&ifinfo, del_addrs.as_ref())?;
|
||||
|
||||
// add new ip addresses in request
|
||||
for grpc_addr in &iface.IPAddresses {
|
||||
let rtip = RtIPAddr::try_from(grpc_addr.clone())?;
|
||||
self.add_one_address(&ifinfo, &rtip)?;
|
||||
}
|
||||
|
||||
let mut v: Vec<u8> = vec![0; DEFAULT_NETLINK_BUF_SIZE];
|
||||
// Safe because we have allocated enough buffer space.
|
||||
let nlh = unsafe { &mut *(v.as_mut_ptr() as *mut nlmsghdr) };
|
||||
let ifi = unsafe { &mut *(NLMSG_DATA!(nlh) as *mut ifinfomsg) };
|
||||
|
||||
// set name, set mtu, IFF_NOARP. in one rtnl_talk.
|
||||
nlh.nlmsg_len = NLMSG_LENGTH!(mem::size_of::<ifinfomsg>() as u32) as __u32;
|
||||
nlh.nlmsg_type = RTM_NEWLINK;
|
||||
nlh.nlmsg_flags = NLM_F_REQUEST;
|
||||
self.assign_seqnum(nlh);
|
||||
|
||||
ifi.ifi_family = ifinfo.ifi_family;
|
||||
ifi.ifi_type = ifinfo.ifi_type;
|
||||
ifi.ifi_index = ifinfo.ifi_index;
|
||||
if iface.raw_flags & libc::IFF_NOARP as u32 != 0 {
|
||||
ifi.ifi_change |= libc::IFF_NOARP as u32;
|
||||
ifi.ifi_flags |= libc::IFF_NOARP as u32;
|
||||
}
|
||||
|
||||
// Safe because we have allocated enough buffer space.
|
||||
unsafe {
|
||||
nlh.addattr32(IFLA_MTU, iface.mtu as u32);
|
||||
|
||||
// if str is null terminated, use addattr_var.
|
||||
// otherwise, use addattr_str
|
||||
nlh.addattr_var(IFLA_IFNAME, iface.name.as_ref());
|
||||
}
|
||||
|
||||
self.rtnl_talk(v.as_mut_slice(), false)?;
|
||||
|
||||
// TODO: why the result is ignored here?
|
||||
let _ = self.set_link_status(&ifinfo, true);
|
||||
|
||||
Ok(iface.clone())
|
||||
}
|
||||
|
||||
/// Delete this interface/link per request
|
||||
pub fn remove_interface(&mut self, iface: &Interface) -> Result<Interface> {
|
||||
let ifinfo = self.find_link_by_hwaddr(iface.hwAddr.as_str())?;
|
||||
|
||||
self.set_link_status(&ifinfo, false)?;
|
||||
|
||||
let mut v: Vec<u8> = vec![0; DEFAULT_NETLINK_BUF_SIZE];
|
||||
// Safe because we have allocated enough buffer space.
|
||||
let nlh = unsafe { &mut *(v.as_mut_ptr() as *mut nlmsghdr) };
|
||||
let ifi = unsafe { &mut *(NLMSG_DATA!(nlh) as *mut ifinfomsg) };
|
||||
|
||||
// No attributes needed?
|
||||
nlh.nlmsg_len = NLMSG_LENGTH!(mem::size_of::<ifinfomsg>()) as __u32;
|
||||
nlh.nlmsg_type = RTM_DELLINK;
|
||||
nlh.nlmsg_flags = NLM_F_REQUEST;
|
||||
self.assign_seqnum(nlh);
|
||||
|
||||
ifi.ifi_family = ifinfo.ifi_family;
|
||||
ifi.ifi_index = ifinfo.ifi_index;
|
||||
ifi.ifi_type = ifinfo.ifi_type;
|
||||
|
||||
self.rtnl_talk(v.as_mut_slice(), false)?;
|
||||
|
||||
Ok(iface.clone())
|
||||
}
|
||||
|
||||
pub fn list_interfaces(&mut self) -> Result<Vec<Interface>> {
|
||||
let mut ifaces: Vec<Interface> = Vec::new();
|
||||
let (_slv, lv) = self.dump_all_links()?;
|
||||
let (_sav, av) = self.dump_all_addresses(0)?;
|
||||
|
||||
for link in &lv {
|
||||
// Safe because dump_all_links() returns valid pointers.
|
||||
let nlh = unsafe { &**link };
|
||||
if nlh.nlmsg_type != RTM_NEWLINK && nlh.nlmsg_type != RTM_DELLINK {
|
||||
continue;
|
||||
}
|
||||
|
||||
if nlh.nlmsg_len < NLMSG_SPACE!(mem::size_of::<ifinfomsg>()) {
|
||||
info!(
|
||||
sl!(),
|
||||
"invalid nlmsg! nlmsg_len: {}, nlmsg_space: {}",
|
||||
nlh.nlmsg_len,
|
||||
NLMSG_SPACE!(mem::size_of::<ifinfomsg>())
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
// Safe because we have just validated available buffer space above.
|
||||
let ifi = unsafe { &*(NLMSG_DATA!(nlh) as *const ifinfomsg) };
|
||||
let rta: *mut rtattr = IFLA_RTA!(ifi as *const ifinfomsg) as *mut rtattr;
|
||||
let rtalen = IFLA_PAYLOAD!(nlh) as u32;
|
||||
let attrs = unsafe { parse_attrs(rta, rtalen, (IFLA_MAX + 1) as usize)? };
|
||||
|
||||
// fill out some fields of Interface,
|
||||
let mut iface: Interface = Interface::default();
|
||||
|
||||
// Safe because parse_attrs() returns valid pointers.
|
||||
unsafe {
|
||||
if !attrs[IFLA_IFNAME as usize].is_null() {
|
||||
let t = attrs[IFLA_IFNAME as usize];
|
||||
iface.name = String::from_utf8(getattr_var(t as *const rtattr))?;
|
||||
}
|
||||
|
||||
if !attrs[IFLA_MTU as usize].is_null() {
|
||||
let t = attrs[IFLA_MTU as usize];
|
||||
iface.mtu = getattr32(t) as u64;
|
||||
}
|
||||
|
||||
if !attrs[IFLA_ADDRESS as usize].is_null() {
|
||||
let alen = RTA_PAYLOAD!(attrs[IFLA_ADDRESS as usize]);
|
||||
let a: *const u8 = RTA_DATA!(attrs[IFLA_ADDRESS as usize]) as *const u8;
|
||||
iface.hwAddr = parser::format_address(a, alen as u32)?;
|
||||
}
|
||||
}
|
||||
|
||||
// get ip address info from av
|
||||
let mut ads: Vec<IPAddress> = Vec::new();
|
||||
for address in &av {
|
||||
// Safe because dump_all_addresses() returns valid pointers.
|
||||
let alh = unsafe { &**address };
|
||||
if alh.nlmsg_type != RTM_NEWADDR {
|
||||
continue;
|
||||
}
|
||||
|
||||
let tlen = NLMSG_SPACE!(mem::size_of::<ifaddrmsg>());
|
||||
if alh.nlmsg_len < tlen {
|
||||
info!(
|
||||
sl!(),
|
||||
"invalid nlmsg! nlmsg_len: {}, nlmsg_space: {}", alh.nlmsg_len, tlen
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
// Safe becahse we have checked avialable buffer space by NLMSG_SPACE above.
|
||||
let ifa = unsafe { &*(NLMSG_DATA!(alh) as *const ifaddrmsg) };
|
||||
let arta: *mut rtattr = IFA_RTA!(ifa) as *mut rtattr;
|
||||
let artalen = IFA_PAYLOAD!(alh) as u32;
|
||||
|
||||
if ifa.ifa_index as u32 == ifi.ifi_index as u32 {
|
||||
// found target addresses, parse attributes and fill out Interface
|
||||
let addrs = unsafe { parse_attrs(arta, artalen, (IFA_MAX + 1) as usize)? };
|
||||
|
||||
// fill address field of Interface
|
||||
let mut one: IPAddress = IPAddress::default();
|
||||
let tattr: *const rtattr = if !addrs[IFA_ADDRESS as usize].is_null() {
|
||||
addrs[IFA_ADDRESS as usize]
|
||||
} else {
|
||||
addrs[IFA_LOCAL as usize]
|
||||
};
|
||||
|
||||
one.mask = format!("{}", ifa.ifa_prefixlen);
|
||||
one.family = IPFamily::v4;
|
||||
if ifa.ifa_family == libc::AF_INET6 as u8 {
|
||||
one.family = IPFamily::v6;
|
||||
}
|
||||
|
||||
// Safe because parse_attrs() returns valid pointers.
|
||||
unsafe {
|
||||
let a: *const u8 = RTA_DATA!(tattr) as *const u8;
|
||||
let alen = RTA_PAYLOAD!(tattr);
|
||||
one.address = parser::format_address(a, alen as u32)?;
|
||||
}
|
||||
|
||||
ads.push(one);
|
||||
}
|
||||
}
|
||||
|
||||
iface.IPAddresses = RepeatedField::from_vec(ads);
|
||||
ifaces.push(iface);
|
||||
}
|
||||
|
||||
Ok(ifaces)
|
||||
}
|
||||
|
||||
pub fn update_routes(&mut self, rt: &[Route]) -> Result<Vec<Route>> {
|
||||
let rs = self.get_all_routes()?;
|
||||
self.delete_all_routes(&rs)?;
|
||||
|
||||
for grpcroute in rt {
|
||||
if grpcroute.gateway.as_str() == "" {
|
||||
let r = RtRoute::try_from(grpcroute.clone())?;
|
||||
if r.index == -1 {
|
||||
continue;
|
||||
}
|
||||
self.add_one_route(&r)?;
|
||||
}
|
||||
}
|
||||
|
||||
for grpcroute in rt {
|
||||
if grpcroute.gateway.as_str() != "" {
|
||||
let r = RtRoute::try_from(grpcroute.clone())?;
|
||||
if r.index == -1 {
|
||||
continue;
|
||||
}
|
||||
self.add_one_route(&r)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(rt.to_owned())
|
||||
}
|
||||
|
||||
pub fn list_routes(&mut self) -> Result<Vec<Route>> {
|
||||
// currently, only dump routes from main table for ipv4
|
||||
// ie, rtmsg.rtmsg_family = AF_INET, set RT_TABLE_MAIN
|
||||
// attribute in dump request
|
||||
// Fix Me: think about othe tables, ipv6..
|
||||
let mut rs: Vec<Route> = Vec::new();
|
||||
let (_srv, rv) = self.dump_all_routes()?;
|
||||
|
||||
// parse out routes and store in rs
|
||||
for r in &rv {
|
||||
// Safe because dump_all_routes() returns valid pointers.
|
||||
let nlh = unsafe { &**r };
|
||||
if nlh.nlmsg_type != RTM_NEWROUTE && nlh.nlmsg_type != RTM_DELROUTE {
|
||||
info!(sl!(), "not route message!");
|
||||
continue;
|
||||
}
|
||||
let tlen = NLMSG_SPACE!(mem::size_of::<rtmsg>());
|
||||
if nlh.nlmsg_len < tlen {
|
||||
info!(
|
||||
sl!(),
|
||||
"invalid nlmsg! nlmsg_len: {}, nlmsg_spae: {}", nlh.nlmsg_len, tlen
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
// Safe because we have just validated available buffer space above.
|
||||
let rtm = unsafe { &mut *(NLMSG_DATA!(nlh) as *mut rtmsg) };
|
||||
if rtm.rtm_table != RT_TABLE_MAIN as u8 {
|
||||
continue;
|
||||
}
|
||||
let rta: *mut rtattr = RTM_RTA!(rtm) as *mut rtattr;
|
||||
let rtalen = RTM_PAYLOAD!(nlh) as u32;
|
||||
let attrs = unsafe { parse_attrs(rta, rtalen, (RTA_MAX + 1) as usize)? };
|
||||
|
||||
let t = attrs[RTA_TABLE as usize];
|
||||
if !t.is_null() {
|
||||
// Safe because parse_attrs() returns valid pointers
|
||||
let table = unsafe { getattr32(t) };
|
||||
if table != RT_TABLE_MAIN {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// find source, destination, gateway, scope, and and device name
|
||||
let mut t = attrs[RTA_DST as usize];
|
||||
let mut rte: Route = Route::default();
|
||||
|
||||
// Safe because parse_attrs() returns valid pointers
|
||||
unsafe {
|
||||
// destination
|
||||
if !t.is_null() {
|
||||
let data: *const u8 = RTA_DATA!(t) as *const u8;
|
||||
let len = RTA_PAYLOAD!(t) as u32;
|
||||
rte.dest =
|
||||
format!("{}/{}", parser::format_address(data, len)?, rtm.rtm_dst_len);
|
||||
}
|
||||
|
||||
// gateway
|
||||
t = attrs[RTA_GATEWAY as usize];
|
||||
if !t.is_null() {
|
||||
let data: *const u8 = RTA_DATA!(t) as *const u8;
|
||||
let len = RTA_PAYLOAD!(t) as u32;
|
||||
rte.gateway = parser::format_address(data, len)?;
|
||||
|
||||
// for gateway, destination is 0.0.0.0
|
||||
rte.dest = "0.0.0.0".to_string();
|
||||
}
|
||||
|
||||
// source
|
||||
t = attrs[RTA_SRC as usize];
|
||||
if t.is_null() {
|
||||
t = attrs[RTA_PREFSRC as usize];
|
||||
}
|
||||
if !t.is_null() {
|
||||
let data: *const u8 = RTA_DATA!(t) as *const u8;
|
||||
let len = RTA_PAYLOAD!(t) as u32;
|
||||
rte.source = parser::format_address(data, len)?;
|
||||
|
||||
if rtm.rtm_src_len != 0 {
|
||||
rte.source = format!("{}/{}", rte.source.as_str(), rtm.rtm_src_len);
|
||||
}
|
||||
}
|
||||
|
||||
// scope
|
||||
rte.scope = rtm.rtm_scope as u32;
|
||||
|
||||
// oif
|
||||
t = attrs[RTA_OIF as usize];
|
||||
if !t.is_null() {
|
||||
let data = &*(RTA_DATA!(t) as *const i32);
|
||||
assert_eq!(RTA_PAYLOAD!(t), 4);
|
||||
|
||||
rte.device = self
|
||||
.get_name_by_index(*data)
|
||||
.unwrap_or_else(|_| "unknown".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
rs.push(rte);
|
||||
}
|
||||
|
||||
Ok(rs)
|
||||
}
|
||||
|
||||
pub fn add_arp_neighbors(&mut self, neighs: &[ARPNeighbor]) -> Result<()> {
|
||||
for neigh in neighs {
|
||||
self.add_one_arp_neighbor(&neigh)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn add_one_arp_neighbor(&mut self, neigh: &ARPNeighbor) -> Result<()> {
|
||||
let to_ip = match neigh.toIPAddress.as_ref() {
|
||||
None => return nix_errno(Errno::EINVAL),
|
||||
Some(v) => {
|
||||
if v.address.is_empty() {
|
||||
return nix_errno(Errno::EINVAL);
|
||||
}
|
||||
v.address.as_ref()
|
||||
}
|
||||
};
|
||||
|
||||
let dev = self.find_link_by_name(&neigh.device)?;
|
||||
|
||||
let mut v: Vec<u8> = vec![0; DEFAULT_NETLINK_BUF_SIZE];
|
||||
// Safe because we have allocated enough buffer space.
|
||||
let nlh = unsafe { &mut *(v.as_mut_ptr() as *mut nlmsghdr) };
|
||||
let ndm = unsafe { &mut *(NLMSG_DATA!(nlh) as *mut ndmsg) };
|
||||
|
||||
nlh.nlmsg_len = NLMSG_LENGTH!(std::mem::size_of::<ndmsg>()) as u32;
|
||||
nlh.nlmsg_type = RTM_NEWNEIGH;
|
||||
nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
|
||||
self.assign_seqnum(nlh);
|
||||
|
||||
ndm.ndm_family = libc::AF_UNSPEC as __u8;
|
||||
ndm.ndm_state = IFA_F_PERMANENT as __u16;
|
||||
// process lladdr
|
||||
if neigh.lladdr != "" {
|
||||
let llabuf = parser::parse_mac_addr(&neigh.lladdr)?;
|
||||
|
||||
// Safe because we have allocated enough buffer space.
|
||||
unsafe { nlh.addattr_var(NDA_LLADDR, llabuf.as_ref()) };
|
||||
}
|
||||
|
||||
let (family, ip_data) = parser::parse_ip_addr_with_family(&to_ip)?;
|
||||
ndm.ndm_family = family;
|
||||
// Safe because we have allocated enough buffer space.
|
||||
unsafe { nlh.addattr_var(NDA_DST, ip_data.as_ref()) };
|
||||
|
||||
// process state
|
||||
if neigh.state != 0 {
|
||||
ndm.ndm_state = neigh.state as __u16;
|
||||
}
|
||||
|
||||
// process flags
|
||||
ndm.ndm_flags = (*ndm).ndm_flags | neigh.flags as __u8;
|
||||
|
||||
// process dev
|
||||
ndm.ndm_ifindex = dev.ifi_index;
|
||||
|
||||
// send
|
||||
self.rtnl_talk(v.as_mut_slice(), false)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<IPAddress> for RtIPAddr {
|
||||
type Error = nix::Error;
|
||||
|
||||
fn try_from(ipi: IPAddress) -> std::result::Result<Self, Self::Error> {
|
||||
let ip_family = if ipi.family == IPFamily::v4 {
|
||||
libc::AF_INET
|
||||
} else {
|
||||
libc::AF_INET6
|
||||
} as __u8;
|
||||
|
||||
let ip_mask = parser::parse_u8(ipi.mask.as_str(), 10)?;
|
||||
let addr = parser::parse_ip_addr(ipi.address.as_ref())?;
|
||||
|
||||
Ok(Self {
|
||||
ip_family,
|
||||
ip_mask,
|
||||
addr,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Route> for RtRoute {
|
||||
type Error = nix::Error;
|
||||
|
||||
fn try_from(r: Route) -> std::result::Result<Self, Self::Error> {
|
||||
// only handle ipv4
|
||||
|
||||
let index = {
|
||||
let mut rh = RtnlHandle::new(NETLINK_ROUTE, 0)?;
|
||||
match rh.find_link_by_name(r.device.as_str()) {
|
||||
Ok(ifi) => ifi.ifi_index,
|
||||
Err(_) => -1,
|
||||
}
|
||||
};
|
||||
|
||||
let (dest, dst_len) = if r.dest.is_empty() {
|
||||
(Some(vec![0 as u8; 4]), 0)
|
||||
} else {
|
||||
let (dst, mask) = parser::parse_cidr(r.dest.as_str())?;
|
||||
(Some(dst), mask)
|
||||
};
|
||||
|
||||
let (source, src_len) = if r.source.is_empty() {
|
||||
(None, 0)
|
||||
} else {
|
||||
let (src, mask) = parser::parse_cidr(r.source.as_str())?;
|
||||
(Some(src), mask)
|
||||
};
|
||||
|
||||
let gateway = if r.gateway.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(parser::parse_ip_addr(r.gateway.as_str())?)
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
dest,
|
||||
source,
|
||||
src_len,
|
||||
dst_len,
|
||||
index,
|
||||
gateway,
|
||||
scope: r.scope as u8,
|
||||
protocol: RTPROTO_UNSPEC,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{RtnlHandle, NETLINK_ROUTE};
|
||||
use protocols::types::IPAddress;
|
||||
use std::process::Command;
|
||||
|
||||
fn clean_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
|
||||
// ip link delete dummy
|
||||
Command::new("ip")
|
||||
.args(&["link", "delete", dummy_name])
|
||||
.output()
|
||||
.expect("prepare: failed to delete dummy");
|
||||
|
||||
// ip neigh del dev dummy ip
|
||||
Command::new("ip")
|
||||
.args(&["neigh", "del", dummy_name, ip])
|
||||
.output()
|
||||
.expect("prepare: failed to delete neigh");
|
||||
}
|
||||
|
||||
fn prepare_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
|
||||
clean_env_for_test_add_one_arp_neighbor(dummy_name, ip);
|
||||
// modprobe dummy
|
||||
Command::new("modprobe")
|
||||
.arg("dummy")
|
||||
.output()
|
||||
.expect("failed to run modprobe dummy");
|
||||
|
||||
// ip link add dummy type dummy
|
||||
Command::new("ip")
|
||||
.args(&["link", "add", dummy_name, "type", "dummy"])
|
||||
.output()
|
||||
.expect("failed to add dummy interface");
|
||||
|
||||
// ip addr add 192.168.0.2/16 dev dummy
|
||||
Command::new("ip")
|
||||
.args(&["addr", "add", "192.168.0.2/16", "dev", dummy_name])
|
||||
.output()
|
||||
.expect("failed to add ip for dummy");
|
||||
|
||||
// ip link set dummy up;
|
||||
Command::new("ip")
|
||||
.args(&["link", "set", dummy_name, "up"])
|
||||
.output()
|
||||
.expect("failed to up dummy");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_one_arp_neighbor() {
|
||||
// skip_if_not_root
|
||||
if !nix::unistd::Uid::effective().is_root() {
|
||||
println!("INFO: skipping {} which needs root", module_path!());
|
||||
return;
|
||||
}
|
||||
|
||||
let mac = "6a:92:3a:59:70:aa";
|
||||
let to_ip = "169.254.1.1";
|
||||
let dummy_name = "dummy_for_arp";
|
||||
|
||||
prepare_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);
|
||||
|
||||
let mut ip_address = IPAddress::new();
|
||||
ip_address.set_address(to_ip.to_string());
|
||||
|
||||
let mut neigh = ARPNeighbor::new();
|
||||
neigh.set_toIPAddress(ip_address);
|
||||
neigh.set_device(dummy_name.to_string());
|
||||
neigh.set_lladdr(mac.to_string());
|
||||
neigh.set_state(0x80);
|
||||
|
||||
let mut rtnl = RtnlHandle::new(NETLINK_ROUTE, 0).unwrap();
|
||||
|
||||
rtnl.add_one_arp_neighbor(&neigh).unwrap();
|
||||
|
||||
// ip neigh show dev dummy ip
|
||||
let stdout = Command::new("ip")
|
||||
.args(&["neigh", "show", "dev", dummy_name, to_ip])
|
||||
.output()
|
||||
.expect("failed to show neigh")
|
||||
.stdout;
|
||||
|
||||
let stdout = std::str::from_utf8(&stdout).expect("failed to conveert stdout");
|
||||
|
||||
assert_eq!(stdout, format!("{} lladdr {} PERMANENT\n", to_ip, mac));
|
||||
|
||||
clean_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);
|
||||
}
|
||||
}
|
||||
2354
src/agent/netlink/src/lib.rs
Normal file
2354
src/agent/netlink/src/lib.rs
Normal file
File diff suppressed because it is too large
Load Diff
201
src/agent/netlink/src/parser.rs
Normal file
201
src/agent/netlink/src/parser.rs
Normal file
@@ -0,0 +1,201 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//! Parser for IPv4/IPv6/MAC addresses.
|
||||
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use super::{Errno, Result, __u8, nix_errno};
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn parse_u8(s: &str, radix: u32) -> Result<u8> {
|
||||
if radix >= 2 && radix <= 36 {
|
||||
u8::from_str_radix(s, radix).map_err(|_| nix::Error::Sys(Errno::EINVAL))
|
||||
} else {
|
||||
u8::from_str(s).map_err(|_| nix::Error::Sys(Errno::EINVAL))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_ipv4_addr(s: &str) -> Result<Vec<u8>> {
|
||||
match Ipv4Addr::from_str(s) {
|
||||
Ok(v) => Ok(Vec::from(v.octets().as_ref())),
|
||||
Err(_e) => nix_errno(Errno::EINVAL),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_ip_addr(s: &str) -> Result<Vec<u8>> {
|
||||
if let Ok(v6) = Ipv6Addr::from_str(s) {
|
||||
Ok(Vec::from(v6.octets().as_ref()))
|
||||
} else {
|
||||
parse_ipv4_addr(s)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_ip_addr_with_family(ip_address: &str) -> Result<(__u8, Vec<u8>)> {
|
||||
if let Ok(v6) = Ipv6Addr::from_str(ip_address) {
|
||||
Ok((libc::AF_INET6 as __u8, Vec::from(v6.octets().as_ref())))
|
||||
} else {
|
||||
parse_ipv4_addr(ip_address).map(|v| (libc::AF_INET as __u8, v))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_ipv4_cidr(s: &str) -> Result<(Vec<u8>, u8)> {
|
||||
let fields: Vec<&str> = s.split('/').collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
nix_errno(Errno::EINVAL)
|
||||
} else {
|
||||
Ok((parse_ipv4_addr(fields[0])?, parse_u8(fields[1], 10)?))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_cidr(s: &str) -> Result<(Vec<u8>, u8)> {
|
||||
let fields: Vec<&str> = s.split('/').collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
nix_errno(Errno::EINVAL)
|
||||
} else {
|
||||
Ok((parse_ip_addr(fields[0])?, parse_u8(fields[1], 10)?))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_mac_addr(hwaddr: &str) -> Result<Vec<u8>> {
|
||||
let fields: Vec<&str> = hwaddr.split(':').collect();
|
||||
|
||||
if fields.len() != 6 {
|
||||
nix_errno(Errno::EINVAL)
|
||||
} else {
|
||||
Ok(vec![
|
||||
parse_u8(fields[0], 16)?,
|
||||
parse_u8(fields[1], 16)?,
|
||||
parse_u8(fields[2], 16)?,
|
||||
parse_u8(fields[3], 16)?,
|
||||
parse_u8(fields[4], 16)?,
|
||||
parse_u8(fields[5], 16)?,
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
/// Format an IPv4/IPv6/MAC address.
|
||||
///
|
||||
/// # Safety
|
||||
/// Caller needs to ensure that addr and len are valid.
|
||||
pub unsafe fn format_address(addr: *const u8, len: u32) -> Result<String> {
|
||||
let mut a: String;
|
||||
if len == 4 {
|
||||
// ipv4
|
||||
let mut i = 1;
|
||||
let mut p = addr as i64;
|
||||
|
||||
a = format!("{}", *(p as *const u8));
|
||||
while i < len {
|
||||
p += 1;
|
||||
i += 1;
|
||||
a.push_str(format!(".{}", *(p as *const u8)).as_str());
|
||||
}
|
||||
|
||||
return Ok(a);
|
||||
}
|
||||
|
||||
if len == 6 {
|
||||
// hwaddr
|
||||
let mut i = 1;
|
||||
let mut p = addr as i64;
|
||||
|
||||
a = format!("{:0>2X}", *(p as *const u8));
|
||||
while i < len {
|
||||
p += 1;
|
||||
i += 1;
|
||||
a.push_str(format!(":{:0>2X}", *(p as *const u8)).as_str());
|
||||
}
|
||||
|
||||
return Ok(a);
|
||||
}
|
||||
|
||||
if len == 16 {
|
||||
// ipv6
|
||||
let p = addr as *const u8 as *const libc::c_void;
|
||||
let mut ar: [u8; 16] = [0; 16];
|
||||
let mut v: Vec<u8> = vec![0; 16];
|
||||
let dp: *mut libc::c_void = v.as_mut_ptr() as *mut libc::c_void;
|
||||
libc::memcpy(dp, p, 16);
|
||||
|
||||
ar.copy_from_slice(v.as_slice());
|
||||
|
||||
return Ok(Ipv6Addr::from(ar).to_string());
|
||||
}
|
||||
|
||||
nix_errno(Errno::EINVAL)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use libc;
|
||||
|
||||
#[test]
|
||||
fn test_ip_addr() {
|
||||
let ip = parse_ipv4_addr("1.2.3.4").unwrap();
|
||||
assert_eq!(ip, vec![0x1u8, 0x2u8, 0x3u8, 0x4u8]);
|
||||
parse_ipv4_addr("1.2.3.4.5").unwrap_err();
|
||||
parse_ipv4_addr("1.2.3-4").unwrap_err();
|
||||
parse_ipv4_addr("1.2.3.a").unwrap_err();
|
||||
parse_ipv4_addr("1.2.3.x").unwrap_err();
|
||||
parse_ipv4_addr("-1.2.3.4").unwrap_err();
|
||||
parse_ipv4_addr("+1.2.3.4").unwrap_err();
|
||||
|
||||
let (family, _) = parse_ip_addr_with_family("192.168.1.1").unwrap();
|
||||
assert_eq!(family, libc::AF_INET as __u8);
|
||||
|
||||
let (family, ip) =
|
||||
parse_ip_addr_with_family("2001:0db8:85a3:0000:0000:8a2e:0370:7334").unwrap();
|
||||
assert_eq!(family, libc::AF_INET6 as __u8);
|
||||
assert_eq!(ip.len(), 16);
|
||||
parse_ip_addr_with_family("2001:0db8:85a3:0000:0000:8a2e:0370:73345").unwrap_err();
|
||||
|
||||
let ip = parse_ip_addr("::1").unwrap();
|
||||
assert_eq!(ip[0], 0x0);
|
||||
assert_eq!(ip[15], 0x1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_cidr() {
|
||||
let (_, mask) = parse_ipv4_cidr("1.2.3.4/31").unwrap();
|
||||
assert_eq!(mask, 31);
|
||||
|
||||
parse_ipv4_cidr("1.2.3/4/31").unwrap_err();
|
||||
parse_ipv4_cidr("1.2.3.4/f").unwrap_err();
|
||||
parse_ipv4_cidr("1.2.3/8").unwrap_err();
|
||||
parse_ipv4_cidr("1.2.3.4.8").unwrap_err();
|
||||
|
||||
let (ip, mask) = parse_cidr("2001:db8:a::123/64").unwrap();
|
||||
assert_eq!(mask, 64);
|
||||
assert_eq!(ip[0], 0x20);
|
||||
assert_eq!(ip[15], 0x23);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_mac_addr() {
|
||||
let mac = parse_mac_addr("FF:FF:FF:FF:FF:FE").unwrap();
|
||||
assert_eq!(mac.len(), 6);
|
||||
assert_eq!(mac[0], 0xff);
|
||||
assert_eq!(mac[5], 0xfe);
|
||||
|
||||
parse_mac_addr("FF:FF:FF:FF:FF:FE:A0").unwrap_err();
|
||||
parse_mac_addr("FF:FF:FF:FF:FF:FX").unwrap_err();
|
||||
parse_mac_addr("FF:FF:FF:FF:FF").unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_address() {
|
||||
let buf = [1u8, 2u8, 3u8, 4u8];
|
||||
let addr = unsafe { format_address(&buf as *const u8, 4).unwrap() };
|
||||
assert_eq!(addr, "1.2.3.4");
|
||||
|
||||
let buf = [1u8, 2u8, 3u8, 4u8, 5u8, 6u8];
|
||||
let addr = unsafe { format_address(&buf as *const u8, 6).unwrap() };
|
||||
assert_eq!(addr, "01:02:03:04:05:06");
|
||||
}
|
||||
}
|
||||
@@ -8,7 +8,7 @@ extern crate serde;
|
||||
extern crate serde_derive;
|
||||
extern crate serde_json;
|
||||
|
||||
use libc::{self, mode_t};
|
||||
use libc::mode_t;
|
||||
use std::collections::HashMap;
|
||||
|
||||
mod serialize;
|
||||
@@ -27,10 +27,6 @@ where
|
||||
*d == T::default()
|
||||
}
|
||||
|
||||
fn default_seccomp_errno() -> u32 {
|
||||
libc::EPERM as u32
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct Spec {
|
||||
#[serde(
|
||||
@@ -58,7 +54,7 @@ pub struct Spec {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub windows: Option<Windows<String>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vm: Option<Vm>,
|
||||
pub vm: Option<VM>,
|
||||
}
|
||||
|
||||
impl Spec {
|
||||
@@ -71,7 +67,7 @@ impl Spec {
|
||||
}
|
||||
}
|
||||
|
||||
pub type LinuxRlimit = PosixRlimit;
|
||||
pub type LinuxRlimit = POSIXRlimit;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct Process {
|
||||
@@ -93,7 +89,7 @@ pub struct Process {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub capabilities: Option<LinuxCapabilities>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub rlimits: Vec<PosixRlimit>,
|
||||
pub rlimits: Vec<POSIXRlimit>,
|
||||
#[serde(default, rename = "noNewPrivileges")]
|
||||
pub no_new_privileges: bool,
|
||||
#[serde(
|
||||
@@ -199,9 +195,9 @@ pub struct Hooks {
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct Linux {
|
||||
#[serde(default, rename = "uidMappings", skip_serializing_if = "Vec::is_empty")]
|
||||
pub uid_mappings: Vec<LinuxIdMapping>,
|
||||
pub uid_mappings: Vec<LinuxIDMapping>,
|
||||
#[serde(default, rename = "gidMappings", skip_serializing_if = "Vec::is_empty")]
|
||||
pub gid_mappings: Vec<LinuxIdMapping>,
|
||||
pub gid_mappings: Vec<LinuxIDMapping>,
|
||||
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
|
||||
pub sysctl: HashMap<String, String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
@@ -261,7 +257,7 @@ pub const UTSNAMESPACE: &str = "uts";
|
||||
pub const CGROUPNAMESPACE: &str = "cgroup";
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxIdMapping {
|
||||
pub struct LinuxIDMapping {
|
||||
#[serde(default, rename = "containerID")]
|
||||
pub container_id: u32,
|
||||
#[serde(default, rename = "hostID")]
|
||||
@@ -271,7 +267,7 @@ pub struct LinuxIdMapping {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct PosixRlimit {
|
||||
pub struct POSIXRlimit {
|
||||
#[serde(default)]
|
||||
pub r#type: String,
|
||||
#[serde(default)]
|
||||
@@ -297,7 +293,7 @@ pub struct LinuxInterfacePriority {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxBlockIoDevice {
|
||||
pub struct LinuxBlockIODevice {
|
||||
#[serde(default)]
|
||||
pub major: i64,
|
||||
#[serde(default)]
|
||||
@@ -307,7 +303,7 @@ pub struct LinuxBlockIoDevice {
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxWeightDevice {
|
||||
#[serde(flatten)]
|
||||
pub blk: LinuxBlockIoDevice,
|
||||
pub blk: LinuxBlockIODevice,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub weight: Option<u16>,
|
||||
#[serde(
|
||||
@@ -321,13 +317,13 @@ pub struct LinuxWeightDevice {
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxThrottleDevice {
|
||||
#[serde(flatten)]
|
||||
pub blk: LinuxBlockIoDevice,
|
||||
pub blk: LinuxBlockIODevice,
|
||||
#[serde(default)]
|
||||
pub rate: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxBlockIo {
|
||||
pub struct LinuxBlockIO {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub weight: Option<u16>,
|
||||
#[serde(
|
||||
@@ -391,7 +387,7 @@ pub struct LinuxMemory {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxCpu {
|
||||
pub struct LinuxCPU {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub shares: Option<u64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
@@ -453,11 +449,11 @@ pub struct LinuxResources {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub memory: Option<LinuxMemory>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub cpu: Option<LinuxCpu>,
|
||||
pub cpu: Option<LinuxCPU>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub pids: Option<LinuxPids>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", rename = "blockIO")]
|
||||
pub block_io: Option<LinuxBlockIo>,
|
||||
pub block_io: Option<LinuxBlockIO>,
|
||||
#[serde(
|
||||
default,
|
||||
skip_serializing_if = "Vec::is_empty",
|
||||
@@ -517,7 +513,7 @@ pub struct Solaris {
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub anet: Vec<SolarisAnet>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none", rename = "cappedCPU")]
|
||||
pub capped_cpu: Option<SolarisCappedCpu>,
|
||||
pub capped_cpu: Option<SolarisCappedCPU>,
|
||||
#[serde(
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none",
|
||||
@@ -527,7 +523,7 @@ pub struct Solaris {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct SolarisCappedCpu {
|
||||
pub struct SolarisCappedCPU {
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
pub ncpus: String,
|
||||
}
|
||||
@@ -605,7 +601,7 @@ pub struct WindowsResources {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub memory: Option<WindowsMemoryResources>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub cpu: Option<WindowsCpuResources>,
|
||||
pub cpu: Option<WindowsCPUResources>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub storage: Option<WindowsStorageResources>,
|
||||
}
|
||||
@@ -617,7 +613,7 @@ pub struct WindowsMemoryResources {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct WindowsCpuResources {
|
||||
pub struct WindowsCPUResources {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub count: Option<u64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
@@ -675,14 +671,14 @@ pub struct WindowsHyperV {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct Vm {
|
||||
pub hypervisor: VmHypervisor,
|
||||
pub kernel: VmKernel,
|
||||
pub image: VmImage,
|
||||
pub struct VM {
|
||||
pub hypervisor: VMHypervisor,
|
||||
pub kernel: VMKernel,
|
||||
pub image: VMImage,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct VmHypervisor {
|
||||
pub struct VMHypervisor {
|
||||
#[serde(default)]
|
||||
pub path: String,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
@@ -690,7 +686,7 @@ pub struct VmHypervisor {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct VmKernel {
|
||||
pub struct VMKernel {
|
||||
#[serde(default)]
|
||||
pub path: String,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
@@ -700,7 +696,7 @@ pub struct VmKernel {
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct VmImage {
|
||||
pub struct VMImage {
|
||||
#[serde(default)]
|
||||
pub path: String,
|
||||
#[serde(default)]
|
||||
@@ -714,8 +710,6 @@ pub struct LinuxSeccomp {
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub architectures: Vec<Arch>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub flags: Vec<LinuxSeccompFlag>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub syscalls: Vec<LinuxSyscall>,
|
||||
}
|
||||
|
||||
@@ -739,20 +733,14 @@ pub const ARCHS390: &str = "SCMP_ARCH_S390";
|
||||
pub const ARCHS390X: &str = "SCMP_ARCH_S390X";
|
||||
pub const ARCHPARISC: &str = "SCMP_ARCH_PARISC";
|
||||
pub const ARCHPARISC64: &str = "SCMP_ARCH_PARISC64";
|
||||
pub const ARCHRISCV64: &str = "SCMP_ARCH_RISCV64";
|
||||
|
||||
pub type LinuxSeccompFlag = String;
|
||||
|
||||
pub type LinuxSeccompAction = String;
|
||||
|
||||
pub const ACTKILL: &str = "SCMP_ACT_KILL";
|
||||
pub const ACTKILLPROCESS: &str = "SCMP_ACT_KILL_PROCESS";
|
||||
pub const ACTKILLTHREAD: &str = "SCMP_ACT_KILL_THREAD";
|
||||
pub const ACTTRAP: &str = "SCMP_ACT_TRAP";
|
||||
pub const ACTERRNO: &str = "SCMP_ACT_ERRNO";
|
||||
pub const ACTTRACE: &str = "SCMP_ACT_TRACE";
|
||||
pub const ACTALLOW: &str = "SCMP_ACT_ALLOW";
|
||||
pub const ACTLOG: &str = "SCMP_ACT_LOG";
|
||||
|
||||
pub type LinuxSeccompOperator = String;
|
||||
|
||||
@@ -782,8 +770,6 @@ pub struct LinuxSyscall {
|
||||
pub names: Vec<String>,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
pub action: LinuxSeccompAction,
|
||||
#[serde(default = "default_seccomp_errno", rename = "errnoRet")]
|
||||
pub errno_ret: u32,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub args: Vec<LinuxSeccompArg>,
|
||||
}
|
||||
@@ -801,11 +787,11 @@ pub struct LinuxIntelRdt {
|
||||
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ContainerState {
|
||||
Creating,
|
||||
Created,
|
||||
Running,
|
||||
Stopped,
|
||||
Paused,
|
||||
CREATING,
|
||||
CREATED,
|
||||
RUNNING,
|
||||
STOPPED,
|
||||
PAUSED,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
||||
@@ -846,7 +832,7 @@ mod tests {
|
||||
let expected = State {
|
||||
version: "0.2.0".to_string(),
|
||||
id: "oci-container1".to_string(),
|
||||
status: ContainerState::Running,
|
||||
status: ContainerState::RUNNING,
|
||||
pid: 4422,
|
||||
bundle: "/containers/redis".to_string(),
|
||||
annotations: [("myKey".to_string(), "myValue".to_string())]
|
||||
@@ -1271,12 +1257,12 @@ mod tests {
|
||||
ambient: vec!["CAP_NET_BIND_SERVICE".to_string()],
|
||||
}),
|
||||
rlimits: vec![
|
||||
crate::PosixRlimit {
|
||||
crate::POSIXRlimit {
|
||||
r#type: "RLIMIT_CORE".to_string(),
|
||||
hard: 1024,
|
||||
soft: 1024,
|
||||
},
|
||||
crate::PosixRlimit {
|
||||
crate::POSIXRlimit {
|
||||
r#type: "RLIMIT_NOFILE".to_string(),
|
||||
hard: 1024,
|
||||
soft: 1024,
|
||||
@@ -1408,12 +1394,12 @@ mod tests {
|
||||
.cloned()
|
||||
.collect(),
|
||||
linux: Some(crate::Linux {
|
||||
uid_mappings: vec![crate::LinuxIdMapping {
|
||||
uid_mappings: vec![crate::LinuxIDMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 32000,
|
||||
}],
|
||||
gid_mappings: vec![crate::LinuxIdMapping {
|
||||
gid_mappings: vec![crate::LinuxIDMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 32000,
|
||||
@@ -1458,7 +1444,7 @@ mod tests {
|
||||
swappiness: Some(0),
|
||||
disable_oom_killer: Some(false),
|
||||
}),
|
||||
cpu: Some(crate::LinuxCpu {
|
||||
cpu: Some(crate::LinuxCPU {
|
||||
shares: Some(1024),
|
||||
quota: Some(1000000),
|
||||
period: Some(500000),
|
||||
@@ -1468,17 +1454,17 @@ mod tests {
|
||||
mems: "0-7".to_string(),
|
||||
}),
|
||||
pids: Some(crate::LinuxPids { limit: 32771 }),
|
||||
block_io: Some(crate::LinuxBlockIo {
|
||||
block_io: Some(crate::LinuxBlockIO {
|
||||
weight: Some(10),
|
||||
leaf_weight: Some(10),
|
||||
weight_device: vec![
|
||||
crate::LinuxWeightDevice {
|
||||
blk: crate::LinuxBlockIoDevice { major: 8, minor: 0 },
|
||||
blk: crate::LinuxBlockIODevice { major: 8, minor: 0 },
|
||||
weight: Some(500),
|
||||
leaf_weight: Some(300),
|
||||
},
|
||||
crate::LinuxWeightDevice {
|
||||
blk: crate::LinuxBlockIoDevice {
|
||||
blk: crate::LinuxBlockIODevice {
|
||||
major: 8,
|
||||
minor: 16,
|
||||
},
|
||||
@@ -1487,13 +1473,13 @@ mod tests {
|
||||
},
|
||||
],
|
||||
throttle_read_bps_device: vec![crate::LinuxThrottleDevice {
|
||||
blk: crate::LinuxBlockIoDevice { major: 8, minor: 0 },
|
||||
blk: crate::LinuxBlockIODevice { major: 8, minor: 0 },
|
||||
rate: 600,
|
||||
}],
|
||||
throttle_write_bps_device: vec![],
|
||||
throttle_read_iops_device: vec![],
|
||||
throttle_write_iops_device: vec![crate::LinuxThrottleDevice {
|
||||
blk: crate::LinuxBlockIoDevice {
|
||||
blk: crate::LinuxBlockIODevice {
|
||||
major: 8,
|
||||
minor: 16,
|
||||
},
|
||||
@@ -1579,11 +1565,9 @@ mod tests {
|
||||
seccomp: Some(crate::LinuxSeccomp {
|
||||
default_action: "SCMP_ACT_ALLOW".to_string(),
|
||||
architectures: vec!["SCMP_ARCH_X86".to_string(), "SCMP_ARCH_X32".to_string()],
|
||||
flags: vec![],
|
||||
syscalls: vec![crate::LinuxSyscall {
|
||||
names: vec!["getcwd".to_string(), "chmod".to_string()],
|
||||
action: "SCMP_ACT_ERRNO".to_string(),
|
||||
errno_ret: crate::default_seccomp_errno(),
|
||||
args: vec![],
|
||||
}],
|
||||
}),
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
//
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json;
|
||||
|
||||
use std::error;
|
||||
use std::fmt::{Display, Formatter, Result as FmtResult};
|
||||
|
||||
@@ -5,9 +5,9 @@ authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
ttrpc = { version = "0.5.0", features = ["async"] }
|
||||
async-trait = "0.1.42"
|
||||
ttrpc = "0.3.0"
|
||||
protobuf = "=2.14.0"
|
||||
futures = "0.1.27"
|
||||
|
||||
[build-dependencies]
|
||||
ttrpc-codegen = "0.2.0"
|
||||
ttrpc-codegen = "0.1.2"
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::fs;
|
||||
use ttrpc_codegen::{Codegen, Customize};
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
fn main() {
|
||||
let protos = vec![
|
||||
@@ -15,15 +15,16 @@ fn main() {
|
||||
"protos/oci.proto",
|
||||
];
|
||||
|
||||
Codegen::new()
|
||||
// Tell Cargo that if the .proto files changed, to rerun this build script.
|
||||
protos
|
||||
.iter()
|
||||
.for_each(|p| println!("cargo:rerun-if-changed={}", &p));
|
||||
|
||||
ttrpc_codegen::Codegen::new()
|
||||
.out_dir("src")
|
||||
.inputs(&protos)
|
||||
.include("protos")
|
||||
.rust_protobuf()
|
||||
.customize(Customize {
|
||||
async_server: true,
|
||||
..Default::default()
|
||||
})
|
||||
.run()
|
||||
.expect("Gen codes failed.");
|
||||
|
||||
@@ -39,6 +40,15 @@ fn main() {
|
||||
}
|
||||
|
||||
fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> {
|
||||
let new_contents = fs::read_to_string(file_name)?.replace(from, to);
|
||||
fs::write(&file_name, new_contents.as_bytes())
|
||||
let mut src = File::open(file_name)?;
|
||||
let mut contents = String::new();
|
||||
src.read_to_string(&mut contents).unwrap();
|
||||
drop(src);
|
||||
|
||||
let new_contents = contents.replace(from, to);
|
||||
|
||||
let mut dst = File::create(&file_name)?;
|
||||
dst.write_all(new_contents.as_bytes())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -47,7 +47,7 @@ show_usage() {
|
||||
}
|
||||
|
||||
generate_go_sources() {
|
||||
local cmd="protoc -I$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
local cmd="protoc -I$GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
--gogottrpc_out=plugins=ttrpc+fieldpath,\
|
||||
import_path=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
@@ -65,7 +65,7 @@ $GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/proto
|
||||
}
|
||||
|
||||
if [ "$(basename $(pwd))" != "agent" ]; then
|
||||
die "Please go to root directory of agent before execute this shell"
|
||||
die "Please go to directory of protocols before execute this shell"
|
||||
fi
|
||||
|
||||
# Protocol buffer files required to generate golang/rust bindings.
|
||||
@@ -80,6 +80,12 @@ fi;
|
||||
which protoc
|
||||
[ $? -eq 0 ] || die "Please install protoc from github.com/protocolbuffers/protobuf"
|
||||
|
||||
which protoc-gen-rust
|
||||
[ $? -eq 0 ] || die "Please install protobuf-codegen from github.com/pingcap/grpc-rs"
|
||||
|
||||
which ttrpc_rust_plugin
|
||||
[ $? -eq 0 ] || die "Please install ttrpc_rust_plugin from https://github.com/containerd/ttrpc-rust"
|
||||
|
||||
which protoc-gen-gogottrpc
|
||||
[ $? -eq 0 ] || die "Please install protoc-gen-gogottrpc from https://github.com/containerd/ttrpc"
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ service AgentService {
|
||||
rpc ExecProcess(ExecProcessRequest) returns (google.protobuf.Empty);
|
||||
rpc SignalProcess(SignalProcessRequest) returns (google.protobuf.Empty);
|
||||
rpc WaitProcess(WaitProcessRequest) returns (WaitProcessResponse); // wait & reap like waitpid(2)
|
||||
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
|
||||
rpc UpdateContainer(UpdateContainerRequest) returns (google.protobuf.Empty);
|
||||
rpc StatsContainer(StatsContainerRequest) returns (StatsContainerResponse);
|
||||
rpc PauseContainer(PauseContainerRequest) returns (google.protobuf.Empty);
|
||||
@@ -66,7 +67,6 @@ service AgentService {
|
||||
rpc SetGuestDateTime(SetGuestDateTimeRequest) returns (google.protobuf.Empty);
|
||||
rpc CopyFile(CopyFileRequest) returns (google.protobuf.Empty);
|
||||
rpc GetOOMEvent(GetOOMEventRequest) returns (OOMEvent);
|
||||
rpc AddSwap(AddSwapRequest) returns (google.protobuf.Empty);
|
||||
}
|
||||
|
||||
message CreateContainerRequest {
|
||||
@@ -126,6 +126,18 @@ message WaitProcessResponse {
|
||||
int32 status = 1;
|
||||
}
|
||||
|
||||
// ListProcessesRequest contains the options used to list running processes inside the container
|
||||
message ListProcessesRequest {
|
||||
string container_id = 1;
|
||||
string format = 2;
|
||||
repeated string args = 3;
|
||||
}
|
||||
|
||||
// ListProcessesResponse represents the list of running processes inside the container
|
||||
message ListProcessesResponse {
|
||||
bytes process_list = 1;
|
||||
}
|
||||
|
||||
message UpdateContainerRequest {
|
||||
string container_id = 1;
|
||||
LinuxResources resources = 2;
|
||||
@@ -504,10 +516,6 @@ message OOMEvent {
|
||||
string container_id = 1;
|
||||
}
|
||||
|
||||
message AddSwapRequest {
|
||||
repeated uint32 PCIPath = 1;
|
||||
}
|
||||
|
||||
message GetMetricsRequest {}
|
||||
|
||||
message Metrics {
|
||||
|
||||
@@ -12,6 +12,7 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
|
||||
package grpc;
|
||||
|
||||
import "gogo/protobuf/gogoproto/gogo.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
option (gogoproto.equal_all) = true;
|
||||
option (gogoproto.populate_all) = true;
|
||||
@@ -441,8 +442,7 @@ message LinuxInterfacePriority {
|
||||
message LinuxSeccomp {
|
||||
string DefaultAction = 1;
|
||||
repeated string Architectures = 2;
|
||||
repeated string Flags = 3;
|
||||
repeated LinuxSyscall Syscalls = 4 [(gogoproto.nullable) = false];
|
||||
repeated LinuxSyscall Syscalls = 3 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message LinuxSeccompArg {
|
||||
@@ -455,10 +455,7 @@ message LinuxSeccompArg {
|
||||
message LinuxSyscall {
|
||||
repeated string Names = 1;
|
||||
string Action = 2;
|
||||
oneof ErrnoRet {
|
||||
uint32 errnoret = 3;
|
||||
}
|
||||
repeated LinuxSeccompArg Args = 4 [(gogoproto.nullable) = false];
|
||||
repeated LinuxSeccompArg Args = 3 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message LinuxIntelRdt {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user