mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-03-04 20:02:24 +00:00
Compare commits
316 Commits
2.2.0-alph
...
2.2.3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3a1804cd73 | ||
|
|
b7493fd5d5 | ||
|
|
63ecbcf14b | ||
|
|
4f73e58d73 | ||
|
|
45f65a73c8 | ||
|
|
06d3049349 | ||
|
|
0366f6e817 | ||
|
|
7cb650abcf | ||
|
|
e97cd23bd6 | ||
|
|
6b6d81cced | ||
|
|
a479eca7de | ||
|
|
ee3bf4a411 | ||
|
|
4443a982e6 | ||
|
|
b794a39401 | ||
|
|
39d95f486b | ||
|
|
aa40324c52 | ||
|
|
9053137592 | ||
|
|
c4e8e86acf | ||
|
|
eea2c0195f | ||
|
|
1e798b96fd | ||
|
|
53c4492fb3 | ||
|
|
893623dfbc | ||
|
|
503ce9c154 | ||
|
|
9932e76f27 | ||
|
|
3a035c1f43 | ||
|
|
4102a18aa1 | ||
|
|
0034f40b67 | ||
|
|
1f6b0f651e | ||
|
|
112e0f6381 | ||
|
|
18820e31d9 | ||
|
|
8fafced9ff | ||
|
|
9668095abd | ||
|
|
be51808a13 | ||
|
|
3e145ea94c | ||
|
|
3951834565 | ||
|
|
79e0754a7b | ||
|
|
afe6005785 | ||
|
|
b8fc1af363 | ||
|
|
97167ccddd | ||
|
|
af0fbb9460 | ||
|
|
bc48a58806 | ||
|
|
d581cdab4e | ||
|
|
52fdfc4fed | ||
|
|
8d98e01414 | ||
|
|
688cc8e2bd | ||
|
|
ebc23df752 | ||
|
|
b0aca51eac | ||
|
|
28873c4d75 | ||
|
|
3525a2ed03 | ||
|
|
30d07d4407 | ||
|
|
623b108227 | ||
|
|
fc1822f094 | ||
|
|
ba6ad1c804 | ||
|
|
22d3df9141 | ||
|
|
e58fabfc20 | ||
|
|
feb06dad8a | ||
|
|
d9b41fc583 | ||
|
|
7852b9f8e1 | ||
|
|
83f219577d | ||
|
|
97421afe17 | ||
|
|
2b6327ac37 | ||
|
|
5256e0852c | ||
|
|
02b46268f4 | ||
|
|
1b3058dd24 | ||
|
|
98e2e93552 | ||
|
|
8f25c7da11 | ||
|
|
84da2f8ddc | ||
|
|
de0e3915b7 | ||
|
|
5c76f1c65a | ||
|
|
522a53010c | ||
|
|
852fc53351 | ||
|
|
e0a27b5e90 | ||
|
|
ba6fc32804 | ||
|
|
d5f5da4323 | ||
|
|
017cd3c53c | ||
|
|
484af1a559 | ||
|
|
a572a6ebf8 | ||
|
|
2ca867da7b | ||
|
|
f4da502c4f | ||
|
|
16164241df | ||
|
|
25c7e1181a | ||
|
|
4c5bf0576b | ||
|
|
b3e620dbcf | ||
|
|
98c2ca13c1 | ||
|
|
a97c9063db | ||
|
|
0481c5070c | ||
|
|
64504061c8 | ||
|
|
56920bc943 | ||
|
|
a1874ccd62 | ||
|
|
c2c650500b | ||
|
|
7ee43f9468 | ||
|
|
eedf139076 | ||
|
|
54a6890c3c | ||
|
|
1792a9fe11 | ||
|
|
9bf95279be | ||
|
|
807cc8a3a5 | ||
|
|
5987f3b5e1 | ||
|
|
caafd0f952 | ||
|
|
800126b272 | ||
|
|
b1372b353f | ||
|
|
dca35c1730 | ||
|
|
0bdfdad236 | ||
|
|
60155756f3 | ||
|
|
669888c339 | ||
|
|
cde008f441 | ||
|
|
7c866073f9 | ||
|
|
ca9e6538e6 | ||
|
|
938b01aedc | ||
|
|
abd708e814 | ||
|
|
61babd45ed | ||
|
|
59c51f6201 | ||
|
|
c1f260cc40 | ||
|
|
4cd6909f18 | ||
|
|
efa2d54e85 | ||
|
|
cfd539dc6c | ||
|
|
31c8454700 | ||
|
|
0675bab49c | ||
|
|
aaf37d72b2 | ||
|
|
348795e282 | ||
|
|
fbd4ab6cdc | ||
|
|
af93263985 | ||
|
|
02717b8c4b | ||
|
|
1c643dea24 | ||
|
|
7a5ffd4a0f | ||
|
|
2cb7b51355 | ||
|
|
76f4588f25 | ||
|
|
e155fb2b19 | ||
|
|
b980c62f43 | ||
|
|
99e9a6ad0a | ||
|
|
c23ffef4eb | ||
|
|
9586d48254 | ||
|
|
bff73de4d3 | ||
|
|
6a6dee7cc8 | ||
|
|
8915dc56d5 | ||
|
|
46942dd449 | ||
|
|
9edbc00dbc | ||
|
|
71f304ce17 | ||
|
|
f1a505dbfe | ||
|
|
961aaff004 | ||
|
|
2aa686a0f5 | ||
|
|
7effbdebcb | ||
|
|
1ab55e5afd | ||
|
|
e287708435 | ||
|
|
7445071330 | ||
|
|
f152284f1b | ||
|
|
99ab91df3d | ||
|
|
4fe23b190f | ||
|
|
e1e6827a2c | ||
|
|
a28cc7ffd6 | ||
|
|
b7d4888659 | ||
|
|
f981fc6456 | ||
|
|
e07a9fea79 | ||
|
|
f87cee9d11 | ||
|
|
6871aeaa60 | ||
|
|
15e0a3c8f0 | ||
|
|
d01aebebae | ||
|
|
b4b843178c | ||
|
|
77160e591a | ||
|
|
2f5a77d144 | ||
|
|
b9e03a1cf1 | ||
|
|
f47cad3d95 | ||
|
|
9fa1febfd9 | ||
|
|
233b53c048 | ||
|
|
2d8386ea52 | ||
|
|
8714a35063 | ||
|
|
68a6f011b5 | ||
|
|
97dcc5f78a | ||
|
|
0b03d97d0b | ||
|
|
c2f03e8993 | ||
|
|
c867d1e069 | ||
|
|
1d25d7d43f | ||
|
|
eac05ad6d6 | ||
|
|
64dd35ba4f | ||
|
|
03325f0612 | ||
|
|
48bb104660 | ||
|
|
b8133a188c | ||
|
|
d473967120 | ||
|
|
831c2feead | ||
|
|
2e28b71473 | ||
|
|
d5f85698e1 | ||
|
|
3165095669 | ||
|
|
b24ee4b11e | ||
|
|
27b9a68189 | ||
|
|
a72b08117f | ||
|
|
0a2e2c6038 | ||
|
|
eda55195fb | ||
|
|
d007bb8550 | ||
|
|
e6408fe670 | ||
|
|
fdc42ca7ff | ||
|
|
186a02593a | ||
|
|
49083bfa31 | ||
|
|
ee90affc18 | ||
|
|
767a41ce56 | ||
|
|
760ec4e58a | ||
|
|
b94252a5fa | ||
|
|
0c913040b6 | ||
|
|
d9359440e2 | ||
|
|
3fe6695b0a | ||
|
|
7df56301be | ||
|
|
57b696a5ec | ||
|
|
4f0726bc49 | ||
|
|
f186c5e284 | ||
|
|
7c610a6ff1 | ||
|
|
1d70523a59 | ||
|
|
80afba15ee | ||
|
|
5a0d3c4fac | ||
|
|
2d142bc92d | ||
|
|
7ed6423dd8 | ||
|
|
9514dda52e | ||
|
|
6ffe37b949 | ||
|
|
8508e52fdc | ||
|
|
5b514177b0 | ||
|
|
6b2ad64aea | ||
|
|
b53e84054e | ||
|
|
5957bc7d9c | ||
|
|
81e6bf6f2c | ||
|
|
d46ae3248e | ||
|
|
b789a935cf | ||
|
|
85987c6d79 | ||
|
|
b9d2eea39b | ||
|
|
4895747f35 | ||
|
|
fc90bb5314 | ||
|
|
bbb06c4975 | ||
|
|
2f9859ab2f | ||
|
|
3533a5b61d | ||
|
|
0c5ded4bd7 | ||
|
|
fe7e6a2dc1 | ||
|
|
e27733db94 | ||
|
|
fff76d5e90 | ||
|
|
2ec310934a | ||
|
|
5283f73b0c | ||
|
|
cc0bb9aebc | ||
|
|
8e9ffe6f3a | ||
|
|
838e169b9c | ||
|
|
8b15eafa6a | ||
|
|
77604de80b | ||
|
|
9806e88963 | ||
|
|
d2c7b5e782 | ||
|
|
4fbae549e4 | ||
|
|
1fbfd99d37 | ||
|
|
07f7ad9d63 | ||
|
|
9c0b8a7f5b | ||
|
|
695a7ad758 | ||
|
|
3727caf7d8 | ||
|
|
116c29c897 | ||
|
|
c0f801c0c4 | ||
|
|
b5293c5214 | ||
|
|
3160f0102d | ||
|
|
2859600a6f | ||
|
|
24cbb97f68 | ||
|
|
d75c01bd67 | ||
|
|
8befb1f39f | ||
|
|
7125f5d8cf | ||
|
|
0f8c0dbc52 | ||
|
|
070590fb53 | ||
|
|
b817340f94 | ||
|
|
d08923d55a | ||
|
|
d53d3b4f6a | ||
|
|
b4c45df885 | ||
|
|
aec530904b | ||
|
|
1e4f7faa77 | ||
|
|
bb9495c0b7 | ||
|
|
80ab91ac2f | ||
|
|
54bdd01811 | ||
|
|
dd58de368d | ||
|
|
47d95dc1c6 | ||
|
|
8ca7a7c547 | ||
|
|
31de8eb75b | ||
|
|
2b80091e14 | ||
|
|
0dc59df68f | ||
|
|
54bcd3c946 | ||
|
|
c10425233b | ||
|
|
fd2607cc43 | ||
|
|
5512da1739 | ||
|
|
add480ed59 | ||
|
|
75c5edd66a | ||
|
|
67906b4876 | ||
|
|
f7c6f17060 | ||
|
|
1838c3a33b | ||
|
|
259f0e26d1 | ||
|
|
a8649acfd0 | ||
|
|
6b00806bb8 | ||
|
|
3882619471 | ||
|
|
c5fdc0dbb6 | ||
|
|
f2ef25c67f | ||
|
|
1b71d31121 | ||
|
|
6310f0543a | ||
|
|
cb6b7667cd | ||
|
|
a733f537e5 | ||
|
|
2c835b60ed | ||
|
|
243d4b8689 | ||
|
|
e1b91986d7 | ||
|
|
4f066db8da | ||
|
|
11d84cca46 | ||
|
|
4f23b8cd47 | ||
|
|
5737b36a35 | ||
|
|
1da8fa1655 | ||
|
|
1bb0705765 | ||
|
|
b94ebc30b4 | ||
|
|
462e445d2f | ||
|
|
f8d71eb96b | ||
|
|
35cbc93dee | ||
|
|
ff87da721b | ||
|
|
8e0daf6780 | ||
|
|
b12b21f337 | ||
|
|
558f1be62d | ||
|
|
5371b9214f | ||
|
|
27b299b2a7 | ||
|
|
0508469994 | ||
|
|
acf6932863 | ||
|
|
dcd2986724 | ||
|
|
afd9785051 | ||
|
|
34828df9a1 | ||
|
|
73d3798cb1 | ||
|
|
7960689ef7 | ||
|
|
e887b39e95 |
18
.github/workflows/gather-artifacts.sh
vendored
18
.github/workflows/gather-artifacts.sh
vendored
@@ -1,18 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
pushd kata-artifacts >>/dev/null
|
||||
for c in ./*.tar.gz
|
||||
do
|
||||
echo "untarring tarball $c"
|
||||
tar -xvf $c
|
||||
done
|
||||
|
||||
tar cvfJ ../kata-static.tar.xz ./opt
|
||||
popd >>/dev/null
|
||||
36
.github/workflows/generate-artifact-tarball.sh
vendored
36
.github/workflows/generate-artifact-tarball.sh
vendored
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
|
||||
main() {
|
||||
artifact_stage=${1:-}
|
||||
artifact=$(echo ${artifact_stage} | sed -n -e 's/^install_//p' | sed -r 's/_/-/g')
|
||||
if [ -z "${artifact}" ]; then
|
||||
"Scripts needs artifact name to build"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
export GOPATH=$HOME/go
|
||||
|
||||
go get github.com/kata-containers/packaging || true
|
||||
pushd $GOPATH/src/github.com/kata-containers/packaging/release >>/dev/null
|
||||
git checkout $tag
|
||||
pushd ../obs-packaging
|
||||
./gen_versions_txt.sh $tag
|
||||
popd
|
||||
|
||||
source ./kata-deploy-binaries.sh
|
||||
${artifact_stage} $tag
|
||||
popd
|
||||
|
||||
mv $HOME/go/src/github.com/kata-containers/packaging/release/kata-static-${artifact}.tar.gz .
|
||||
}
|
||||
|
||||
main $@
|
||||
@@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
# Copyright (c) 2020 Ant Group
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
|
||||
main() {
|
||||
artifact_stage=${1:-}
|
||||
artifact=$(echo ${artifact_stage} | sed -n -e 's/^install_//p' | sed -r 's/_/-/g')
|
||||
if [ -z "${artifact}" ]; then
|
||||
"Scripts needs artifact name to build"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
pushd $GITHUB_WORKSPACE/tools/packaging
|
||||
git checkout $tag
|
||||
./scripts/gen_versions_txt.sh $tag
|
||||
popd
|
||||
|
||||
pushd $GITHUB_WORKSPACE/tools/packaging/release
|
||||
source ./kata-deploy-binaries.sh
|
||||
${artifact_stage} $tag
|
||||
popd
|
||||
|
||||
mv $GITHUB_WORKSPACE/tools/packaging/release/kata-static-${artifact}.tar.gz .
|
||||
}
|
||||
|
||||
main $@
|
||||
58
.github/workflows/kata-deploy-push.yaml
vendored
Normal file
58
.github/workflows/kata-deploy-push.yaml
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
name: kata-deploy-build
|
||||
|
||||
on: push
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- kernel
|
||||
- shim-v2
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install docker
|
||||
run: |
|
||||
curl -fsSL https://test.docker.com -o test-docker.sh
|
||||
sh test-docker.sh
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r --preserve=all "${build_dir}" "kata-build"
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
path: kata-static.tar.xz
|
||||
4
.github/workflows/kata-deploy-test.yaml
vendored
4
.github/workflows/kata-deploy-test.yaml
vendored
@@ -46,9 +46,11 @@ jobs:
|
||||
VERSION="2.0.0"
|
||||
ARTIFACT_URL="https://github.com/kata-containers/kata-containers/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
|
||||
wget "${ARTIFACT_URL}" -O tools/packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} -t quay.io/kata-containers/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$PR_SHA
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA
|
||||
echo "##[set-output name=pr-sha;]${PR_SHA}"
|
||||
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
|
||||
4
.github/workflows/main.yaml
vendored
4
.github/workflows/main.yaml
vendored
@@ -247,9 +247,11 @@ jobs:
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv release-candidate/kata-static.tar.xz ./packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./packaging/kata-deploy/action
|
||||
|
||||
272
.github/workflows/release.yaml
vendored
272
.github/workflows/release.yaml
vendored
@@ -5,213 +5,45 @@ on:
|
||||
- '2.*'
|
||||
|
||||
jobs:
|
||||
get-artifact-list:
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- qemu
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
- shim-v2
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get the list
|
||||
- name: Install docker
|
||||
run: |
|
||||
pushd $GITHUB_WORKSPACE
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
git checkout $tag
|
||||
popd
|
||||
$GITHUB_WORKSPACE/tools/packaging/artifact-list.sh > artifact-list.txt
|
||||
- name: save-artifact-list
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
path: artifact-list.txt
|
||||
curl -fsSL https://test.docker.com -o test-docker.sh
|
||||
sh test-docker.sh
|
||||
|
||||
build-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-kernel
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kernel.tar.gz
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
if-no-files-found: error
|
||||
|
||||
build-experimental-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_experimental_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-experimental-kernel
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-experimental-kernel.tar.gz
|
||||
|
||||
build-qemu:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_qemu"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-qemu
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-qemu.tar.gz
|
||||
|
||||
build-image:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_image"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-image
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-image.tar.gz
|
||||
|
||||
build-firecracker:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_firecracker"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-firecracker
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-firecracker.tar.gz
|
||||
|
||||
|
||||
build-clh:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_clh"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-clh
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-clh.tar.gz
|
||||
|
||||
build-kata-components:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kata_components"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-kata-components
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kata-components.tar.gz
|
||||
|
||||
gather-artifacts:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: [build-experimental-kernel, build-kernel, build-qemu, build-image, build-firecracker, build-kata-components, build-clh]
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
@@ -219,24 +51,24 @@ jobs:
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: colate-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: release-candidate
|
||||
name: kata-static-tarball
|
||||
path: kata-static.tar.xz
|
||||
|
||||
kata-deploy:
|
||||
needs: gather-artifacts
|
||||
needs: create-kata-tarball
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: release-candidate
|
||||
name: kata-static-tarball
|
||||
- name: build-and-push-kata-deploy-ci
|
||||
id: build-and-push-kata-deploy-ci
|
||||
run: |
|
||||
@@ -246,9 +78,11 @@ jobs:
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
|
||||
mkdir -p packaging/kata-deploy
|
||||
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
@@ -266,8 +100,14 @@ jobs:
|
||||
run: |
|
||||
# tag the container image we created and push to DockerHub
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
|
||||
docker push katadocker/kata-deploy:${tag}
|
||||
tags=($tag)
|
||||
tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
|
||||
for tag in ${tags[@]}; do \
|
||||
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag} && \
|
||||
docker tag quay.io/kata-containers/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} quay.io/kata-containers/kata-deploy:${tag} && \
|
||||
docker push katadocker/kata-deploy:${tag} && \
|
||||
docker push quay.io/kata-containers/kata-deploy:${tag}; \
|
||||
done
|
||||
|
||||
upload-static-tarball:
|
||||
needs: kata-deploy
|
||||
@@ -277,7 +117,7 @@ jobs:
|
||||
- name: download-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: release-candidate
|
||||
name: kata-static-tarball
|
||||
- name: install hub
|
||||
run: |
|
||||
HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
|
||||
@@ -291,3 +131,21 @@ jobs:
|
||||
pushd $GITHUB_WORKSPACE
|
||||
echo "uploading asset '${tarball}' for tag: ${tag}"
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
|
||||
popd
|
||||
|
||||
upload-cargo-vendored-tarball:
|
||||
needs: upload-static-tarball
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: generate-and-upload-tarball
|
||||
run: |
|
||||
pushd $GITHUB_WORKSPACE/src/agent
|
||||
cargo vendor >> .cargo/config
|
||||
popd
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
tarball="kata-containers-$tag-vendor.tar.gz"
|
||||
pushd $GITHUB_WORKSPACE
|
||||
tar -cvzf "${tarball}" src/agent/.cargo/config src/agent/vendor
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
|
||||
popd
|
||||
|
||||
20
.github/workflows/static-checks.yaml
vendored
20
.github/workflows/static-checks.yaml
vendored
@@ -2,14 +2,15 @@ on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
name: Static checks
|
||||
jobs:
|
||||
test:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.15.x, 1.16.x]
|
||||
@@ -24,36 +25,43 @@ jobs:
|
||||
target_branch: ${{ github.base_ref }}
|
||||
steps:
|
||||
- name: Install Go
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
|
||||
echo "TRAVIS: ${TRAVIS}"
|
||||
- name: Set env
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- name: Checkout code
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Setup travis references
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
|
||||
target_branch=${TRAVIS_BRANCH}
|
||||
- name: Setup
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Building rust
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
@@ -61,18 +69,18 @@ jobs:
|
||||
rustup component add rustfmt clippy
|
||||
# Check whether the vendored code is up-to-date & working as the first thing
|
||||
- name: Check vendored code
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make vendor
|
||||
# Must build before static checks as we depend on some generated code in runtime and agent
|
||||
- name: Build
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make
|
||||
- name: Static Checks
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/static-checks.sh
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make static-checks
|
||||
- name: Run Compiler Checks
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make check
|
||||
- name: Run Unit Tests
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make test
|
||||
|
||||
94
Glossary.md
Normal file
94
Glossary.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Glossary
|
||||
|
||||
[A](#a), [B](#b), [C](#c), [D](#d), [E](#e), [F](#f), [G](#g), [H](#h), [I](#i), [J](#j), [K](#k), [L](#l), [M](#m), [N](#n), [O](#o), [P](#p), [Q](#q), [R](#r), [S](#s), [T](#t), [U](#u), [V](#v), [W](#w), [X](#x), [Y](#y), [Z](#z)
|
||||
|
||||
## A
|
||||
|
||||
### Auto Scaling
|
||||
a method used in cloud computing, whereby the amount of computational resources in a server farm, typically measured in terms of the number of active servers, which vary automatically based on the load on the farm.
|
||||
|
||||
## B
|
||||
|
||||
## C
|
||||
|
||||
### Container Security Solutions
|
||||
The process of implementing security tools and policies that will give you the assurance that everything in your container is running as intended, and only as intended.
|
||||
|
||||
### Container Software
|
||||
A standard unit of software that packages up code and all its dependencies so the application runs quickly and reliably from one computing environment to another.
|
||||
|
||||
### Container Runtime Interface
|
||||
A plugin interface which enables Kubelet to use a wide variety of container runtimes, without the need to recompile.
|
||||
|
||||
### Container Virtualization
|
||||
A container is a virtual runtime environment that runs on top of a single operating system (OS) kernel and emulates an operating system rather than the underlying hardware.
|
||||
|
||||
## D
|
||||
|
||||
## E
|
||||
|
||||
## F
|
||||
|
||||
## G
|
||||
|
||||
## H
|
||||
|
||||
## I
|
||||
|
||||
### Infrastructure Architecture
|
||||
A structured and modern approach for supporting an organization and facilitating innovation within an enterprise.
|
||||
|
||||
## J
|
||||
|
||||
## K
|
||||
|
||||
### Kata Containers
|
||||
Kata containers is an open source project delivering increased container security and Workload isolation through an implementation of lightweight virtual machines.
|
||||
|
||||
## L
|
||||
|
||||
## M
|
||||
|
||||
## N
|
||||
|
||||
## O
|
||||
|
||||
## P
|
||||
|
||||
### Pod Containers
|
||||
A Group of one or more containers , with shared storage/network, and a specification for how to run the containers.
|
||||
|
||||
### Private Cloud
|
||||
A computing model that offers a proprietary environment dedicated to a single business entity.
|
||||
|
||||
### Public Cloud
|
||||
Computing services offered by third-party providers over the public Internet, making them available to anyone who wants to use or purchase them.
|
||||
|
||||
## Q
|
||||
|
||||
## R
|
||||
|
||||
## S
|
||||
|
||||
### Serverless Containers
|
||||
An architecture in which code is executed on-demand. Serverless workloads are typically in the cloud, but on-premises serverless platforms exist, too.
|
||||
|
||||
## T
|
||||
|
||||
## U
|
||||
|
||||
## V
|
||||
|
||||
### Virtual Machine Monitor
|
||||
Computer software, firmware or hardware that creates and runs virtual machines.
|
||||
|
||||
### Virtual Machine Software
|
||||
A software program or operating system that not only exhibits the behavior of a separate computer, but is also capable of performing tasks such as running applications and programs like a separate computer.
|
||||
|
||||
## W
|
||||
|
||||
## X
|
||||
|
||||
## Y
|
||||
|
||||
## Z
|
||||
12
Makefile
12
Makefile
@@ -29,4 +29,14 @@ $(eval $(call create_all_rules,$(COMPONENTS),$(TOOLS),$(STANDARD_TARGETS)))
|
||||
generate-protocols:
|
||||
make -C src/agent generate-protocols
|
||||
|
||||
.PHONY: all default
|
||||
# Some static checks rely on generated source files of components.
|
||||
static-checks: build
|
||||
bash ci/static-checks.sh
|
||||
|
||||
binary-tarball:
|
||||
make -f ./tools/packaging/kata-deploy/local-build/Makefile
|
||||
|
||||
install-binary-tarball:
|
||||
make -f ./tools/packaging/kata-deploy/local-build/Makefile install
|
||||
|
||||
.PHONY: all default static-checks binary-tarball install-binary-tarball
|
||||
|
||||
89
README.md
89
README.md
@@ -2,22 +2,6 @@
|
||||
|
||||
# Kata Containers
|
||||
|
||||
* [Kata Containers](#kata-containers)
|
||||
* [Introduction](#introduction)
|
||||
* [Getting started](#getting-started)
|
||||
* [Documentation](#documentation)
|
||||
* [Community](#community)
|
||||
* [Getting help](#getting-help)
|
||||
* [Raising issues](#raising-issues)
|
||||
* [Kata Containers 1.x versions](#kata-containers-1x-versions)
|
||||
* [Developers](#developers)
|
||||
* [Components](#components)
|
||||
* [Kata Containers 1.x components](#kata-containers-1x-components)
|
||||
* [Common repositories](#common-repositories)
|
||||
* [Packaging and releases](#packaging-and-releases)
|
||||
|
||||
---
|
||||
|
||||
Welcome to Kata Containers!
|
||||
|
||||
This repository is the home of the Kata Containers code for the 2.0 and newer
|
||||
@@ -26,11 +10,6 @@ releases.
|
||||
If you want to learn about Kata Containers, visit the main
|
||||
[Kata Containers website](https://katacontainers.io).
|
||||
|
||||
For further details on the older (first generation) Kata Containers 1.x
|
||||
versions, see the
|
||||
[Kata Containers 1.x components](#kata-containers-1x-components)
|
||||
section.
|
||||
|
||||
## Introduction
|
||||
|
||||
Kata Containers is an open source project and community working to build a
|
||||
@@ -67,69 +46,34 @@ Please raise an issue
|
||||
> **Note:**
|
||||
> If you are reporting a security issue, please follow the [vulnerability reporting process](https://github.com/kata-containers/community#vulnerability-handling)
|
||||
|
||||
#### Kata Containers 1.x versions
|
||||
|
||||
For older Kata Containers 1.x releases, please raise an issue in the
|
||||
[Kata Containers 1.x component repository](#kata-containers-1x-components)
|
||||
that seems most appropriate.
|
||||
|
||||
If in doubt, raise an issue
|
||||
[in the Kata Containers 1.x runtime repository](https://github.com/kata-containers/runtime/issues).
|
||||
|
||||
## Developers
|
||||
|
||||
### Components
|
||||
|
||||
### Main components
|
||||
|
||||
The table below lists the core parts of the project:
|
||||
|
||||
| Component | Type | Description |
|
||||
|-|-|-|
|
||||
| [agent-ctl](tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
|
||||
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
|
||||
| [agent](src/agent) | core | Management process running inside the virtual machine / POD that sets up the container environment. |
|
||||
| [documentation](docs) | documentation | Documentation common to all components (such as design and install documentation). |
|
||||
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images for the hypervisor. |
|
||||
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
|
||||
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
|
||||
| [trace-forwarder](src/trace-forwarder) | utility | Agent tracing helper. |
|
||||
| [tests](https://github.com/kata-containers/tests) | tests | Excludes unit tests which live with the main code. |
|
||||
|
||||
#### Kata Containers 1.x components
|
||||
### Additional components
|
||||
|
||||
For the first generation of Kata Containers (1.x versions), each component was
|
||||
kept in a separate repository.
|
||||
|
||||
For information on the Kata Containers 1.x releases, see the
|
||||
[Kata Containers 1.x releases page](https://github.com/kata-containers/runtime/releases).
|
||||
|
||||
For further information on particular Kata Containers 1.x components, see the
|
||||
individual component repositories:
|
||||
The table below lists the remaining parts of the project:
|
||||
|
||||
| Component | Type | Description |
|
||||
|-|-|-|
|
||||
| [agent](https://github.com/kata-containers/agent) | core | See [components](#components). |
|
||||
| [documentation](https://github.com/kata-containers/documentation) | documentation | |
|
||||
| [KSM throttler](https://github.com/kata-containers/ksm-throttler) | optional core | Daemon that monitors containers and deduplicates memory to maximize container density on the host. |
|
||||
| [osbuilder](https://github.com/kata-containers/osbuilder) | infrastructure | See [components](#components). |
|
||||
| [packaging](https://github.com/kata-containers/packaging) | infrastructure | See [components](#components). |
|
||||
| [proxy](https://github.com/kata-containers/proxy) | core | Multiplexes communications between the shims, agent and runtime. |
|
||||
| [runtime](https://github.com/kata-containers/runtime) | core | See [components](#components). |
|
||||
| [shim](https://github.com/kata-containers/shim) | core | Handles standard I/O and signals on behalf of the container process. |
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - There are more components for the original Kata Containers 1.x implementation.
|
||||
> - The current implementation simplifies the design significantly:
|
||||
> compare the [current](docs/design/architecture.md) and
|
||||
> [previous generation](https://github.com/kata-containers/documentation/blob/master/design/architecture.md)
|
||||
> designs.
|
||||
|
||||
### Common repositories
|
||||
|
||||
The following repositories are used by both the current and first generation Kata Containers implementations:
|
||||
|
||||
| Component | Description | Current | First generation | Notes |
|
||||
|-|-|-|-|-|
|
||||
| CI | Continuous Integration configuration files and scripts. | [Kata 2.x](https://github.com/kata-containers/ci/tree/main) | [Kata 1.x](https://github.com/kata-containers/ci/tree/master) | |
|
||||
| kernel | The Linux kernel used by the hypervisor to boot the guest image. | [Kata 2.x][kernel] | [Kata 1.x][kernel] | Patches are stored in the packaging component. |
|
||||
| tests | Test code. | [Kata 2.x](https://github.com/kata-containers/tests/tree/main) | [Kata 1.x](https://github.com/kata-containers/tests/tree/master) | Excludes unit tests which live with the main code. |
|
||||
| www.katacontainers.io | Contains the source for the [main web site](https://www.katacontainers.io). | [Kata 2.x][github-katacontainers.io] | [Kata 1.x][github-katacontainers.io] | | |
|
||||
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
|
||||
| [kernel](https://www.kernel.org) | kernel | Linux kernel used by the hypervisor to boot the guest image. Patches are stored [here](tools/packaging/kernel). |
|
||||
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images and kernel for the hypervisor. |
|
||||
| [`agent-ctl`](tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
|
||||
| [`trace-forwarder`](src/trace-forwarder) | utility | Agent tracing helper. |
|
||||
| [`ci`](https://github.com/kata-containers/ci) | CI | Continuous Integration configuration files and scripts. |
|
||||
| [`katacontainers.io`](https://github.com/kata-containers/www.katacontainers.io) | Source for the [`katacontainers.io`](https://www.katacontainers.io) site. |
|
||||
|
||||
### Packaging and releases
|
||||
|
||||
@@ -138,6 +82,9 @@ Kata Containers is now
|
||||
However, packaging scripts and metadata are still used to generate snap and GitHub releases. See
|
||||
the [components](#components) section for further details.
|
||||
|
||||
## Glossary of Terms
|
||||
|
||||
See the [glossary of terms](Glossary.md) related to Kata Containers.
|
||||
---
|
||||
|
||||
[kernel]: https://www.kernel.org
|
||||
|
||||
@@ -15,12 +15,18 @@ die() {
|
||||
# Install the yq yaml query package from the mikefarah github repo
|
||||
# Install via binary download, as we may not have golang installed at this point
|
||||
function install_yq() {
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
local yq_path="${GOPATH}/bin/yq"
|
||||
local yq_pkg="github.com/mikefarah/yq"
|
||||
local yq_version=3.4.1
|
||||
INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
|
||||
|
||||
[ -x "${GOPATH}/bin/yq" ] && [ "`${GOPATH}/bin/yq --version`"X == "yq version ${yq_version}"X ] && return
|
||||
if [ "${INSTALL_IN_GOPATH}" == "true" ];then
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
local yq_path="${GOPATH}/bin/yq"
|
||||
else
|
||||
yq_path="/usr/local/bin/yq"
|
||||
fi
|
||||
[ -x "${yq_path}" ] && [ "`${yq_path} --version`"X == "yq version ${yq_version}"X ] && return
|
||||
|
||||
read -r -a sysInfo <<< "$(uname -sm)"
|
||||
|
||||
@@ -51,7 +57,6 @@ function install_yq() {
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
|
||||
# Check curl
|
||||
if ! command -v "curl" >/dev/null; then
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -o nounset
|
||||
|
||||
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
|
||||
export tests_repo_dir="$GOPATH/src/$tests_repo"
|
||||
export branch="${target_branch:-main}"
|
||||
@@ -15,7 +17,7 @@ export branch="${target_branch:-main}"
|
||||
clone_tests_repo()
|
||||
{
|
||||
if [ -d "$tests_repo_dir" ]; then
|
||||
[ -n "$CI" ] && return
|
||||
[ -n "${CI:-}" ] && return
|
||||
pushd "${tests_repo_dir}"
|
||||
git checkout "${branch}"
|
||||
git pull
|
||||
|
||||
@@ -8,9 +8,14 @@
|
||||
set -e
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
export CI_JOB="${CI_JOB:-}"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd ${tests_repo_dir}
|
||||
.ci/run.sh
|
||||
# temporary fix, see https://github.com/kata-containers/tests/issues/3878
|
||||
if [ "$(uname -m)" != "s390x" ] && [ "$CI_JOB" == "CRI_CONTAINERD_K8S_MINIMAL" ]; then
|
||||
tracing/test-agent-shutdown.sh
|
||||
fi
|
||||
popd
|
||||
|
||||
@@ -1,56 +1,3 @@
|
||||
- [Warning](#warning)
|
||||
- [Assumptions](#assumptions)
|
||||
- [Initial setup](#initial-setup)
|
||||
- [Requirements to build individual components](#requirements-to-build-individual-components)
|
||||
- [Build and install the Kata Containers runtime](#build-and-install-the-kata-containers-runtime)
|
||||
- [Check hardware requirements](#check-hardware-requirements)
|
||||
- [Configure to use initrd or rootfs image](#configure-to-use-initrd-or-rootfs-image)
|
||||
- [Enable full debug](#enable-full-debug)
|
||||
- [debug logs and shimv2](#debug-logs-and-shimv2)
|
||||
- [Enabling full `containerd` debug](#enabling-full-containerd-debug)
|
||||
- [Enabling just `containerd shim` debug](#enabling-just-containerd-shim-debug)
|
||||
- [Enabling `CRI-O` and `shimv2` debug](#enabling-cri-o-and-shimv2-debug)
|
||||
- [journald rate limiting](#journald-rate-limiting)
|
||||
- [`systemd-journald` suppressing messages](#systemd-journald-suppressing-messages)
|
||||
- [Disabling `systemd-journald` rate limiting](#disabling-systemd-journald-rate-limiting)
|
||||
- [Create and install rootfs and initrd image](#create-and-install-rootfs-and-initrd-image)
|
||||
- [Build a custom Kata agent - OPTIONAL](#build-a-custom-kata-agent---optional)
|
||||
- [Get the osbuilder](#get-the-osbuilder)
|
||||
- [Create a rootfs image](#create-a-rootfs-image)
|
||||
- [Create a local rootfs](#create-a-local-rootfs)
|
||||
- [Add a custom agent to the image - OPTIONAL](#add-a-custom-agent-to-the-image---optional)
|
||||
- [Build a rootfs image](#build-a-rootfs-image)
|
||||
- [Install the rootfs image](#install-the-rootfs-image)
|
||||
- [Create an initrd image - OPTIONAL](#create-an-initrd-image---optional)
|
||||
- [Create a local rootfs for initrd image](#create-a-local-rootfs-for-initrd-image)
|
||||
- [Build an initrd image](#build-an-initrd-image)
|
||||
- [Install the initrd image](#install-the-initrd-image)
|
||||
- [Install guest kernel images](#install-guest-kernel-images)
|
||||
- [Install a hypervisor](#install-a-hypervisor)
|
||||
- [Build a custom QEMU](#build-a-custom-qemu)
|
||||
- [Build a custom QEMU for aarch64/arm64 - REQUIRED](#build-a-custom-qemu-for-aarch64arm64---required)
|
||||
- [Run Kata Containers with Containerd](#run-kata-containers-with-containerd)
|
||||
- [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
|
||||
- [Troubleshoot Kata Containers](#troubleshoot-kata-containers)
|
||||
- [Appendices](#appendices)
|
||||
- [Checking Docker default runtime](#checking-docker-default-runtime)
|
||||
- [Set up a debug console](#set-up-a-debug-console)
|
||||
- [Simple debug console setup](#simple-debug-console-setup)
|
||||
- [Enable agent debug console](#enable-agent-debug-console)
|
||||
- [Start `kata-monitor` - ONLY NEEDED FOR 2.0.x](#start-kata-monitor---only-needed-for-20x)
|
||||
- [Connect to debug console](#connect-to-debug-console)
|
||||
- [Traditional debug console setup](#traditional-debug-console-setup)
|
||||
- [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
|
||||
- [Build the debug image](#build-the-debug-image)
|
||||
- [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
|
||||
- [Create a container](#create-a-container)
|
||||
- [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
|
||||
- [Enabling debug console for QEMU](#enabling-debug-console-for-qemu)
|
||||
- [Enabling debug console for cloud-hypervisor / firecracker](#enabling-debug-console-for-cloud-hypervisor--firecracker)
|
||||
- [Connecting to the debug console](#connecting-to-the-debug-console)
|
||||
- [Obtain details of the image](#obtain-details-of-the-image)
|
||||
- [Capturing kernel boot logs](#capturing-kernel-boot-logs)
|
||||
|
||||
# Warning
|
||||
|
||||
This document is written **specifically for developers**: it is not intended for end users.
|
||||
@@ -470,7 +417,7 @@ script and paste its output directly into a
|
||||
> [runtime](../src/runtime) repository.
|
||||
|
||||
To perform analysis on Kata logs, use the
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/master/cmd/log-parser)
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/main/cmd/log-parser)
|
||||
tool, which can convert the logs into formats (e.g. JSON, TOML, XML, and YAML).
|
||||
|
||||
See [Set up a debug console](#set-up-a-debug-console).
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
* [Introduction](#introduction)
|
||||
* [General requirements](#general-requirements)
|
||||
* [Linking advice](#linking-advice)
|
||||
* [Notes](#notes)
|
||||
* [Warnings and other admonitions](#warnings-and-other-admonitions)
|
||||
* [Files and command names](#files-and-command-names)
|
||||
* [Code blocks](#code-blocks)
|
||||
* [Images](#images)
|
||||
* [Spelling](#spelling)
|
||||
* [Names](#names)
|
||||
* [Version numbers](#version-numbers)
|
||||
* [The apostrophe](#the-apostrophe)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document outlines the requirements for all documentation in the [Kata
|
||||
@@ -23,10 +10,6 @@ All documents must:
|
||||
- Be written in simple English.
|
||||
- Be written in [GitHub Flavored Markdown](https://github.github.com/gfm) format.
|
||||
- Have a `.md` file extension.
|
||||
- Include a TOC (table of contents) at the top of the document with links to
|
||||
all heading sections. We recommend using the
|
||||
[`kata-check-markdown`](https://github.com/kata-containers/tests/tree/main/cmd/check-markdown)
|
||||
tool to generate the TOC.
|
||||
- Be linked to from another document in the same repository.
|
||||
|
||||
Although GitHub allows navigation of the entire repository, it should be
|
||||
@@ -43,6 +26,10 @@ All documents must:
|
||||
which can then execute the commands specified to ensure the instructions are
|
||||
correct. This avoids documents becoming out of date over time.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> Do not add a table of contents (TOC) since GitHub will auto-generate one.
|
||||
|
||||
# Linking advice
|
||||
|
||||
Linking between documents is strongly encouraged to help users and developers
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
# Licensing strategy
|
||||
|
||||
* [Project License](#project-license)
|
||||
* [License file](#license-file)
|
||||
* [License for individual files](#license-for-individual-files)
|
||||
|
||||
## Project License
|
||||
|
||||
The license for the [Kata Containers](https://github.com/kata-containers)
|
||||
|
||||
@@ -1,35 +1,3 @@
|
||||
* [Overview](#overview)
|
||||
* [Definition of a limitation](#definition-of-a-limitation)
|
||||
* [Scope](#scope)
|
||||
* [Contributing](#contributing)
|
||||
* [Pending items](#pending-items)
|
||||
* [Runtime commands](#runtime-commands)
|
||||
* [checkpoint and restore](#checkpoint-and-restore)
|
||||
* [events command](#events-command)
|
||||
* [update command](#update-command)
|
||||
* [Networking](#networking)
|
||||
* [Docker swarm and compose support](#docker-swarm-and-compose-support)
|
||||
* [Resource management](#resource-management)
|
||||
* [docker run and shared memory](#docker-run-and-shared-memory)
|
||||
* [docker run and sysctl](#docker-run-and-sysctl)
|
||||
* [Docker daemon features](#docker-daemon-features)
|
||||
* [SELinux support](#selinux-support)
|
||||
* [Architectural limitations](#architectural-limitations)
|
||||
* [Networking limitations](#networking-limitations)
|
||||
* [Support for joining an existing VM network](#support-for-joining-an-existing-vm-network)
|
||||
* [docker --net=host](#docker---nethost)
|
||||
* [docker run --link](#docker-run---link)
|
||||
* [Storage limitations](#storage-limitations)
|
||||
* [Kubernetes `volumeMounts.subPaths`](#kubernetes-volumemountssubpaths)
|
||||
* [Host resource sharing](#host-resource-sharing)
|
||||
* [docker run --privileged](#docker-run---privileged)
|
||||
* [Miscellaneous](#miscellaneous)
|
||||
* [Docker --security-opt option partially supported](#docker---security-opt-option-partially-supported)
|
||||
* [Appendices](#appendices)
|
||||
* [The constraints challenge](#the-constraints-challenge)
|
||||
|
||||
***
|
||||
|
||||
# Overview
|
||||
|
||||
A [Kata Container](https://github.com/kata-containers) utilizes a Virtual Machine (VM) to enhance security and
|
||||
|
||||
@@ -1,16 +1,5 @@
|
||||
# Documentation
|
||||
|
||||
* [Getting Started](#getting-started)
|
||||
* [More User Guides](#more-user-guides)
|
||||
* [Kata Use-Cases](#kata-use-cases)
|
||||
* [Developer Guide](#developer-guide)
|
||||
* [Design and Implementations](#design-and-implementations)
|
||||
* [How to Contribute](#how-to-contribute)
|
||||
* [Code Licensing](#code-licensing)
|
||||
* [The Release Process](#the-release-process)
|
||||
* [Help Improving the Documents](#help-improving-the-documents)
|
||||
* [Website Changes](#website-changes)
|
||||
|
||||
The [Kata Containers](https://github.com/kata-containers)
|
||||
documentation repository hosts overall system documentation, with information
|
||||
common to multiple components.
|
||||
|
||||
@@ -1,20 +1,6 @@
|
||||
|
||||
# How to do a Kata Containers Release
|
||||
This document lists the tasks required to create a Kata Release.
|
||||
|
||||
<!-- TOC START min:1 max:3 link:true asterisk:false update:true -->
|
||||
- [How to do a Kata Containers Release](#how-to-do-a-kata-containers-release)
|
||||
- [Requirements](#requirements)
|
||||
- [Release Process](#release-process)
|
||||
- [Bump all Kata repositories](#bump-all-kata-repositories)
|
||||
- [Merge all bump version Pull requests](#merge-all-bump-version-pull-requests)
|
||||
- [Tag all Kata repositories](#tag-all-kata-repositories)
|
||||
- [Check Git-hub Actions](#check-git-hub-actions)
|
||||
- [Create release notes](#create-release-notes)
|
||||
- [Announce the release](#announce-the-release)
|
||||
<!-- TOC END -->
|
||||
|
||||
|
||||
## Requirements
|
||||
|
||||
- [hub](https://github.com/github/hub)
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
* [Introduction](#introduction)
|
||||
* [Maintenance warning](#maintenance-warning)
|
||||
* [Determine current version](#determine-current-version)
|
||||
* [Determine latest version](#determine-latest-version)
|
||||
* [Configuration changes](#configuration-changes)
|
||||
* [Upgrade Kata Containers](#upgrade-kata-containers)
|
||||
* [Upgrade native distribution packaged version](#upgrade-native-distribution-packaged-version)
|
||||
* [Static installation](#static-installation)
|
||||
* [Determine if you are using a static installation](#determine-if-you-are-using-a-static-installation)
|
||||
* [Remove a static installation](#remove-a-static-installation)
|
||||
* [Upgrade a static installation](#upgrade-a-static-installation)
|
||||
* [Custom assets](#custom-assets)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document outlines the options for upgrading from a
|
||||
|
||||
@@ -10,3 +10,7 @@ Kata Containers design documents:
|
||||
- [Host cgroups](host-cgroups.md)
|
||||
- [`Inotify` support](inotify.md)
|
||||
- [Metrics(Kata 2.0)](kata-2-0-metrics.md)
|
||||
|
||||
---
|
||||
|
||||
- [Design proposals](proposals)
|
||||
|
||||
@@ -1,12 +1,5 @@
|
||||
# Kata Containers and VSOCKs
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [VSOCK communication diagram](#vsock-communication-diagram)
|
||||
- [System requirements](#system-requirements)
|
||||
- [Advantages of using VSOCKs](#advantages-of-using-vsocks)
|
||||
- [High density](#high-density)
|
||||
- [Reliability](#reliability)
|
||||
|
||||
## Introduction
|
||||
|
||||
There are two different ways processes in the virtual machine can communicate
|
||||
|
||||
@@ -1,26 +1,5 @@
|
||||
# Kata Containers Architecture
|
||||
|
||||
|
||||
- [Kata Containers Architecture](#kata-containers-architecture)
|
||||
- [Overview](#overview)
|
||||
- [Virtualization](#virtualization)
|
||||
- [Guest assets](#guest-assets)
|
||||
- [Guest kernel](#guest-kernel)
|
||||
- [Guest image](#guest-image)
|
||||
- [Root filesystem image](#root-filesystem-image)
|
||||
- [Initrd image](#initrd-image)
|
||||
- [Agent](#agent)
|
||||
- [Runtime](#runtime)
|
||||
- [Configuration](#configuration)
|
||||
- [Networking](#networking)
|
||||
- [Network Hotplug](#network-hotplug)
|
||||
- [Storage](#storage)
|
||||
- [Kubernetes support](#kubernetes-support)
|
||||
- [OCI annotations](#oci-annotations)
|
||||
- [Mixing VM based and namespace based runtimes](#mixing-vm-based-and-namespace-based-runtimes)
|
||||
- [Appendices](#appendices)
|
||||
- [DAX](#dax)
|
||||
|
||||
## Overview
|
||||
|
||||
This is an architectural overview of Kata Containers, based on the 2.0 release.
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
# Kata Containers E2E Flow
|
||||
|
||||
|
||||

|
||||
|
||||
@@ -1,18 +1,3 @@
|
||||
- [Host cgroup management](#host-cgroup-management)
|
||||
- [Introduction](#introduction)
|
||||
- [`SandboxCgroupOnly` enabled](#sandboxcgrouponly-enabled)
|
||||
- [What does Kata do in this configuration?](#what-does-kata-do-in-this-configuration)
|
||||
- [Why create a Kata-cgroup under the parent cgroup?](#why-create-a-kata-cgroup-under-the-parent-cgroup)
|
||||
- [Improvements](#improvements)
|
||||
- [`SandboxCgroupOnly` disabled (default, legacy)](#sandboxcgrouponly-disabled-default-legacy)
|
||||
- [What does this method do?](#what-does-this-method-do)
|
||||
- [Impact](#impact)
|
||||
- [Supported cgroups](#supported-cgroups)
|
||||
- [Cgroups V1](#cgroups-v1)
|
||||
- [Cgroups V2](#cgroups-v2)
|
||||
- [Distro Support](#distro-support)
|
||||
- [Summary](#summary)
|
||||
|
||||
# Host cgroup management
|
||||
|
||||
## Introduction
|
||||
|
||||
@@ -1,20 +1,5 @@
|
||||
# Kata 2.0 Metrics Design
|
||||
|
||||
* [Limitations of Kata 1.x and the target of Kata 2.0](#limitations-of-kata-1x-and-the-target-of-kata-20)
|
||||
* [Metrics architecture](#metrics-architecture)
|
||||
* [Kata monitor](#kata-monitor)
|
||||
* [Kata runtime](#kata-runtime)
|
||||
* [Kata agent](#kata-agent)
|
||||
* [Performance and overhead](#performance-and-overhead)
|
||||
* [Metrics list](#metrics-list)
|
||||
* [Metric types](#metric-types)
|
||||
* [Kata agent metrics](#kata-agent-metrics)
|
||||
* [Firecracker metrics](#firecracker-metrics)
|
||||
* [Kata guest OS metrics](#kata-guest-os-metrics)
|
||||
* [Hypervisor metrics](#hypervisor-metrics)
|
||||
* [Kata monitor metrics](#kata-monitor-metrics)
|
||||
* [Kata containerd shim v2 metrics](#kata-containerd-shim-v2-metrics)
|
||||
|
||||
Kata implements CRI's API and supports the [`ContainerStats`](https://github.com/kubernetes/kubernetes/blob/release-1.18/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto#L101) and [`ListContainerStats`](https://github.com/kubernetes/kubernetes/blob/release-1.18/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto#L103) interfaces to expose container metrics. Users can use these interfaces to get basic metrics about containers.
|
||||
|
||||
But unlike `runc`, Kata is a VM-based runtime and has a different architecture.
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# Kata API Design
|
||||
|
||||
To fulfill the [Kata design requirements](kata-design-requirements.md), and based on the discussion on [Virtcontainers API extensions](https://docs.google.com/presentation/d/1dbGrD1h9cpuqAPooiEgtiwWDGCYhVPdatq7owsKHDEQ), the Kata runtime library features the following APIs:
|
||||
- Sandbox based top API
|
||||
- Storage and network hotplug API
|
||||
|
||||
5
docs/design/proposals/README.md
Normal file
5
docs/design/proposals/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Design proposals
|
||||
|
||||
Kata Containers design proposal documents:
|
||||
|
||||
- [Kata Containers tracing](tracing-proposals.md)
|
||||
213
docs/design/proposals/tracing-proposals.md
Normal file
213
docs/design/proposals/tracing-proposals.md
Normal file
@@ -0,0 +1,213 @@
|
||||
# Kata Tracing proposals
|
||||
|
||||
## Overview
|
||||
|
||||
This document summarises a set of proposals triggered by the
|
||||
[tracing documentation PR][tracing-doc-pr].
|
||||
|
||||
## Required context
|
||||
|
||||
This section explains some terminology required to understand the proposals.
|
||||
Further details can be found in the
|
||||
[tracing documentation PR][tracing-doc-pr].
|
||||
|
||||
### Agent trace mode terminology
|
||||
|
||||
| Trace mode | Description | Use-case |
|
||||
|-|-|-|
|
||||
| Static | Trace agent from startup to shutdown | Entire lifespan |
|
||||
| Dynamic | Toggle tracing on/off as desired | On-demand "snapshot" |
|
||||
|
||||
### Agent trace type terminology
|
||||
|
||||
| Trace type | Description | Use-case |
|
||||
|-|-|-|
|
||||
| isolated | traces all relate to single component | Observing lifespan |
|
||||
| collated | traces "grouped" (runtime+agent) | Understanding component interaction |
|
||||
|
||||
### Container lifespan
|
||||
|
||||
| Lifespan | trace mode | trace type |
|
||||
|-|-|-|
|
||||
| short-lived | static | collated if possible, else isolated? |
|
||||
| long-running | dynamic | collated? (to see interactions) |
|
||||
|
||||
## Original plan for agent
|
||||
|
||||
- Implement all trace types and trace modes for agent.
|
||||
|
||||
- Why?
|
||||
- Maximum flexibility.
|
||||
|
||||
> **Counterargument:**
|
||||
>
|
||||
> Due to the intrusive nature of adding tracing, we have
|
||||
> learnt that landing small incremental changes is simpler and quicker!
|
||||
|
||||
- Compatibility with [Kata 1.x tracing][kata-1x-tracing].
|
||||
|
||||
> **Counterargument:**
|
||||
>
|
||||
> Agent tracing in Kata 1.x was extremely awkward to set up (to the extent
|
||||
> that it's unclear how many users actually used it!)
|
||||
>
|
||||
> This point, coupled with the new architecture for Kata 2.x, suggests
|
||||
> that we may not need to supply the same set of tracing features (in fact
|
||||
> they may not make sense).
|
||||
|
||||
## Agent tracing proposals
|
||||
|
||||
### Agent tracing proposal 1: Don't implement dynamic trace mode
|
||||
|
||||
- All tracing will be static.
|
||||
|
||||
- Why?
|
||||
- Because dynamic tracing will always be "partial"
|
||||
|
||||
> In fact, not only would it be only a "snapshot" of activity, it may not
|
||||
> even be possible to create a complete "trace transaction". If this is
|
||||
> true, the trace output would be partial and would appear "unstructured".
|
||||
|
||||
### Agent tracing proposal 2: Simplify handling of trace type
|
||||
|
||||
- Agent tracing will be "isolated" by default.
|
||||
- Agent tracing will be "collated" if runtime tracing is also enabled.
|
||||
|
||||
- Why?
|
||||
- Offers a graceful fallback for agent tracing if runtime tracing disabled.
|
||||
- Simpler code!
|
||||
|
||||
## Questions to ask yourself (part 1)
|
||||
|
||||
- Are your containers long-running or short-lived?
|
||||
|
||||
- Would you ever need to turn on tracing "briefly"?
|
||||
- If "yes", is a "partial trace" useful or useless?
|
||||
|
||||
> Likely to be considered useless as it is a partial snapshot.
|
||||
> Alternative tracing methods may be more appropriate than dynamic
|
||||
> OpenTelemetry tracing.
|
||||
|
||||
## Questions to ask yourself (part 2)
|
||||
|
||||
- Are you happy to stop a container to enable tracing?
|
||||
If "no", dynamic tracing may be required.
|
||||
|
||||
- Would you ever want to trace the agent and the runtime "in isolation" at the
|
||||
same time?
|
||||
- If "yes", we need to fully implement `trace_mode=isolated`
|
||||
|
||||
> This seems unlikely though.
|
||||
|
||||
## Trace collection
|
||||
|
||||
The second set of proposals affect the way traces are collected.
|
||||
|
||||
### Motivation
|
||||
|
||||
Currently:
|
||||
|
||||
- The runtime sends trace spans to Jaeger directly.
|
||||
- The agent will send trace spans to the [`trace-forwarder`][trace-forwarder] component.
|
||||
- The trace forwarder will send trace spans to Jaeger.
|
||||
|
||||
Kata agent tracing overview:
|
||||
|
||||
```
|
||||
+-------------------------------------------+
|
||||
| Host |
|
||||
| |
|
||||
| +-----------+ |
|
||||
| | Trace | |
|
||||
| | Collector | |
|
||||
| +-----+-----+ |
|
||||
| ^ +--------------+ |
|
||||
| | spans | Kata VM | |
|
||||
| +-----+-----+ | | |
|
||||
| | Kata | spans | +-----+ | |
|
||||
| | Trace |<-----------------|Kata | | |
|
||||
| | Forwarder | VSOCK | |Agent| | |
|
||||
| +-----------+ Channel | +-----+ | |
|
||||
| +--------------+ |
|
||||
+-------------------------------------------+
|
||||
```
|
||||
|
||||
Currently:
|
||||
|
||||
- If agent tracing is enabled but the trace forwarder is not running,
|
||||
the agent will error.
|
||||
|
||||
- If the trace forwarder is started but Jaeger is not running,
|
||||
the trace forwarder will error.
|
||||
|
||||
### Goals
|
||||
|
||||
- The runtime and agent should:
|
||||
- Use the same trace collection implementation.
|
||||
- Use the most common configuration items.
|
||||
|
||||
- Kata should support more trace collection software or `SaaS`
|
||||
(for example `Zipkin`, `datadog`).
|
||||
|
||||
- Trace collection should not block normal runtime/agent operations
|
||||
(for example if `vsock-exporter`/Jaeger is not running, Kata Containers should work normally).
|
||||
|
||||
### Trace collection proposals
|
||||
|
||||
#### Trace collection proposal 1: Send all spans to the trace forwarder as a span proxy
|
||||
|
||||
Kata runtime/agent all send spans to trace forwarder, and the trace forwarder,
|
||||
acting as a tracing proxy, sends all spans to a tracing back-end, such as Jaeger or `datadog`.
|
||||
|
||||
**Pros:**
|
||||
|
||||
- Runtime/agent will be simple.
|
||||
- Could update trace collection target while Kata Containers are running.
|
||||
|
||||
**Cons:**
|
||||
|
||||
- Requires the trace forwarder component to be running (which adds operational overhead).
|
||||
|
||||
#### Trace collection proposal 2: Send spans to collector directly from runtime/agent
|
||||
|
||||
Send spans to the collector directly from the runtime/agent; this proposal requires
|
||||
network access to the collector.
|
||||
|
||||
**Pros:**
|
||||
|
||||
- No additional trace forwarder component needed.
|
||||
|
||||
**Cons:**
|
||||
|
||||
- Need more code/configuration to support all trace collectors.
|
||||
|
||||
## Future work
|
||||
|
||||
- We could add dynamic and fully isolated tracing at a later stage,
|
||||
if required.
|
||||
|
||||
## Further details
|
||||
|
||||
- See the new [GitHub project](https://github.com/orgs/kata-containers/projects/28).
|
||||
- [kata-containers-tracing-status](https://gist.github.com/jodh-intel/0ee54d41d2a803ba761e166136b42277) gist.
|
||||
- [tracing documentation PR][tracing-doc-pr].
|
||||
|
||||
## Summary
|
||||
|
||||
### Time line
|
||||
|
||||
- 2021-07-01: A summary of the discussion was
|
||||
[posted to the mailing list](http://lists.katacontainers.io/pipermail/kata-dev/2021-July/001996.html).
|
||||
- 2021-06-22: These proposals were
|
||||
[discussed in the Kata Architecture Committee meeting](https://etherpad.opendev.org/p/Kata_Containers_2021_Architecture_Committee_Mtgs).
|
||||
- 2021-06-18: These proposals were
|
||||
[announced on the mailing list](http://lists.katacontainers.io/pipermail/kata-dev/2021-June/001980.html).
|
||||
|
||||
### Outcome
|
||||
|
||||
- Nobody opposed the agent proposals, so they are being implemented.
|
||||
- The trace collection proposals are still being considered.
|
||||
|
||||
[kata-1x-tracing]: https://github.com/kata-containers/agent/blob/master/TRACING.md
|
||||
[trace-forwarder]: /src/trace-forwarder
|
||||
[tracing-doc-pr]: https://github.com/kata-containers/kata-containers/pull/1937
|
||||
@@ -1,11 +1,3 @@
|
||||
- [Virtual machine vCPU sizing in Kata Containers](#virtual-machine-vcpu-sizing-in-kata-containers)
|
||||
* [Default number of virtual CPUs](#default-number-of-virtual-cpus)
|
||||
* [Virtual CPUs and Kubernetes pods](#virtual-cpus-and-kubernetes-pods)
|
||||
* [Container lifecycle](#container-lifecycle)
|
||||
* [Container without CPU constraint](#container-without-cpu-constraint)
|
||||
* [Container with CPU constraint](#container-with-cpu-constraint)
|
||||
* [Do not waste resources](#do-not-waste-resources)
|
||||
|
||||
# Virtual machine vCPU sizing in Kata Containers
|
||||
|
||||
## Default number of virtual CPUs
|
||||
|
||||
@@ -1,16 +1,5 @@
|
||||
# Virtualization in Kata Containers
|
||||
|
||||
- [Virtualization in Kata Containers](#virtualization-in-kata-containers)
|
||||
- [Mapping container concepts to virtual machine technologies](#mapping-container-concepts-to-virtual-machine-technologies)
|
||||
- [Kata Containers Hypervisor and VMM support](#kata-containers-hypervisor-and-vmm-support)
|
||||
- [QEMU/KVM](#qemukvm)
|
||||
- [Machine accelerators](#machine-accelerators)
|
||||
- [Hotplug devices](#hotplug-devices)
|
||||
- [Firecracker/KVM](#firecrackerkvm)
|
||||
- [Cloud Hypervisor/KVM](#cloud-hypervisorkvm)
|
||||
- [Summary](#summary)
|
||||
|
||||
|
||||
In Kata Containers, a second layer of isolation is created on top of those provided by traditional namespace-containers. The
|
||||
hardware virtualization interface is the basis of this additional layer. Kata will launch a lightweight virtual machine,
|
||||
and use the guest’s Linux kernel to create a container workload, or workloads in the case of multi-container pods. In Kubernetes
|
||||
|
||||
@@ -1,11 +1,7 @@
|
||||
# Howto Guides
|
||||
|
||||
* [Howto Guides](#howto-guides)
|
||||
* [Kubernetes Integration](#kubernetes-integration)
|
||||
* [Hypervisors Integration](#hypervisors-integration)
|
||||
* [Advanced Topics](#advanced-topics)
|
||||
|
||||
## Kubernetes Integration
|
||||
|
||||
- [Run Kata containers with `crictl`](run-kata-with-crictl.md)
|
||||
- [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
|
||||
- [How to use Kata Containers and Containerd](containerd-kata.md)
|
||||
@@ -28,6 +24,7 @@
|
||||
- [Kata Containers with ACRN Hypervisor](how-to-use-kata-containers-with-acrn.md)
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
- [How to use Kata Containers with virtio-fs](how-to-use-virtio-fs-with-kata.md)
|
||||
- [Setting Sysctls with Kata](how-to-use-sysctls-with-kata.md)
|
||||
- [What Is VMCache and How To Enable It](what-is-vm-cache-and-how-do-I-use-it.md)
|
||||
|
||||
@@ -1,23 +1,5 @@
|
||||
# How to use Kata Containers and Containerd
|
||||
|
||||
- [Concepts](#concepts)
|
||||
- [Kubernetes `RuntimeClass`](#kubernetes-runtimeclass)
|
||||
- [Containerd Runtime V2 API: Shim V2 API](#containerd-runtime-v2-api-shim-v2-api)
|
||||
- [Install](#install)
|
||||
- [Install Kata Containers](#install-kata-containers)
|
||||
- [Install containerd with CRI plugin](#install-containerd-with-cri-plugin)
|
||||
- [Install CNI plugins](#install-cni-plugins)
|
||||
- [Install `cri-tools`](#install-cri-tools)
|
||||
- [Configuration](#configuration)
|
||||
- [Configure containerd to use Kata Containers](#configure-containerd-to-use-kata-containers)
|
||||
- [Kata Containers as a `RuntimeClass`](#kata-containers-as-a-runtimeclass)
|
||||
- [Kata Containers as the runtime for untrusted workload](#kata-containers-as-the-runtime-for-untrusted-workload)
|
||||
- [Kata Containers as the default runtime](#kata-containers-as-the-default-runtime)
|
||||
- [Configuration for `cri-tools`](#configuration-for-cri-tools)
|
||||
- [Run](#run)
|
||||
- [Launch containers with `ctr` command line](#launch-containers-with-ctr-command-line)
|
||||
- [Launch Pods with `crictl` command line](#launch-pods-with-crictl-command-line)
|
||||
|
||||
This document covers the installation and configuration of [containerd](https://containerd.io/)
|
||||
and [Kata Containers](https://katacontainers.io). The containerd provides not only the `ctr`
|
||||
command line tool, but also the [CRI](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/)
|
||||
|
||||
@@ -26,7 +26,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: kata-monitor
|
||||
image: docker.io/katadocker/kata-monitor:2.0.0
|
||||
image: quay.io/kata-containers/kata-monitor:2.0.0
|
||||
args:
|
||||
- -log-level=debug
|
||||
ports:
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
# How to use memory hotplug feature in Kata Containers on arm64
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Install UEFI ROM](#install-uefi-rom)
|
||||
- [Run for test](#run-for-test)
|
||||
|
||||
## Introduction
|
||||
|
||||
Memory hotplug is a key feature for containers to allocate memory dynamically in deployment.
|
||||
|
||||
@@ -1,20 +1,5 @@
|
||||
# Importing Kata Containers logs with Fluentd
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [Overview](#overview)
|
||||
* [Test stack](#test-stack)
|
||||
* [Importing the logs](#importing-the-logs)
|
||||
* [Direct import `logfmt` from `systemd`](#direct-import-logfmt-from-systemd)
|
||||
* [Configuring `minikube`](#configuring-minikube)
|
||||
* [Pull from `systemd`](#pull-from-systemd)
|
||||
* [Systemd Summary](#systemd-summary)
|
||||
* [Directly importing JSON](#directly-importing-json)
|
||||
* [JSON in files](#json-in-files)
|
||||
* [Prefixing all keys](#prefixing-all-keys)
|
||||
* [Kata `shimv2`](#kata-shimv2)
|
||||
* [Caveats](#caveats)
|
||||
* [Summary](#summary)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document describes how to import Kata Containers logs into [Fluentd](https://www.fluentd.org/),
|
||||
@@ -143,7 +128,7 @@ YAML can be found
|
||||
tag kata-containers
|
||||
path /run/log/journal
|
||||
pos_file /run/log/journal/kata-journald.pos
|
||||
filters [{"SYSLOG_IDENTIFIER": "kata-runtime"}, {"SYSLOG_IDENTIFIER": "kata-proxy"}, {"SYSLOG_IDENTIFIER": "kata-shim"}]
|
||||
filters [{"SYSLOG_IDENTIFIER": "kata-runtime"}, {"SYSLOG_IDENTIFIER": "kata-shim"}]
|
||||
read_from_head true
|
||||
</source>
|
||||
```
|
||||
@@ -161,7 +146,7 @@ generate some Kata specific log entries:
|
||||
|
||||
```bash
|
||||
$ minikube addons open efk
|
||||
$ cd $GOPATH/src/github.com/kata-containers/packaging/kata-deploy
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/packaging/kata-deploy
|
||||
$ kubectl apply -f examples/nginx-deployment-qemu.yaml
|
||||
```
|
||||
|
||||
@@ -178,7 +163,7 @@ sub-filter on, for instance, the `SYSLOG_IDENTIFIER` to differentiate the Kata c
|
||||
on the `PRIORITY` to filter out critical issues etc.
|
||||
|
||||
Kata generates a significant amount of Kata specific information, which can be seen as
|
||||
[`logfmt`](https://github.com/kata-containers/tests/tree/master/cmd/log-parser#logfile-requirements).
|
||||
[`logfmt`](https://github.com/kata-containers/tests/tree/main/cmd/log-parser#logfile-requirements).
|
||||
data contained in the `MESSAGE` field. Imported as-is, there is no easy way to filter on that data
|
||||
in Kibana:
|
||||
|
||||
@@ -272,9 +257,8 @@ go directly to a full Kata specific JSON format logfile test.
|
||||
|
||||
Kata runtime has the ability to generate JSON logs directly, rather than its default `logfmt` format. Passing
|
||||
the `--log-format=json` argument to the Kata runtime enables this. The easiest way to pass in this extra
|
||||
parameter from a [Kata deploy](https://github.com/kata-containers/packaging/tree/master/kata-deploy) installation
|
||||
is to edit the `/opt/kata/bin/kata-qemu` shell script (generated by the
|
||||
[Kata packaging release scripts](https://github.com/kata-containers/packaging/blob/master/release/kata-deploy-binaries.sh)).
|
||||
parameter from a [Kata deploy](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy) installation
|
||||
is to edit the `/opt/kata/bin/kata-qemu` shell script.
|
||||
|
||||
At the same time, we will add the `--log=/var/log/kata-runtime.log` argument to store the Kata logs in their
|
||||
own file (rather than into the system journal).
|
||||
|
||||
@@ -2,14 +2,6 @@
|
||||
|
||||
This document describes how to run `kata-monitor` in a Kubernetes cluster using Prometheus's service discovery to scrape metrics from `kata-agent`.
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Pre-requisites](#pre-requisites)
|
||||
- [Configure Prometheus](#configure-prometheus)
|
||||
- [Configure `kata-monitor`](#configure-kata-monitor)
|
||||
- [Setup Grafana](#setup-grafana)
|
||||
* [Create `datasource`](#create-datasource)
|
||||
* [Import dashboard](#import-dashboard)
|
||||
|
||||
> **Warning**: This how-to is only for evaluation purposes; you **SHOULD NOT** run it in production using this configuration.
|
||||
|
||||
## Introduction
|
||||
|
||||
@@ -1,18 +1,5 @@
|
||||
# How to use Kata Containers and CRI (containerd plugin) with Kubernetes
|
||||
|
||||
* [Requirements](#requirements)
|
||||
* [Install and configure containerd](#install-and-configure-containerd)
|
||||
* [Install and configure Kubernetes](#install-and-configure-kubernetes)
|
||||
* [Install Kubernetes](#install-kubernetes)
|
||||
* [Configure Kubelet to use containerd](#configure-kubelet-to-use-containerd)
|
||||
* [Configure HTTP proxy - OPTIONAL](#configure-http-proxy---optional)
|
||||
* [Start Kubernetes](#start-kubernetes)
|
||||
* [Configure Pod Network](#configure-pod-network)
|
||||
* [Allow pods to run in the master node](#allow-pods-to-run-in-the-master-node)
|
||||
* [Create runtime class for Kata Containers](#create-runtime-class-for-kata-containers)
|
||||
* [Run pod in Kata Containers](#run-pod-in-kata-containers)
|
||||
* [Delete created pod](#delete-created-pod)
|
||||
|
||||
This document describes how to set up a single-machine Kubernetes (k8s) cluster.
|
||||
|
||||
The Kubernetes cluster will use the
|
||||
|
||||
@@ -2,11 +2,6 @@
|
||||
|
||||
This document provides an overview on how to run Kata containers with ACRN hypervisor and device model.
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Pre-requisites](#pre-requisites)
|
||||
- [Configure Docker](#configure-docker)
|
||||
- [Configure Kata Containers with ACRN](#configure-kata-containers-with-acrn)
|
||||
|
||||
## Introduction
|
||||
|
||||
ACRN is a flexible, lightweight Type-1 reference hypervisor built with real-time and safety-criticality in mind. ACRN uses an open source platform making it optimized to streamline embedded development.
|
||||
@@ -27,7 +22,7 @@ This document requires the presence of the ACRN hypervisor and Kata Containers o
|
||||
|
||||
- ACRN supported [Hardware](https://projectacrn.github.io/latest/hardware.html#supported-hardware).
|
||||
> **Note:** Please make sure to have a minimum of 4 logical processors (HT) or cores.
|
||||
- ACRN [software](https://projectacrn.github.io/latest/tutorials/kbl-nuc-sdc.html#use-the-script-to-set-up-acrn-automatically) setup.
|
||||
- ACRN [software](https://projectacrn.github.io/latest/tutorials/run_kata_containers.html) setup.
|
||||
- For networking, ACRN supports either MACVTAP or TAP. If MACVTAP is not enabled in the Service OS, please follow the below steps to update the kernel:
|
||||
|
||||
```sh
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Setting Sysctls with Kata
|
||||
|
||||
## Sysctls
|
||||
|
||||
In Linux, the sysctl interface allows an administrator to modify kernel
|
||||
parameters at runtime. Parameters are available via the `/proc/sys/` virtual
|
||||
process file system.
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
# Kata Containers with virtio-fs
|
||||
|
||||
- [Kata Containers with virtio-fs](#kata-containers-with-virtio-fs)
|
||||
- [Introduction](#introduction)
|
||||
|
||||
## Introduction
|
||||
|
||||
Container deployments utilize explicit or implicit file sharing between host filesystem and containers. From a trust perspective, avoiding a shared file-system between the trusted host and untrusted container is recommended. This is not always feasible. In Kata Containers, block-based volumes are preferred as they allow usage of either device pass through or `virtio-blk` for access within the virtual machine.
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
# Kata Containers with `virtio-mem`
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Requisites](#requisites)
|
||||
- [Run a Kata Container utilizing `virtio-mem`](#run-a-kata-container-utilizing-virtio-mem)
|
||||
|
||||
## Introduction
|
||||
|
||||
The basic idea of `virtio-mem` is to provide a flexible, cross-architecture memory hot plug and hot unplug solution that avoids many limitations imposed by existing technologies, architectures, and interfaces.
|
||||
@@ -41,7 +37,7 @@ $ echo 1 | sudo tee /proc/sys/vm/overcommit_memory
|
||||
Use following command to start a Kata Container.
|
||||
```
|
||||
$ pod_yaml=pod.yaml
|
||||
$ container_yaml=${REPORT_DIR}/container.yaml
|
||||
$ container_yaml=container.yaml
|
||||
$ image="quay.io/prometheus/busybox:latest"
|
||||
$ cat << EOF > "${pod_yaml}"
|
||||
metadata:
|
||||
|
||||
@@ -3,11 +3,6 @@
|
||||
Kata Containers supports creation of containers that are "privileged" (i.e. have additional capabilities and access
|
||||
that is not normally granted).
|
||||
|
||||
* [Warnings](#warnings)
|
||||
* [Host Devices](#host-devices)
|
||||
* [Containerd and CRI](#containerd-and-cri)
|
||||
* [CRI-O](#cri-o)
|
||||
|
||||
## Warnings
|
||||
|
||||
**Warning:** Whilst this functionality is supported, it can decrease the security of Kata Containers if not configured
|
||||
|
||||
@@ -1,16 +1,5 @@
|
||||
# Working with `crictl`
|
||||
|
||||
* [What's `cri-tools`](#whats-cri-tools)
|
||||
* [Use `crictl` run Pods in Kata containers](#use-crictl-run-pods-in-kata-containers)
|
||||
* [Run `busybox` Pod](#run-busybox-pod)
|
||||
* [Run pod sandbox with config file](#run-pod-sandbox-with-config-file)
|
||||
* [Create container in the pod sandbox with config file](#create-container-in-the-pod-sandbox-with-config-file)
|
||||
* [Start container](#start-container)
|
||||
* [Run `redis` Pod](#run-redis-pod)
|
||||
* [Create `redis-server` Pod](#create-redis-server-pod)
|
||||
* [Create `redis-client` Pod](#create-redis-client-pod)
|
||||
* [Check `redis` server is working](#check-redis-server-is-working)
|
||||
|
||||
## What's `cri-tools`
|
||||
|
||||
[`cri-tools`](https://github.com/kubernetes-sigs/cri-tools) provides debugging and validation tools for Kubelet Container Runtime Interface (CRI).
|
||||
|
||||
@@ -1,18 +1,5 @@
|
||||
# Run Kata Containers with Kubernetes
|
||||
|
||||
* [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Install a CRI implementation](#install-a-cri-implementation)
|
||||
* [CRI-O](#cri-o)
|
||||
* [Kubernetes Runtime Class (CRI-O v1.12 )](#kubernetes-runtime-class-cri-o-v112)
|
||||
* [Untrusted annotation (until CRI-O v1.12)](#untrusted-annotation-until-cri-o-v112)
|
||||
* [Network namespace management](#network-namespace-management)
|
||||
* [containerd with CRI plugin](#containerd-with-cri-plugin)
|
||||
* [Install Kubernetes](#install-kubernetes)
|
||||
* [Configure for CRI-O](#configure-for-cri-o)
|
||||
* [Configure for containerd](#configure-for-containerd)
|
||||
* [Run a Kubernetes pod with Kata Containers](#run-a-kubernetes-pod-with-kata-containers)
|
||||
|
||||
## Prerequisites
|
||||
This guide requires Kata Containers available on your system, installable by following [this guide](../install/README.md).
|
||||
|
||||
|
||||
@@ -1,21 +1,5 @@
|
||||
# Kata Containers and service mesh for Kubernetes
|
||||
|
||||
* [Assumptions](#assumptions)
|
||||
* [How they work](#how-they-work)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Kata and Kubernetes](#kata-and-kubernetes)
|
||||
* [Restrictions](#restrictions)
|
||||
* [Install and deploy your service mesh](#install-and-deploy-your-service-mesh)
|
||||
* [Service Mesh Istio](#service-mesh-istio)
|
||||
* [Service Mesh Linkerd](#service-mesh-linkerd)
|
||||
* [Inject your services with sidecars](#inject-your-services-with-sidecars)
|
||||
* [Sidecar Istio](#sidecar-istio)
|
||||
* [Sidecar Linkerd](#sidecar-linkerd)
|
||||
* [Run your services with Kata](#run-your-services-with-kata)
|
||||
* [Lower privileges](#lower-privileges)
|
||||
* [Add annotations](#add-annotations)
|
||||
* [Deploy](#deploy)
|
||||
|
||||
A service mesh is a way to monitor and control the traffic between
|
||||
micro-services running in your Kubernetes cluster. It is a powerful
|
||||
tool that you might want to use in combination with the security
|
||||
@@ -76,15 +60,16 @@ is not able to perform a proper setup of the rules.
|
||||
|
||||
### Service Mesh Istio
|
||||
|
||||
As a reference, you can follow Istio [instructions](https://istio.io/docs/setup/kubernetes/quick-start/#download-and-prepare-for-the-installation).
|
||||
|
||||
The following is a summary of what you need to install Istio on your system:
|
||||
|
||||
```
|
||||
$ curl -L https://git.io/getLatestIstio | sh -
|
||||
$ cd istio-*
|
||||
$ export PATH=$PWD/bin:$PATH
|
||||
```
|
||||
|
||||
See the [Istio documentation](https://istio.io/docs) for further details.
|
||||
|
||||
Now deploy Istio in the control plane of your cluster with the following:
|
||||
```
|
||||
$ kubectl apply -f install/kubernetes/istio-demo.yaml
|
||||
|
||||
@@ -1,10 +1,5 @@
|
||||
# What Is VMCache and How To Enable It
|
||||
|
||||
* [What is VMCache](#what-is-vmcache)
|
||||
* [How is this different to VM templating](#how-is-this-different-to-vm-templating)
|
||||
* [How to enable VMCache](#how-to-enable-vmcache)
|
||||
* [Limitations](#limitations)
|
||||
|
||||
### What is VMCache
|
||||
|
||||
VMCache is a new function that creates VMs as caches before they are used.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# What Is VM Templating and How To Enable It
|
||||
|
||||
### What is VM templating
|
||||
|
||||
VM templating is a Kata Containers feature that enables new VM
|
||||
creation using a cloning technique. When enabled, new VMs are created
|
||||
by cloning from a pre-created template VM, and they will share the
|
||||
@@ -8,11 +9,13 @@ same initramfs, kernel and agent memory in readonly mode. It is very
|
||||
much like a process fork done by the kernel but here we *fork* VMs.
|
||||
|
||||
### How is this different from VMCache
|
||||
|
||||
Both [VMCache](../how-to/what-is-vm-cache-and-how-do-I-use-it.md) and VM templating help speed up new container creation.
|
||||
When VMCache is enabled, new VMs are created by the VMCache server, so it is not vulnerable to shared-memory CVEs because the VMs do not share memory.
|
||||
VM templating saves a lot of memory if there are many Kata Containers running on the same host.
|
||||
|
||||
### What are the Pros
|
||||
|
||||
VM templating helps speed up new container creation and saves a lot
|
||||
of memory if there are many Kata Containers running on the same host.
|
||||
If you are running a density workload, or care a lot about container
|
||||
@@ -29,6 +32,7 @@ showed that VM templating speeds up Kata Containers creation by as much as
|
||||
38.68%. See [full results here](https://gist.github.com/bergwolf/06974a3c5981494a40e2c408681c085d).
|
||||
|
||||
### What are the Cons
|
||||
|
||||
One drawback of VM templating is that it cannot avoid cross-VM side-channel
|
||||
attack such as [CVE-2015-2877](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-2877)
|
||||
that originally targeted the Linux KSM feature.
|
||||
@@ -39,10 +43,11 @@ and can be classified as potentially misunderstood behaviors rather than vulnera
|
||||
**Warning**: If you care about such attack vector, do not use VM templating or KSM.
|
||||
|
||||
### How to enable VM templating
|
||||
|
||||
VM templating can be enabled by changing your Kata Containers config file (`/usr/share/defaults/kata-containers/configuration.toml`,
|
||||
overridden by `/etc/kata-containers/configuration.toml` if provided) such that:
|
||||
|
||||
- `qemu-lite` is specified in `hypervisor.qemu`->`path` section
|
||||
- `qemu` version `v4.1.0` or above is specified in `hypervisor.qemu`->`path` section
|
||||
- `enable_template = true`
|
||||
- `initrd =` is set
|
||||
- `image =` option is commented out or removed
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
# Hypervisors
|
||||
|
||||
* [Hypervisors](#hypervisors)
|
||||
* [Introduction](#introduction)
|
||||
* [Types](#types)
|
||||
* [Determine currently configured hypervisor](#determine-currently-configured-hypervisor)
|
||||
* [Choose a Hypervisor](#choose-a-hypervisor)
|
||||
|
||||
## Introduction
|
||||
|
||||
Kata Containers supports multiple hypervisors. This document provides a very
|
||||
|
||||
@@ -1,39 +1,19 @@
|
||||
# Kata Containers installation user guides
|
||||
# Kata Containers installation guides
|
||||
|
||||
* [Kata Containers installation user guides](#kata-containers-installation-user-guides)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Legacy installation](#legacy-installation)
|
||||
* [Packaged installation methods](#packaged-installation-methods)
|
||||
* [Official packages](#official-packages)
|
||||
* [Snap Installation](#snap-installation)
|
||||
* [Automatic Installation](#automatic-installation)
|
||||
* [Manual Installation](#manual-installation)
|
||||
* [Build from source installation](#build-from-source-installation)
|
||||
* [Installing on a Cloud Service Platform](#installing-on-a-cloud-service-platform)
|
||||
* [Further information](#further-information)
|
||||
|
||||
The following is an overview of the different installation methods available. All of these methods equally result
|
||||
in a system configured to run Kata Containers.
|
||||
The following is an overview of the different installation methods available.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Kata Containers requires nested virtualization or bare metal.
|
||||
See the
|
||||
[hardware requirements](/src/runtime/README.md#hardware-requirements)
|
||||
to see if your system is capable of running Kata Containers.
|
||||
|
||||
## Legacy installation
|
||||
|
||||
If you wish to install a legacy 1.x version of Kata Containers, see
|
||||
[the Kata Containers 1.x installation documentation](https://github.com/kata-containers/documentation/tree/master/install/).
|
||||
Kata Containers requires nested virtualization or bare metal. Check
|
||||
[hardware requirements](/src/runtime/README.md#hardware-requirements) to see if your system is capable of running Kata
|
||||
Containers.
|
||||
|
||||
## Packaged installation methods
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - Packaged installation methods use your distribution's native package format (such as RPM or DEB).
|
||||
> - You are strongly encouraged to choose an installation method that provides
|
||||
> automatic updates, to ensure you benefit from security updates and bug fixes.
|
||||
Packaged installation methods use your distribution's native package format (such as RPM or DEB).
|
||||
|
||||
*Note:* We encourage installation methods that provide automatic updates, as this ensures security updates and bug fixes are
|
||||
easily applied.
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case |
|
||||
|------------------------------------------------------|---------------------------------------------------------------------|-------------------|----------------------------------------------------------|
|
||||
@@ -52,16 +32,9 @@ Kata packages are provided by official distribution repositories for:
|
||||
| [CentOS](centos-installation-guide.md) | 8 |
|
||||
| [Fedora](fedora-installation-guide.md) | 34 |
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> All users are encouraged to use the official distribution versions of Kata
|
||||
> Containers unless they understand the implications of alternative methods.
|
||||
|
||||
### Snap Installation
|
||||
|
||||
> **Note:** The snap installation is available for all distributions which support `snapd`.
|
||||
|
||||
[](https://snapcraft.io/kata-containers)
|
||||
The snap installation is available for all distributions which support `snapd`.
|
||||
|
||||
[Use snap](snap-installation-guide.md) to install Kata Containers from https://snapcraft.io.
|
||||
|
||||
@@ -75,11 +48,9 @@ Follow the [containerd installation guide](container-manager/containerd/containe
|
||||
|
||||
## Build from source installation
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - Power users who decide to build from sources should be aware of the
|
||||
> implications of using an unpackaged system which will not be automatically
|
||||
> updated as new [releases](../Stable-Branch-Strategy.md) are made available.
|
||||
*Note:* Power users who decide to build from sources should be aware of the
|
||||
implications of using an unpackaged system which will not be automatically
|
||||
updated as new [releases](../Stable-Branch-Strategy.md) are made available.
|
||||
|
||||
[Building from sources](../Developer-Guide.md#initial-setup) allows power users
|
||||
who are comfortable building software from source to use the latest component
|
||||
@@ -95,6 +66,6 @@ versions. This is not recommended for normal users.
|
||||
|
||||
## Further information
|
||||
|
||||
* The [upgrading document](../Upgrading.md).
|
||||
* The [developer guide](../Developer-Guide.md).
|
||||
* The [runtime documentation](../../src/runtime/README.md).
|
||||
* [upgrading document](../Upgrading.md)
|
||||
* [developer guide](../Developer-Guide.md)
|
||||
* [runtime documentation](../../src/runtime/README.md)
|
||||
|
||||
@@ -1,10 +1,5 @@
|
||||
# Install Kata Containers on Amazon Web Services
|
||||
|
||||
* [Install and Configure AWS CLI](#install-and-configure-aws-cli)
|
||||
* [Create or Import an EC2 SSH key pair](#create-or-import-an-ec2-ssh-key-pair)
|
||||
* [Launch i3.metal instance](#launch-i3metal-instance)
|
||||
* [Install Kata](#install-kata)
|
||||
|
||||
Kata Containers on Amazon Web Services (AWS) makes use of [i3.metal](https://aws.amazon.com/ec2/instance-types/i3/) instances. Most of the installation procedure is identical to that for Kata on your preferred distribution, except that you have to run it on bare metal instances since AWS doesn't support nested virtualization yet. This guide walks you through creating an i3.metal instance.
|
||||
|
||||
## Install and Configure AWS CLI
|
||||
|
||||
@@ -98,12 +98,12 @@
|
||||
|
||||
```toml
|
||||
[plugins]
|
||||
[plugins.cri]
|
||||
[plugins.cri.containerd]
|
||||
default_runtime_name = "kata"
|
||||
|
||||
[plugins.cri.containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
default_runtime_name = "kata"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
# Install Kata Containers on Google Compute Engine
|
||||
|
||||
* [Create an Image with Nested Virtualization Enabled](#create-an-image-with-nested-virtualization-enabled)
|
||||
* [Create the Image](#create-the-image)
|
||||
* [Verify VMX is Available](#verify-vmx-is-available)
|
||||
* [Install Kata](#install-kata)
|
||||
* [Create a Kata-enabled Image](#create-a-kata-enabled-image)
|
||||
|
||||
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime check` checks for nested virtualization, but does not fail if support is not found.
|
||||
|
||||
As a pre-requisite this guide assumes an installed and configured instance of the [Google Cloud SDK](https://cloud.google.com/sdk/downloads). For a zero-configuration option, all of the commands below have been tested under [Google Cloud Shell](https://cloud.google.com/shell/) (as of Jun 2018). Verify your `gcloud` installation and configuration:
|
||||
|
||||
@@ -1,17 +1,5 @@
|
||||
# Installing Kata Containers in Minikube
|
||||
|
||||
* [Installing Kata Containers in Minikube](#installing-kata-containers-in-minikube)
|
||||
* [Introduction](#introduction)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Setting up Minikube](#setting-up-minikube)
|
||||
* [Checking for nested virtualization](#checking-for-nested-virtualization)
|
||||
* [Check Minikube is running](#check-minikube-is-running)
|
||||
* [Installing Kata Containers](#installing-kata-containers)
|
||||
* [Enabling Kata Containers](#enabling-kata-containers)
|
||||
* [Register the runtime](#register-the-runtime)
|
||||
* [Testing Kata Containers](#testing-kata-containers)
|
||||
* [Wrapping up](#wrapping-up)
|
||||
|
||||
## Introduction
|
||||
|
||||
[Minikube](https://kubernetes.io/docs/setup/minikube/) is an easy way to try out a Kubernetes (k8s)
|
||||
@@ -166,8 +154,8 @@ $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/node-api/master/
|
||||
Now register the `kata qemu` runtime with that class. This should result in no errors:
|
||||
|
||||
```sh
|
||||
$ cd kata-containers/tools/packaging/kata-deploy/k8s-1.14
|
||||
$ kubectl apply -f kata-qemu-runtimeClass.yaml
|
||||
$ cd kata-containers/tools/packaging/kata-deploy/runtimeclasses
|
||||
$ kubectl apply -f kata-runtimeClasses.yaml
|
||||
```
|
||||
|
||||
The Kata Containers installation process should be complete and enabled in the Minikube cluster.
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
# Kata Containers snap package
|
||||
|
||||
* [Install Kata Containers](#install-kata-containers)
|
||||
* [Configure Kata Containers](#configure-kata-containers)
|
||||
* [Integration with shim v2 Container Engines](#integration-with-shim-v2-container-engines)
|
||||
* [Remove Kata Containers snap package](#remove-kata-containers-snap-package)
|
||||
|
||||
|
||||
## Install Kata Containers
|
||||
|
||||
Kata Containers can be installed in any Linux distribution that supports
|
||||
|
||||
@@ -1,13 +1,5 @@
|
||||
# Using Intel GPU device with Kata Containers
|
||||
|
||||
- [Using Intel GPU device with Kata Containers](#using-intel-gpu-device-with-kata-containers)
|
||||
- [Hardware Requirements](#hardware-requirements)
|
||||
- [Host Kernel Requirements](#host-kernel-requirements)
|
||||
- [Install and configure Kata Containers](#install-and-configure-kata-containers)
|
||||
- [Build Kata Containers kernel with GPU support](#build-kata-containers-kernel-with-gpu-support)
|
||||
- [GVT-d with Kata Containers](#gvt-d-with-kata-containers)
|
||||
- [GVT-g with Kata Containers](#gvt-g-with-kata-containers)
|
||||
|
||||
An Intel Graphics device can be passed to a Kata Containers container using GPU
|
||||
passthrough (Intel GVT-d) as well as GPU mediated passthrough (Intel GVT-g).
|
||||
|
||||
|
||||
@@ -1,17 +1,5 @@
|
||||
# Using Nvidia GPU device with Kata Containers
|
||||
|
||||
- [Using Nvidia GPU device with Kata Containers](#using-nvidia-gpu-device-with-kata-containers)
|
||||
- [Hardware Requirements](#hardware-requirements)
|
||||
- [Host BIOS Requirements](#host-bios-requirements)
|
||||
- [Host Kernel Requirements](#host-kernel-requirements)
|
||||
- [Install and configure Kata Containers](#install-and-configure-kata-containers)
|
||||
- [Build Kata Containers kernel with GPU support](#build-kata-containers-kernel-with-gpu-support)
|
||||
- [Nvidia GPU pass-through mode with Kata Containers](#nvidia-gpu-pass-through-mode-with-kata-containers)
|
||||
- [Nvidia vGPU mode with Kata Containers](#nvidia-vgpu-mode-with-kata-containers)
|
||||
- [Install Nvidia Driver in Kata Containers](#install-nvidia-driver-in-kata-containers)
|
||||
- [References](#references)
|
||||
|
||||
|
||||
An Nvidia GPU device can be passed to a Kata Containers container using GPU passthrough
|
||||
(Nvidia GPU pass-through mode) as well as GPU mediated passthrough (Nvidia vGPU mode).
|
||||
|
||||
@@ -303,4 +291,4 @@ Tue Mar 3 00:03:49 2020
|
||||
|
||||
- [Configuring a VM for GPU Pass-Through by Using the QEMU Command Line](https://docs.nvidia.com/grid/latest/grid-vgpu-user-guide/index.html#using-gpu-pass-through-red-hat-el-qemu-cli)
|
||||
- https://gitlab.com/nvidia/container-images/driver/-/tree/master
|
||||
- https://github.com/NVIDIA/nvidia-docker/wiki/Driver-containers-(Beta)
|
||||
- https://github.com/NVIDIA/nvidia-docker/wiki/Driver-containers
|
||||
|
||||
@@ -1,33 +1,5 @@
|
||||
# Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Introduction](#introduction)
|
||||
- [Helpful Links before starting](#helpful-links-before-starting)
|
||||
- [Steps to enable Intel® QAT in Kata Containers](#steps-to-enable-intel-qat-in-kata-containers)
|
||||
- [Script variables](#script-variables)
|
||||
- [Set environment variables (Every Reboot)](#set-environment-variables-every-reboot)
|
||||
- [Prepare the Ubuntu Host](#prepare-the-ubuntu-host)
|
||||
- [Identify which PCI Bus the Intel® QAT card is on](#identify-which-pci-bus-the-intel-qat-card-is-on)
|
||||
- [Install necessary packages for Ubuntu](#install-necessary-packages-for-ubuntu)
|
||||
- [Download Intel® QAT drivers](#download-intel-qat-drivers)
|
||||
- [Copy Intel® QAT configuration files and enable virtual functions](#copy-intel-qat-configuration-files-and-enable-virtual-functions)
|
||||
- [Expose and Bind Intel® QAT virtual functions to VFIO-PCI (Every reboot)](#expose-and-bind-intel-qat-virtual-functions-to-vfio-pci-every-reboot)
|
||||
- [Check Intel® QAT virtual functions are enabled](#check-intel-qat-virtual-functions-are-enabled)
|
||||
- [Prepare Kata Containers](#prepare-kata-containers)
|
||||
- [Download Kata kernel Source](#download-kata-kernel-source)
|
||||
- [Build Kata kernel](#build-kata-kernel)
|
||||
- [Copy Kata kernel](#copy-kata-kernel)
|
||||
- [Prepare Kata root filesystem](#prepare-kata-root-filesystem)
|
||||
- [Compile Intel® QAT drivers for Kata Containers kernel and add to Kata Containers rootfs](#compile-intel-qat-drivers-for-kata-containers-kernel-and-add-to-kata-containers-rootfs)
|
||||
- [Copy Kata rootfs](#copy-kata-rootfs)
|
||||
- [Verify Intel® QAT works in a container](#verify-intel-qat-works-in-a-container)
|
||||
- [Build OpenSSL Intel® QAT engine container](#build-openssl-intel-qat-engine-container)
|
||||
- [Test Intel® QAT with the ctr tool](#test-intel-qat-with-the-ctr-tool)
|
||||
- [Test Intel® QAT in Kubernetes](#test-intel-qat-in-kubernetes)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Optional Scripts](#optional-scripts)
|
||||
- [Verify Intel® QAT card counters are incremented](#verify-intel-qat-card-counters-are-incremented)
|
||||
|
||||
# Introduction
|
||||
|
||||
Intel® QuickAssist Technology (QAT) provides hardware acceleration
|
||||
@@ -604,4 +576,4 @@ $ for i in 0434 0435 37c8 1f18 1f19; do lspci -d 8086:$i; done
|
||||
$ sudo watch cat /sys/kernel/debug/qat_c6xx_0000\:b1\:00.0/fw_counters
|
||||
$ sudo watch cat /sys/kernel/debug/qat_c6xx_0000\:b3\:00.0/fw_counters
|
||||
$ sudo watch cat /sys/kernel/debug/qat_c6xx_0000\:b5\:00.0/fw_counters
|
||||
```
|
||||
```
|
||||
|
||||
@@ -1,10 +1,5 @@
|
||||
# Kata Containers with SGX
|
||||
|
||||
- [Check if SGX is enabled](#check-if-sgx-is-enabled)
|
||||
- [Install Host kernel with SGX support](#install-host-kernel-with-sgx-support)
|
||||
- [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
|
||||
- [Run Kata Containers with SGX enabled](#run-kata-containers-with-sgx-enabled)
|
||||
|
||||
Intel® Software Guard Extensions (SGX) is a set of instructions that increases the security
|
||||
of applications code and data, giving them more protections from disclosure or modification.
|
||||
|
||||
|
||||
@@ -1,13 +1,6 @@
|
||||
# Setup to run SPDK vhost-user devices with Kata Containers and Docker*
|
||||
|
||||
- [SPDK vhost-user target overview](#spdk-vhost-user-target-overview)
|
||||
- [Install and setup SPDK vhost-user target](#install-and-setup-spdk-vhost-user-target)
|
||||
- [Get source code and build SPDK](#get-source-code-and-build-spdk)
|
||||
- [Run SPDK vhost-user target](#run-spdk-vhost-user-target)
|
||||
- [Host setup for vhost-user devices](#host-setup-for-vhost-user-devices)
|
||||
- [Launch a Kata container with SPDK vhost-user block device](#launch-a-kata-container-with-spdk-vhost-user-block-device)
|
||||
|
||||
> **NOTE:** This guide only applies to QEMU, since the vhost-user storage
|
||||
> **Note:** This guide only applies to QEMU, since the vhost-user storage
|
||||
> device is only available for QEMU now. The enablement work on other
|
||||
> hypervisors is still ongoing.
|
||||
|
||||
|
||||
@@ -1,13 +1,5 @@
|
||||
# Setup to use SR-IOV with Kata Containers and Docker*
|
||||
|
||||
- [Install the SR-IOV Docker\* plugin](#install-the-sr-iov-docker-plugin)
|
||||
- [Host setup for SR-IOV](#host-setup-for-sr-iov)
|
||||
- [Checking your NIC for SR-IOV](#checking-your-nic-for-sr-iov)
|
||||
- [IOMMU Groups and PCIe Access Control Services](#iommu-groups-and-pcie-access-control-services)
|
||||
- [Update the host kernel](#update-the-host-kernel)
|
||||
- [Set up the SR-IOV Device](#set-up-the-sr-iov-device)
|
||||
- [Example: Launch a Kata Containers container using SR-IOV](#example-launch-a-kata-containers-container-using-sr-iov)
|
||||
|
||||
Single Root I/O Virtualization (SR-IOV) enables splitting a physical device into
|
||||
virtual functions (VFs). Virtual functions enable direct passthrough to virtual
|
||||
machines or containers. For Kata Containers, we enabled a Container Network
|
||||
|
||||
@@ -12,7 +12,7 @@ For more information about VPP visit their [wiki](https://wiki.fd.io/view/VPP).
|
||||
|
||||
## Install and configure Kata Containers
|
||||
|
||||
Follow the [Kata Containers setup instructions](https://github.com/kata-containers/documentation/wiki/Developer-Guide).
|
||||
Follow the [Kata Containers setup instructions](../Developer-Guide.md).
|
||||
|
||||
In order to make use of VHOST-USER based interfaces, the container needs to be backed
|
||||
by huge pages. `HugePages` support is required for the large memory pool allocation used for
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
# OpenStack Zun DevStack working with Kata Containers
|
||||
|
||||
## Introduction
|
||||
|
||||
This guide describes how to get Kata Containers to work with OpenStack Zun
|
||||
|
||||
@@ -1,13 +1,5 @@
|
||||
# Kata Containers snap image
|
||||
|
||||
* [Initial setup](#initial-setup)
|
||||
* [Install snap](#install-snap)
|
||||
* [Build and install snap image](#build-and-install-snap-image)
|
||||
* [Configure Kata Containers](#configure-kata-containers)
|
||||
* [Integration with docker and Kubernetes](#integration-with-docker-and-kubernetes)
|
||||
* [Remove snap](#remove-snap)
|
||||
* [Limitations](#limitations)
|
||||
|
||||
This directory contains the resources needed to build the Kata Containers
|
||||
[snap][1] image.
|
||||
|
||||
|
||||
@@ -115,7 +115,6 @@ parts:
|
||||
cd ${kata_dir}/tools/osbuilder
|
||||
|
||||
# build image
|
||||
export AGENT_VERSION=$(cat ${kata_dir}/VERSION)
|
||||
export AGENT_INIT=yes
|
||||
export USE_DOCKER=1
|
||||
export DEBUG=1
|
||||
@@ -170,12 +169,9 @@ parts:
|
||||
SKIP_GO_VERSION_CHECK=1 \
|
||||
QEMUCMD=qemu-system-$arch
|
||||
|
||||
if [ -e ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then
|
||||
# Use rootfs image by default
|
||||
sed -i -e '/^initrd =/d' ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
else
|
||||
# Use initrd by default
|
||||
sed -i -e '/^image =/d' ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
if [ ! -f ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then
|
||||
sed -i -e "s|^image =.*|initrd = \"/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share/kata-containers/kata-containers-initrd.img\"|" \
|
||||
${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
fi
|
||||
|
||||
kernel:
|
||||
@@ -188,6 +184,14 @@ parts:
|
||||
- bison
|
||||
- flex
|
||||
override-build: |
|
||||
yq=${SNAPCRAFT_STAGE}/yq
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
versions_file="${kata_dir}/versions.yaml"
|
||||
kernel_version="$(${yq} r $versions_file assets.kernel.version)"
|
||||
#Remove extra 'v'
|
||||
kernel_version=${kernel_version#v}
|
||||
|
||||
[ "$(uname -m)" = "s390x" ] && sudo apt-get --no-install-recommends install -y libssl-dev
|
||||
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
@@ -197,13 +201,12 @@ parts:
|
||||
cd ${kata_dir}/tools/packaging/kernel
|
||||
|
||||
# Setup and build kernel
|
||||
./build-kernel.sh -d setup
|
||||
./build-kernel.sh -v ${kernel_version} -d setup
|
||||
kernel_dir_prefix="kata-linux-"
|
||||
cd ${kernel_dir_prefix}*
|
||||
version=$(basename ${PWD} | sed 's|'"${kernel_dir_prefix}"'||' | cut -d- -f1)
|
||||
make -j $(($(nproc)-1)) EXTRAVERSION=".container"
|
||||
|
||||
kernel_suffix=${version}.container
|
||||
kernel_suffix=${kernel_version}.container
|
||||
kata_kernel_dir=${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers
|
||||
mkdir -p ${kata_kernel_dir}
|
||||
|
||||
@@ -253,10 +256,11 @@ parts:
|
||||
# arch-specific definition
|
||||
case "$(uname -m)" in
|
||||
"aarch64")
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.branch)"
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.version)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.commit)"
|
||||
patches_dir="${kata_dir}/tools/packaging/obs-packaging/qemu-aarch64/patches/"
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
patches_version_dir="${kata_dir}/tools/packaging/qemu/patches/tag_patches/${branch}"
|
||||
;;
|
||||
|
||||
*)
|
||||
@@ -270,6 +274,7 @@ parts:
|
||||
|
||||
# download source
|
||||
qemu_dir=${SNAPCRAFT_STAGE}/qemu
|
||||
rm -rf "${qemu_dir}"
|
||||
git clone --branch ${branch} --single-branch ${url} "${qemu_dir}"
|
||||
cd ${qemu_dir}
|
||||
[ -z "${commit}" ] || git checkout ${commit}
|
||||
@@ -278,6 +283,7 @@ parts:
|
||||
[ -n "$(ls -A capstone)" ] || git clone https://github.com/qemu/capstone capstone
|
||||
|
||||
# Apply branch patches
|
||||
[ -d "${patches_version_dir}" ] || mkdir "${patches_version_dir}"
|
||||
${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_dir}"
|
||||
${kata_dir}/tools/packaging/scripts/apply_patches.sh "${patches_version_dir}"
|
||||
|
||||
@@ -293,7 +299,15 @@ parts:
|
||||
| xargs ./configure
|
||||
|
||||
# Copy QEMU configurations (Kconfigs)
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs/devices/
|
||||
case "${branch}" in
|
||||
"v5.1.0")
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs
|
||||
;;
|
||||
|
||||
*)
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs/devices/
|
||||
;;
|
||||
esac
|
||||
|
||||
# build and install
|
||||
make -j $(($(nproc)-1))
|
||||
|
||||
30
src/agent/Cargo.lock
generated
30
src/agent/Cargo.lock
generated
@@ -545,10 +545,12 @@ dependencies = [
|
||||
"scan_fmt",
|
||||
"scopeguard",
|
||||
"serde_json",
|
||||
"serial_test",
|
||||
"slog",
|
||||
"slog-scope",
|
||||
"slog-stdlog",
|
||||
"tempfile",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-vsock",
|
||||
"tracing",
|
||||
@@ -872,6 +874,8 @@ dependencies = [
|
||||
"rand",
|
||||
"serde",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1509,18 +1513,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.24"
|
||||
version = "1.0.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e"
|
||||
checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2"
|
||||
dependencies = [
|
||||
"thiserror-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-impl"
|
||||
version = "1.0.24"
|
||||
version = "1.0.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0"
|
||||
checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745"
|
||||
dependencies = [
|
||||
"proc-macro2 1.0.26",
|
||||
"quote 1.0.9",
|
||||
@@ -1548,9 +1552,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.6.0"
|
||||
version = "1.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bd3076b5c8cc18138b8f8814895c11eb4de37114a5d127bafdc5e55798ceef37"
|
||||
checksum = "98c8b05dc14c75ea83d63dd391100353789f5f24b8b3866542a5e85c8be8e985"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"bytes 1.0.1",
|
||||
@@ -1577,6 +1581,17 @@ dependencies = [
|
||||
"syn 1.0.72",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-stream"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-util"
|
||||
version = "0.6.7"
|
||||
@@ -1785,7 +1800,8 @@ dependencies = [
|
||||
"serde",
|
||||
"slog",
|
||||
"thiserror",
|
||||
"vsock",
|
||||
"tokio",
|
||||
"tokio-vsock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -18,7 +18,9 @@ capctl = "0.2.0"
|
||||
serde_json = "1.0.39"
|
||||
scan_fmt = "0.2.3"
|
||||
scopeguard = "1.0.0"
|
||||
thiserror = "1.0.26"
|
||||
regex = "1"
|
||||
serial_test = "0.5.1"
|
||||
|
||||
# Async helpers
|
||||
async-trait = "0.1.42"
|
||||
@@ -26,7 +28,7 @@ async-recursion = "0.3.2"
|
||||
futures = "0.3.12"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.2.0", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-vsock = "0.3.1"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
|
||||
@@ -54,7 +56,7 @@ cgroups = { package = "cgroups-rs", version = "0.2.5" }
|
||||
tracing = "0.1.26"
|
||||
tracing-subscriber = "0.2.18"
|
||||
tracing-opentelemetry = "0.13.0"
|
||||
opentelemetry = "0.14.0"
|
||||
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"]}
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -66,6 +66,7 @@ service AgentService {
|
||||
rpc SetGuestDateTime(SetGuestDateTimeRequest) returns (google.protobuf.Empty);
|
||||
rpc CopyFile(CopyFileRequest) returns (google.protobuf.Empty);
|
||||
rpc GetOOMEvent(GetOOMEventRequest) returns (OOMEvent);
|
||||
rpc AddSwap(AddSwapRequest) returns (google.protobuf.Empty);
|
||||
}
|
||||
|
||||
message CreateContainerRequest {
|
||||
@@ -503,6 +504,10 @@ message OOMEvent {
|
||||
string container_id = 1;
|
||||
}
|
||||
|
||||
message AddSwapRequest {
|
||||
repeated uint32 PCIPath = 1;
|
||||
}
|
||||
|
||||
message GetMetricsRequest {}
|
||||
|
||||
message Metrics {
|
||||
|
||||
@@ -46,6 +46,7 @@ message Route {
|
||||
string device = 3;
|
||||
string source = 4;
|
||||
uint32 scope = 5;
|
||||
IPFamily family = 6;
|
||||
}
|
||||
|
||||
message ARPNeighbor {
|
||||
|
||||
@@ -232,19 +232,19 @@ fn set_devices_resources(
|
||||
let mut devices = vec![];
|
||||
|
||||
for d in device_resources.iter() {
|
||||
if let Some(dev) = linux_device_group_to_cgroup_device(&d) {
|
||||
if let Some(dev) = linux_device_group_to_cgroup_device(d) {
|
||||
devices.push(dev);
|
||||
}
|
||||
}
|
||||
|
||||
for d in DEFAULT_DEVICES.iter() {
|
||||
if let Some(dev) = linux_device_to_cgroup_device(&d) {
|
||||
if let Some(dev) = linux_device_to_cgroup_device(d) {
|
||||
devices.push(dev);
|
||||
}
|
||||
}
|
||||
|
||||
for d in DEFAULT_ALLOWED_DEVICES.iter() {
|
||||
if let Some(dev) = linux_device_group_to_cgroup_device(&d) {
|
||||
if let Some(dev) = linux_device_group_to_cgroup_device(d) {
|
||||
devices.push(dev);
|
||||
}
|
||||
}
|
||||
@@ -828,7 +828,7 @@ fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
|
||||
|
||||
fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
|
||||
if cg.v2() {
|
||||
return get_blkio_stats_v2(&cg);
|
||||
return get_blkio_stats_v2(cg);
|
||||
}
|
||||
|
||||
let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
|
||||
@@ -1022,7 +1022,7 @@ impl Manager {
|
||||
.unwrap()
|
||||
.trim_start_matches(root_path.to_str().unwrap());
|
||||
info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
|
||||
let cg = new_cgroup(cgroups::hierarchies::auto(), &r_path);
|
||||
let cg = new_cgroup(cgroups::hierarchies::auto(), r_path);
|
||||
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
|
||||
cpuset_controller.set_cpus(guest_cpuset)?;
|
||||
}
|
||||
|
||||
@@ -390,7 +390,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
// get namespace vector to join/new
|
||||
let nses = get_namespaces(&linux);
|
||||
let nses = get_namespaces(linux);
|
||||
|
||||
let mut userns = false;
|
||||
let mut to_new = CloneFlags::empty();
|
||||
@@ -561,7 +561,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
}
|
||||
|
||||
if to_new.contains(CloneFlags::CLONE_NEWNS) {
|
||||
mount::finish_rootfs(cfd_log, &spec)?;
|
||||
mount::finish_rootfs(cfd_log, &spec, &oci_process)?;
|
||||
}
|
||||
|
||||
if !oci_process.cwd.is_empty() {
|
||||
@@ -833,6 +833,20 @@ impl BaseContainer for LinuxContainer {
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
if p.oci.capabilities.is_none() {
|
||||
// No capabilities, inherit from container process
|
||||
let process = spec
|
||||
.process
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("no process config"))?;
|
||||
p.oci.capabilities = Some(
|
||||
process
|
||||
.capabilities
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow!("missing process capabilities"))?,
|
||||
);
|
||||
}
|
||||
|
||||
let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;
|
||||
|
||||
let _ = fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
|
||||
@@ -939,7 +953,7 @@ impl BaseContainer for LinuxContainer {
|
||||
|
||||
join_namespaces(
|
||||
&logger,
|
||||
&spec,
|
||||
spec,
|
||||
&p,
|
||||
self.cgroup_manager.as_ref().unwrap(),
|
||||
&st,
|
||||
@@ -1031,7 +1045,7 @@ impl BaseContainer for LinuxContainer {
|
||||
let fifo = format!("{}/{}", &self.root, EXEC_FIFO_FILENAME);
|
||||
let fd = fcntl::open(fifo.as_str(), OFlag::O_WRONLY, Mode::from_bits_truncate(0))?;
|
||||
let data: &[u8] = &[0];
|
||||
unistd::write(fd, &data)?;
|
||||
unistd::write(fd, data)?;
|
||||
info!(self.logger, "container started");
|
||||
self.init_process_start_time = SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
|
||||
@@ -13,7 +13,7 @@ use nix::mount::{MntFlags, MsFlags};
|
||||
use nix::sys::stat::{self, Mode, SFlag};
|
||||
use nix::unistd::{self, Gid, Uid};
|
||||
use nix::NixPath;
|
||||
use oci::{LinuxDevice, Mount, Spec};
|
||||
use oci::{LinuxDevice, Mount, Process, Spec};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::mem::MaybeUninit;
|
||||
@@ -189,7 +189,7 @@ pub fn init_rootfs(
|
||||
|
||||
let mut bind_mount_dev = false;
|
||||
for m in &spec.mounts {
|
||||
let (mut flags, pgflags, data) = parse_mount(&m);
|
||||
let (mut flags, pgflags, data) = parse_mount(m);
|
||||
if !m.destination.starts_with('/') || m.destination.contains("..") {
|
||||
return Err(anyhow!(
|
||||
"the mount destination {} is invalid",
|
||||
@@ -198,7 +198,7 @@ pub fn init_rootfs(
|
||||
}
|
||||
|
||||
if m.r#type == "cgroup" {
|
||||
mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
|
||||
mount_cgroups(cfd_log, m, rootfs, flags, &data, cpath, mounts)?;
|
||||
} else {
|
||||
if m.destination == "/dev" {
|
||||
if m.r#type == "bind" {
|
||||
@@ -226,7 +226,7 @@ pub fn init_rootfs(
|
||||
}
|
||||
}
|
||||
|
||||
mount_from(cfd_log, &m, &rootfs, flags, &data, "")?;
|
||||
mount_from(cfd_log, m, rootfs, flags, &data, "")?;
|
||||
// bind mount won't change mount options, we need remount to make mount options
|
||||
// effective.
|
||||
// first check that we have non-default options required before attempting a
|
||||
@@ -356,7 +356,7 @@ fn mount_cgroups(
|
||||
mounts: &HashMap<String, String>,
|
||||
) -> Result<()> {
|
||||
if cgroups::hierarchies::is_cgroup2_unified_mode() {
|
||||
return mount_cgroups_v2(cfd_log, &m, rootfs, flags);
|
||||
return mount_cgroups_v2(cfd_log, m, rootfs, flags);
|
||||
}
|
||||
// mount tmpfs
|
||||
let ctm = Mount {
|
||||
@@ -902,10 +902,21 @@ fn bind_dev(dev: &LinuxDevice) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
|
||||
pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec, process: &Process) -> Result<()> {
|
||||
let olddir = unistd::getcwd()?;
|
||||
log_child!(cfd_log, "old cwd: {}", olddir.to_str().unwrap());
|
||||
unistd::chdir("/")?;
|
||||
|
||||
if !process.cwd.is_empty() {
|
||||
// Although the process.cwd string can be unclean/malicious (../../dev, etc),
|
||||
// we are running on our own mount namespace and we just chrooted into the
|
||||
// container's root. It's safe to create CWD from there.
|
||||
log_child!(cfd_log, "Creating CWD {}", process.cwd.as_str());
|
||||
// Unconditionally try to create CWD, create_dir_all will not fail if
|
||||
// it already exists.
|
||||
fs::create_dir_all(process.cwd.as_str())?;
|
||||
}
|
||||
|
||||
if spec.linux.is_some() {
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
@@ -1211,7 +1222,7 @@ mod tests {
|
||||
options: vec!["ro".to_string(), "shared".to_string()],
|
||||
}];
|
||||
|
||||
let ret = finish_rootfs(stdout_fd, &spec);
|
||||
let ret = finish_rootfs(stdout_fd, &spec, &oci::Process::default());
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
|
||||
@@ -266,7 +266,7 @@ pub fn validate(conf: &Config) -> Result<()> {
|
||||
security(oci).context("security")?;
|
||||
usernamespace(oci).context("usernamespace")?;
|
||||
cgroupnamespace(oci).context("cgroupnamespace")?;
|
||||
sysctl(&oci).context("sysctl")?;
|
||||
sysctl(oci).context("sysctl")?;
|
||||
|
||||
if conf.rootless_euid {
|
||||
rootless_euid(oci).context("rootless euid")?;
|
||||
|
||||
@@ -372,8 +372,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_new() {
|
||||
let config = AgentConfig::new();
|
||||
assert_eq!(config.debug_console, false);
|
||||
assert_eq!(config.dev_mode, false);
|
||||
assert!(!config.debug_console);
|
||||
assert!(!config.dev_mode);
|
||||
assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
|
||||
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
|
||||
}
|
||||
@@ -754,9 +754,9 @@ mod tests {
|
||||
}
|
||||
|
||||
let mut config = AgentConfig::new();
|
||||
assert_eq!(config.debug_console, false, "{}", msg);
|
||||
assert_eq!(config.dev_mode, false, "{}", msg);
|
||||
assert_eq!(config.unified_cgroup_hierarchy, false, "{}", msg);
|
||||
assert!(!config.debug_console, "{}", msg);
|
||||
assert!(!config.dev_mode, "{}", msg);
|
||||
assert!(!config.unified_cgroup_hierarchy, "{}", msg);
|
||||
assert_eq!(
|
||||
config.hotplug_timeout,
|
||||
time::Duration::from_secs(3),
|
||||
|
||||
@@ -62,7 +62,7 @@ pub fn online_device(path: &str) -> Result<()> {
|
||||
// the sysfs path for the PCI host bridge, based on the PCI path
|
||||
// provided.
|
||||
#[instrument]
|
||||
fn pcipath_to_sysfs(root_bus_sysfs: &str, pcipath: &pci::Path) -> Result<String> {
|
||||
pub fn pcipath_to_sysfs(root_bus_sysfs: &str, pcipath: &pci::Path) -> Result<String> {
|
||||
let mut bus = "0000:00".to_string();
|
||||
let mut relpath = String::new();
|
||||
|
||||
@@ -966,12 +966,12 @@ mod tests {
|
||||
uev_a.subsystem = "block".to_string();
|
||||
uev_a.devname = devname.to_string();
|
||||
uev_a.devpath = format!("{}{}/virtio4/block/{}", root_bus, relpath_a, devname);
|
||||
let matcher_a = VirtioBlkPciMatcher::new(&relpath_a);
|
||||
let matcher_a = VirtioBlkPciMatcher::new(relpath_a);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
let relpath_b = "/0000:00:0a.0/0000:00:0b.0";
|
||||
uev_b.devpath = format!("{}{}/virtio0/block/{}", root_bus, relpath_b, devname);
|
||||
let matcher_b = VirtioBlkPciMatcher::new(&relpath_b);
|
||||
let matcher_b = VirtioBlkPciMatcher::new(relpath_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
@@ -1053,7 +1053,7 @@ mod tests {
|
||||
"{}/0000:00:00.0/virtio0/host0/target0:0:0/0:0:{}/block/sda",
|
||||
root_bus, addr_a
|
||||
);
|
||||
let matcher_a = ScsiBlockMatcher::new(&addr_a);
|
||||
let matcher_a = ScsiBlockMatcher::new(addr_a);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
let addr_b = "2:0";
|
||||
@@ -1061,7 +1061,7 @@ mod tests {
|
||||
"{}/0000:00:00.0/virtio0/host0/target0:0:2/0:0:{}/block/sdb",
|
||||
root_bus, addr_b
|
||||
);
|
||||
let matcher_b = ScsiBlockMatcher::new(&addr_b);
|
||||
let matcher_b = ScsiBlockMatcher::new(addr_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
|
||||
@@ -95,6 +95,7 @@ pub const SYSTEM_DEV_PATH: &str = "/dev";
|
||||
// Linux UEvent related consts.
|
||||
pub const U_EVENT_ACTION: &str = "ACTION";
|
||||
pub const U_EVENT_ACTION_ADD: &str = "add";
|
||||
pub const U_EVENT_ACTION_REMOVE: &str = "remove";
|
||||
pub const U_EVENT_DEV_PATH: &str = "DEVPATH";
|
||||
pub const U_EVENT_SUB_SYSTEM: &str = "SUBSYSTEM";
|
||||
pub const U_EVENT_SEQ_NUM: &str = "SEQNUM";
|
||||
|
||||
@@ -302,7 +302,7 @@ async fn start_sandbox(
|
||||
}
|
||||
|
||||
// Initialize unique sandbox structure.
|
||||
let s = Sandbox::new(&logger).context("Failed to create sandbox")?;
|
||||
let s = Sandbox::new(logger).context("Failed to create sandbox")?;
|
||||
if init_mode {
|
||||
s.rtnl.handle_localhost().await?;
|
||||
}
|
||||
|
||||
@@ -193,7 +193,7 @@ fn update_guest_metrics() {
|
||||
Ok(kernel_stats) => {
|
||||
set_gauge_vec_cpu_time(&GUEST_CPU_TIME, "total", &kernel_stats.total);
|
||||
for (i, cpu_time) in kernel_stats.cpu_time.iter().enumerate() {
|
||||
set_gauge_vec_cpu_time(&GUEST_CPU_TIME, format!("{}", i).as_str(), &cpu_time);
|
||||
set_gauge_vec_cpu_time(&GUEST_CPU_TIME, format!("{}", i).as_str(), cpu_time);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -282,7 +282,7 @@ async fn ephemeral_storage_handler(
|
||||
fs::set_permissions(&storage.mount_point, permission)?;
|
||||
}
|
||||
} else {
|
||||
common_storage_handler(logger, &storage)?;
|
||||
common_storage_handler(logger, storage)?;
|
||||
}
|
||||
|
||||
Ok("".to_string())
|
||||
@@ -549,8 +549,12 @@ fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (MsFlags, String) {
|
||||
if !opt.is_empty() {
|
||||
match FLAGS.get(opt) {
|
||||
Some(x) => {
|
||||
let (_, f) = *x;
|
||||
flags |= f;
|
||||
let (clear, f) = *x;
|
||||
if clear {
|
||||
flags &= !f;
|
||||
} else {
|
||||
flags |= f;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
if !options.is_empty() {
|
||||
@@ -605,7 +609,7 @@ pub async fn add_storages(
|
||||
DRIVER_NVDIMM_TYPE => nvdimm_storage_handler(&logger, &storage, sandbox.clone()).await,
|
||||
DRIVER_WATCHABLE_BIND_TYPE => {
|
||||
bind_watcher_storage_handler(&logger, &storage, sandbox.clone()).await?;
|
||||
// Don't register watch mounts, they're hanlded separately by the watcher.
|
||||
// Don't register watch mounts, they're handled separately by the watcher.
|
||||
Ok(String::new())
|
||||
}
|
||||
_ => {
|
||||
@@ -822,19 +826,21 @@ pub fn remove_mounts(mounts: &[String]) -> Result<()> {
|
||||
#[instrument]
|
||||
fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
|
||||
let d = Path::new(destination);
|
||||
if !d.exists() {
|
||||
let dir = d
|
||||
.parent()
|
||||
.ok_or_else(|| anyhow!("mount destination {} doesn't exist", destination))?;
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir).context(format!("create dir all failed on {:?}", dir))?;
|
||||
}
|
||||
if d.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
let dir = d
|
||||
.parent()
|
||||
.ok_or_else(|| anyhow!("mount destination {} doesn't exist", destination))?;
|
||||
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir).context(format!("create dir all {:?}", dir))?;
|
||||
}
|
||||
|
||||
if fs_type != "bind" || d.is_dir() {
|
||||
fs::create_dir_all(d).context(format!("create dir all failed on {:?}", d))?;
|
||||
fs::create_dir_all(d).context(format!("create dir all {:?}", d))?;
|
||||
} else {
|
||||
fs::OpenOptions::new().create(true).open(d)?;
|
||||
fs::File::create(d).context(format!("create file {:?}", d))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -860,6 +866,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::{skip_if_not_root, skip_loop_if_not_root, skip_loop_if_root};
|
||||
use libc::umount;
|
||||
use std::fs::metadata;
|
||||
use std::fs::File;
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::Write;
|
||||
@@ -1097,8 +1104,8 @@ mod tests {
|
||||
|
||||
// Create an actual mount
|
||||
let bare_mount = BareMount::new(
|
||||
&mnt_src_filename,
|
||||
&mnt_dest_filename,
|
||||
mnt_src_filename,
|
||||
mnt_dest_filename,
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"",
|
||||
@@ -1267,7 +1274,7 @@ mod tests {
|
||||
let logger = slog::Logger::root(drain, o!());
|
||||
let result = get_cgroup_mounts(&logger, "", true);
|
||||
|
||||
assert_eq!(true, result.is_ok());
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
assert_eq!(1, result.len());
|
||||
assert_eq!(result[0].fstype, "cgroup2");
|
||||
@@ -1435,4 +1442,39 @@ mod tests {
|
||||
assert!(mounts[1].eq(&cg_devices_mount), "{}", msg);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ensure_destination_exists() {
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
|
||||
let mut testfile = dir.into_path();
|
||||
testfile.push("testfile");
|
||||
|
||||
let result = ensure_destination_exists(testfile.to_str().unwrap(), "bind");
|
||||
|
||||
assert!(result.is_ok());
|
||||
assert!(testfile.exists());
|
||||
|
||||
let result = ensure_destination_exists(testfile.to_str().unwrap(), "bind");
|
||||
assert!(result.is_ok());
|
||||
|
||||
let meta = metadata(testfile).unwrap();
|
||||
|
||||
assert!(meta.is_file());
|
||||
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
let mut testdir = dir.into_path();
|
||||
testdir.push("testdir");
|
||||
|
||||
let result = ensure_destination_exists(testdir.to_str().unwrap(), "ext4");
|
||||
assert!(result.is_ok());
|
||||
assert!(testdir.exists());
|
||||
|
||||
let result = ensure_destination_exists(testdir.to_str().unwrap(), "ext4");
|
||||
assert!(result.is_ok());
|
||||
|
||||
//let meta = metadata(testdir.to_str().unwrap()).unwrap();
|
||||
let meta = metadata(testdir).unwrap();
|
||||
assert!(meta.is_dir());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ impl Namespace {
|
||||
|
||||
let new_thread = tokio::spawn(async move {
|
||||
if let Err(err) = || -> Result<()> {
|
||||
let origin_ns_path = get_current_thread_ns_path(&ns_type.get());
|
||||
let origin_ns_path = get_current_thread_ns_path(ns_type.get());
|
||||
|
||||
File::open(Path::new(&origin_ns_path))?;
|
||||
|
||||
@@ -121,8 +121,12 @@ impl Namespace {
|
||||
let mut flags = MsFlags::empty();
|
||||
|
||||
if let Some(x) = FLAGS.get("rbind") {
|
||||
let (_, f) = *x;
|
||||
flags |= f;
|
||||
let (clear, f) = *x;
|
||||
if clear {
|
||||
flags &= !f;
|
||||
} else {
|
||||
flags |= f;
|
||||
}
|
||||
};
|
||||
|
||||
let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
|
||||
|
||||
@@ -82,8 +82,8 @@ impl Handle {
|
||||
|
||||
// Add new ip addresses from request
|
||||
for ip_address in &iface.IPAddresses {
|
||||
let ip = IpAddr::from_str(&ip_address.get_address())?;
|
||||
let mask = u8::from_str_radix(ip_address.get_mask(), 10)?;
|
||||
let ip = IpAddr::from_str(ip_address.get_address())?;
|
||||
let mask = ip_address.get_mask().parse::<u8>()?;
|
||||
|
||||
self.add_addresses(link.index(), std::iter::once(IpNetwork::new(ip, mask)?))
|
||||
.await?;
|
||||
@@ -312,7 +312,6 @@ impl Handle {
|
||||
|
||||
for route in list {
|
||||
let link = self.find_link(LinkFilter::Name(&route.device)).await?;
|
||||
let is_v6 = is_ipv6(route.get_gateway()) || is_ipv6(route.get_dest());
|
||||
|
||||
const MAIN_TABLE: u8 = packet::constants::RT_TABLE_MAIN;
|
||||
const UNICAST: u8 = packet::constants::RTN_UNICAST;
|
||||
@@ -334,7 +333,7 @@ impl Handle {
|
||||
|
||||
// `rtnetlink` offers a separate request builders for different IP versions (IP v4 and v6).
|
||||
// This if branch is a bit clumsy because it does almost the same.
|
||||
if is_v6 {
|
||||
if route.get_family() == IPFamily::v6 {
|
||||
let dest_addr = if !route.dest.is_empty() {
|
||||
Ipv6Network::from_str(&route.dest)?
|
||||
} else {
|
||||
@@ -512,7 +511,7 @@ impl Handle {
|
||||
.and_then(|addr| if addr.is_empty() { None } else { Some(addr) }) // Make sure it's not empty
|
||||
.ok_or(nix::Error::Sys(nix::errno::Errno::EINVAL))?;
|
||||
|
||||
let ip = IpAddr::from_str(&ip_address)
|
||||
let ip = IpAddr::from_str(ip_address)
|
||||
.map_err(|e| anyhow!("Failed to parse IP {}: {:?}", ip_address, e))?;
|
||||
|
||||
// Import rtnetlink objects that make sense only for this function
|
||||
@@ -594,10 +593,6 @@ fn format_address(data: &[u8]) -> Result<String> {
|
||||
}
|
||||
}
|
||||
|
||||
fn is_ipv6(str: &str) -> bool {
|
||||
Ipv6Addr::from_str(str).is_ok()
|
||||
}
|
||||
|
||||
fn parse_mac_address(addr: &str) -> Result<[u8; 6]> {
|
||||
let mut split = addr.splitn(6, ':');
|
||||
|
||||
@@ -932,16 +927,6 @@ mod tests {
|
||||
assert_eq!(bytes, [0xAB, 0x0C, 0xDE, 0x12, 0x34, 0x56]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_ipv6() {
|
||||
assert!(is_ipv6("::1"));
|
||||
assert!(is_ipv6("2001:0:3238:DFE1:63::FEFB"));
|
||||
|
||||
assert!(!is_ipv6(""));
|
||||
assert!(!is_ipv6("127.0.0.1"));
|
||||
assert!(!is_ipv6("10.10.10.10"));
|
||||
}
|
||||
|
||||
fn clean_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
|
||||
// ip link delete dummy
|
||||
Command::new("ip")
|
||||
|
||||
@@ -127,16 +127,11 @@ mod tests {
|
||||
// call do_setup_guest_dns
|
||||
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
|
||||
|
||||
assert_eq!(
|
||||
true,
|
||||
result.is_ok(),
|
||||
"result should be ok, but {:?}",
|
||||
result
|
||||
);
|
||||
assert!(result.is_ok(), "result should be ok, but {:?}", result);
|
||||
|
||||
// get content of /etc/resolv.conf
|
||||
let content = fs::read_to_string(dst_filename);
|
||||
assert_eq!(true, content.is_ok());
|
||||
assert!(content.is_ok());
|
||||
let content = content.unwrap();
|
||||
|
||||
let expected_dns: Vec<&str> = content.split('\n').collect();
|
||||
|
||||
@@ -3,11 +3,14 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::pci;
|
||||
use async_trait::async_trait;
|
||||
use rustjail::{pipestream::PipeStream, process::StreamType};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use std::ffi::CString;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use ttrpc::{
|
||||
@@ -20,8 +23,9 @@ use anyhow::{anyhow, Context, Result};
|
||||
use oci::{LinuxNamespace, Root, Spec};
|
||||
use protobuf::{RepeatedField, SingularPtrField};
|
||||
use protocols::agent::{
|
||||
AgentDetails, CopyFileRequest, GuestDetailsResponse, Interfaces, Metrics, OOMEvent,
|
||||
ReadStreamResponse, Routes, StatsContainerResponse, WaitProcessResponse, WriteStreamResponse,
|
||||
AddSwapRequest, AgentDetails, CopyFileRequest, GuestDetailsResponse, Interfaces, Metrics,
|
||||
OOMEvent, ReadStreamResponse, Routes, StatsContainerResponse, WaitProcessResponse,
|
||||
WriteStreamResponse,
|
||||
};
|
||||
use protocols::empty::Empty;
|
||||
use protocols::health::{
|
||||
@@ -40,7 +44,7 @@ use nix::sys::stat;
|
||||
use nix::unistd::{self, Pid};
|
||||
use rustjail::process::ProcessOperations;
|
||||
|
||||
use crate::device::{add_devices, rescan_pci_bus, update_device_cgroup};
|
||||
use crate::device::{add_devices, pcipath_to_sysfs, rescan_pci_bus, update_device_cgroup};
|
||||
use crate::linux_abi::*;
|
||||
use crate::metrics::get_metrics;
|
||||
use crate::mount::{add_storages, remove_mounts, BareMount, STORAGE_HANDLER_LIST};
|
||||
@@ -59,7 +63,7 @@ use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
|
||||
use tracing::instrument;
|
||||
|
||||
use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
|
||||
use libc::{self, c_char, c_ushort, pid_t, winsize, TIOCSWINSZ};
|
||||
use std::convert::TryFrom;
|
||||
use std::fs;
|
||||
use std::os::unix::prelude::PermissionsExt;
|
||||
@@ -186,7 +190,7 @@ impl AgentService {
|
||||
let p = if oci.process.is_some() {
|
||||
Process::new(
|
||||
&sl!(),
|
||||
&oci.process.as_ref().unwrap(),
|
||||
oci.process.as_ref().unwrap(),
|
||||
cid.as_str(),
|
||||
true,
|
||||
pipe_size,
|
||||
@@ -243,7 +247,7 @@ impl AgentService {
|
||||
// Find the sandbox storage used by this container
|
||||
let mounts = sandbox.container_mounts.get(&cid);
|
||||
if let Some(mounts) = mounts {
|
||||
remove_mounts(&mounts)?;
|
||||
remove_mounts(mounts)?;
|
||||
|
||||
for m in mounts.iter() {
|
||||
if sandbox.storages.get(m).is_some() {
|
||||
@@ -662,7 +666,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let s = Arc::clone(&self.sandbox);
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
|
||||
let ctr = sandbox.get_container(cid).ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid container id".to_string(),
|
||||
@@ -685,7 +689,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
let s = Arc::clone(&self.sandbox);
|
||||
let mut sandbox = s.lock().await;
|
||||
|
||||
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
|
||||
let ctr = sandbox.get_container(cid).ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid container id".to_string(),
|
||||
@@ -1192,6 +1196,18 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
|
||||
Err(ttrpc_error(ttrpc::Code::INTERNAL, ""))
|
||||
}
|
||||
|
||||
async fn add_swap(
|
||||
&self,
|
||||
ctx: &TtrpcContext,
|
||||
req: protocols::agent::AddSwapRequest,
|
||||
) -> ttrpc::Result<Empty> {
|
||||
trace_rpc_call!(ctx, "add_swap", req);
|
||||
|
||||
do_add_swap(&req).map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -1541,6 +1557,56 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn path_name_lookup<P: std::clone::Clone + AsRef<Path> + std::fmt::Debug>(
|
||||
path: P,
|
||||
lookup: &str,
|
||||
) -> Result<(PathBuf, String)> {
|
||||
for entry in fs::read_dir(path.clone())? {
|
||||
let entry = entry?;
|
||||
if let Some(name) = entry.path().file_name() {
|
||||
if let Some(name) = name.to_str() {
|
||||
if Some(0) == name.find(lookup) {
|
||||
return Ok((entry.path(), name.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(anyhow!("cannot get {} dir in {:?}", lookup, path))
|
||||
}
|
||||
|
||||
fn do_add_swap(req: &AddSwapRequest) -> Result<()> {
|
||||
// re-scan PCI bus
|
||||
// looking for hidden devices
|
||||
rescan_pci_bus().context("Could not rescan PCI bus")?;
|
||||
|
||||
let mut slots = Vec::new();
|
||||
for slot in &req.PCIPath {
|
||||
slots.push(pci::Slot::new(*slot as u8)?);
|
||||
}
|
||||
let pcipath = pci::Path::new(slots)?;
|
||||
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
|
||||
let sysfs_rel_path = format!(
|
||||
"{}{}",
|
||||
root_bus_sysfs,
|
||||
pcipath_to_sysfs(&root_bus_sysfs, &pcipath)?
|
||||
);
|
||||
let (mut virtio_path, _) = path_name_lookup(sysfs_rel_path, "virtio")?;
|
||||
virtio_path.push("block");
|
||||
let (_, dev_name) = path_name_lookup(virtio_path, "vd")?;
|
||||
let dev_name = format!("/dev/{}", dev_name);
|
||||
|
||||
let c_str = CString::new(dev_name)?;
|
||||
let ret = unsafe { libc::swapon(c_str.as_ptr() as *const c_char, 0) };
|
||||
if ret != 0 {
|
||||
return Err(anyhow!(
|
||||
"libc::swapon get error {}",
|
||||
io::Error::last_os_error()
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Setup container bundle under CONTAINER_BASE, which is cleaned up
|
||||
// before removing a container.
|
||||
// - bundle path is /<CONTAINER_BASE>/<cid>/
|
||||
|
||||
@@ -272,7 +272,7 @@ impl Sandbox {
|
||||
ctr.cgroup_manager
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.update_cpuset_path(guest_cpuset.as_str(), &container_cpust)?;
|
||||
.update_cpuset_path(guest_cpuset.as_str(), container_cpust)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -461,11 +461,14 @@ mod tests {
|
||||
use tempfile::Builder;
|
||||
|
||||
fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), Error> {
|
||||
let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", &logger);
|
||||
let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", logger);
|
||||
baremount.mount()
|
||||
}
|
||||
|
||||
use serial_test::serial;
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn set_sandbox_storage() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
@@ -474,7 +477,7 @@ mod tests {
|
||||
let tmpdir_path = tmpdir.path().to_str().unwrap();
|
||||
|
||||
// Add a new sandbox storage
|
||||
let new_storage = s.set_sandbox_storage(&tmpdir_path);
|
||||
let new_storage = s.set_sandbox_storage(tmpdir_path);
|
||||
|
||||
// Check the reference counter
|
||||
let ref_count = s.storages[tmpdir_path];
|
||||
@@ -483,11 +486,11 @@ mod tests {
|
||||
"Invalid refcount, got {} expected 1.",
|
||||
ref_count
|
||||
);
|
||||
assert_eq!(new_storage, true);
|
||||
assert!(new_storage);
|
||||
|
||||
// Use the existing sandbox storage
|
||||
let new_storage = s.set_sandbox_storage(&tmpdir_path);
|
||||
assert_eq!(new_storage, false, "Should be false as already exists.");
|
||||
let new_storage = s.set_sandbox_storage(tmpdir_path);
|
||||
assert!(!new_storage, "Should be false as already exists.");
|
||||
|
||||
// Since we are using existing storage, the reference counter
|
||||
// should be 2 by now.
|
||||
@@ -500,6 +503,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn remove_sandbox_storage() {
|
||||
skip_if_not_root!();
|
||||
|
||||
@@ -527,7 +531,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
s.remove_sandbox_storage(&srcdir_path).is_err(),
|
||||
s.remove_sandbox_storage(srcdir_path).is_err(),
|
||||
"Expect Err as the directory i not a mountpoint"
|
||||
);
|
||||
|
||||
@@ -556,6 +560,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn unset_and_remove_sandbox_storage() {
|
||||
skip_if_not_root!();
|
||||
|
||||
@@ -586,8 +591,8 @@ mod tests {
|
||||
|
||||
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
|
||||
|
||||
assert_eq!(s.set_sandbox_storage(&destdir_path), true);
|
||||
assert!(s.unset_and_remove_sandbox_storage(&destdir_path).is_ok());
|
||||
assert!(s.set_sandbox_storage(destdir_path));
|
||||
assert!(s.unset_and_remove_sandbox_storage(destdir_path).is_ok());
|
||||
|
||||
let other_dir_str;
|
||||
{
|
||||
@@ -600,13 +605,14 @@ mod tests {
|
||||
let other_dir_path = other_dir.path().to_str().unwrap();
|
||||
other_dir_str = other_dir_path.to_string();
|
||||
|
||||
assert_eq!(s.set_sandbox_storage(&other_dir_path), true);
|
||||
assert!(s.set_sandbox_storage(other_dir_path));
|
||||
}
|
||||
|
||||
assert!(s.unset_and_remove_sandbox_storage(&other_dir_str).is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn unset_sandbox_storage() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
@@ -614,17 +620,15 @@ mod tests {
|
||||
let storage_path = "/tmp/testEphe";
|
||||
|
||||
// Add a new sandbox storage
|
||||
assert_eq!(s.set_sandbox_storage(&storage_path), true);
|
||||
assert!(s.set_sandbox_storage(storage_path));
|
||||
// Use the existing sandbox storage
|
||||
assert_eq!(
|
||||
s.set_sandbox_storage(&storage_path),
|
||||
false,
|
||||
assert!(
|
||||
!s.set_sandbox_storage(storage_path),
|
||||
"Expects false as the storage is not new."
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
s.unset_sandbox_storage(&storage_path).unwrap(),
|
||||
false,
|
||||
assert!(
|
||||
!s.unset_sandbox_storage(storage_path).unwrap(),
|
||||
"Expects false as there is still a storage."
|
||||
);
|
||||
|
||||
@@ -636,9 +640,8 @@ mod tests {
|
||||
ref_count
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
s.unset_sandbox_storage(&storage_path).unwrap(),
|
||||
true,
|
||||
assert!(
|
||||
s.unset_sandbox_storage(storage_path).unwrap(),
|
||||
"Expects true as there is still a storage."
|
||||
);
|
||||
|
||||
@@ -654,7 +657,7 @@ mod tests {
|
||||
// If no container is using the sandbox storage, the reference
|
||||
// counter for it should not exist.
|
||||
assert!(
|
||||
s.unset_sandbox_storage(&storage_path).is_err(),
|
||||
s.unset_sandbox_storage(storage_path).is_err(),
|
||||
"Expects false as the reference counter should no exist."
|
||||
);
|
||||
}
|
||||
@@ -693,6 +696,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn get_container_entry_exist() {
|
||||
skip_if_not_root!();
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
@@ -706,6 +710,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn get_container_no_entry() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
@@ -715,6 +720,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn add_and_get_container() {
|
||||
skip_if_not_root!();
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
@@ -726,6 +732,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn update_shared_pidns() {
|
||||
skip_if_not_root!();
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
@@ -744,6 +751,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn add_guest_hooks() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
@@ -767,6 +775,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_sandbox_set_destroy() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
|
||||
@@ -66,7 +66,7 @@ pub fn setup_tracing(name: &'static str, logger: &Logger, _agent_cfg: &AgentConf
|
||||
let config = Config::default();
|
||||
|
||||
let builder = opentelemetry::sdk::trace::TracerProvider::builder()
|
||||
.with_simple_exporter(exporter)
|
||||
.with_batch_exporter(exporter, opentelemetry::runtime::TokioCurrentThread)
|
||||
.with_config(config);
|
||||
|
||||
let provider = builder.build();
|
||||
|
||||
@@ -87,20 +87,28 @@ impl Uevent {
|
||||
sb.uevent_map.insert(self.devpath.clone(), self.clone());
|
||||
|
||||
// Notify watchers that are interested in the udev event.
|
||||
for watch in &mut sb.uevent_watchers {
|
||||
sb.uevent_watchers.iter_mut().for_each(move |watch| {
|
||||
if let Some((matcher, _)) = watch {
|
||||
if matcher.is_match(&self) {
|
||||
if matcher.is_match(self) {
|
||||
let (_, sender) = watch.take().unwrap();
|
||||
let _ = sender.send(self.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn process_remove(&self, logger: &Logger, sandbox: &Arc<Mutex<Sandbox>>) {
|
||||
let mut sb = sandbox.lock().await;
|
||||
sb.uevent_map.remove(&self.devpath);
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn process(&self, logger: &Logger, sandbox: &Arc<Mutex<Sandbox>>) {
|
||||
if self.action == U_EVENT_ACTION_ADD {
|
||||
return self.process_add(logger, sandbox).await;
|
||||
} else if self.action == U_EVENT_ACTION_REMOVE {
|
||||
return self.process_remove(logger, sandbox).await;
|
||||
}
|
||||
debug!(*logger, "ignoring event"; "uevent" => format!("{:?}", self));
|
||||
}
|
||||
@@ -221,15 +229,17 @@ pub(crate) fn spawn_test_watcher(sandbox: Arc<Mutex<Sandbox>>, uev: Uevent) {
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let mut sb = sandbox.lock().await;
|
||||
for w in &mut sb.uevent_watchers {
|
||||
if let Some((matcher, _)) = w {
|
||||
let uev = uev.clone();
|
||||
sb.uevent_watchers.iter_mut().for_each(move |watch| {
|
||||
if let Some((matcher, _)) = watch {
|
||||
if matcher.is_match(&uev) {
|
||||
let (_, sender) = w.take().unwrap();
|
||||
let _ = sender.send(uev);
|
||||
let (_, sender) = watch.take().unwrap();
|
||||
let _ = sender.send(uev.clone());
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
drop(sb); // unlock
|
||||
}
|
||||
});
|
||||
|
||||
@@ -3,26 +3,28 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#![allow(clippy::unknown_clippy_lints)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
|
||||
use anyhow::{ensure, Context, Result};
|
||||
use async_recursion::async_recursion;
|
||||
use nix::mount::{umount, MsFlags};
|
||||
use slog::{debug, error, info, warn, Logger};
|
||||
use thiserror::Error;
|
||||
use tokio::fs;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task;
|
||||
use tokio::time::{self, Duration};
|
||||
|
||||
use anyhow::{ensure, Context, Result};
|
||||
use async_recursion::async_recursion;
|
||||
use nix::mount::{umount, MsFlags};
|
||||
use slog::{debug, error, Logger};
|
||||
|
||||
use crate::mount::BareMount;
|
||||
use crate::protocols::agent as protos;
|
||||
|
||||
/// The maximum number of file system entries agent will watch for each mount.
|
||||
const MAX_ENTRIES_PER_STORAGE: usize = 8;
|
||||
const MAX_ENTRIES_PER_STORAGE: usize = 16;
|
||||
|
||||
/// The maximum size of a watchable mount in bytes.
|
||||
const MAX_SIZE_PER_WATCHABLE_MOUNT: u64 = 1024 * 1024;
|
||||
@@ -44,15 +46,35 @@ struct Storage {
|
||||
target_mount_point: PathBuf,
|
||||
|
||||
/// Flag to indicate that the Storage should be watched. Storage will be watched until
|
||||
/// the source becomes too large, either in number of files (>8) or total size (>1MB).
|
||||
/// the source becomes too large, either in number of files (>16) or total size (>1MB).
|
||||
watch: bool,
|
||||
|
||||
/// The list of files to watch from the source mount point and updated in the target one.
|
||||
watched_files: HashMap<PathBuf, SystemTime>,
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum WatcherError {
|
||||
#[error(
|
||||
"Too many file system entries within to watch within: {mnt} ({count} must be < {})",
|
||||
MAX_ENTRIES_PER_STORAGE
|
||||
)]
|
||||
MountTooManyFiles { count: usize, mnt: String },
|
||||
|
||||
#[error(
|
||||
"Mount too large to watch: {mnt} ({size} must be < {})",
|
||||
MAX_SIZE_PER_WATCHABLE_MOUNT
|
||||
)]
|
||||
MountTooLarge { size: u64, mnt: String },
|
||||
}
|
||||
|
||||
impl Drop for Storage {
|
||||
fn drop(&mut self) {
|
||||
if !&self.watch {
|
||||
// If we weren't watching this storage entry, it means that a bind mount
|
||||
// was created.
|
||||
let _ = umount(&self.target_mount_point);
|
||||
}
|
||||
let _ = std::fs::remove_dir_all(&self.target_mount_point);
|
||||
}
|
||||
}
|
||||
@@ -65,7 +87,6 @@ impl Storage {
|
||||
watch: true,
|
||||
watched_files: HashMap::new(),
|
||||
};
|
||||
|
||||
Ok(entry)
|
||||
}
|
||||
|
||||
@@ -143,7 +164,9 @@ impl Storage {
|
||||
|
||||
// Update identified files:
|
||||
for path in &updated_files {
|
||||
self.update_target(logger, path.as_path()).await?;
|
||||
if let Err(e) = self.update_target(logger, path.as_path()).await {
|
||||
error!(logger, "failure in update_target: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(updated_files.len())
|
||||
@@ -172,8 +195,10 @@ impl Storage {
|
||||
|
||||
ensure!(
|
||||
self.watched_files.len() <= MAX_ENTRIES_PER_STORAGE,
|
||||
"Too many file system entries to watch (must be < {})",
|
||||
MAX_ENTRIES_PER_STORAGE
|
||||
WatcherError::MountTooManyFiles {
|
||||
count: self.watched_files.len(),
|
||||
mnt: self.source_mount_point.display().to_string()
|
||||
}
|
||||
);
|
||||
|
||||
// Insert will return old entry if any
|
||||
@@ -201,10 +226,13 @@ impl Storage {
|
||||
size += res_size;
|
||||
}
|
||||
}
|
||||
|
||||
ensure!(
|
||||
size <= MAX_SIZE_PER_WATCHABLE_MOUNT,
|
||||
"Too many file system entries to watch (must be < {})",
|
||||
MAX_SIZE_PER_WATCHABLE_MOUNT,
|
||||
WatcherError::MountTooLarge {
|
||||
size,
|
||||
mnt: self.source_mount_point.display().to_string()
|
||||
}
|
||||
);
|
||||
|
||||
Ok(size)
|
||||
@@ -241,6 +269,19 @@ impl SandboxStorages {
|
||||
let entry = Storage::new(storage)
|
||||
.await
|
||||
.with_context(|| "Failed to add storage")?;
|
||||
|
||||
// If the storage source is a directory, let's create the target mount point:
|
||||
if entry.source_mount_point.as_path().is_dir() {
|
||||
fs::create_dir_all(&entry.target_mount_point)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Unable to mkdir all for {}",
|
||||
entry.target_mount_point.display()
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
self.0.push(entry);
|
||||
}
|
||||
|
||||
@@ -255,38 +296,59 @@ impl SandboxStorages {
|
||||
async fn check(&mut self, logger: &Logger) -> Result<()> {
|
||||
for entry in self.0.iter_mut().filter(|e| e.watch) {
|
||||
if let Err(e) = entry.scan(logger).await {
|
||||
// If an error was observed, we will stop treating this Storage as being watchable, and
|
||||
// instead clean up the target-mount files on the tmpfs and bind mount the source_mount_point
|
||||
// to target_mount_point.
|
||||
error!(logger, "error observed when watching: {:?}", e);
|
||||
entry.watch = false;
|
||||
match e.downcast_ref::<WatcherError>() {
|
||||
Some(WatcherError::MountTooLarge { .. })
|
||||
| Some(WatcherError::MountTooManyFiles { .. }) => {
|
||||
//
|
||||
// If the mount we were watching is too large (bytes), or contains too many unique files,
|
||||
// we no longer want to watch. Instead, we'll attempt to create a bind mount and mark this storage
|
||||
// as non-watchable. if there's an error in creating bind mount, we'll continue watching.
|
||||
//
|
||||
// Ensure the target mount point exists:
|
||||
if !entry.target_mount_point.as_path().exists() {
|
||||
if entry.source_mount_point.as_path().is_dir() {
|
||||
fs::create_dir_all(entry.target_mount_point.as_path())
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"create dir for bindmount {:?}",
|
||||
entry.target_mount_point.as_path()
|
||||
)
|
||||
})?;
|
||||
} else {
|
||||
fs::File::create(entry.target_mount_point.as_path())
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"create file {:?}",
|
||||
entry.target_mount_point.as_path()
|
||||
)
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove destination contents, but not the directory itself, since this is
|
||||
// assumed to be bind-mounted into a container. If source/mount is a file, no need to cleanup
|
||||
if entry.target_mount_point.as_path().is_dir() {
|
||||
for dir_entry in std::fs::read_dir(entry.target_mount_point.as_path())? {
|
||||
let dir_entry = dir_entry?;
|
||||
let path = dir_entry.path();
|
||||
if dir_entry.file_type()?.is_dir() {
|
||||
tokio::fs::remove_dir_all(path).await?;
|
||||
} else {
|
||||
tokio::fs::remove_file(path).await?;
|
||||
match BareMount::new(
|
||||
entry.source_mount_point.to_str().unwrap(),
|
||||
entry.target_mount_point.to_str().unwrap(),
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"bind",
|
||||
logger,
|
||||
)
|
||||
.mount()
|
||||
{
|
||||
Ok(_) => {
|
||||
entry.watch = false;
|
||||
info!(logger, "watchable mount replaced with bind mount")
|
||||
}
|
||||
Err(e) => error!(logger, "unable to replace watchable: {:?}", e),
|
||||
}
|
||||
}
|
||||
_ => warn!(logger, "scan error: {:?}", e),
|
||||
}
|
||||
|
||||
// - Create bind mount from source to destination
|
||||
BareMount::new(
|
||||
entry.source_mount_point.to_str().unwrap(),
|
||||
entry.target_mount_point.to_str().unwrap(),
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"bind",
|
||||
logger,
|
||||
)
|
||||
.mount()?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -368,7 +430,7 @@ impl BindWatcher {
|
||||
for (_, entries) in sandbox_storages.lock().await.iter_mut() {
|
||||
if let Err(err) = entries.check(&logger).await {
|
||||
// We don't fail background loop, but rather log error instead.
|
||||
error!(logger, "Check failed: {}", err);
|
||||
warn!(logger, "Check failed: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -409,60 +471,43 @@ mod tests {
|
||||
use std::fs;
|
||||
use std::thread;
|
||||
|
||||
#[tokio::test]
|
||||
async fn watch_entries() {
|
||||
skip_if_not_root!();
|
||||
async fn create_test_storage(dir: &Path, id: &str) -> Result<(protos::Storage, PathBuf)> {
|
||||
let src_path = dir.join(format!("src{}", id));
|
||||
let src_filename = src_path.to_str().expect("failed to create src filename");
|
||||
let dest_path = dir.join(format!("dest{}", id));
|
||||
let dest_filename = dest_path.to_str().expect("failed to create dest filename");
|
||||
|
||||
// If there's an error with an entry, let's make sure it is removed, and that the
|
||||
// mount-destination behaves like a standard bind-mount.
|
||||
|
||||
// Create an entries vector with three storage objects: storage, storage1, storage2.
|
||||
// We'll first verify each are evaluated correctly, then increase the first entry's contents
|
||||
// so it fails mount size check (>1MB) (test handling for failure on mount that is a directory).
|
||||
// We'll then similarly cause failure with storage2 (test handling for failure on mount that is
|
||||
// a single file). We'll then verify that storage1 continues to be watchable.
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
let dest_dir = tempfile::tempdir().unwrap();
|
||||
std::fs::create_dir_all(src_filename).expect("failed to create path");
|
||||
|
||||
let storage = protos::Storage {
|
||||
source: source_dir.path().display().to_string(),
|
||||
mount_point: dest_dir.path().display().to_string(),
|
||||
source: src_filename.to_string(),
|
||||
mount_point: dest_filename.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
std::fs::File::create(source_dir.path().join("small.txt"))
|
||||
.unwrap()
|
||||
.set_len(10)
|
||||
.unwrap();
|
||||
|
||||
let source_dir1 = tempfile::tempdir().unwrap();
|
||||
let dest_dir1 = tempfile::tempdir().unwrap();
|
||||
let storage1 = protos::Storage {
|
||||
source: source_dir1.path().display().to_string(),
|
||||
mount_point: dest_dir1.path().display().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
std::fs::File::create(source_dir1.path().join("large.txt"))
|
||||
.unwrap()
|
||||
.set_len(MAX_SIZE_PER_WATCHABLE_MOUNT)
|
||||
.unwrap();
|
||||
Ok((storage, src_path))
|
||||
}
|
||||
|
||||
// And finally, create a single file mount:
|
||||
let source_dir2 = tempfile::tempdir().unwrap();
|
||||
let dest_dir2 = tempfile::tempdir().unwrap();
|
||||
|
||||
let source_path = source_dir2.path().join("mounted-file");
|
||||
let dest_path = dest_dir2.path().join("mounted-file");
|
||||
let mounted_file = std::fs::File::create(&source_path).unwrap();
|
||||
mounted_file.set_len(MAX_SIZE_PER_WATCHABLE_MOUNT).unwrap();
|
||||
|
||||
let storage2 = protos::Storage {
|
||||
source: source_path.display().to_string(),
|
||||
mount_point: dest_path.display().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
#[tokio::test]
|
||||
async fn test_empty_sourcedir_check() {
|
||||
//skip_if_not_root!();
|
||||
let dir = tempfile::tempdir().expect("failed to create tempdir");
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
let src_path = dir.path().join("src");
|
||||
let dest_path = dir.path().join("dest");
|
||||
let src_filename = src_path.to_str().expect("failed to create src filename");
|
||||
let dest_filename = dest_path.to_str().expect("failed to create dest filename");
|
||||
|
||||
std::fs::create_dir_all(src_filename).expect("failed to create path");
|
||||
|
||||
let storage = protos::Storage {
|
||||
source: src_filename.to_string(),
|
||||
mount_point: dest_filename.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut entries = SandboxStorages {
|
||||
..Default::default()
|
||||
};
|
||||
@@ -472,65 +517,255 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
entries
|
||||
.add(std::iter::once(storage1), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
entries
|
||||
.add(std::iter::once(storage2), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Check that there are three entries, and that the
|
||||
// destination (mount point) matches what we expect for
|
||||
// the first:
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
assert_eq!(entries.0.len(), 3);
|
||||
assert_eq!(std::fs::read_dir(dest_dir.path()).unwrap().count(), 1);
|
||||
assert_eq!(entries.0.len(), 1);
|
||||
|
||||
// Add a second file which will trip file size check:
|
||||
std::fs::File::create(source_dir.path().join("big.txt"))
|
||||
assert_eq!(std::fs::read_dir(src_path).unwrap().count(), 0);
|
||||
assert_eq!(std::fs::read_dir(dest_path).unwrap().count(), 0);
|
||||
assert_eq!(std::fs::read_dir(dir.path()).unwrap().count(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_single_file_check() {
|
||||
//skip_if_not_root!();
|
||||
let dir = tempfile::tempdir().expect("failed to create tempdir");
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
let src_file_path = dir.path().join("src.txt");
|
||||
let dest_file_path = dir.path().join("dest.txt");
|
||||
|
||||
let src_filename = src_file_path
|
||||
.to_str()
|
||||
.expect("failed to create src filename");
|
||||
let dest_filename = dest_file_path
|
||||
.to_str()
|
||||
.expect("failed to create dest filename");
|
||||
|
||||
let storage = protos::Storage {
|
||||
source: src_filename.to_string(),
|
||||
mount_point: dest_filename.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
//create file
|
||||
fs::write(src_file_path, "original").unwrap();
|
||||
|
||||
let mut entries = SandboxStorages::default();
|
||||
|
||||
entries
|
||||
.add(std::iter::once(storage), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
assert_eq!(entries.0.len(), 1);
|
||||
|
||||
// there should only be 2 files
|
||||
assert_eq!(std::fs::read_dir(dir.path()).unwrap().count(), 2);
|
||||
|
||||
assert_eq!(fs::read_to_string(dest_file_path).unwrap(), "original");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_watch_entries() {
|
||||
skip_if_not_root!();
|
||||
|
||||
// If there's an error with an entry, let's make sure it is removed, and that the
|
||||
// mount-destination behaves like a standard bind-mount.
|
||||
|
||||
// Create an entries vector with four storage objects: storage0,1,2,3.
|
||||
// 0th we'll have fail due to too many files before running a check
|
||||
// 1st will just have a single medium sized file, we'll keep it watchable throughout
|
||||
// 2nd will have a large file (<1MB), but we'll later make larger to make unwatchable
|
||||
// 3rd will have several files, and later we'll make unwatchable by having too many files.
|
||||
// We'll run check a couple of times to verify watchable is always watchable, and unwatchable bind mounts
|
||||
// match our expectations.
|
||||
let dir = tempfile::tempdir().expect("failed to create tempdir");
|
||||
|
||||
let (storage0, src0_path) = create_test_storage(dir.path(), "1")
|
||||
.await
|
||||
.expect("failed to create storage");
|
||||
let (storage1, src1_path) = create_test_storage(dir.path(), "2")
|
||||
.await
|
||||
.expect("failed to create storage");
|
||||
let (storage2, src2_path) = create_test_storage(dir.path(), "3")
|
||||
.await
|
||||
.expect("failed to create storage");
|
||||
let (storage3, src3_path) = create_test_storage(dir.path(), "4")
|
||||
.await
|
||||
.expect("failed to create storage");
|
||||
|
||||
// setup storage0: too many files
|
||||
for i in 1..21 {
|
||||
fs::write(src0_path.join(format!("{}.txt", i)), "original").unwrap();
|
||||
}
|
||||
|
||||
// setup storage1: two small files
|
||||
std::fs::File::create(src1_path.join("small.txt"))
|
||||
.unwrap()
|
||||
.set_len(10)
|
||||
.unwrap();
|
||||
fs::write(src1_path.join("foo.txt"), "original").unwrap();
|
||||
|
||||
// setup storage2: large file, but still watchable
|
||||
std::fs::File::create(src2_path.join("large.txt"))
|
||||
.unwrap()
|
||||
.set_len(MAX_SIZE_PER_WATCHABLE_MOUNT)
|
||||
.unwrap();
|
||||
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
// setup storage3: many files, but still watchable
|
||||
for i in 1..MAX_ENTRIES_PER_STORAGE + 1 {
|
||||
fs::write(src3_path.join(format!("{}.txt", i)), "original").unwrap();
|
||||
}
|
||||
|
||||
// Verify Storage 0 is no longer going to be watched:
|
||||
assert!(!entries.0[0].watch);
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
// Verify that the directory has two entries:
|
||||
assert_eq!(std::fs::read_dir(dest_dir.path()).unwrap().count(), 2);
|
||||
let mut entries = SandboxStorages {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Verify that the directory is a bind mount. Add an entry without calling check,
|
||||
// and verify that the destination directory includes these files in the case of
|
||||
// mount that is no longer being watched (storage), but not within the still-being
|
||||
// watched (storage1):
|
||||
fs::write(source_dir.path().join("1.txt"), "updated").unwrap();
|
||||
fs::write(source_dir1.path().join("2.txt"), "updated").unwrap();
|
||||
|
||||
assert_eq!(std::fs::read_dir(source_dir.path()).unwrap().count(), 3);
|
||||
assert_eq!(std::fs::read_dir(dest_dir.path()).unwrap().count(), 3);
|
||||
assert_eq!(std::fs::read_dir(source_dir1.path()).unwrap().count(), 2);
|
||||
assert_eq!(std::fs::read_dir(dest_dir1.path()).unwrap().count(), 1);
|
||||
|
||||
// Verify that storage1 is still working. After running check, we expect that the number
|
||||
// of entries to increment
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
assert_eq!(std::fs::read_dir(dest_dir1.path()).unwrap().count(), 2);
|
||||
|
||||
// Break storage2 by increasing the file size
|
||||
mounted_file
|
||||
.set_len(MAX_SIZE_PER_WATCHABLE_MOUNT + 10)
|
||||
entries
|
||||
.add(std::iter::once(storage0), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
entries
|
||||
.add(std::iter::once(storage1), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
entries
|
||||
.add(std::iter::once(storage2), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
entries
|
||||
.add(std::iter::once(storage3), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
// Verify Storage 2 is no longer going to be watched:
|
||||
assert!(!entries.0[2].watch);
|
||||
|
||||
// Verify bind mount is working -- let's write to the file and observe output:
|
||||
fs::write(&source_path, "updated").unwrap();
|
||||
assert_eq!(fs::read_to_string(&source_path).unwrap(), "updated");
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
// Check that there are four entries
|
||||
assert_eq!(entries.0.len(), 4);
|
||||
|
||||
//verify that storage 0 is no longer going to be watched, but 1,2,3 are
|
||||
assert!(!entries.0[0].watch);
|
||||
assert!(entries.0[1].watch);
|
||||
assert!(entries.0[2].watch);
|
||||
assert!(entries.0[3].watch);
|
||||
|
||||
assert_eq!(std::fs::read_dir(dir.path()).unwrap().count(), 8);
|
||||
|
||||
//verify target mount points contain expected number of entries:
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[0].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
20
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[1].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
2
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[2].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
1
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[3].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
MAX_ENTRIES_PER_STORAGE
|
||||
);
|
||||
|
||||
// Add two files to storage 0, verify it is updated without needing to run check:
|
||||
fs::write(src0_path.join("1.txt"), "updated").unwrap();
|
||||
fs::write(src0_path.join("foo.txt"), "new").unwrap();
|
||||
fs::write(src0_path.join("bar.txt"), "new").unwrap();
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[0].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
22
|
||||
);
|
||||
assert_eq!(
|
||||
fs::read_to_string(&entries.0[0].target_mount_point.as_path().join("1.txt")).unwrap(),
|
||||
"updated"
|
||||
);
|
||||
|
||||
//
|
||||
// Prepare for second check: update mount sources
|
||||
//
|
||||
|
||||
// source 3 will become unwatchable
|
||||
fs::write(src3_path.join("foo.txt"), "updated").unwrap();
|
||||
|
||||
// source 2 will become unwatchable:
|
||||
std::fs::File::create(src2_path.join("small.txt"))
|
||||
.unwrap()
|
||||
.set_len(10)
|
||||
.unwrap();
|
||||
|
||||
// source 1: expect just an update
|
||||
fs::write(src1_path.join("foo.txt"), "updated").unwrap();
|
||||
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
|
||||
// verify that only storage 1 is still watchable
|
||||
assert!(!entries.0[0].watch);
|
||||
assert!(entries.0[1].watch);
|
||||
assert!(!entries.0[2].watch);
|
||||
assert!(!entries.0[3].watch);
|
||||
|
||||
// Verify storage 1 was updated, and storage 2,3 are up to date despite no watch
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[0].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
22
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[1].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
2
|
||||
);
|
||||
assert_eq!(
|
||||
fs::read_to_string(&entries.0[1].target_mount_point.as_path().join("foo.txt")).unwrap(),
|
||||
"updated"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[2].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
2
|
||||
);
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[3].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
MAX_ENTRIES_PER_STORAGE + 1
|
||||
);
|
||||
|
||||
// verify that we can remove files as well, but that it isn't observed until check is run
|
||||
// for a watchable mount:
|
||||
fs::remove_file(src1_path.join("foo.txt")).unwrap();
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[1].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
2
|
||||
);
|
||||
assert!(entries.check(&logger).await.is_ok());
|
||||
assert_eq!(
|
||||
std::fs::read_dir(entries.0[1].target_mount_point.as_path())
|
||||
.unwrap()
|
||||
.count(),
|
||||
1
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -553,7 +788,15 @@ mod tests {
|
||||
.set_len(MAX_SIZE_PER_WATCHABLE_MOUNT + 1)
|
||||
.unwrap();
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
assert!(entry.scan(&logger).await.is_err());
|
||||
|
||||
// Expect to receive a MountTooLarge error
|
||||
match entry.scan(&logger).await {
|
||||
Ok(_) => panic!("expected error"),
|
||||
Err(e) => match e.downcast_ref::<WatcherError>() {
|
||||
Some(WatcherError::MountTooLarge { .. }) => {}
|
||||
_ => panic!("unexpected error"),
|
||||
},
|
||||
}
|
||||
fs::remove_file(source_dir.path().join("big.txt")).unwrap();
|
||||
|
||||
std::fs::File::create(source_dir.path().join("big.txt"))
|
||||
@@ -561,6 +804,7 @@ mod tests {
|
||||
.set_len(MAX_SIZE_PER_WATCHABLE_MOUNT - 1)
|
||||
.unwrap();
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
|
||||
assert!(entry.scan(&logger).await.is_ok());
|
||||
|
||||
std::fs::File::create(source_dir.path().join("too-big.txt"))
|
||||
@@ -568,26 +812,38 @@ mod tests {
|
||||
.set_len(2)
|
||||
.unwrap();
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
assert!(entry.scan(&logger).await.is_err());
|
||||
|
||||
// Expect to receive a MountTooLarge error
|
||||
match entry.scan(&logger).await {
|
||||
Ok(_) => panic!("expected error"),
|
||||
Err(e) => match e.downcast_ref::<WatcherError>() {
|
||||
Some(WatcherError::MountTooLarge { .. }) => {}
|
||||
_ => panic!("unexpected error"),
|
||||
},
|
||||
}
|
||||
|
||||
fs::remove_file(source_dir.path().join("big.txt")).unwrap();
|
||||
fs::remove_file(source_dir.path().join("too-big.txt")).unwrap();
|
||||
|
||||
// Up to eight files should be okay:
|
||||
fs::write(source_dir.path().join("1.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("2.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("3.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("4.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("5.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("6.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("7.txt"), "updated").unwrap();
|
||||
fs::write(source_dir.path().join("8.txt"), "updated").unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), 8);
|
||||
// Up to 16 files should be okay:
|
||||
for i in 1..MAX_ENTRIES_PER_STORAGE + 1 {
|
||||
fs::write(source_dir.path().join(format!("{}.txt", i)), "original").unwrap();
|
||||
}
|
||||
|
||||
// Nine files is too many:
|
||||
fs::write(source_dir.path().join("9.txt"), "updated").unwrap();
|
||||
assert_eq!(entry.scan(&logger).await.unwrap(), MAX_ENTRIES_PER_STORAGE);
|
||||
|
||||
// 17 files is too many:
|
||||
fs::write(source_dir.path().join("17.txt"), "updated").unwrap();
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
assert!(entry.scan(&logger).await.is_err());
|
||||
|
||||
// Expect to receive a MountTooManyFiles error
|
||||
match entry.scan(&logger).await {
|
||||
Ok(_) => panic!("expected error"),
|
||||
Err(e) => match e.downcast_ref::<WatcherError>() {
|
||||
Some(WatcherError::MountTooManyFiles { .. }) => {}
|
||||
_ => panic!("unexpected error"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -726,7 +982,10 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
use serial_test::serial;
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn create_tmpfs() {
|
||||
skip_if_not_root!();
|
||||
|
||||
@@ -741,6 +1000,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn spawn_thread() {
|
||||
skip_if_not_root!();
|
||||
|
||||
@@ -768,4 +1028,68 @@ mod tests {
|
||||
let out = fs::read_to_string(dest_dir.path().join("1.txt")).unwrap();
|
||||
assert_eq!(out, "one");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn verify_container_cleanup_watching() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
fs::write(source_dir.path().join("1.txt"), "one").unwrap();
|
||||
|
||||
let dest_dir = tempfile::tempdir().unwrap();
|
||||
|
||||
let storage = protos::Storage {
|
||||
source: source_dir.path().display().to_string(),
|
||||
mount_point: dest_dir.path().display().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut watcher = BindWatcher::default();
|
||||
|
||||
watcher
|
||||
.add_container("test".into(), std::iter::once(storage), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
thread::sleep(Duration::from_secs(WATCH_INTERVAL_SECS));
|
||||
|
||||
let out = fs::read_to_string(dest_dir.path().join("1.txt")).unwrap();
|
||||
assert!(dest_dir.path().exists());
|
||||
assert_eq!(out, "one");
|
||||
|
||||
watcher.remove_container("test").await;
|
||||
|
||||
thread::sleep(Duration::from_secs(WATCH_INTERVAL_SECS));
|
||||
assert!(!dest_dir.path().exists());
|
||||
|
||||
for i in 1..21 {
|
||||
fs::write(source_dir.path().join(format!("{}.txt", i)), "fluff").unwrap();
|
||||
}
|
||||
|
||||
// verify non-watched storage is cleaned up correctly
|
||||
let storage1 = protos::Storage {
|
||||
source: source_dir.path().display().to_string(),
|
||||
mount_point: dest_dir.path().display().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
watcher
|
||||
.add_container("test".into(), std::iter::once(storage1), &logger)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
thread::sleep(Duration::from_secs(WATCH_INTERVAL_SECS));
|
||||
|
||||
assert!(dest_dir.path().exists());
|
||||
assert!(is_mounted(dest_dir.path().to_str().unwrap()).unwrap());
|
||||
|
||||
watcher.remove_container("test").await;
|
||||
|
||||
thread::sleep(Duration::from_secs(WATCH_INTERVAL_SECS));
|
||||
|
||||
assert!(!dest_dir.path().exists());
|
||||
assert!(!is_mounted(dest_dir.path().to_str().unwrap()).unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,11 +9,12 @@ edition = "2018"
|
||||
[dependencies]
|
||||
nix = "0.21.0"
|
||||
libc = "0.2.94"
|
||||
thiserror = "1.0.24"
|
||||
thiserror = "1.0.26"
|
||||
opentelemetry = { version = "0.14.0", features=["serialize"] }
|
||||
serde = { version = "1.0.126", features = ["derive"] }
|
||||
vsock = "0.2.3"
|
||||
tokio-vsock = "0.3.1"
|
||||
bincode = "1.3.3"
|
||||
byteorder = "1.4.3"
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
|
||||
async-trait = "0.1.50"
|
||||
tokio = "1.2.0"
|
||||
|
||||
@@ -12,15 +12,19 @@
|
||||
// payload, which allows the forwarder to know how many bytes it must read to
|
||||
// consume the trace span. The payload is a serialised version of the trace span.
|
||||
|
||||
#![allow(clippy::unknown_clippy_lints)]
|
||||
|
||||
use async_trait::async_trait;
|
||||
use byteorder::{ByteOrder, NetworkEndian};
|
||||
use opentelemetry::sdk::export::trace::{ExportResult, SpanData, SpanExporter};
|
||||
use opentelemetry::sdk::export::ExportError;
|
||||
use slog::{error, o, Logger};
|
||||
use std::io::{ErrorKind, Write};
|
||||
use std::net::Shutdown;
|
||||
use std::sync::Mutex;
|
||||
use vsock::{SockAddr, VsockStream};
|
||||
use slog::{error, info, o, Logger};
|
||||
use std::io::ErrorKind;
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio_vsock::VsockStream;
|
||||
|
||||
const ANY_CID: &str = "any";
|
||||
|
||||
@@ -38,7 +42,7 @@ const DEFAULT_PORT: u32 = 10240;
|
||||
pub struct Exporter {
|
||||
port: u32,
|
||||
cid: u32,
|
||||
conn: Mutex<VsockStream>,
|
||||
conn: Option<Arc<Mutex<VsockStream>>>,
|
||||
logger: Logger,
|
||||
}
|
||||
|
||||
@@ -49,7 +53,7 @@ impl Exporter {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
#[derive(Error, Debug)]
|
||||
pub enum Error {
|
||||
#[error("connection error: {0}")]
|
||||
ConnectionError(String),
|
||||
@@ -70,7 +74,12 @@ fn make_io_error(desc: String) -> std::io::Error {
|
||||
}
|
||||
|
||||
// Send a trace span to the forwarder running on the host.
|
||||
fn write_span(writer: &mut dyn Write, span: &SpanData) -> Result<(), std::io::Error> {
|
||||
async fn write_span(
|
||||
writer: Arc<Mutex<VsockStream>>,
|
||||
span: &SpanData,
|
||||
) -> Result<(), std::io::Error> {
|
||||
let mut writer = writer.lock().await;
|
||||
|
||||
let encoded_payload: Vec<u8> =
|
||||
bincode::serialize(&span).map_err(|e| make_io_error(e.to_string()))?;
|
||||
|
||||
@@ -83,18 +92,17 @@ fn write_span(writer: &mut dyn Write, span: &SpanData) -> Result<(), std::io::Er
|
||||
NetworkEndian::write_u64(&mut payload_len_as_bytes, payload_len);
|
||||
|
||||
// Send the header
|
||||
writer
|
||||
.write_all(&payload_len_as_bytes)
|
||||
.map_err(|e| make_io_error(format!("failed to write trace header: {:?}", e)))?;
|
||||
writer.write_all(&payload_len_as_bytes).await?;
|
||||
|
||||
writer
|
||||
.write_all(&encoded_payload)
|
||||
.map_err(|e| make_io_error(format!("failed to write trace payload: {:?}", e)))
|
||||
writer.write_all(&encoded_payload).await
|
||||
}
|
||||
|
||||
fn handle_batch(writer: &mut dyn Write, batch: Vec<SpanData>) -> ExportResult {
|
||||
async fn handle_batch(
|
||||
writer: Arc<Mutex<VsockStream>>,
|
||||
batch: Vec<SpanData>,
|
||||
) -> Result<(), std::io::Error> {
|
||||
for span_data in batch {
|
||||
write_span(writer, &span_data).map_err(Error::IOError)?;
|
||||
write_span(writer.clone(), &span_data).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -103,31 +111,32 @@ fn handle_batch(writer: &mut dyn Write, batch: Vec<SpanData>) -> ExportResult {
|
||||
#[async_trait]
|
||||
impl SpanExporter for Exporter {
|
||||
async fn export(&mut self, batch: Vec<SpanData>) -> ExportResult {
|
||||
let conn = self.conn.lock();
|
||||
if self.conn.is_none() {
|
||||
let conn = connect_vsock(self.cid, self.port).await.map(|e| {
|
||||
error!(self.logger, "failed to obtain connection"; "error" => format!("{:?}", e));
|
||||
e
|
||||
})?;
|
||||
|
||||
match conn {
|
||||
Ok(mut c) => handle_batch(&mut *c, batch),
|
||||
Err(e) => {
|
||||
error!(self.logger, "failed to obtain connection";
|
||||
"error" => format!("{}", e));
|
||||
|
||||
return Err(Error::ConnectionError(e.to_string()).into());
|
||||
}
|
||||
self.conn = Some(Arc::new(Mutex::new(conn)));
|
||||
}
|
||||
|
||||
handle_batch(self.conn.as_ref().unwrap().clone(), batch)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(self.logger, "handle_batch error: {:?}", e);
|
||||
if e.kind() == ErrorKind::NotConnected {
|
||||
info!(self.logger, "drop connection");
|
||||
self.conn.take();
|
||||
}
|
||||
|
||||
Error::IOError(e)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn shutdown(&mut self) {
|
||||
let conn = match self.conn.lock() {
|
||||
Ok(conn) => conn,
|
||||
Err(e) => {
|
||||
error!(self.logger, "failed to obtain connection";
|
||||
"error" => format!("{}", e));
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
conn.shutdown(Shutdown::Write)
|
||||
.expect("failed to shutdown VSOCK connection");
|
||||
self.conn.take();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -169,8 +178,6 @@ impl Builder {
|
||||
pub fn init(self) -> Exporter {
|
||||
let Builder { port, cid, logger } = self;
|
||||
|
||||
let sock_addr = SockAddr::new_vsock(self.cid, self.port);
|
||||
|
||||
let cid_str: String;
|
||||
|
||||
if self.cid == libc::VMADDR_CID_ANY {
|
||||
@@ -179,18 +186,18 @@ impl Builder {
|
||||
cid_str = format!("{}", self.cid);
|
||||
}
|
||||
|
||||
let msg = format!(
|
||||
"failed to connect to VSOCK server (port: {}, cid: {}) - {}",
|
||||
self.port, cid_str, "ensure trace forwarder is running on host"
|
||||
);
|
||||
|
||||
let conn = VsockStream::connect(&sock_addr).expect(&msg);
|
||||
|
||||
Exporter {
|
||||
port,
|
||||
cid,
|
||||
conn: Mutex::new(conn),
|
||||
conn: None,
|
||||
logger: logger.new(o!("cid" => cid_str, "port" => port)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn connect_vsock(cid: u32, port: u32) -> Result<VsockStream, Error> {
|
||||
match VsockStream::connect(cid, port).await {
|
||||
Ok(conn) => Ok(conn),
|
||||
Err(e) => Err(Error::ConnectionError(e.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,26 +8,6 @@ This repository contains the runtime for the
|
||||
For details of the other Kata Containers repositories, see the
|
||||
[repository summary](https://github.com/kata-containers/kata-containers).
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [License](#license)
|
||||
* [Platform support](#platform-support)
|
||||
* [Hardware requirements](#hardware-requirements)
|
||||
* [Download and install](#download-and-install)
|
||||
* [Quick start for developers](#quick-start-for-developers)
|
||||
* [Architecture overview](#architecture-overview)
|
||||
* [Configuration](#configuration)
|
||||
* [Hypervisor specific configuration](#hypervisor-specific-configuration)
|
||||
* [Stateless systems](#stateless-systems)
|
||||
* [Logging](#logging)
|
||||
* [Kata OCI](#kata-oci)
|
||||
* [Kata containerd shimv2](#kata-containerd-shimv2)
|
||||
* [Debugging](#debugging)
|
||||
* [Limitations](#limitations)
|
||||
* [Community](#community)
|
||||
* [Contact](#contact)
|
||||
* [Further information](#further-information)
|
||||
* [Additional packages](#additional-packages)
|
||||
|
||||
## Introduction
|
||||
|
||||
`kata-runtime`, referred to as "the runtime", is the Command-Line Interface
|
||||
@@ -47,7 +27,7 @@ to work seamlessly with both Docker and Kubernetes respectively.
|
||||
|
||||
The code is licensed under an Apache 2.0 license.
|
||||
|
||||
See [the license file](LICENSE) for further details.
|
||||
See [the license file](../../LICENSE) for further details.
|
||||
|
||||
## Platform support
|
||||
|
||||
@@ -150,7 +130,7 @@ $ kata-runtime env
|
||||
|
||||
For detailed information and analysis on obtaining logs for other system
|
||||
components, see the documentation for the
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/master/cmd/log-parser)
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/main/cmd/log-parser)
|
||||
tool.
|
||||
|
||||
For runtime logs, see the following sections for the CRI-O and containerd shimv2 based runtimes.
|
||||
|
||||
@@ -105,10 +105,10 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_ACRN@"
|
||||
# the OCI spec passed to the runtime.
|
||||
#
|
||||
# You can create a rootfs with hooks by customizing the osbuilder scripts:
|
||||
# https://github.com/kata-containers/osbuilder
|
||||
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
|
||||
#
|
||||
# Hooks must be stored in a subdirectory of guest_hook_path according to their
|
||||
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
|
||||
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
|
||||
# The agent will scan these directories for executable files and add them, in
|
||||
# lexicographical order, to the lifecycle of the guest container.
|
||||
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
|
||||
|
||||
@@ -120,10 +120,10 @@ block_device_driver = "virtio-blk"
|
||||
# the OCI spec passed to the runtime.
|
||||
#
|
||||
# You can create a rootfs with hooks by customizing the osbuilder scripts:
|
||||
# https://github.com/kata-containers/osbuilder
|
||||
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
|
||||
#
|
||||
# Hooks must be stored in a subdirectory of guest_hook_path according to their
|
||||
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
|
||||
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
|
||||
# The agent will scan these directories for executable files and add them, in
|
||||
# lexicographical order, to the lifecycle of the guest container.
|
||||
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
|
||||
|
||||
@@ -30,9 +30,7 @@ valid_hypervisor_paths = @FCVALIDHYPERVISORPATHS@
|
||||
# If the jailer path is not set kata will launch firecracker
|
||||
# without a jail. If the jailer is set firecracker will be
|
||||
# launched in a jailed enviornment created by the jailer
|
||||
# This is disabled by default as additional setup is required
|
||||
# for this feature today.
|
||||
#jailer_path = "@FCJAILERPATH@"
|
||||
jailer_path = "@FCJAILERPATH@"
|
||||
|
||||
# List of valid jailer path values for the hypervisor
|
||||
# Each member of the list can be a regular expression
|
||||
@@ -204,10 +202,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# the OCI spec passed to the runtime.
|
||||
#
|
||||
# You can create a rootfs with hooks by customizing the osbuilder scripts:
|
||||
# https://github.com/kata-containers/osbuilder
|
||||
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
|
||||
#
|
||||
# Hooks must be stored in a subdirectory of guest_hook_path according to their
|
||||
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
|
||||
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
|
||||
# The agent will scan these directories for executable files and add them, in
|
||||
# lexicographical order, to the lifecycle of the guest container.
|
||||
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
|
||||
|
||||
@@ -314,10 +314,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# the OCI spec passed to the runtime.
|
||||
#
|
||||
# You can create a rootfs with hooks by customizing the osbuilder scripts:
|
||||
# https://github.com/kata-containers/osbuilder
|
||||
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
|
||||
#
|
||||
# Hooks must be stored in a subdirectory of guest_hook_path according to their
|
||||
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
|
||||
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
|
||||
# The agent will scan these directories for executable files and add them, in
|
||||
# lexicographical order, to the lifecycle of the guest container.
|
||||
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
|
||||
@@ -356,6 +356,17 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
|
||||
#guest_memory_dump_paging=false
|
||||
|
||||
# Enable swap in the guest. Default false.
|
||||
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
|
||||
# if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness")
|
||||
# is bigger than 0.
|
||||
# The size of the swap device should be
|
||||
# swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes.
|
||||
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
|
||||
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
|
||||
# be default_memory.
|
||||
#enable_guest_swap = true
|
||||
|
||||
[factory]
|
||||
# VM templating support. Once enabled, new VMs are created from template
|
||||
# using vm cloning. They will share the same initial kernel, initramfs and
|
||||
|
||||
@@ -34,12 +34,12 @@ import (
|
||||
)
|
||||
|
||||
type kernelModule struct {
|
||||
// description
|
||||
desc string
|
||||
|
||||
// maps parameter names to values
|
||||
parameters map[string]string
|
||||
|
||||
// description
|
||||
desc string
|
||||
|
||||
// if it is definitely required
|
||||
required bool
|
||||
}
|
||||
@@ -54,10 +54,10 @@ type kvmExtension struct {
|
||||
}
|
||||
|
||||
type vmContainerCapableDetails struct {
|
||||
cpuInfoFile string
|
||||
requiredCPUFlags map[string]string
|
||||
requiredCPUAttribs map[string]string
|
||||
requiredKernelModules map[string]kernelModule
|
||||
cpuInfoFile string
|
||||
}
|
||||
|
||||
const (
|
||||
|
||||
@@ -109,12 +109,12 @@ func TestCheckCheckKernelModulesNoNesting(t *testing.T) {
|
||||
}
|
||||
|
||||
actualModuleData := []testModuleData{
|
||||
{filepath.Join(sysModuleDir, "kvm"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/unrestricted_guest"), false, "Y"},
|
||||
{filepath.Join(sysModuleDir, "kvm"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/unrestricted_guest"), "Y", false},
|
||||
|
||||
// XXX: force a warning
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/nested"), false, "N"},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/nested"), "N", false},
|
||||
}
|
||||
|
||||
vendor := archGenuineIntel
|
||||
@@ -194,12 +194,12 @@ func TestCheckCheckKernelModulesNoUnrestrictedGuest(t *testing.T) {
|
||||
}
|
||||
|
||||
actualModuleData := []testModuleData{
|
||||
{filepath.Join(sysModuleDir, "kvm"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/nested"), false, "Y"},
|
||||
{filepath.Join(sysModuleDir, "kvm"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/nested"), "Y", false},
|
||||
|
||||
// XXX: force a failure on non-VMM systems
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/unrestricted_guest"), false, "N"},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/unrestricted_guest"), "N", false},
|
||||
}
|
||||
|
||||
vendor := archGenuineIntel
|
||||
@@ -295,10 +295,10 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) {
|
||||
}
|
||||
|
||||
moduleData = []testModuleData{
|
||||
{filepath.Join(sysModuleDir, "kvm"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/nested"), false, "Y"},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/unrestricted_guest"), false, "Y"},
|
||||
{filepath.Join(sysModuleDir, "kvm"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/nested"), "Y", false},
|
||||
{filepath.Join(sysModuleDir, "kvm_intel/parameters/unrestricted_guest"), "Y", false},
|
||||
}
|
||||
} else if cpuType == cpuTypeAMD {
|
||||
cpuData = []testCPUData{
|
||||
@@ -311,9 +311,9 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) {
|
||||
}
|
||||
|
||||
moduleData = []testModuleData{
|
||||
{filepath.Join(sysModuleDir, "kvm"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_amd"), true, ""},
|
||||
{filepath.Join(sysModuleDir, "kvm_amd/parameters/nested"), false, "1"},
|
||||
{filepath.Join(sysModuleDir, "kvm"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_amd"), "", true},
|
||||
{filepath.Join(sysModuleDir, "kvm_amd/parameters/nested"), "1", false},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -338,51 +338,51 @@ func TestArchKernelParamHandler(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
type testData struct {
|
||||
onVMM bool
|
||||
expectIgnore bool
|
||||
fields logrus.Fields
|
||||
msg string
|
||||
onVMM bool
|
||||
expectIgnore bool
|
||||
}
|
||||
|
||||
data := []testData{
|
||||
{true, false, logrus.Fields{}, ""},
|
||||
{false, false, logrus.Fields{}, ""},
|
||||
{logrus.Fields{}, "", true, false},
|
||||
{logrus.Fields{}, "", false, false},
|
||||
|
||||
{
|
||||
false,
|
||||
false,
|
||||
logrus.Fields{
|
||||
// wrong type
|
||||
"parameter": 123,
|
||||
},
|
||||
"foo",
|
||||
false,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
false,
|
||||
false,
|
||||
logrus.Fields{
|
||||
"parameter": "unrestricted_guest",
|
||||
},
|
||||
"",
|
||||
false,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
true,
|
||||
true,
|
||||
logrus.Fields{
|
||||
"parameter": "unrestricted_guest",
|
||||
},
|
||||
"",
|
||||
true,
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
false,
|
||||
true,
|
||||
logrus.Fields{
|
||||
"parameter": "nested",
|
||||
},
|
||||
"",
|
||||
false,
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -57,51 +57,51 @@ func TestArchKernelParamHandler(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
type testData struct {
|
||||
onVMM bool
|
||||
expectIgnore bool
|
||||
fields logrus.Fields
|
||||
msg string
|
||||
onVMM bool
|
||||
expectIgnore bool
|
||||
}
|
||||
|
||||
data := []testData{
|
||||
{true, false, logrus.Fields{}, ""},
|
||||
{false, false, logrus.Fields{}, ""},
|
||||
{logrus.Fields{}, "", true, false},
|
||||
{logrus.Fields{}, "", false, false},
|
||||
|
||||
{
|
||||
false,
|
||||
false,
|
||||
logrus.Fields{
|
||||
// wrong type
|
||||
"parameter": 123,
|
||||
},
|
||||
"foo",
|
||||
false,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
false,
|
||||
false,
|
||||
logrus.Fields{
|
||||
"parameter": "unrestricted_guest",
|
||||
},
|
||||
"",
|
||||
false,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
true,
|
||||
true,
|
||||
logrus.Fields{
|
||||
"parameter": "unrestricted_guest",
|
||||
},
|
||||
"",
|
||||
true,
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
false,
|
||||
true,
|
||||
logrus.Fields{
|
||||
"parameter": "nested",
|
||||
},
|
||||
"",
|
||||
false,
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -26,8 +26,8 @@ import (
|
||||
|
||||
type testModuleData struct {
|
||||
path string
|
||||
isDir bool
|
||||
contents string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// nolint: structcheck, unused, deadcode
|
||||
@@ -400,46 +400,46 @@ func TestCheckCheckCPUFlags(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
type testData struct {
|
||||
cpuflags string
|
||||
required map[string]string
|
||||
cpuflags string
|
||||
expectCount uint32
|
||||
}
|
||||
|
||||
data := []testData{
|
||||
{
|
||||
"",
|
||||
map[string]string{},
|
||||
"",
|
||||
0,
|
||||
},
|
||||
{
|
||||
"",
|
||||
map[string]string{
|
||||
"a": "A flag",
|
||||
},
|
||||
"",
|
||||
0,
|
||||
},
|
||||
{
|
||||
"",
|
||||
map[string]string{
|
||||
"a": "A flag",
|
||||
"b": "B flag",
|
||||
},
|
||||
"",
|
||||
0,
|
||||
},
|
||||
{
|
||||
"a b c",
|
||||
map[string]string{
|
||||
"b": "B flag",
|
||||
},
|
||||
"a b c",
|
||||
0,
|
||||
},
|
||||
{
|
||||
"a b c",
|
||||
map[string]string{
|
||||
"x": "X flag",
|
||||
"y": "Y flag",
|
||||
"z": "Z flag",
|
||||
},
|
||||
"a b c",
|
||||
3,
|
||||
},
|
||||
}
|
||||
@@ -454,54 +454,54 @@ func TestCheckCheckCPUAttribs(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
type testData struct {
|
||||
cpuinfo string
|
||||
required map[string]string
|
||||
cpuinfo string
|
||||
expectCount uint32
|
||||
}
|
||||
|
||||
data := []testData{
|
||||
{
|
||||
"",
|
||||
map[string]string{},
|
||||
"",
|
||||
0,
|
||||
},
|
||||
{
|
||||
"",
|
||||
map[string]string{
|
||||
"a": "",
|
||||
},
|
||||
"",
|
||||
0,
|
||||
},
|
||||
{
|
||||
map[string]string{
|
||||
"b": "B attribute",
|
||||
},
|
||||
"a: b",
|
||||
map[string]string{
|
||||
"b": "B attribute",
|
||||
},
|
||||
0,
|
||||
},
|
||||
{
|
||||
map[string]string{
|
||||
"b": "B attribute",
|
||||
},
|
||||
"a: b\nc: d\ne: f",
|
||||
map[string]string{
|
||||
"b": "B attribute",
|
||||
},
|
||||
0,
|
||||
},
|
||||
{
|
||||
"a: b\n",
|
||||
map[string]string{
|
||||
"b": "B attribute",
|
||||
"c": "C attribute",
|
||||
"d": "D attribute",
|
||||
},
|
||||
"a: b\n",
|
||||
2,
|
||||
},
|
||||
{
|
||||
"a: b\nc: d\ne: f",
|
||||
map[string]string{
|
||||
"b": "B attribute",
|
||||
"d": "D attribute",
|
||||
"f": "F attribute",
|
||||
},
|
||||
"a: b\nc: d\ne: f",
|
||||
0,
|
||||
},
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user