Mirror of https://github.com/kata-containers/kata-containers.git, synced 2026-02-22 14:54:23 +00:00
Compare commits
202 Commits
| SHA1 |
|---|
| 68e7e006b1 |
| d67afa969a |
| 69457dc4c7 |
| d37c6e370d |
| 4ec5db0fb3 |
| b5e17c1d45 |
| b9ac2a118d |
| 8639f1b42f |
| 6e5a7aaff7 |
| ebbee07246 |
| 36b8831801 |
| 2ff6964be8 |
| 0e0d29d228 |
| e0083ed6bc |
| 8db3dfb305 |
| 5e360d4f58 |
| 1de0909a30 |
| 70207252f4 |
| a86feb8bf7 |
| abd028c6c2 |
| 8b597195ab |
| f83adbe83d |
| e0e6f94819 |
| ecadb514ea |
| a07df25809 |
| ac6c1d1f45 |
| e6d27759cb |
| 99cd083913 |
| 3eb7387bb7 |
| be512e7f34 |
| 12ec33d70d |
| 491b95451c |
| 624dc2d222 |
| fcab7c3a01 |
| 6977074930 |
| 592ecdb671 |
| d1305ee9eb |
| 59a05c7401 |
| a4f8f263bf |
| 79a40d4895 |
| 4a35d5fa6e |
| f9db0c5a86 |
| 9304889330 |
| 5988199ada |
| d144ded12c |
| 0d2a7f8324 |
| 5f6d747e6d |
| e812c5ce66 |
| 7b4e5751ca |
| 87d197ef20 |
| 8e3863cecb |
| c453919911 |
| 036d3a4088 |
| 4139d68d51 |
| ca02c9f512 |
| 95602c8c08 |
| 8a9392fd9d |
| ecbd94d80c |
| 2f5bc0f408 |
| 67b8f0773f |
| bdf20b5d26 |
| bd1e8a2a24 |
| 86501d5f6f |
| fff0e50a73 |
| b67a1da187 |
| ed02c8a051 |
| 01765e1734 |
| 49326fe4e1 |
| 6f86fb8e27 |
| 94b1d9814c |
| f1855594a2 |
| c3836010a8 |
| a482b0d410 |
| 407d3146e6 |
| d4f8f3a779 |
| ac64b021a6 |
| 56071c6e7b |
| 2752225360 |
| 93b3d0a28e |
| 71a3b73cb0 |
| 5d37d31ac7 |
| ab59a65c92 |
| 390916b33c |
| 8ae14f6a55 |
| 9794c52c65 |
| df93439c3b |
| 7fc35f19eb |
| 1660d5651f |
| f49b89b632 |
| 3c24e23409 |
| d73f3a8a26 |
| 59f104c022 |
| b7dd97cac6 |
| 968f5b4031 |
| 57c5e5629b |
| 3c48f2202c |
| 856ab66871 |
| f83115a838 |
| e071d9251f |
| a34f36f8f4 |
| d6945200cc |
| c282a1c709 |
| 09d416fe43 |
| 56f0a27fef |
| 334c4b8bdc |
| 3a63e3c1f7 |
| 1648b85e2d |
| 510798155d |
| dc90c6e30b |
| c071355359 |
| 4e2db96ef7 |
| b29cbbfd2c |
| 8e8c720d51 |
| c1dd9b9777 |
| 653e00dff8 |
| de45f62096 |
| 1e531b44dc |
| 9092c23a2e |
| b7f4e96ff3 |
| 063dec37c2 |
| 0b3c91d2a2 |
| af125b1498 |
| 00dcd900f9 |
| 461b32491f |
| 2b779cba00 |
| 392c87550f |
| 39fe4a4b6f |
| a5319c6be6 |
| bf4e3a618f |
| 8a1723a5cb |
| 8a4f08cb0f |
| 219bb8e7d0 |
| a85d0e465c |
| 40c6904324 |
| 421a33f846 |
| 980a2c7794 |
| 676d028504 |
| 083facd5ae |
| 7d1a604bad |
| fa1f08f5da |
| 861c38b6aa |
| c8596a4065 |
| ba87e0afea |
| 9f490d16fe |
| 790f45190b |
| 42efe013c1 |
| 596037e20c |
| 095e8fdef4 |
| 0f9e23a3d9 |
| 20196048bf |
| a1a7ed98df |
| 69fc8de712 |
| 8d4c2cf1b9 |
| 64c11a66fd |
| 807eeaafd0 |
| 3d573ba579 |
| 458fe865ea |
| 923cd3fda1 |
| 54f2b296e3 |
| f82918f872 |
| 9c6e90fd55 |
| cf1bae3521 |
| 1592a385eb |
| 60ff230d80 |
| 76437a9721 |
| a9626682af |
| ea06fe3afc |
| 6ee550e9a5 |
| 6199b69178 |
| a33a22ccd1 |
| 2b4b825228 |
| 4a4232b851 |
| e3d3b72fa2 |
| f137048be3 |
| 73216a8104 |
| fc17d7cc41 |
| c6b7f69040 |
| c91b142587 |
| 12fd6ffc1f |
| 64c9114a39 |
| 7eb43cec15 |
| 8551853cfe |
| 0ec4aa1a86 |
| 07e77f5be7 |
| 147c56bb8d |
| 709483425f |
| 8225d8044e |
| 86a82cace9 |
| 82c59efd65 |
| 7b309b578d |
| fee4e7c7c4 |
| 4d53303a7d |
| 594b57d082 |
| d33e343613 |
| f8a93a1ded |
| 464d4c94de |
| 5f9c892e48 |
| fa9ae9362c |
| d48b22bb13 |
| fafc7a8b1a |
| bf8848f926 |
| f8a48ab41d |
.github/workflows/cc-payload-after-push-amd64.yaml (vendored, 159 lines removed)
@@ -1,159 +0,0 @@
name: CI | Publish CC runtime payload for amd64
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-asset:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        asset:
          - cc-cloud-hypervisor
          - cc-kernel
          - cc-qemu
          - cc-rootfs-image
          - cc-virtiofsd
          - cc-sev-kernel
          - cc-sev-ovmf
          - cc-sev-rootfs-initrd
          - cc-tdx-kernel
          - cc-tdx-rootfs-image
          - cc-tdx-qemu
          - cc-tdx-td-shim
          - cc-tdx-tdvf
    steps:
      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0 # This is needed in order to keep the commit ids history
      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
          PUSH_TO_REGISTRY: yes

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          retention-days: 1
          if-no-files-found: error

      - name: store-artifact root_hash_tdx.txt
        uses: actions/upload-artifact@v3
        with:
          name: root_hash_tdx.txt
          path: tools/osbuilder/root_hash_tdx.txt
          retention-days: 1
          if-no-files-found: ignore

      - name: store-artifact root_hash_vanilla.txt
        uses: actions/upload-artifact@v3
        with:
          name: root_hash_vanilla.txt
          path: tools/osbuilder/root_hash_vanilla.txt
          retention-days: 1
          if-no-files-found: ignore

  build-asset-cc-shim-v2:
    runs-on: ubuntu-latest
    needs: build-asset
    steps:
      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3

      - name: Get root_hash_tdx.txt
        uses: actions/download-artifact@v3
        with:
          name: root_hash_tdx.txt
          path: tools/osbuilder/

      - name: Get root_hash_vanilla.txt
        uses: actions/download-artifact@v3
        with:
          name: root_hash_vanilla.txt
          path: tools/osbuilder/

      - name: Build cc-shim-v2
        run: |
          make cc-shim-v2-tarball
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          PUSH_TO_REGISTRY: yes

      - name: store-artifact cc-shim-v2
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts
          path: kata-build/kata-static-cc-shim-v2.tar.xz
          retention-days: 1
          if-no-files-found: error

  create-kata-tarball:
    runs-on: ubuntu-latest
    needs: [build-asset, build-asset-cc-shim-v2]
    steps:
      - uses: actions/checkout@v3
      - name: get-artifacts
        uses: actions/download-artifact@v3
        with:
          name: kata-artifacts
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v3
        with:
          name: kata-static-tarball
          path: kata-static.tar.xz
          retention-days: 1
          if-no-files-found: error

  kata-payload:
    needs: create-kata-tarball
    runs-on: ubuntu-latest
    steps:
      - name: Login to Confidential Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball

      - name: build-and-push-kata-payload
        id: build-and-push-kata-payload
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
            $(pwd)/kata-static.tar.xz "quay.io/confidential-containers/runtime-payload-ci" \
            "kata-containers-${{ inputs.target-arch }}"
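
Each build-asset matrix entry above reduces to a single `make <asset>-tarball` invocation. A rough sketch for reproducing one entry locally (the asset name is only an example, and `PUSH_TO_REGISTRY` is deliberately left unset so nothing is pushed to quay.io):

```bash
# Sketch: reproduce one build-asset matrix entry locally.
# "cc-kernel" is an example; any asset from the matrix above works.
export KATA_ASSET="cc-kernel"
export TAR_OUTPUT="${KATA_ASSET}.tar.gz"

make "${KATA_ASSET}-tarball"

# The workflow copies the build directory because upload-artifact
# does not follow symlinks.
build_dir=$(readlink -f build)
sudo cp -r "${build_dir}" "kata-build"
ls -l "kata-build/kata-static-${KATA_ASSET}.tar.xz"
```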
.github/workflows/cc-payload-after-push-s390x.yaml (vendored, 149 lines removed)
@@ -1,149 +0,0 @@
name: CI | Publish CC runtime payload for s390x
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-asset:
    runs-on: s390x
    strategy:
      matrix:
        asset:
          - cc-kernel
          - cc-qemu
          - cc-rootfs-image
          - cc-virtiofsd
    steps:
      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0 # This is needed in order to keep the commit ids history
      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
          sudo chown -R $(id -u):$(id -g) "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
          PUSH_TO_REGISTRY: yes

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts-s390x
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          retention-days: 1
          if-no-files-found: error

      - name: store-artifact root_hash_vanilla.txt
        uses: actions/upload-artifact@v3
        with:
          name: root_hash_vanilla.txt-s390x
          path: tools/osbuilder/root_hash_vanilla.txt
          retention-days: 1
          if-no-files-found: ignore

  build-asset-cc-shim-v2:
    runs-on: s390x
    needs: build-asset
    steps:
      - name: Login to Kata Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3

      - name: Get root_hash_vanilla.txt
        uses: actions/download-artifact@v3
        with:
          name: root_hash_vanilla.txt-s390x
          path: tools/osbuilder/

      - name: Build cc-shim-v2
        run: |
          make cc-shim-v2-tarball
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          PUSH_TO_REGISTRY: yes

      - name: store-artifact cc-shim-v2
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts-s390x
          path: kata-build/kata-static-cc-shim-v2.tar.xz
          retention-days: 1
          if-no-files-found: error

  create-kata-tarball:
    runs-on: s390x
    needs: [build-asset, build-asset-cc-shim-v2]
    steps:
      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3
      - name: get-artifacts
        uses: actions/download-artifact@v3
        with:
          name: kata-artifacts-s390x
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v3
        with:
          name: kata-static-tarball-s390x
          path: kata-static.tar.xz
          retention-days: 1
          if-no-files-found: error

  kata-payload:
    needs: create-kata-tarball
    runs-on: s390x
    steps:
      - name: Login to Confidential Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-s390x

      - name: build-and-push-kata-payload
        id: build-and-push-kata-payload
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
            $(pwd)/kata-static.tar.xz "quay.io/confidential-containers/runtime-payload-ci" \
            "kata-containers-${{ inputs.target-arch }}"
.github/workflows/cc-payload-after-push.yaml (vendored, 39 lines removed)
@@ -1,39 +0,0 @@
name: CI | Publish Kata Containers payload for Confidential Containers
on:
  push:
    branches:
      - CCv0

jobs:
  build-assets-amd64:
    uses: ./.github/workflows/cc-payload-after-push-amd64.yaml
    with:
      target-arch: amd64
    secrets: inherit

  build-assets-s390x:
    uses: ./.github/workflows/cc-payload-after-push-s390x.yaml
    with:
      target-arch: s390x
    secrets: inherit

  publish:
    runs-on: ubuntu-latest
    needs: [build-assets-amd64, build-assets-s390x]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Login to Confidential Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - name: Push multi-arch manifest
        run: |
          docker manifest create quay.io/confidential-containers/runtime-payload-ci:kata-containers-latest \
            --amend quay.io/confidential-containers/runtime-payload-ci:kata-containers-amd64 \
            --amend quay.io/confidential-containers/runtime-payload-ci:kata-containers-s390x
          docker manifest push quay.io/confidential-containers/runtime-payload-ci:kata-containers-latest
.github/workflows/cc-payload-amd64.yaml (vendored, 142 lines removed)
@@ -1,142 +0,0 @@
name: Publish Kata Containers payload for Confidential Containers (amd64)
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-asset:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        asset:
          - cc-cloud-hypervisor
          - cc-kernel
          - cc-qemu
          - cc-rootfs-image
          - cc-virtiofsd
          - cc-sev-kernel
          - cc-sev-ovmf
          - cc-sev-rootfs-initrd
          - cc-tdx-kernel
          - cc-tdx-rootfs-image
          - cc-tdx-qemu
          - cc-tdx-td-shim
          - cc-tdx-tdvf
    steps:
      - uses: actions/checkout@v3
      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          retention-days: 1
          if-no-files-found: error

      - name: store-artifact root_hash_tdx.txt
        uses: actions/upload-artifact@v3
        with:
          name: root_hash_tdx.txt
          path: tools/osbuilder/root_hash_tdx.txt
          retention-days: 1
          if-no-files-found: ignore

      - name: store-artifact root_hash_vanilla.txt
        uses: actions/upload-artifact@v3
        with:
          name: root_hash_vanilla.txt
          path: tools/osbuilder/root_hash_vanilla.txt
          retention-days: 1
          if-no-files-found: ignore

  build-asset-cc-shim-v2:
    runs-on: ubuntu-latest
    needs: build-asset
    steps:
      - uses: actions/checkout@v3

      - name: Get root_hash_tdx.txt
        uses: actions/download-artifact@v3
        with:
          name: root_hash_tdx.txt
          path: tools/osbuilder/

      - name: Get root_hash_vanilla.txt
        uses: actions/download-artifact@v3
        with:
          name: root_hash_vanilla.txt
          path: tools/osbuilder/

      - name: Build cc-shim-v2
        run: |
          make cc-shim-v2-tarball
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"

      - name: store-artifact cc-shim-v2
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts
          path: kata-build/kata-static-cc-shim-v2.tar.xz
          retention-days: 1
          if-no-files-found: error

  create-kata-tarball:
    runs-on: ubuntu-latest
    needs: [build-asset, build-asset-cc-shim-v2]
    steps:
      - uses: actions/checkout@v3
      - name: get-artifacts
        uses: actions/download-artifact@v3
        with:
          name: kata-artifacts
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v3
        with:
          name: kata-static-tarball
          path: kata-static.tar.xz
          retention-days: 1
          if-no-files-found: error

  kata-payload:
    needs: create-kata-tarball
    runs-on: ubuntu-latest
    steps:
      - name: Login to quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - uses: actions/checkout@v3
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball

      - name: build-and-push-kata-payload
        id: build-and-push-kata-payload
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
            $(pwd)/kata-static.tar.xz \
            "quay.io/confidential-containers/runtime-payload" \
            "kata-containers-${{ inputs.target-arch }}"
.github/workflows/cc-payload-s390x.yaml (vendored, 134 lines removed)
@@ -1,134 +0,0 @@
name: Publish Kata Containers payload for Confidential Containers (s390x)
on:
  workflow_call:
    inputs:
      target-arch:
        required: true
        type: string

jobs:
  build-asset:
    runs-on: s390x
    strategy:
      matrix:
        asset:
          - cc-kernel
          - cc-qemu
          - cc-rootfs-image
          - cc-virtiofsd
    steps:
      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3
      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts-s390x
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          retention-days: 1
          if-no-files-found: error

      - name: store-artifact root_hash_vanilla.txt
        uses: actions/upload-artifact@v3
        with:
          name: root_hash_vanilla.txt-s390x
          path: tools/osbuilder/root_hash_vanilla.txt
          retention-days: 1
          if-no-files-found: ignore

  build-asset-cc-shim-v2:
    runs-on: s390x
    needs: build-asset
    steps:
      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3

      - name: Get root_hash_vanilla.txt
        uses: actions/download-artifact@v3
        with:
          name: root_hash_vanilla.txt-s390x
          path: tools/osbuilder/

      - name: Build cc-shim-v2
        run: |
          make cc-shim-v2-tarball
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"

      - name: store-artifact cc-shim-v2
        uses: actions/upload-artifact@v3
        with:
          name: kata-artifacts-s390x
          path: kata-build/kata-static-cc-shim-v2.tar.xz
          retention-days: 1
          if-no-files-found: error

  create-kata-tarball:
    runs-on: s390x
    needs: [build-asset, build-asset-cc-shim-v2]
    steps:
      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3
      - name: get-artifacts
        uses: actions/download-artifact@v3
        with:
          name: kata-artifacts-s390x
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v3
        with:
          name: kata-static-tarball-s390x
          path: kata-static.tar.xz
          retention-days: 1
          if-no-files-found: error

  kata-payload:
    needs: create-kata-tarball
    runs-on: s390x
    steps:
      - name: Login to quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - name: Adjust a permission for repo
        run: |
          sudo chown -R $USER:$USER $GITHUB_WORKSPACE

      - uses: actions/checkout@v3
      - name: get-kata-tarball
        uses: actions/download-artifact@v3
        with:
          name: kata-static-tarball-s390x

      - name: build-and-push-kata-payload
        id: build-and-push-kata-payload
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh \
            $(pwd)/kata-static.tar.xz \
            "quay.io/confidential-containers/runtime-payload" \
            "kata-containers-${{ inputs.target-arch }}"
.github/workflows/cc-payload.yaml (vendored, 39 lines removed)
@@ -1,39 +0,0 @@
name: Publish Kata Containers payload for Confidential Containers
on:
  push:
    tags:
      - 'CC\-[0-9]+.[0-9]+.[0-9]+'

jobs:
  build-assets-amd64:
    uses: ./.github/workflows/cc-payload-amd64.yaml
    with:
      target-arch: amd64
    secrets: inherit

  build-assets-s390x:
    uses: ./.github/workflows/cc-payload-s390x.yaml
    with:
      target-arch: s390x
    secrets: inherit

  publish:
    runs-on: ubuntu-latest
    needs: [build-assets-amd64, build-assets-s390x]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Login to Confidential Containers quay.io
        uses: docker/login-action@v2
        with:
          registry: quay.io
          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}

      - name: Push multi-arch manifest
        run: |
          docker manifest create quay.io/confidential-containers/runtime-payload:kata-containers-latest \
            --amend quay.io/confidential-containers/runtime-payload:kata-containers-amd64 \
            --amend quay.io/confidential-containers/runtime-payload:kata-containers-s390x
          docker manifest push quay.io/confidential-containers/runtime-payload:kata-containers-latest
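
As a sanity check after the publish job runs (not part of the workflow itself), the pushed manifest list can be inspected to confirm that both architecture entries are present:

```bash
# Illustrative check, not part of the workflow above: confirm the
# published manifest list references both amd64 and s390x images.
docker manifest inspect \
  quay.io/confidential-containers/runtime-payload:kata-containers-latest |
  grep -E '"architecture": "(amd64|s390x)"'
```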
.github/workflows/commit-message-check.yaml (vendored, 4 changes)
@@ -47,7 +47,7 @@ jobs:
         uses: tim-actions/commit-message-checker-with-regex@v0.3.1
         with:
           commits: ${{ steps.get-pr-commits.outputs.commits }}
-          pattern: '^.{0,75}(\n.*)*$|^Merge pull request (?:kata-containers)?#[\d]+ from.*'
+          pattern: '^.{0,75}(\n.*)*$'
           error: 'Subject too long (max 75)'
           post_error: ${{ env.error_msg }}

@@ -95,6 +95,6 @@ jobs:
         uses: tim-actions/commit-message-checker-with-regex@v0.3.1
         with:
           commits: ${{ steps.get-pr-commits.outputs.commits }}
-          pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:|^Merge pull request (?:kata-containers)?#[\d]+ from.*'
+          pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:'
           error: 'Failed to find subsystem in subject'
           post_error: ${{ env.error_msg }}
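
The two checks can be approximated locally before pushing. A rough sketch, assuming the branch commits sit on top of origin/main (the patterns come from the workflow; `\s`/`\t` are translated to POSIX character classes since `grep -E` does not support them):

```bash
# Sketch: approximate both commit-message checks on local commits.
git log --format='%s' origin/main..HEAD | while read -r subject; do
  # Subject length check (max 75 characters)
  echo "$subject" | grep -qE '^.{0,75}$' ||
    echo "Subject too long (max 75): $subject"
  # Subsystem check: the subject must start with "<subsystem>:"
  echo "$subject" | grep -qE '^[[:space:]]*[^:[:space:]]+[[:space:]]*:' ||
    echo "Failed to find subsystem in subject: $subject"
done
```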
.github/workflows/deploy-ccv0-demo.yaml (vendored, 129 lines removed)
@@ -1,129 +0,0 @@
on:
  issue_comment:
    types: [created, edited]

name: deploy-ccv0-demo

jobs:
  check-comment-and-membership:
    runs-on: ubuntu-latest
    if: |
      github.event.issue.pull_request
      && github.event_name == 'issue_comment'
      && github.event.action == 'created'
      && startsWith(github.event.comment.body, '/deploy-ccv0-demo')
    steps:
      - name: Check membership
        uses: kata-containers/is-organization-member@1.0.1
        id: is_organization_member
        with:
          organization: kata-containers
          username: ${{ github.event.comment.user.login }}
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Fail if not member
        run: |
          result=${{ steps.is_organization_member.outputs.result }}
          if [ $result == false ]; then
            user=${{ github.event.comment.user.login }}
            echo Either ${user} is not part of the kata-containers organization
            echo or ${user} has its Organization Visibility set to Private at
            echo https://github.com/orgs/kata-containers/people?query=${user}
            echo
            echo Ensure you change your Organization Visibility to Public and
            echo trigger the test again.
            exit 1
          fi

  build-asset:
    runs-on: ubuntu-latest
    needs: check-comment-and-membership
    strategy:
      matrix:
        asset:
          - cloud-hypervisor
          - firecracker
          - kernel
          - qemu
          - rootfs-image
          - rootfs-initrd
          - shim-v2
    steps:
      - uses: actions/checkout@v2
      - name: Install docker
        run: |
          curl -fsSL https://test.docker.com -o test-docker.sh
          sh test-docker.sh

      - name: Prepare confidential container rootfs
        if: ${{ matrix.asset == 'rootfs-initrd' }}
        run: |
          pushd include_rootfs/etc
          curl -LO https://raw.githubusercontent.com/confidential-containers/documentation/main/demos/ssh-demo/aa-offline_fs_kbc-keys.json
          mkdir kata-containers
          envsubst < docs/how-to/data/confidential-agent-config.toml.in > kata-containers/agent.toml
          popd
        env:
          AA_KBC_PARAMS: offline_fs_kbc::null

      - name: Build ${{ matrix.asset }}
        run: |
          make "${KATA_ASSET}-tarball"
          build_dir=$(readlink -f build)
          # store-artifact does not work with symlink
          sudo cp -r "${build_dir}" "kata-build"
        env:
          AA_KBC: offline_fs_kbc
          INCLUDE_ROOTFS: include_rootfs
          KATA_ASSET: ${{ matrix.asset }}
          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

      - name: store-artifact ${{ matrix.asset }}
        uses: actions/upload-artifact@v2
        with:
          name: kata-artifacts
          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
          if-no-files-found: error

  create-kata-tarball:
    runs-on: ubuntu-latest
    needs: build-asset
    steps:
      - uses: actions/checkout@v2
      - name: get-artifacts
        uses: actions/download-artifact@v2
        with:
          name: kata-artifacts
          path: kata-artifacts
      - name: merge-artifacts
        run: |
          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
      - name: store-artifacts
        uses: actions/upload-artifact@v2
        with:
          name: kata-static-tarball
          path: kata-static.tar.xz

  kata-deploy:
    needs: create-kata-tarball
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: get-kata-tarball
        uses: actions/download-artifact@v2
        with:
          name: kata-static-tarball
      - name: build-and-push-kata-deploy-ci
        id: build-and-push-kata-deploy-ci
        run: |
          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
          pushd $GITHUB_WORKSPACE
          git checkout $tag
          pkg_sha=$(git rev-parse HEAD)
          popd
          mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
          docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/confidential-containers/runtime-payload:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
          docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
          docker push quay.io/confidential-containers/runtime-payload:$pkg_sha
          mkdir -p packaging/kata-deploy
          ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
          echo "::set-output name=PKG_SHA::${pkg_sha}"
.github/workflows/kata-deploy-push.yaml (vendored, 7 changes)
@@ -18,6 +18,7 @@ jobs:
       matrix:
         asset:
           - kernel
+          - kernel-dragonball-experimental
           - shim-v2
           - qemu
           - cloud-hypervisor
@@ -28,12 +29,6 @@
           - nydus
     steps:
       - uses: actions/checkout@v2
-      - name: Install docker
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          curl -fsSL https://test.docker.com -o test-docker.sh
-          sh test-docker.sh
-
       - name: Build ${{ matrix.asset }}
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
.github/workflows/kata-deploy-test.yaml (vendored, 6 changes)
@@ -50,6 +50,7 @@ jobs:
           - cloud-hypervisor
           - firecracker
           - kernel
+          - kernel-dragonball-experimental
           - nydus
           - qemu
           - rootfs-image
@@ -71,11 +72,6 @@
         with:
           ref: ${{ steps.get-PR-ref.outputs.pr-ref }}

-      - name: Install docker
-        run: |
-          curl -fsSL https://test.docker.com -o test-docker.sh
-          sh test-docker.sh
-
       - name: Build ${{ matrix.asset }}
         run: |
           make "${KATA_ASSET}-tarball"
.github/workflows/release.yaml (vendored, 6 changes)
@@ -13,6 +13,7 @@ jobs:
           - cloud-hypervisor
           - firecracker
           - kernel
+          - kernel-dragonball-experimental
           - nydus
           - qemu
           - rootfs-image
@@ -21,11 +22,6 @@
           - virtiofsd
     steps:
       - uses: actions/checkout@v2
-      - name: Install docker
-        run: |
-          curl -fsSL https://test.docker.com -o test-docker.sh
-          sh test-docker.sh
-
       - name: Build ${{ matrix.asset }}
         run: |
           ./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh
.github/workflows/static-checks.yaml (vendored, 17 changes)
@@ -27,7 +27,6 @@ jobs:
       target_branch: ${{ github.base_ref }}
     steps:
       - name: Checkout code
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
@@ -38,6 +37,22 @@
          go-version: 1.19.3
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Check kernel config version
+        run: |
+          cd "${{ github.workspace }}/src/github.com/${{ github.repository }}"
+          kernel_dir="tools/packaging/kernel/"
+          kernel_version_file="${kernel_dir}kata_config_version"
+          modified_files=$(git diff --name-only origin/main..HEAD)
+          result=$(git whatchanged origin/main..HEAD "${kernel_dir}" >>"/dev/null")
+          if git whatchanged origin/main..HEAD "${kernel_dir}" >>"/dev/null"; then
+            echo "Kernel directory has changed, checking if $kernel_version_file has been updated"
+            if echo "$modified_files" | grep -v "README.md" | grep "${kernel_dir}" >>"/dev/null"; then
+              echo "$modified_files" | grep "$kernel_version_file" >>/dev/null || ( echo "Please bump version in $kernel_version_file" && exit 1)
+            else
+              echo "Readme file changed, no need for kernel config version update."
+            fi
+            echo "Check passed"
+          fi
       - name: Setup GOPATH
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
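
The new "Check kernel config version" step can also be run by hand before opening a PR. A simplified sketch of the same logic, assuming origin/main has been fetched:

```bash
# Simplified local version of the kernel config version check above.
kernel_dir="tools/packaging/kernel/"
kernel_version_file="${kernel_dir}kata_config_version"
modified_files=$(git diff --name-only origin/main..HEAD)
# Only require a version bump when something other than README.md
# changed under the kernel packaging directory.
if echo "$modified_files" | grep -v "README.md" | grep -q "${kernel_dir}"; then
  echo "$modified_files" | grep -q "$kernel_version_file" ||
    { echo "Please bump version in $kernel_version_file"; exit 1; }
fi
echo "Check passed"
```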
Makefile (6 changes)
@@ -45,10 +45,8 @@ docs-url-alive-check:

.PHONY: \
	all \
	binary-tarball \
	kata-tarball \
	install-tarball \
	default \
	install-binary-tarball \
	static-checks \
	docs-url-alive-check
@@ -72,8 +72,7 @@ build_and_install_gperf() {
	curl -sLO "${gperf_tarball_url}"
	tar -xf "${gperf_tarball}"
	pushd "gperf-${gperf_version}"
-	# gperf is a build time dependency of libseccomp and not to be used in the target.
-	# Unset $CC since that might point to a cross compiler.
+	# Unset $CC for configure, we will always use native for gperf
	CC= ./configure --prefix="${gperf_install_dir}"
	make
	make install
@@ -232,10 +232,6 @@ $ rustup target add "${ARCH}-unknown-linux-${LIBC}"

To build the agent:

```bash
$ make -C kata-containers/src/agent
```

The agent is built with seccomp capability by default.
If you want to build the agent without the seccomp capability, you need to run `make` with `SECCOMP=no` as follows.

@@ -243,6 +239,31 @@ If you want to build the agent without the seccomp capability, you need to run `make` with `SECCOMP=no` as follows.
$ make -C kata-containers/src/agent SECCOMP=no
```

+For building the agent with seccomp support using `musl`, set the environment
+variables for the [`libseccomp` crate](https://github.com/libseccomp-rs/libseccomp-rs).
+
+```bash
+$ export LIBSECCOMP_LINK_TYPE=static
+$ export LIBSECCOMP_LIB_PATH="the path of the directory containing libseccomp.a"
+$ make -C kata-containers/src/agent
+```
+
+If the compilation fails when the agent tries to link the `libseccomp` library statically
+against `musl`, you will need to build `libseccomp` manually with `-U_FORTIFY_SOURCE`.
+You can use [our script](https://github.com/kata-containers/kata-containers/blob/main/ci/install_libseccomp.sh)
+to install `libseccomp` for the agent.
+
+```bash
+$ mkdir -p ${seccomp_install_path} ${gperf_install_path}
+$ kata-containers/ci/install_libseccomp.sh ${seccomp_install_path} ${gperf_install_path}
+$ export LIBSECCOMP_LIB_PATH="${seccomp_install_path}/lib"
+```
+
+On `ppc64le` and `s390x`, `glibc` is used. You will need to install the `libseccomp` library
+provided by your distribution.
+
+> e.g. `libseccomp-dev` for Ubuntu, or `libseccomp-devel` for CentOS
+

> **Note:**
>
> - If you enable seccomp in the main configuration file but build the agent without seccomp capability,

@@ -332,6 +353,7 @@ the `kata-agent` needs to be labeled `container_runtime_exec_t` again by yourself
> variable in the previous command and ensure the `qemu-img` command is
> available on your system.
> - If `qemu-img` is not installed, you will likely see errors such as `ERROR: File /dev/loop19p1 is not a block device` and `losetup: /tmp/tmp.bHz11oY851: Warning: file is smaller than 512 bytes; the loop device may be useless or invisible for system tools`. These can be mitigated by installing the `qemu-img` command (available in the `qemu-img` package on Fedora or the `qemu-utils` package on Debian).
> - If the `loop` module is not probed, you will likely see errors such as `losetup: cannot find an unused loop device`. Executing `modprobe loop` could resolve it.
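
The two mitigations from that note, as commands (illustrative; the package names are the ones given in the note and vary by distribution):

```bash
# Fixes "losetup: cannot find an unused loop device"
sudo modprobe loop
# Provides the qemu-img command
sudo dnf install -y qemu-img        # Fedora
sudo apt-get install -y qemu-utils  # Debian/Ubuntu
```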

### Install the rootfs image
@@ -44,7 +44,4 @@
 - [How to run Docker with Kata Containers](how-to-run-docker-with-kata.md)
 - [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)
 - [How to run Kata Containers with AMD SEV-SNP](how-to-run-kata-containers-with-SNP-VMs.md)
-
-## Confidential Containers
-- [How to use build and test the Confidential Containers `CCv0` proof of concept](how-to-build-and-test-ccv0.md)
-- [How to generate a Kata Containers payload for the Confidential Containers Operator](how-to-generate-a-kata-containers-payload-for-the-confidential-containers-operator.md)
+- [How to use EROFS to build rootfs in Kata Containers](how-to-use-erofs-build-rootfs.md)
@@ -1,640 +0,0 @@
#!/bin/bash -e
#
# Copyright (c) 2021, 2022 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# Disclaimer: This script is work in progress for supporting the CCv0 prototype
# It shouldn't be considered supported by the Kata Containers community, or anyone else

# Based on https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md,
# but with elements of the tests/.ci scripts used

readonly script_name="$(basename "${BASH_SOURCE[0]}")"

# By default in Golang >= 1.16 GO111MODULE is set to "on", but not all modules support it, so overwrite to "auto"
export GO111MODULE="auto"

# Setup kata containers environments if not set - we default to use containerd
export CRI_CONTAINERD=${CRI_CONTAINERD:-"yes"}
export CRI_RUNTIME=${CRI_RUNTIME:-"containerd"}
export CRIO=${CRIO:-"no"}
export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
export KUBERNETES=${KUBERNETES:-"no"}
export AGENT_INIT="${AGENT_INIT:-${TEST_INITRD:-no}}"
export AA_KBC="${AA_KBC:-offline_fs_kbc}"

# Allow the user to overwrite the default repo and branch names if they want to build from a fork
export katacontainers_repo="${katacontainers_repo:-github.com/kata-containers/kata-containers}"
export katacontainers_branch="${katacontainers_branch:-CCv0}"
export kata_default_branch=${katacontainers_branch}
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
export tests_branch="${tests_branch:-CCv0}"
export target_branch=${tests_branch} # kata-containers/ci/lib.sh uses target branch var to check out tests repo

# if .bash_profile exists then use it, otherwise fall back to .profile
export PROFILE="${HOME}/.profile"
if [ -r "${HOME}/.bash_profile" ]; then
    export PROFILE="${HOME}/.bash_profile"
fi
# Stop PS1: unbound variable error happening
export PS1=${PS1:-}

# Create a bunch of common, derived values up front so we don't need to create them in all the different functions
. ${PROFILE}
if [ -z ${GOPATH} ]; then
    export GOPATH=${HOME}/go
fi
export tests_repo_dir="${GOPATH}/src/${tests_repo}"
export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
export ROOTFS_DIR="${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder/rootfs"
export PULL_IMAGE="${PULL_IMAGE:-quay.io/kata-containers/confidential-containers:signed}" # Doesn't need authentication
export CONTAINER_ID="${CONTAINER_ID:-0123456789}"
source /etc/os-release || source /usr/lib/os-release
grep -Eq "\<fedora\>" /etc/os-release 2> /dev/null && export USE_PODMAN=true

# If we've already checked out the test repo then source the confidential scripts
if [ "${KUBERNETES}" == "yes" ]; then
    export BATS_TEST_DIRNAME="${tests_repo_dir}/integration/kubernetes/confidential"
    [ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/lib.sh"
else
    export BATS_TEST_DIRNAME="${tests_repo_dir}/integration/containerd/confidential"
    [ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/lib.sh"
fi

[ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/../../confidential/lib.sh"

export RUNTIME_CONFIG_PATH=/etc/kata-containers/configuration.toml

usage() {
    exit_code="$1"
    cat <<EOF
Overview:
    Build and test kata containers from source
    Optionally set kata-containers and tests repo and branch as exported variables before running
    e.g. export katacontainers_repo=github.com/stevenhorsman/kata-containers && export katacontainers_branch=kata-ci-from-fork && export tests_repo=github.com/stevenhorsman/tests && export tests_branch=kata-ci-from-fork && ~/${script_name} build_and_install_all
Usage:
    ${script_name} [options] <command>
Commands:
- agent_create_container: Run CreateContainer command against the agent with agent-ctl
- agent_pull_image: Run PullImage command against the agent with agent-ctl
- all: Build and install everything, test kata with containerd and capture the logs
- build_and_add_agent_to_rootfs: Builds the kata-agent and adds it to the rootfs
- build_and_install_all: Build and install everything
- build_and_install_rootfs: Builds and installs the rootfs image
- build_kata_runtime: Build and install the kata runtime
- build_cloud_hypervisor: Checkout, patch, build and install Cloud Hypervisor
- build_qemu: Checkout, patch, build and install QEMU
- configure: Configure Kata to use rootfs and enable debug
- connect_to_ssh_demo_pod: Ssh into the ssh demo pod, showing that the decryption succeeded
- copy_signature_files_to_guest: Copies signature verification files to guest
- create_rootfs: Create a local rootfs
- crictl_create_cc_container: Use crictl to create a new busybox container in the kata cc pod
- crictl_create_cc_pod: Use crictl to create a new kata cc pod
- crictl_delete_cc: Use crictl to delete the kata cc pod sandbox and container in it
- help: Display this help
- init_kubernetes: initialize a Kubernetes cluster on this system
- initialize: Install dependencies and check out kata-containers source
- install_guest_kernel: Setup, build and install the guest kernel
- kubernetes_create_cc_pod: Create a Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_create_ssh_demo_pod: Create a Kata CC runtime pod based on the ssh demo
- kubernetes_delete_cc_pod: Delete the Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_delete_ssh_demo_pod: Delete the Kata CC runtime pod based on the ssh demo
- open_kata_shell: Open a shell into the kata runtime
- rebuild_and_install_kata: Rebuild the kata runtime and agent and build and install the image
- shim_pull_image: Run PullImage command against the shim with ctr
- test_capture_logs: Test using kata with containerd and capture the logs in the user's home directory
- test: Test using kata with containerd

Options:
    -d: Enable debug
    -h: Display this help
EOF
    # if script sourced don't exit as this will exit the main shell, just return instead
    [[ $_ != $0 ]] && return "$exit_code" || exit "$exit_code"
}

build_and_install_all() {
    initialize
    build_and_install_kata_runtime
    configure
    create_a_local_rootfs
    build_and_install_rootfs
    install_guest_kernel_image
    case "$KATA_HYPERVISOR" in
        "qemu")
            build_qemu
            ;;
        "cloud-hypervisor")
            build_cloud_hypervisor
            ;;
        *)
            echo "Invalid option: $KATA_HYPERVISOR is not supported." >&2
            ;;
    esac

    check_kata_runtime
    if [ "${KUBERNETES}" == "yes" ]; then
        init_kubernetes
    fi
}

rebuild_and_install_kata() {
    checkout_tests_repo
    checkout_kata_containers_repo
    build_and_install_kata_runtime
    build_and_add_agent_to_rootfs
    build_and_install_rootfs
    check_kata_runtime
}

# Based on the jenkins_job_build.sh script in kata-containers/tests/.ci - checks out source code and installs dependencies
initialize() {
    # We need git to checkout and bootstrap the ci scripts and some other packages used in testing
    sudo apt-get update && sudo apt-get install -y curl git qemu-utils

    grep -qxF "export GOPATH=\${HOME}/go" "${PROFILE}" || echo "export GOPATH=\${HOME}/go" >> "${PROFILE}"
    grep -qxF "export GOROOT=/usr/local/go" "${PROFILE}" || echo "export GOROOT=/usr/local/go" >> "${PROFILE}"
    grep -qxF "export PATH=\${GOPATH}/bin:/usr/local/go/bin:\${PATH}" "${PROFILE}" || echo "export PATH=\${GOPATH}/bin:/usr/local/go/bin:\${PATH}" >> "${PROFILE}"

    # Load the new go and PATH parameters from the profile
    . ${PROFILE}
    mkdir -p "${GOPATH}"

    checkout_tests_repo

    pushd "${tests_repo_dir}"
    local ci_dir_name=".ci"
    sudo -E PATH=$PATH -s "${ci_dir_name}/install_go.sh" -p -f
    sudo -E PATH=$PATH -s "${ci_dir_name}/install_rust.sh"
    # Need to change ownership of rustup so later process can create temp files there
    sudo chown -R ${USER}:${USER} "${HOME}/.rustup"

    checkout_kata_containers_repo

    # Run setup, but don't install kata as we will build it ourselves in locations matching the developer guide
    export INSTALL_KATA="no"
    sudo -E PATH=$PATH -s ${ci_dir_name}/setup.sh
    # Reload the profile to pick up installed dependencies
    . ${PROFILE}
    popd
}

checkout_tests_repo() {
    echo "Creating repo: ${tests_repo} and branch ${tests_branch} into ${tests_repo_dir}..."
    # Due to git https://github.blog/2022-04-12-git-security-vulnerability-announced/ the tests repo needs
    # to be owned by root as it is re-checked out in rootfs.sh
    mkdir -p $(dirname "${tests_repo_dir}")
    [ -d "${tests_repo_dir}" ] || sudo -E git clone "https://${tests_repo}.git" "${tests_repo_dir}"
    sudo -E chown -R root:root "${tests_repo_dir}"
    pushd "${tests_repo_dir}"
    sudo -E git fetch
    if [ -n "${tests_branch}" ]; then
        sudo -E git checkout ${tests_branch}
    fi
    sudo -E git reset --hard origin/${tests_branch}
    popd

    source "${BATS_TEST_DIRNAME}/lib.sh"
    source "${BATS_TEST_DIRNAME}/../../confidential/lib.sh"
}

# Note: clone_katacontainers_repo uses go, so that needs to be installed first
checkout_kata_containers_repo() {
    source "${tests_repo_dir}/.ci/lib.sh"
    echo "Creating repo: ${katacontainers_repo} and branch ${kata_default_branch} into ${katacontainers_repo_dir}..."
    clone_katacontainers_repo
    sudo -E chown -R ${USER}:${USER} "${katacontainers_repo_dir}"
}

build_and_install_kata_runtime() {
    pushd ${katacontainers_repo_dir}/src/runtime
    make clean && make DEFAULT_HYPERVISOR=${KATA_HYPERVISOR} && sudo -E PATH=$PATH make DEFAULT_HYPERVISOR=${KATA_HYPERVISOR} install
    popd
}

configure() {
    configure_kata_to_use_rootfs
    enable_full_debug
    enable_agent_console

    # Switch image offload to true in kata config
    switch_image_service_offload "on"

    configure_cc_containerd
    # From crictl v1.24.1 the default timeout leads to the pod creation failing, so update it
    sudo crictl config --set timeout=10
}

configure_kata_to_use_rootfs() {
    sudo mkdir -p /etc/kata-containers/
    sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
    sudo sed -i 's/^\(initrd =.*\)/# \1/g' ${RUNTIME_CONFIG_PATH}
}

build_and_add_agent_to_rootfs() {
    build_a_custom_kata_agent
    add_custom_agent_to_rootfs
}

build_a_custom_kata_agent() {
    # Install libseccomp for static linking
    sudo -E PATH=$PATH GOPATH=$GOPATH ${katacontainers_repo_dir}/ci/install_libseccomp.sh /tmp/kata-libseccomp /tmp/kata-gperf
    export LIBSECCOMP_LINK_TYPE=static
    export LIBSECCOMP_LIB_PATH=/tmp/kata-libseccomp/lib

    . "$HOME/.cargo/env"
    pushd ${katacontainers_repo_dir}/src/agent
    sudo -E PATH=$PATH make

    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le

    # Run a make install into the rootfs directory in order to create the kata-agent.service file which is required when we add to the rootfs
    sudo -E PATH=$PATH make install DESTDIR="${ROOTFS_DIR}"
    popd
}

create_a_local_rootfs() {
    sudo rm -rf "${ROOTFS_DIR}"
    pushd ${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder
    export distro="ubuntu"
    [[ -z "${USE_PODMAN:-}" ]] && use_docker="${use_docker:-1}"
    sudo -E OS_VERSION="${OS_VERSION:-}" GOPATH=$GOPATH EXTRA_PKGS="vim iputils-ping net-tools" DEBUG="${DEBUG:-}" USE_DOCKER="${use_docker:-}" SKOPEO=${SKOPEO:-} AA_KBC=${AA_KBC:-} UMOCI=yes SECCOMP=yes ./rootfs.sh -r ${ROOTFS_DIR} ${distro}

    # install_rust.sh during rootfs.sh switches us to the main branch of the tests repo, so switch back now
    pushd "${tests_repo_dir}"
    sudo -E git checkout ${tests_branch}
    popd
    # During the ./rootfs.sh call the kata agent is built as root, so we need to update the permissions so we can rebuild it
    sudo chown -R ${USER}:${USER} "${katacontainers_repo_dir}/src/agent/"

    popd
}

add_custom_agent_to_rootfs() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder

    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le

    sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/usr/bin ${katacontainers_repo_dir}/src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent
    sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
    sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
    popd
}

build_and_install_rootfs() {
    build_rootfs_image
    install_rootfs_image
}

build_rootfs_image() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/image-builder
    # Logic from install_kata_image.sh - if we aren't using podman (ie on a fedora like), then use docker
    [[ -z "${USE_PODMAN:-}" ]] && use_docker="${use_docker:-1}"
    sudo -E USE_DOCKER="${use_docker:-}" ./image_builder.sh ${ROOTFS_DIR}
    popd
}

install_rootfs_image() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/image-builder
    local commit=$(git log --format=%h -1 HEAD)
    local date=$(date +%Y-%m-%d-%T.%N%z)
    local image="kata-containers-${date}-${commit}"
    sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
    (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
    echo "Built Rootfs from ${ROOTFS_DIR} to /usr/share/kata-containers/${image}"
    ls -al /usr/share/kata-containers/
    popd
}

install_guest_kernel_image() {
    pushd ${katacontainers_repo_dir}/tools/packaging/kernel
    sudo -E PATH=$PATH ./build-kernel.sh setup
    sudo -E PATH=$PATH ./build-kernel.sh build
    sudo chmod u+wrx /usr/share/kata-containers/ # Give user permission to install kernel
    sudo -E PATH=$PATH ./build-kernel.sh install
    popd
}

build_qemu() {
    ${tests_repo_dir}/.ci/install_virtiofsd.sh
    ${tests_repo_dir}/.ci/install_qemu.sh
}

build_cloud_hypervisor() {
    ${tests_repo_dir}/.ci/install_virtiofsd.sh
    ${tests_repo_dir}/.ci/install_cloud_hypervisor.sh
}

check_kata_runtime() {
    sudo kata-runtime check
}

k8s_pod_file="${HOME}/busybox-cc.yaml"
init_kubernetes() {
    # Check that kubeadm was installed and install it otherwise
    if ! [ -x "$(command -v kubeadm)" ]; then
        pushd "${tests_repo_dir}/.ci"
        sudo -E PATH=$PATH -s install_kubernetes.sh
        if [ "${CRI_CONTAINERD}" == "yes" ]; then
            sudo -E PATH=$PATH -s "configure_containerd_for_kubernetes.sh"
        fi
        popd
    fi

    # If kubernetes init has previously run we need to clean it by removing the image and resetting k8s
    local cid=$(sudo docker ps -a -q -f name=^/kata-registry$)
    if [ -n "${cid}" ]; then
        sudo docker stop ${cid} && sudo docker rm ${cid}
    fi
    local k8s_nodes=$(kubectl get nodes -o name 2>/dev/null || true)
    if [ -n "${k8s_nodes}" ]; then
        sudo kubeadm reset -f
    fi

    export CI="true" && sudo -E PATH=$PATH -s ${tests_repo_dir}/integration/kubernetes/init.sh
    sudo chown ${USER}:$(id -g -n ${USER}) "$HOME/.kube/config"
    cat << EOF > ${k8s_pod_file}
apiVersion: v1
kind: Pod
metadata:
  name: busybox-cc
spec:
  runtimeClassName: kata
  containers:
  - name: nginx
    image: quay.io/kata-containers/confidential-containers:signed
    imagePullPolicy: Always
EOF
}

call_kubernetes_create_cc_pod() {
    kubernetes_create_cc_pod ${k8s_pod_file}
}

call_kubernetes_delete_cc_pod() {
    pod_name=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
    kubernetes_delete_cc_pod $pod_name
}

call_kubernetes_create_ssh_demo_pod() {
    setup_decryption_files_in_guest
    kubernetes_create_ssh_demo_pod
}

call_connect_to_ssh_demo_pod() {
    connect_to_ssh_demo_pod
}

call_kubernetes_delete_ssh_demo_pod() {
    pod=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
    kubernetes_delete_ssh_demo_pod $pod
}

crictl_sandbox_name=kata-cc-busybox-sandbox
call_crictl_create_cc_pod() {
    # Update iptables to allow forwarding to the cni0 bridge, avoiding issues caused by the docker0 bridge
    sudo iptables -P FORWARD ACCEPT

    # get_pod_config in tests_common exports `pod_config` that points to the prepared pod config yaml
    get_pod_config

    crictl_delete_cc_pod_if_exists "${crictl_sandbox_name}"
    crictl_create_cc_pod "${pod_config}"
    sudo crictl pods
}

call_crictl_create_cc_container() {
    # Create container configuration yaml based on our test copy of busybox
    # get_pod_config in tests_common exports `pod_config` that points to the prepared pod config yaml
    get_pod_config

    local container_config="${FIXTURES_DIR}/${CONTAINER_CONFIG_FILE:-container-config.yaml}"
    local pod_name=${crictl_sandbox_name}
    crictl_create_cc_container ${pod_name} ${pod_config} ${container_config}
    sudo crictl ps -a
}

crictl_delete_cc() {
    crictl_delete_cc_pod ${crictl_sandbox_name}
}

test_kata_runtime() {
    echo "Running ctr with the kata runtime..."
    local test_image="quay.io/kata-containers/confidential-containers:signed"
    if [ -z $(ctr images ls -q name=="${test_image}") ]; then
        sudo ctr image pull "${test_image}"
    fi
    sudo ctr run --runtime "io.containerd.kata.v2" --rm -t "${test_image}" test-kata uname -a
}

run_kata_and_capture_logs() {
    echo "Clearing systemd journal..."
    sudo systemctl stop systemd-journald
    sudo rm -f /var/log/journal/*/* /run/log/journal/*/*
    sudo systemctl start systemd-journald
    test_kata_runtime
    echo "Collecting logs..."
    sudo journalctl -q -o cat -a -t kata-runtime > ${HOME}/kata-runtime.log
    sudo journalctl -q -o cat -a -t kata > ${HOME}/shimv2.log
    echo "Logs output to ${HOME}/kata-runtime.log and ${HOME}/shimv2.log"
}

get_ids() {
    guest_cid=$(sudo ss -H --vsock | awk '{print $6}' | cut -d: -f1)
    sandbox_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "id [^,][^,].* " | awk '{print $2}')
}

open_kata_shell() {
    get_ids
    sudo -E "PATH=$PATH" kata-runtime exec ${sandbox_id}
}

build_bundle_dir_if_necessary() {
    bundle_dir="/tmp/bundle"
    if [ ! -d "${bundle_dir}" ]; then
        rootfs_dir="$bundle_dir/rootfs"
        image="quay.io/kata-containers/confidential-containers:signed"
        mkdir -p "$rootfs_dir" && (cd "$bundle_dir" && runc spec)
        sudo docker export $(sudo docker create "$image") | tar -C "$rootfs_dir" -xvf -
    fi
    # There were errors in the create container agent-ctl command due to /bin/ seemingly not being on the path, so hardcode it
    sudo sed -i -e 's%^\(\t*\)"sh"$%\1"/bin/sh"%g' "${bundle_dir}/config.json"
}

build_agent_ctl() {
    cd ${GOPATH}/src/${katacontainers_repo}/src/tools/agent-ctl/
    if [ -e "${HOME}/.cargo/registry" ]; then
        sudo chown -R ${USER}:${USER} "${HOME}/.cargo/registry"
    fi
    sudo -E PATH=$PATH -s make
    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
    cd "./target/${ARCH}-unknown-linux-${LIBC}/release/"
}

run_agent_ctl_command() {
    get_ids
    build_bundle_dir_if_necessary
    command=$1
    # If kata-agent-ctl is pre-built in this directory, use it directly, otherwise build it first and switch to release
    if [ ! -x kata-agent-ctl ]; then
        build_agent_ctl
    fi
    ./kata-agent-ctl -l debug connect --bundle-dir "${bundle_dir}" --server-address "vsock://${guest_cid}:1024" -c "${command}"
|
||||
}
|
||||
|
||||
agent_pull_image() {
|
||||
run_agent_ctl_command "PullImage image=${PULL_IMAGE} cid=${CONTAINER_ID} source_creds=${SOURCE_CREDS}"
|
||||
}
|
||||
|
||||
agent_create_container() {
|
||||
run_agent_ctl_command "CreateContainer cid=${CONTAINER_ID}"
|
||||
}
|
||||
|
||||
shim_pull_image() {
|
||||
get_ids
|
||||
local ctr_shim_command="sudo ctr --namespace k8s.io shim --id ${sandbox_id} pull-image ${PULL_IMAGE} ${CONTAINER_ID}"
|
||||
echo "Issuing command '${ctr_shim_command}'"
|
||||
${ctr_shim_command}
|
||||
}
|
||||
|
||||
call_copy_signature_files_to_guest() {
|
||||
# TODO #5173 - remove this once the kernel_params aren't ignored by the agent config
|
||||
export DEBUG_CONSOLE="true"
|
||||
|
||||
if [ "${SKOPEO:-}" = "yes" ]; then
|
||||
add_kernel_params "agent.container_policy_file=/etc/containers/quay_verification/quay_policy.json"
|
||||
setup_skopeo_signature_files_in_guest
|
||||
else
|
||||
# TODO #4888 - set config to specifically enable signature verification to be on in ImageClient
|
||||
setup_offline_fs_kbc_signature_files_in_guest
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
while getopts "dh" opt; do
|
||||
case "$opt" in
|
||||
d)
|
||||
export DEBUG="-d"
|
||||
set -x
|
||||
;;
|
||||
h)
|
||||
usage 0
|
||||
;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
subcmd="${1:-}"
|
||||
|
||||
[ -z "${subcmd}" ] && usage 1
|
||||
|
||||
case "${subcmd}" in
|
||||
all)
|
||||
build_and_install_all
|
||||
run_kata_and_capture_logs
|
||||
;;
|
||||
build_and_install_all)
|
||||
build_and_install_all
|
||||
;;
|
||||
rebuild_and_install_kata)
|
||||
rebuild_and_install_kata
|
||||
;;
|
||||
initialize)
|
||||
initialize
|
||||
;;
|
||||
build_kata_runtime)
|
||||
build_and_install_kata_runtime
|
||||
;;
|
||||
configure)
|
||||
configure
|
||||
;;
|
||||
create_rootfs)
|
||||
create_a_local_rootfs
|
||||
;;
|
||||
build_and_add_agent_to_rootfs)
|
||||
build_and_add_agent_to_rootfs
|
||||
;;
|
||||
build_and_install_rootfs)
|
||||
build_and_install_rootfs
|
||||
;;
|
||||
install_guest_kernel)
|
||||
install_guest_kernel_image
|
||||
;;
|
||||
build_cloud_hypervisor)
|
||||
build_cloud_hypervisor
|
||||
;;
|
||||
build_qemu)
|
||||
build_qemu
|
||||
;;
|
||||
init_kubernetes)
|
||||
init_kubernetes
|
||||
;;
|
||||
crictl_create_cc_pod)
|
||||
call_crictl_create_cc_pod
|
||||
;;
|
||||
crictl_create_cc_container)
|
||||
call_crictl_create_cc_container
|
||||
;;
|
||||
crictl_delete_cc)
|
||||
crictl_delete_cc
|
||||
;;
|
||||
kubernetes_create_cc_pod)
|
||||
call_kubernetes_create_cc_pod
|
||||
;;
|
||||
kubernetes_delete_cc_pod)
|
||||
call_kubernetes_delete_cc_pod
|
||||
;;
|
||||
kubernetes_create_ssh_demo_pod)
|
||||
call_kubernetes_create_ssh_demo_pod
|
||||
;;
|
||||
connect_to_ssh_demo_pod)
|
||||
call_connect_to_ssh_demo_pod
|
||||
;;
|
||||
kubernetes_delete_ssh_demo_pod)
|
||||
call_kubernetes_delete_ssh_demo_pod
|
||||
;;
|
||||
test)
|
||||
test_kata_runtime
|
||||
;;
|
||||
test_capture_logs)
|
||||
run_kata_and_capture_logs
|
||||
;;
|
||||
open_kata_console)
|
||||
open_kata_console
|
||||
;;
|
||||
open_kata_shell)
|
||||
open_kata_shell
|
||||
;;
|
||||
agent_pull_image)
|
||||
agent_pull_image
|
||||
;;
|
||||
shim_pull_image)
|
||||
shim_pull_image
|
||||
;;
|
||||
agent_create_container)
|
||||
agent_create_container
|
||||
;;
|
||||
copy_signature_files_to_guest)
|
||||
call_copy_signature_files_to_guest
|
||||
;;
|
||||
*)
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
main $@
|
||||
@@ -1,45 +0,0 @@
# Copyright (c) 2021 IBM Corp.
#
# SPDX-License-Identifier: Apache-2.0
#

aa_kbc_params = "$AA_KBC_PARAMS"
https_proxy = "$HTTPS_PROXY"
[endpoints]
allowed = [
    "AddARPNeighborsRequest",
    "AddSwapRequest",
    "CloseStdinRequest",
    "CopyFileRequest",
    "CreateContainerRequest",
    "CreateSandboxRequest",
    "DestroySandboxRequest",
    #"ExecProcessRequest",
    "GetMetricsRequest",
    "GetOOMEventRequest",
    "GuestDetailsRequest",
    "ListInterfacesRequest",
    "ListRoutesRequest",
    "MemHotplugByProbeRequest",
    "OnlineCPUMemRequest",
    "PauseContainerRequest",
    "PullImageRequest",
    "ReadStreamRequest",
    "RemoveContainerRequest",
    #"ReseedRandomDevRequest",
    "ResizeVolumeRequest",
    "ResumeContainerRequest",
    "SetGuestDateTimeRequest",
    "SignalProcessRequest",
    "StartContainerRequest",
    "StartTracingRequest",
    "StatsContainerRequest",
    "StopTracingRequest",
    "TtyWinResizeRequest",
    "UpdateContainerRequest",
    "UpdateInterfaceRequest",
    "UpdateRoutesRequest",
    "VolumeStatsRequest",
    "WaitProcessRequest",
    "WriteStreamRequest"
]
@@ -1,479 +0,0 @@
# How to build, run and test Kata CCv0

## Introduction and Background

In order to make building (locally) and demoing the Kata Containers `CCv0` code base as simple as possible I've
shared a script [`ccv0.sh`](./ccv0.sh). This script was originally my attempt to automate the steps of the
[Developer Guide](https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md) so that I could run
different sections of them repeatedly and reliably as I was playing around with making changes to different parts of the
Kata code base. I then tried to weave in some of the [`tests/.ci`](https://github.com/kata-containers/tests/tree/main/.ci)
scripts in order to have less duplicated code.
As we progress on the confidential containers journey I hope to add more features to demonstrate the functionality
we have working.

*Disclaimer: This script has mostly just been used and tested by me ([@stevenhorsman](https://github.com/stevenhorsman)),*
*so there might be issues with it. I'm happy to try and help solve these if possible, but this shouldn't be considered a*
*fully supported process by the Kata Containers community.*

### Basic script set-up and optional environment variables

In order to build, configure and demo the CCv0 functionality, these are the set-up steps I take:
- Provision a new VM
    - *I chose an Ubuntu 20.04 8GB VM for this as I had one available. There are some dependencies on apt-get installed*
      *packages, so these will need re-working to be compatible with other platforms.*
- Copy the script over to your VM *(I put it in the home directory)* and ensure it has execute permission by running
    ```bash
    $ chmod u+x ccv0.sh
    ```
- Optionally set up some environment variables
    - By default the script checks out the `CCv0` branches of the `kata-containers/kata-containers` and
      `kata-containers/tests` repositories, but it is designed to be usable for testing personal forks and branches as well.
      If you want to build and run these you can export the `katacontainers_repo`, `katacontainers_branch`, `tests_repo`
      and `tests_branch` variables e.g.
      ```bash
      $ export katacontainers_repo=github.com/stevenhorsman/kata-containers
      $ export katacontainers_branch=stevenh/agent-pull-image-endpoint
      $ export tests_repo=github.com/stevenhorsman/tests
      $ export tests_branch=stevenh/add-ccv0-changes-to-build
      ```
      before running the script.
    - By default the build and configuration use `QEMU` as the hypervisor. In order to use `Cloud Hypervisor` instead
      set:
      ```
      $ export KATA_HYPERVISOR="cloud-hypervisor"
      ```
      before running the build.

- At this point you can provision a Kata confidential containers pod and container with either
  [`crictl`](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image),
  or [Kubernetes](#using-kubernetes-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
  and then test and use it.

### Using crictl for end-to-end provisioning of a Kata confidential containers pod with an unencrypted image

- Run the full build process with Kubernetes turned off, so its configuration doesn't interfere with `crictl`, using:
    ```bash
    $ export KUBERNETES="no"
    $ export KATA_HYPERVISOR="qemu"
    $ ~/ccv0.sh -d build_and_install_all
    ```
    > **Note**: Much of this script has to be run as `sudo`, so you are likely to get prompted for your password.
    - *I run this script sourced just so that the required installed components are accessible on the `PATH` to the rest*
      *of the process without having to reload the session.*
    - The steps that `build_and_install_all` takes are:
        - Check out the git repos for the `tests` and `kata-containers` repos as specified by the environment variables
          (defaulting to the `CCv0` branches if they are not supplied)
        - Use the `tests/.ci` scripts to install the build dependencies
        - Build and install the Kata runtime
        - Configure Kata to use containerd and for debug and confidential containers features to be enabled (including
          enabling console access to the Kata guest shell, which should only be done in development)
        - Create, build and install a rootfs for the Kata hypervisor to use. For 'CCv0' this is currently based on Ubuntu
          20.04 and has extra packages like `umoci` added.
        - Build the Kata guest kernel
        - Install the hypervisor (in order to select which hypervisor will be used, the `KATA_HYPERVISOR` environment
          variable can be used to select between `qemu` or `cloud-hypervisor`)
    > **Note**: Depending on where your VMs are hosted and how IPs are shared you might get an error from docker
    matching `ERROR: toomanyrequests: Too Many Requests`. To get past
    this, log in to Docker Hub and pull the images used with:
    > ```bash
    > $ sudo docker login
    > $ sudo docker pull ubuntu
    > ```
    > then re-run the command.
    - The first time this runs it may take a while, but subsequent runs will be quicker as more things are already
      installed, and they can be further cut down by not running all the above steps;
      [see "Additional script usage" below](#additional-script-usage)

- Create a new Kata sandbox pod using `crictl` with:
    ```bash
    $ ~/ccv0.sh crictl_create_cc_pod
    ```
    - This creates a pod configuration file, creates the pod from this using
      `sudo crictl runp -r kata ~/pod-config.yaml` and runs `sudo crictl pods` to show the pod
- Create a new Kata confidential container with:
    ```bash
    $ ~/ccv0.sh crictl_create_cc_container
    ```
    - This creates a container (based on `busybox:1.33.1`) in the Kata cc sandbox and prints a list of containers.
      This will have been created based on an image pulled in the Kata pod sandbox/guest, not on the host machine.

At this point you should have a `crictl` pod and container that is using the Kata confidential containers runtime.
You can [validate that the container image was pulled on the guest](#validate-that-the-container-image-was-pulled-on-the-guest)
or [use the Kata pod sandbox for testing with `agent-ctl` or `ctr shim`](#using-a-kata-pod-sandbox-for-testing-with-agent-ctl-or-ctr-shim)

#### Clean up the `crictl` pod sandbox and container
- When the testing is complete you can delete the container and pod by running:
    ```bash
    $ ~/ccv0.sh crictl_delete_cc
    ```
### Using Kubernetes for end-to-end provisioning of a Kata confidential containers pod with an unencrypted image

- Run the full build process with the Kubernetes environment variable set to `"yes"`, so the Kubernetes cluster is
  configured and created using the VM as a single node cluster:
    ```bash
    $ export KUBERNETES="yes"
    $ ~/ccv0.sh build_and_install_all
    ```
    > **Note**: Depending on where your VMs are hosted and how IPs are shared you might get an error from docker
    matching `ERROR: toomanyrequests: Too Many Requests`. To get past
    this, log in to Docker Hub and pull the images used with:
    > ```bash
    > $ sudo docker login
    > $ sudo docker pull registry:2
    > $ sudo docker pull ubuntu:20.04
    > ```
    > then re-run the command.
- Check that your Kubernetes cluster has been correctly set up by running:
    ```bash
    $ kubectl get nodes
    ```
    and checking that you see a single node e.g.
    ```text
    NAME                             STATUS   ROLES                  AGE   VERSION
    stevenh-ccv0-k8s1.fyre.ibm.com   Ready    control-plane,master   43s   v1.22.0
    ```
- Create a Kata confidential containers pod by running:
    ```bash
    $ ~/ccv0.sh kubernetes_create_cc_pod
    ```
- Wait a few seconds for the pod to start, then check that the pod's status is `Running` with
    ```bash
    $ kubectl get pods
    ```
    which should show something like:
    ```text
    NAME         READY   STATUS    RESTARTS   AGE
    busybox-cc   1/1     Running   0          54s
    ```

- At this point you should have a Kubernetes pod and container running that is using the Kata
  confidential containers runtime.
  You can [validate that the container image was pulled on the guest](#validate-that-the-container-image-was-pulled-on-the-guest)
  or [use the Kata pod sandbox for testing with `agent-ctl` or `ctr shim`](#using-a-kata-pod-sandbox-for-testing-with-agent-ctl-or-ctr-shim)

#### Clean up the Kubernetes pod sandbox and container
- When the testing is complete you can delete the container and pod by running:
    ```bash
    $ ~/ccv0.sh kubernetes_delete_cc_pod
    ```

### Validate that the container image was pulled on the guest

There are a couple of ways we can check that the container image pull was offloaded to the guest: by checking
the guest's file system for the unpacked bundle, and by checking the host's directories to ensure it wasn't also pulled
there.
- To check the guest's file system:
    - Open a shell into the Kata guest with:
      ```bash
      $ ~/ccv0.sh open_kata_shell
      ```
    - List the files in the directory that the container image bundle should have been unpacked to with:
      ```bash
      $ ls -ltr /run/kata-containers/confidential-containers_signed/
      ```
    - This should give something like
      ```
      total 72
      -rw-r--r--  1 root root  2977 Jan 20 10:03 config.json
      -rw-r--r--  1 root root   372 Jan 20 10:03 umoci.json
      -rw-r--r--  1 root root 63584 Jan 20 10:03 sha256_be9faa75035c20288cde7d2cdeb6cd1f5f4dbcd845d3f86f7feab61c4eff9eb5.mtree
      drwxr-xr-x 12 root root   240 Jan 20 10:03 rootfs
      ```
      which shows how the image has been pulled and then unbundled on the guest.
    - Leave the Kata guest shell by running:
      ```bash
      $ exit
      ```
- To verify that the image wasn't pulled on the host system we can look at the shared sandbox on the host; we
  should only see a single bundle for the pause container, as the `busybox` based container image should have been
  pulled on the guest:
    - Find all the `rootfs` directories under the pod's shared directory with:
      ```bash
      $ pod_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "id [^,][^,].* " | awk '{print $2}')
      $ sudo find /run/kata-containers/shared/sandboxes/${pod_id}/shared -name rootfs
      ```
      which should only show a single `rootfs` directory if the container image was pulled on the guest, not the host
    - Looking at that `rootfs` directory with
      ```bash
      $ sudo ls -ltr $(sudo find /run/kata-containers/shared/sandboxes/${pod_id}/shared -name rootfs)
      ```
      shows something similar to
      ```
      total 668
      -rwxr-xr-x 1 root root 682696 Aug 25 13:58 pause
      drwxr-xr-x 2 root root      6 Jan 20 02:01 proc
      drwxr-xr-x 2 root root      6 Jan 20 02:01 dev
      drwxr-xr-x 2 root root      6 Jan 20 02:01 sys
      drwxr-xr-x 2 root root     25 Jan 20 02:01 etc
      ```
      which is clearly the pause container, indicating that the `busybox` based container image is not exposed to the host.

### Using a Kata pod sandbox for testing with `agent-ctl` or `ctr shim`

Once you have a Kata pod sandbox created as described above, either using
[`crictl`](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image), or [Kubernetes](#using-kubernetes-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image),
you can use this to test specific components of the Kata confidential
containers architecture. This can be useful for development and debugging to isolate and test features
that aren't broadly supported end-to-end. Here are some examples:

- In the first terminal run the pull image on guest command against the Kata agent, via the shim (`containerd-shim-kata-v2`).
  This can be achieved using the [containerd](https://github.com/containerd/containerd) CLI tool, `ctr`, which can be used to
  interact with the shim directly. The command takes the form
  `ctr --namespace k8s.io shim --id <sandbox-id> pull-image <image> <new-container-id>` and can be run directly, or through
  the `ccv0.sh` script to automatically fill in the variables:
    - Optionally, set up some environment variables to set the image and credentials used:
        - By default the shim pull image test in `ccv0.sh` will use the `busybox:1.33.1` based test image
          `quay.io/kata-containers/confidential-containers:signed` which requires no authentication. To use a different
          image, set the `PULL_IMAGE` environment variable e.g.
          ```bash
          $ export PULL_IMAGE="docker.io/library/busybox:latest"
          ```
          Currently the containerd shim pull image
          code doesn't support using a container registry that requires authentication, so if this is required, see the
          below steps to run the pull image command against the agent directly.
    - Run the pull image agent endpoint with:
      ```bash
      $ ~/ccv0.sh shim_pull_image
      ```
      which will print the `ctr shim` command for reference
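      For reference, the expanded command looks something like the following sketch (the sandbox id is a
      placeholder here; in the script it is discovered by the `get_ids` helper from the running
      `containerd-shim-kata-v2` process):
      ```bash
      # Illustrative only: substitute the real sandbox id and your chosen new container id
      $ sudo ctr --namespace k8s.io shim --id "${sandbox_id}" pull-image \
          "${PULL_IMAGE:-quay.io/kata-containers/confidential-containers:signed}" "${CONTAINER_ID}"
      ```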
- Alternatively you can issue the command directly to the `kata-agent` pull image endpoint, which also supports
  credentials in order to pull from an authenticated registry:
    - Optionally set up some environment variables to set the image and credentials used:
        - Set the `PULL_IMAGE` environment variable e.g. `export PULL_IMAGE="docker.io/library/busybox:latest"`
          if a specific container image is required.
        - If the container registry for the image requires authentication then this can be set with an environment
          variable `SOURCE_CREDS`. For example, to use Docker Hub (`docker.io`) as an authenticated user first run
          `export SOURCE_CREDS="<dockerhub username>:<dockerhub api key>"`
          > **Note**: the credentials support on the agent request is a tactical solution for the short-term
          proof of concept to allow more images to be pulled and tested. Once we have support for getting
          keys into the Kata guest image using the attestation-agent and/or KBS I'd expect container registry
          credentials to be looked up using that mechanism.
    - Run the pull image agent endpoint with
      ```bash
      $ ~/ccv0.sh agent_pull_image
      ```
      and you should see output which includes `Command PullImage (1 of 1) returned (Ok(()), false)` to indicate
      that the `PullImage` request was successful e.g.
      ```
      Finished release [optimized] target(s) in 0.21s
      {"msg":"announce","level":"INFO","ts":"2021-09-15T08:40:14.189360410-07:00","subsystem":"rpc","name":"kata-agent-ctl","pid":"830920","version":"0.1.0","source":"kata-agent-ctl","config":"Config { server_address: \"vsock://1970354082:1024\", bundle_dir: \"/tmp/bundle\", timeout_nano: 0, interactive: false, ignore_errors: false }"}
      {"msg":"client setup complete","level":"INFO","ts":"2021-09-15T08:40:14.193639057-07:00","pid":"830920","source":"kata-agent-ctl","name":"kata-agent-ctl","subsystem":"rpc","version":"0.1.0","server-address":"vsock://1970354082:1024"}
      {"msg":"Run command PullImage (1 of 1)","level":"INFO","ts":"2021-09-15T08:40:14.196643765-07:00","pid":"830920","source":"kata-agent-ctl","subsystem":"rpc","name":"kata-agent-ctl","version":"0.1.0"}
      {"msg":"response received","level":"INFO","ts":"2021-09-15T08:40:43.828200633-07:00","source":"kata-agent-ctl","name":"kata-agent-ctl","subsystem":"rpc","version":"0.1.0","pid":"830920","response":""}
      {"msg":"Command PullImage (1 of 1) returned (Ok(()), false)","level":"INFO","ts":"2021-09-15T08:40:43.828261708-07:00","subsystem":"rpc","pid":"830920","source":"kata-agent-ctl","version":"0.1.0","name":"kata-agent-ctl"}
      ```
      > **Note**: The first time that `~/ccv0.sh agent_pull_image` is run, the `agent-ctl` tool will be built,
      which may take a few minutes.
- To validate that the image pull was successful, you can open a shell into the Kata guest with:
    ```bash
    $ ~/ccv0.sh open_kata_shell
    ```
- Check the `/run/kata-containers/` directory to verify that the container image bundle has been created in a directory
  named either `01234556789` (for the container id), or the container image name, e.g.
    ```bash
    $ ls -ltr /run/kata-containers/confidential-containers_signed/
    ```
    which should show something like
    ```
    total 72
    drwxr-xr-x 10 root root   200 Jan  1  1970 rootfs
    -rw-r--r--  1 root root  2977 Jan 20 16:45 config.json
    -rw-r--r--  1 root root   372 Jan 20 16:45 umoci.json
    -rw-r--r--  1 root root 63584 Jan 20 16:45 sha256_be9faa75035c20288cde7d2cdeb6cd1f5f4dbcd845d3f86f7feab61c4eff9eb5.mtree
    ```
- Leave the Kata shell by running:
    ```bash
    $ exit
    ```

## Verifying signed images

For this sample demo, we use local attestation to pass through the required
configuration to do container image signature verification. Due to this, the ability to verify images is limited
to a pre-created selection of test images in our test
repository [`quay.io/kata-containers/confidential-containers`](https://quay.io/repository/kata-containers/confidential-containers?tab=tags).
For pulling images not in this test repository (called an *unprotected* registry below), we fall back to the behaviour
of not enforcing signatures. More documentation on how to customise this to match your own containers through local,
or remote attestation will be available in future.

In our test repository there are three tagged images:

| Test Image | Base Image used | Signature status | GPG key status |
| --- | --- | --- | --- |
| `quay.io/kata-containers/confidential-containers:signed` | `busybox:1.33.1` | [signature](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/signatures.tar) embedded in kata rootfs | [public key](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/public.gpg) embedded in kata rootfs |
| `quay.io/kata-containers/confidential-containers:unsigned` | `busybox:1.33.1` | not signed | not signed |
| `quay.io/kata-containers/confidential-containers:other_signed` | `nginx:1.21.3` | [signature](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/signatures.tar) embedded in kata rootfs | GPG key not kept |

Using a standard unsigned `busybox` image that can be pulled from another, *unprotected*, `quay.io` repository we can
test a few scenarios.

In this sample, with local attestation, we pass the public GPG key and signature files, and the [`offline_fs_kbc`
configuration](https://github.com/confidential-containers/attestation-agent/blob/main/src/kbc_modules/offline_fs_kbc/README.md)
into the guest image, which specifies that any container image from `quay.io/kata-containers`
must be signed with the embedded GPG key, and the agent configuration needs updating to enable this.
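For illustration, the signature policy follows the `containers-policy.json` format; a minimal sketch of the shape of
such a policy (the `keyPath` below is a placeholder location, the actual path baked into the rootfs may differ) is:
```json
{
  "default": [{ "type": "insecureAcceptAnything" }],
  "transports": {
    "docker": {
      "quay.io/kata-containers": [
        {
          "type": "signedBy",
          "keyType": "GPGKeys",
          "keyPath": "/etc/containers/quay_verification/public.gpg"
        }
      ]
    }
  }
}
```
Images from the `quay.io/kata-containers` scope must carry a matching GPG signature, while anything else falls back to
being accepted unverified.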
With this policy set, a few image verification tests can be done covering different scenarios, by attempting
to create containers from these images using `crictl`:

- If you don't already have the Kata Containers CC code built and configured for `crictl`, then follow the
  [instructions above](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
  up to the `~/ccv0.sh crictl_create_cc_pod` command.

- In order to enable the guest image, you will need to set up the required configuration, policy and signature files
  by running
  `~/ccv0.sh copy_signature_files_to_guest` and then run `~/ccv0.sh crictl_create_cc_pod`, which will delete and recreate
  your pod, adding in the new files.

- To test that the fallback behaviour works using an unsigned image from an *unprotected* registry we can pull the `busybox`
  image by running:
    ```bash
    $ export CONTAINER_CONFIG_FILE=container-config_unsigned-unprotected.yaml
    $ ~/ccv0.sh crictl_create_cc_container
    ```
    - This finishes by showing the running container e.g.
      ```text
      CONTAINER           IMAGE                               CREATED                  STATE               NAME                        ATTEMPT             POD ID
      98c70fefe997a       quay.io/prometheus/busybox:latest   Less than a second ago   Running             prometheus-busybox-signed   0                   70119e0539238
      ```
- To test that an unsigned image from our *protected* test container registry is rejected we can run:
    ```bash
    $ export CONTAINER_CONFIG_FILE=container-config_unsigned-protected.yaml
    $ ~/ccv0.sh crictl_create_cc_container
    ```
    - This correctly results in an error message from `crictl`:
      `PullImage from image service failed" err="rpc error: code = Internal desc = Security validate failed: Validate image failed: The signatures do not satisfied! Reject reason: [Match reference failed.]" image="quay.io/kata-containers/confidential-containers:unsigned"`
- To test that the signed image from our *protected* test container registry is accepted we can run:
    ```bash
    $ export CONTAINER_CONFIG_FILE=container-config.yaml
    $ ~/ccv0.sh crictl_create_cc_container
    ```
    - This finishes by showing a new `kata-cc-busybox-signed` running container e.g.
      ```text
      CONTAINER           IMAGE                                                     CREATED                  STATE               NAME                     ATTEMPT             POD ID
      b4d85c2132ed9       quay.io/kata-containers/confidential-containers:signed   Less than a second ago   Running             kata-cc-busybox-signed   0                   70119e0539238
      ...
      ```
- Finally, to check that an image with a valid signature but an invalid GPG key (the GPG key being the trusted piece of
  information we really want to protect with the attestation agent in future) fails, we can run:
    ```bash
    $ export CONTAINER_CONFIG_FILE=container-config_signed-protected-other.yaml
    $ ~/ccv0.sh crictl_create_cc_container
    ```
    - Again this results in an error message from `crictl`:
      `"PullImage from image service failed" err="rpc error: code = Internal desc = Security validate failed: Validate image failed: The signatures do not satisfied! Reject reason: [signature verify failed! There is no pubkey can verify the signature!]" image="quay.io/kata-containers/confidential-containers:other_signed"`

### Using Kubernetes to create a Kata confidential containers pod from the encrypted ssh demo sample image

The [ssh-demo](https://github.com/confidential-containers/documentation/tree/main/demos/ssh-demo) explains how to
demonstrate creating a Kata confidential containers pod from an encrypted image with the runtime created by the
[confidential-containers operator](https://github.com/confidential-containers/documentation/blob/main/demos/operator-demo).
To be fully confidential, this should be run on a Trusted Execution Environment, but it can be tested on generic
hardware as well.

If you wish to build the Kata confidential containers runtime to do this yourself, then you can use the following
steps:

- Run the full build process with the Kubernetes environment variable set to `"yes"`, so the Kubernetes cluster is
  configured and created using the VM as a single node cluster, and with `AA_KBC` set to `offline_fs_kbc`:
    ```bash
    $ export KUBERNETES="yes"
    $ export AA_KBC=offline_fs_kbc
    $ ~/ccv0.sh build_and_install_all
    ```
    - The `AA_KBC=offline_fs_kbc` mode will ensure that, when creating the rootfs of the Kata guest, the
      [attestation-agent](https://github.com/confidential-containers/attestation-agent) will be added along with the
      [sample offline KBC](https://github.com/confidential-containers/documentation/blob/main/demos/ssh-demo/aa-offline_fs_kbc-keys.json)
      and an agent configuration file
    > **Note**: Depending on where your VMs are hosted and how IPs are shared you might get an error from docker
    matching `ERROR: toomanyrequests: Too Many Requests`. To get past
    this, log in to Docker Hub and pull the images used with:
    > ```bash
    > $ sudo docker login
    > $ sudo docker pull registry:2
    > $ sudo docker pull ubuntu:20.04
    > ```
    > then re-run the command.
- Check that your Kubernetes cluster has been correctly set up by running:
    ```bash
    $ kubectl get nodes
    ```
    and checking that you see a single node e.g.
    ```text
    NAME                             STATUS   ROLES                  AGE   VERSION
    stevenh-ccv0-k8s1.fyre.ibm.com   Ready    control-plane,master   43s   v1.22.0
    ```
- Create a sample Kata confidential containers ssh pod by running:
    ```bash
    $ ~/ccv0.sh kubernetes_create_ssh_demo_pod
    ```
- At this point you should have a Kubernetes pod running the Kata confidential containers runtime that has pulled
  the [sample image](https://hub.docker.com/r/katadocker/ccv0-ssh), which was encrypted by the key file that we included
  in the rootfs.
  During the pod deployment the image was pulled and then decrypted using the key file, on the Kata guest image, without
  it ever being available to the host.

- To validate that the container is working, you can connect to the image via SSH by running:
    ```bash
    $ ~/ccv0.sh connect_to_ssh_demo_pod
    ```
    - During this connection the host key fingerprint is shown and should match:
      `ED25519 key fingerprint is SHA256:wK7uOpqpYQczcgV00fGCh+X97sJL3f6G1Ku4rvlwtR0.`
    - After you are finished connecting then run:
      ```bash
      $ exit
      ```

- To delete the sample SSH demo pod run:
    ```bash
    $ ~/ccv0.sh kubernetes_delete_ssh_demo_pod
    ```

## Additional script usage

As well as being able to use the script as above to build all of `kata-containers` from scratch, it can be used to just
re-build bits of it by running the script with different parameters. For example, after the first build you will often
not need to re-install the dependencies, the hypervisor or the guest kernel, but just test code changes made to the
runtime and agent. This can be done by running `~/ccv0.sh rebuild_and_install_kata`. (*Note this does a hard checkout*
*from git, so if your changes are only made locally it is better to do the individual steps e.g.*
`~/ccv0.sh build_kata_runtime && ~/ccv0.sh build_and_add_agent_to_rootfs && ~/ccv0.sh build_and_install_rootfs`).
There are commands for a lot of the steps in building, setting up and testing; the full list can be seen by running
`~/ccv0.sh help`:
```
$ ~/ccv0.sh help
Overview:
    Build and test kata containers from source
    Optionally set kata-containers and tests repo and branch as exported variables before running
    e.g. export katacontainers_repo=github.com/stevenhorsman/kata-containers && export katacontainers_branch=kata-ci-from-fork && export tests_repo=github.com/stevenhorsman/tests && export tests_branch=kata-ci-from-fork && ~/ccv0.sh build_and_install_all
Usage:
    ccv0.sh [options] <command>
Commands:
- help:                          Display this help
- all:                           Build and install everything, test kata with containerd and capture the logs
- build_and_install_all:         Build and install everything
- initialize:                    Install dependencies and check out kata-containers source
- rebuild_and_install_kata:      Rebuild the kata runtime and agent and build and install the image
- build_kata_runtime:            Build and install the kata runtime
- configure:                     Configure Kata to use rootfs and enable debug
- create_rootfs:                 Create a local rootfs
- build_and_add_agent_to_rootfs: Builds the kata-agent and adds it to the rootfs
- build_and_install_rootfs:      Builds and installs the rootfs image
- install_guest_kernel:          Setup, build and install the guest kernel
- build_cloud_hypervisor:        Checkout, patch, build and install Cloud Hypervisor
- build_qemu:                    Checkout, patch, build and install QEMU
- init_kubernetes:               Initialize a Kubernetes cluster on this system
- crictl_create_cc_pod:          Use crictl to create a new kata cc pod
- crictl_create_cc_container:    Use crictl to create a new busybox container in the kata cc pod
- crictl_delete_cc:              Use crictl to delete the kata cc pod sandbox and container in it
- kubernetes_create_cc_pod:      Create a Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_delete_cc_pod:      Delete the Kata CC runtime busybox-based pod in Kubernetes
- open_kata_shell:               Open a shell into the kata runtime
- agent_pull_image:              Run PullImage command against the agent with agent-ctl
- shim_pull_image:               Run PullImage command against the shim with ctr
- agent_create_container:        Run CreateContainer command against the agent with agent-ctl
- test:                          Test using kata with containerd
- test_capture_logs:             Test using kata with containerd and capture the logs in the user's home directory

Options:
    -d: Enable debug
    -h: Display this help
```
@@ -1,44 +0,0 @@
# Generating a Kata Containers payload for the Confidential Containers Operator

The [Confidential Containers
Operator](https://github.com/confidential-containers/operator) consumes a Kata
Containers payload, generated from the `CCv0` branch, and here one can find all
the necessary info on how to build such a payload.

## Requirements

* `make` installed on the machine
* Docker installed on the machine
* `sudo` access to the machine

## Process

* Clone [Kata Containers](https://github.com/kata-containers/kata-containers)
  ```sh
  git clone --branch CCv0 https://github.com/kata-containers/kata-containers
  ```
* In case you've already cloned the repo, make sure to switch to the `CCv0` branch
  ```sh
  git checkout CCv0
  ```
* Ensure your tree is clean and in sync with upstream `CCv0`
  ```sh
  git clean -xfd
  git reset --hard <upstream>/CCv0
  ```
* Make sure you're authenticated to `quay.io`
  ```sh
  sudo docker login quay.io
  ```
* From the top repo directory, run:
  ```sh
  sudo make cc-payload
  ```
* Make sure the image was uploaded to the [Confidential Containers
  runtime-payload
  registry](https://quay.io/repository/confidential-containers/runtime-payload?tab=tags)

## Notes

Make sure to run it on a machine that's not the one you're hacking on, prepare a
cup of tea, and get back to it an hour later (at least).
@@ -15,6 +15,18 @@ $ sudo .ci/aarch64/install_rom_aarch64.sh
$ popd
```

## Config KATA QEMU

After executing the above script, two files are generated under `/usr/share/kata-containers/` by default, namely `kata-flash0.img` and `kata-flash1.img`. Next we need to change the Kata QEMU configuration file, located at `/opt/kata/share/defaults/kata-containers/configuration-qemu.toml` by default for a `kata-deploy` installation, to specify that it should use the UEFI ROM installed above. For a package management installation, please use `kata-runtime env` to find the location of the configuration file. Please refer to the following configuration.

```
[hypervisor.qemu]

# -pflash can add image file to VM. The arguments of it should be in format
# of ["/path/to/flash0.img", "/path/to/flash1.img"]
pflashes = ["/usr/share/kata-containers/kata-flash0.img", "/usr/share/kata-containers/kata-flash1.img"]
```

## Run for test

Let's test whether memory hotplug is ready for Kata after installing the UEFI ROM. Make sure containerd is ready to run Kata before testing.
@@ -57,6 +57,7 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
| `io.katacontainers.config.hypervisor.vhost_user_reconnect_timeout_sec` | `string` | the timeout for reconnecting vhost user socket (QEMU) |
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
| `io.katacontainers.config.hypervisor.entropy_source` (R) | string | the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
@@ -87,7 +88,7 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.use_vsock` | `boolean` | specify use of `vsock` for agent communication |
| `io.katacontainers.config.hypervisor.vhost_user_store_path` (R) | `string` | specify the directory path where vhost-user devices related folders, sockets and device nodes should be (QEMU) |
| `io.katacontainers.config.hypervisor.virtio_fs_cache_size` | uint32 | virtio-fs DAX cache size in `MiB` |
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `never` |
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to `virtiofs` daemon |
| `io.katacontainers.config.hypervisor.enable_guest_swap` | `boolean` | enable swap in the guest |
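As a usage sketch, these annotations are set in the pod metadata; the values below (pod name, image, cache mode) are
illustrative examples only:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-kata-pod        # illustrative name
  annotations:
    # Override the virtio-fs cache mode for this pod's Kata VM
    io.katacontainers.config.hypervisor.virtio_fs_cache: "auto"
spec:
  runtimeClassName: kata
  containers:
  - name: app
    image: quay.io/prometheus/busybox:latest
```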
@@ -27,8 +27,6 @@ $ image="quay.io/prometheus/busybox:latest"
$ cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
  uid: $(uuidgen)
  namespace: default
EOF
$ cat << EOF > "${container_yaml}"
metadata:
docs/how-to/how-to-use-erofs-build-rootfs.md (new file, 90 lines)
@@ -0,0 +1,90 @@
# Configure Kata Containers to use EROFS build rootfs

## Introduction
For Kata Containers, the rootfs is used in a read-only way. EROFS can noticeably decrease metadata overhead.

`mkfs.erofs` can generate compressed and uncompressed EROFS images.

For uncompressed images, no files are compressed. However, it is optional to inline the data blocks at the end of a file with the metadata.

For compressed images, each file is compressed using the lz4 or lz4hc algorithm, checking whether compression actually saves space; a file is stored uncompressed if compressing it does not save space.
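As a quick sketch of how each image type is produced (the file and directory names here are examples only):
```shell
# Uncompressed EROFS image built from a rootfs directory
$ mkfs.erofs kata-containers.erofs rootfs/
# Compressed image using the lz4hc algorithm; files that don't shrink stay uncompressed
$ mkfs.erofs -zlz4hc kata-containers.erofs rootfs/
```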
## Performance comparison
|                 | EROFS              | EXT4 | XFS |
|-----------------|--------------------|------|-----|
| Image Size [MB] | 106 (uncompressed) | 256  | 126 |

## Guidance
### Install the `erofs-utils`
#### `apt/dnf` install
On newer `Ubuntu/Debian` systems, it can be installed directly using the `apt` command, and on `Fedora` it can be installed directly using the `dnf` command.

```shell
# Debian/Ubuntu
$ apt install erofs-utils
# Fedora
$ dnf install erofs-utils
```

#### Source install
[https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git](https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git)

##### Compile dependencies
If you need to enable the `Lz4` compression feature, `Lz4 1.8.0+` is required, and `Lz4 1.9.3+` is strongly recommended.

##### Compilation process
For some old lz4 versions (lz4-1.8.0~1.8.3), the lz4hc algorithm is not supported unless lz4-static is installed (it can be installed with `apt install lz4-static.x86_64`). However, these versions have some compression bugs, and it is not recommended to use them directly.
If you use `lz4 1.9.0+`, you can compile directly with the following commands.

```shell
$ ./autogen.sh
$ ./configure
$ make
```

The compiled `mkfs.erofs` program will be saved in the `mkfs` directory. Afterwards, the generated tools can be installed to a system directory using `make install` (requires root privileges).

### Create a local rootfs
```shell
$ export distro="ubuntu"
$ export FS_TYPE="erofs"
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
$ sudo rm -rf "${ROOTFS_DIR}"
$ pushd kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E SECCOMP=no ./rootfs.sh "${distro}"'
$ popd
```

### Add a custom agent to the image - OPTIONAL
> Note:
> - You should only do this step if you are testing with the latest version of the agent.
```shell
$ sudo install -o root -g root -m 0550 -t "${ROOTFS_DIR}/usr/bin" "${ROOTFS_DIR}/../../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent"
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-agent.service" "${ROOTFS_DIR}/usr/lib/systemd/system/"
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-containers.target" "${ROOTFS_DIR}/usr/lib/systemd/system/"
```

### Build a root image
```shell
$ pushd kata-containers/tools/osbuilder/image-builder
$ script -fec 'sudo -E ./image_builder.sh "${ROOTFS_DIR}"'
$ popd
```

### Install the rootfs image
```shell
$ pushd kata-containers/tools/osbuilder/image-builder
$ commit="$(git log --format=%h -1 HEAD)"
$ date="$(date +%Y-%m-%d-%T.%N%z)"
$ rootfs="erofs"
$ image="kata-containers-${rootfs}-${date}-${commit}"
$ sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
$ popd
```

### Use `EROFS` in the runtime
```shell
$ sudo sed -i -e 's/^# *\(rootfs_type\).*=.*$/\1 = erofs/g' /etc/kata-containers/configuration.toml
```
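To sanity-check the change (assuming your configuration lives at the path above):
```shell
$ grep rootfs_type /etc/kata-containers/configuration.toml
rootfs_type = erofs
```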
@@ -104,7 +104,7 @@ sudo dmsetup create "${POOL_NAME}" \

cat << EOF
#
# Add this to your config.toml configuration file and restart `containerd` daemon
# Add this to your config.toml configuration file and restart containerd daemon
#
[plugins]
  [plugins.devmapper]
@@ -32,7 +32,6 @@ The `nydus-sandbox.yaml` looks like below:
metadata:
  attempt: 1
  name: nydus-sandbox
  uid: nydus-uid
  namespace: default
log_directory: /tmp
linux:
@@ -42,8 +42,6 @@ $ image="quay.io/prometheus/busybox:latest"
$ cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
  uid: $(uuidgen)
  namespace: default
EOF
$ cat << EOF > "${container_yaml}"
metadata:
@@ -49,7 +49,7 @@ the latest driver.
$ export QAT_DRIVER_VER=qat1.7.l.4.14.0-00031.tar.gz
$ export QAT_DRIVER_URL=https://downloadmirror.intel.com/30178/eng/${QAT_DRIVER_VER}
$ export QAT_CONF_LOCATION=~/QAT_conf
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/master/demo/openssl-qat-engine/Dockerfile
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/main/demo/openssl-qat-engine/Dockerfile
$ export QAT_SRC=~/src/QAT
$ export GOPATH=~/src/go
$ export KATA_KERNEL_LOCATION=~/kata
src/agent/Cargo.lock (generated, 4177 lines)
File diff suppressed because it is too large
@@ -23,7 +23,6 @@ regex = "1.5.6"
serial_test = "0.5.1"
kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
url = "2.2.2"

# Async helpers
async-trait = "0.1.42"
@@ -31,7 +30,7 @@ async-recursion = "0.3.2"
futures = "0.3.17"

# Async runtime
tokio = { version = "1.21.2", features = ["full"] }
tokio = { version = "1.28.1", features = ["full"] }
tokio-vsock = "0.3.1"

netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
@@ -52,7 +51,7 @@ log = "0.4.11"
prometheus = { version = "0.13.0", features = ["process"] }
procfs = "0.12.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.10" }
cgroups = { package = "cgroups-rs", version = "0.3.2" }

# Tracing
tracing = "0.1.26"
@@ -66,23 +65,12 @@ serde = { version = "1.0.129", features = ["derive"] }
toml = "0.5.8"
clap = { version = "3.0.1", features = ["derive"] }

# "vendored" feature for openssl is required by musl build
openssl = { version = "0.10.38", features = ["vendored"] }

# Image pull/decrypt
[target.'cfg(target_arch = "s390x")'.dependencies]
image-rs = { git = "https://github.com/confidential-containers/image-rs", rev = "v0.3.0", default-features = false, features = ["default_s390x"] }

[target.'cfg(not(target_arch = "s390x"))'.dependencies]
image-rs = { git = "https://github.com/confidential-containers/image-rs", rev = "v0.3.0", default-features = true }

[dev-dependencies]
tempfile = "3.1.0"
test-utils = { path = "../libs/test-utils" }
which = "4.3.0"

[workspace]
resolver = "2"
members = [
    "rustjail",
]
@@ -25,11 +25,11 @@ scan_fmt = "0.2.6"
regex = "1.5.6"
path-absolutize = "1.2.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.2.10" }
cgroups = { package = "cgroups-rs", version = "0.3.2" }
rlimit = "0.5.3"
cfg-if = "0.1.0"

tokio = { version = "1.2.0", features = ["sync", "io-util", "process", "time", "macros", "rt"] }
tokio = { version = "1.28.1", features = ["sync", "io-util", "process", "time", "macros", "rt"] }
futures = "0.3.17"
async-trait = "0.1.31"
inotify = "0.9.2"
@@ -76,7 +76,7 @@ macro_rules! set_resource {
|
||||
|
||||
impl CgroupManager for Manager {
|
||||
fn apply(&self, pid: pid_t) -> Result<()> {
|
||||
self.cgroup.add_task(CgroupPid::from(pid as u64))?;
|
||||
self.cgroup.add_task_by_tgid(CgroupPid::from(pid as u64))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -236,7 +236,7 @@ impl CgroupManager for Manager {
|
||||
.unwrap()
|
||||
.trim_start_matches(root_path.to_str().unwrap());
|
||||
info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
|
||||
let cg = new_cgroup(cgroups::hierarchies::auto(), r_path);
let cg = new_cgroup(cgroups::hierarchies::auto(), r_path)?;
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
cpuset_controller.set_cpus(guest_cpuset)?;
}

@@ -267,6 +267,10 @@ impl CgroupManager for Manager {
fn as_any(&self) -> Result<&dyn Any> {
Ok(self)
}

fn name(&self) -> &str {
"cgroupfs"
}
}

fn set_network_resources(

@@ -541,11 +545,8 @@ fn linux_device_to_cgroup_device(d: &LinuxDevice) -> Option<DeviceResource> {
}

fn linux_device_group_to_cgroup_device(d: &LinuxDeviceCgroup) -> Option<DeviceResource> {
let dev_type = match &d.r#type {
Some(t_s) => match DeviceType::from_char(t_s.chars().next()) {
Some(t_c) => t_c,
None => return None,
},
let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
Some(t) => t,
None => return None,
};

@@ -602,7 +603,7 @@ lazy_static! {
// all mknod to all char devices
LinuxDeviceCgroup {
allow: true,
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(WILDCARD),
minor: Some(WILDCARD),
access: "m".to_string(),
@@ -611,7 +612,7 @@ lazy_static! {
// all mknod to all block devices
LinuxDeviceCgroup {
allow: true,
r#type: Some("b".to_string()),
r#type: "b".to_string(),
major: Some(WILDCARD),
minor: Some(WILDCARD),
access: "m".to_string(),
@@ -620,7 +621,7 @@ lazy_static! {
// all read/write/mknod to char device /dev/console
LinuxDeviceCgroup {
allow: true,
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(5),
minor: Some(1),
access: "rwm".to_string(),
@@ -629,7 +630,7 @@ lazy_static! {
// all read/write/mknod to char device /dev/pts/<N>
LinuxDeviceCgroup {
allow: true,
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(136),
minor: Some(WILDCARD),
access: "rwm".to_string(),
@@ -638,7 +639,7 @@ lazy_static! {
// all read/write/mknod to char device /dev/ptmx
LinuxDeviceCgroup {
allow: true,
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(5),
minor: Some(2),
access: "rwm".to_string(),
@@ -647,7 +648,7 @@ lazy_static! {
// all read/write/mknod to char device /dev/net/tun
LinuxDeviceCgroup {
allow: true,
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(10),
minor: Some(200),
access: "rwm".to_string(),
@@ -694,17 +695,6 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
});
}

if cg.v2() {
return SingularPtrField::some(CpuUsage {
total_usage: 0,
percpu_usage: vec![],
usage_in_kernelmode: 0,
usage_in_usermode: 0,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}

// try to get from cpu controller
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
let stat = cpu_controller.cpu().stat;
@@ -735,7 +725,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
let value = memory.use_hierarchy;
let use_hierarchy = value == 1;

// gte memory datas
// get memory data
let usage = SingularPtrField::some(MemoryData {
usage: memory.usage_in_bytes,
max_usage: memory.max_usage_in_bytes,
@@ -1026,9 +1016,9 @@ pub fn get_mounts(paths: &HashMap<String, String>) -> Result<HashMap<String, Str
Ok(m)
}

fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Cgroup {
fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Result<Cgroup> {
let valid_path = path.trim_start_matches('/').to_string();
cgroups::Cgroup::new(h, valid_path.as_str())
cgroups::Cgroup::new(h, valid_path.as_str()).map_err(anyhow::Error::from)
}

impl Manager {
@@ -1050,12 +1040,14 @@ impl Manager {
m.insert(key.to_string(), p);
}

let cg = new_cgroup(cgroups::hierarchies::auto(), cpath)?;

Ok(Self {
paths: m,
mounts,
// rels: paths,
cpath: cpath.to_string(),
cgroup: new_cgroup(cgroups::hierarchies::auto(), cpath),
cgroup: cg,
})
}
}

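The hunks above make cgroup construction fallible: new_cgroup now returns Result and the call site propagates with `?` rather than failing later. A minimal sketch of the resulting pattern, assuming the cgroups crate API visible in this diff (hierarchies::auto(), Cgroup::new, controller_of, set_cpus); the pin_guest_cpus wrapper is illustrative only:

use anyhow::{Context, Result};
use cgroups::cpuset::CpuSetController;
use cgroups::Cgroup;

fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Result<Cgroup> {
    // cgroups::Cgroup::new() is fallible; surface its error through anyhow.
    let valid_path = path.trim_start_matches('/').to_string();
    cgroups::Cgroup::new(h, valid_path.as_str()).map_err(anyhow::Error::from)
}

fn pin_guest_cpus(r_path: &str, guest_cpuset: &str) -> Result<()> {
    let cg = new_cgroup(cgroups::hierarchies::auto(), r_path)?;
    // context() instead of unwrap() keeps the failure reportable to the caller.
    let cpuset: &CpuSetController = cg
        .controller_of()
        .context("cpuset controller not available")?;
    cpuset.set_cpus(guest_cpuset)?;
    Ok(())
}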
@@ -66,6 +66,10 @@ impl CgroupManager for Manager {
fn as_any(&self) -> Result<&dyn Any> {
Ok(self)
}

fn name(&self) -> &str {
"mock"
}
}

impl Manager {

@@ -52,10 +52,12 @@ pub trait Manager {
fn as_any(&self) -> Result<&dyn Any> {
Err(anyhow!("not supported!"))
}

fn name(&self) -> &str;
}

impl Debug for dyn Manager + Send + Sync {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "CgroupManager")
write!(f, "{}", self.name())
}
}

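With name() on the trait, the Debug impl above prints which backend a boxed manager actually is ("cgroupfs", "systemd", or "mock") instead of the fixed "CgroupManager" label. A small self-contained sketch of the mechanism:

use std::fmt::{self, Debug};

trait Manager {
    fn name(&self) -> &str;
}

impl Debug for dyn Manager + Send + Sync {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the backend-specific name instead of a fixed string.
        write!(f, "{}", self.name())
    }
}

struct Mock;
impl Manager for Mock {
    fn name(&self) -> &str {
        "mock"
    }
}

fn main() {
    let m: Box<dyn Manager + Send + Sync> = Box::new(Mock);
    println!("{:?}", m); // prints "mock"
}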
@@ -26,7 +26,7 @@ pub trait SystemdInterface {

fn get_version(&self) -> Result<String>;

fn unit_exist(&self, unit_name: &str) -> Result<bool>;
fn unit_exists(&self, unit_name: &str) -> Result<bool>;

fn add_process(&self, pid: i32, unit_name: &str) -> Result<()>;
}
@@ -36,8 +36,9 @@ pub struct DBusClient {}

impl DBusClient {
fn build_proxy(&self) -> Result<SystemManager<'static>> {
let connection = zbus::blocking::Connection::system()?;
let proxy = SystemManager::new(&connection)?;
let connection =
zbus::blocking::Connection::system().context("Establishing a D-Bus connection")?;
let proxy = SystemManager::new(&connection).context("Building a D-Bus proxy manager")?;
Ok(proxy)
}
}
@@ -108,8 +109,10 @@ impl SystemdInterface for DBusClient {
Ok(systemd_version)
}

fn unit_exist(&self, unit_name: &str) -> Result<bool> {
let proxy = self.build_proxy()?;
fn unit_exists(&self, unit_name: &str) -> Result<bool> {
let proxy = self
.build_proxy()
.with_context(|| format!("Checking if systemd unit {} exists", unit_name))?;

Ok(proxy.get_unit(unit_name).is_ok())
}

@@ -41,7 +41,7 @@ pub struct Manager {
impl CgroupManager for Manager {
fn apply(&self, pid: pid_t) -> Result<()> {
let unit_name = self.unit_name.as_str();
if self.dbus_client.unit_exist(unit_name).unwrap() {
if self.dbus_client.unit_exists(unit_name)? {
self.dbus_client.add_process(pid, self.unit_name.as_str())?;
} else {
self.dbus_client.start_unit(
@@ -101,6 +101,10 @@ impl CgroupManager for Manager {
fn as_any(&self) -> Result<&dyn Any> {
Ok(self)
}

fn name(&self) -> &str {
"systemd"
}
}

impl Manager {

@@ -71,7 +71,7 @@ impl Cpu {
}

// v2:
// cpu.shares <-> CPUShares
// cpu.shares <-> CPUWeight
// cpu.period <-> CPUQuotaPeriodUSec
// cpu.period & cpu.quota <-> CPUQuotaPerSecUSec
fn unified_apply(
@@ -80,8 +80,8 @@ impl Cpu {
systemd_version: &str,
) -> Result<()> {
if let Some(shares) = cpu_resources.shares {
let unified_shares = get_unified_cpushares(shares);
properties.push(("CPUShares", Value::U64(unified_shares)));
let weight = shares_to_weight(shares);
properties.push(("CPUWeight", Value::U64(weight)));
}

if let Some(period) = cpu_resources.period {
@@ -104,7 +104,7 @@ impl Cpu {

// ref: https://github.com/containers/crun/blob/main/crun.1.md#cgroup-v2
// [2-262144] to [1-10000]
fn get_unified_cpushares(shares: u64) -> u64 {
fn shares_to_weight(shares: u64) -> u64 {
if shares == 0 {
return 100;
}

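shares_to_weight converts a cgroup v1 cpu.shares value in [2, 262144] to a cgroup v2 CPUWeight in [1, 10000], returning systemd's default of 100 when shares is unset (0). The hunk cuts off before the arithmetic; the sketch below assumes the linear mapping crun documents at the referenced link, so the exact body is an assumption:

// Map cgroup v1 cpu.shares ([2, 262144]) to cgroup v2 weight ([1, 10000]).
// 0 means "unset", for which systemd's default weight of 100 is used.
fn shares_to_weight(shares: u64) -> u64 {
    if shares == 0 {
        return 100;
    }
    // Linear interpolation between the two ranges, as done by crun/runc.
    1 + (shares.saturating_sub(2) * 9999) / 262142
}

fn main() {
    assert_eq!(shares_to_weight(0), 100);        // unset -> default
    assert_eq!(shares_to_weight(2), 1);          // lower bound
    assert_eq!(shares_to_weight(262144), 10000); // upper bound
}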
@@ -1449,7 +1449,7 @@ impl LinuxContainer {
pub fn new<T: Into<String> + Display + Clone>(
id: T,
base: T,
mut config: Config,
config: Config,
logger: &Logger,
) -> Result<Self> {
let base = base.into();
@@ -1475,26 +1475,18 @@ impl LinuxContainer {
.context(format!("Cannot change owner of container {} root", id))?;

let spec = config.spec.as_ref().unwrap();

let linux = spec.linux.as_ref().unwrap();

// determine which cgroup driver to take and then assign to config.use_systemd_cgroup
// systemd: "[slice]:[prefix]:[name]"
// fs: "/path_a/path_b"
let cpath = if SYSTEMD_CGROUP_PATH_FORMAT.is_match(linux.cgroups_path.as_str()) {
config.use_systemd_cgroup = true;
let cpath = if config.use_systemd_cgroup {
if linux.cgroups_path.len() == 2 {
format!("system.slice:kata_agent:{}", id.as_str())
} else {
linux.cgroups_path.clone()
}
} else if linux.cgroups_path.is_empty() {
format!("/{}", id.as_str())
} else {
config.use_systemd_cgroup = false;
if linux.cgroups_path.is_empty() {
format!("/{}", id.as_str())
} else {
linux.cgroups_path.clone()
}
// if we have a systemd cgroup path we need to convert it to a fs cgroup path
linux.cgroups_path.replace(':', "/")
};

let cgroup_manager: Box<dyn Manager + Send + Sync> = if config.use_systemd_cgroup {

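After this change the caller decides the driver via config.use_systemd_cgroup and LinuxContainer::new only normalizes the path: a bare "::" systemd triple is filled in with defaults, and a colon-separated path handed to the fs driver is rewritten as a filesystem path. A standalone sketch of that normalization (normalize_cgroups_path is an illustrative name):

// A sketch of the path normalization above (names are illustrative).
fn normalize_cgroups_path(cgroups_path: &str, id: &str, use_systemd_cgroup: bool) -> String {
    if use_systemd_cgroup {
        // "::" carries no slice/prefix, so synthesize a default unit triple.
        if cgroups_path.len() == 2 {
            format!("system.slice:kata_agent:{}", id)
        } else {
            cgroups_path.to_string()
        }
    } else if cgroups_path.is_empty() {
        format!("/{}", id)
    } else {
        // A systemd-style path handed to the fs driver becomes a plain path.
        cgroups_path.replace(':', "/")
    }
}

fn main() {
    assert_eq!(normalize_cgroups_path("::", "abc", true), "system.slice:kata_agent:abc");
    assert_eq!(normalize_cgroups_path("", "abc", false), "/abc");
    assert_eq!(normalize_cgroups_path("a:b:c", "abc", false), "a/b/c");
}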
@@ -237,12 +237,6 @@ pub fn resources_grpc_to_oci(res: &grpc::LinuxResources) -> oci::LinuxResources
let devices = {
let mut d = Vec::new();
for dev in res.Devices.iter() {
let dev_type = if dev.Type.is_empty() {
None
} else {
Some(dev.Type.clone())
};

let major = if dev.Major == -1 {
None
} else {
@@ -256,7 +250,7 @@ pub fn resources_grpc_to_oci(res: &grpc::LinuxResources) -> oci::LinuxResources
};
d.push(oci::LinuxDeviceCgroup {
allow: dev.Allow,
r#type: dev_type,
r#type: dev.Type.clone(),
major,
minor,
access: dev.Access.clone(),

@@ -11,7 +11,6 @@ use std::fs;
use std::str::FromStr;
use std::time;
use tracing::instrument;
use url::Url;

use kata_types::config::default::DEFAULT_AGENT_VSOCK_PORT;

@@ -26,12 +25,6 @@ const LOG_VPORT_OPTION: &str = "agent.log_vport";
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
const CONFIG_FILE: &str = "agent.config_file";
const CONTAINER_POLICY_FILE: &str = "agent.container_policy_file";
const AA_KBC_PARAMS: &str = "agent.aa_kbc_params";
const HTTPS_PROXY: &str = "agent.https_proxy";
const NO_PROXY: &str = "agent.no_proxy";
const ENABLE_DATA_INTEGRITY: &str = "agent.data_integrity";
const ENABLE_SIGNATURE_VERIFICATION: &str = "agent.enable_signature_verification";

const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
@@ -59,11 +52,6 @@ const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container p
const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";

const ERR_INVALID_CONTAINER_POLICY_PATH_VALUE: &str = "invalid container_policy_file value";
const ERR_INVALID_CONTAINER_POLICY_PATH_KEY: &str = "invalid container_policy_file key";
const ERR_INVALID_CONTAINER_POLICY_ABSOLUTE: &str =
"container_policy_file path must be an absolute file path";

#[derive(Debug, Default, Deserialize)]
pub struct EndpointsConfig {
pub allowed: Vec<String>,
@@ -89,12 +77,6 @@ pub struct AgentConfig {
pub tracing: bool,
pub endpoints: AgentEndpoints,
pub supports_seccomp: bool,
pub container_policy_path: String,
pub aa_kbc_params: String,
pub https_proxy: String,
pub no_proxy: String,
pub data_integrity: bool,
pub enable_signature_verification: bool,
}

#[derive(Debug, Deserialize)]
@@ -110,12 +92,6 @@ pub struct AgentConfigBuilder {
pub unified_cgroup_hierarchy: Option<bool>,
pub tracing: Option<bool>,
pub endpoints: Option<EndpointsConfig>,
pub container_policy_path: Option<String>,
pub aa_kbc_params: Option<String>,
pub https_proxy: Option<String>,
pub no_proxy: Option<String>,
pub data_integrity: Option<bool>,
pub enable_signature_verification: Option<bool>,
}

macro_rules! config_override {
@@ -177,12 +153,6 @@ impl Default for AgentConfig {
tracing: false,
endpoints: Default::default(),
supports_seccomp: rpc::have_seccomp(),
container_policy_path: String::from(""),
aa_kbc_params: String::from(""),
https_proxy: String::from(""),
no_proxy: String::from(""),
data_integrity: false,
enable_signature_verification: true,
}
}
}
@@ -211,16 +181,6 @@ impl FromStr for AgentConfig {
config_override!(agent_config_builder, agent_config, server_addr);
config_override!(agent_config_builder, agent_config, unified_cgroup_hierarchy);
config_override!(agent_config_builder, agent_config, tracing);
config_override!(agent_config_builder, agent_config, container_policy_path);
config_override!(agent_config_builder, agent_config, aa_kbc_params);
config_override!(agent_config_builder, agent_config, https_proxy);
config_override!(agent_config_builder, agent_config, no_proxy);
config_override!(agent_config_builder, agent_config, data_integrity);
config_override!(
agent_config_builder,
agent_config,
enable_signature_verification
);

// Populate the allowed endpoints hash set, if we got any from the config file.
if let Some(endpoints) = agent_config_builder.endpoints {
@@ -249,10 +209,6 @@ impl AgentConfig {
let mut config: AgentConfig = Default::default();
let cmdline = fs::read_to_string(file)?;
let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();

let mut using_config_file = false;
// Check if there is config file before parsing params that might
// override values from the config file.
for param in params.iter() {
// If we get a configuration file path from the command line, we
// generate our config from it.
@@ -260,13 +216,9 @@ impl AgentConfig {
// or if it can't be parsed properly.
if param.starts_with(format!("{}=", CONFIG_FILE).as_str()) {
let config_file = get_string_value(param)?;
config = AgentConfig::from_config_file(&config_file)?;
using_config_file = true;
break;
return AgentConfig::from_config_file(&config_file);
}
}

for param in params.iter() {
// parse cmdline flags
parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, config.debug_console);
parse_cmdline_param!(param, DEV_MODE_FLAG, config.dev_mode);
@@ -326,30 +278,6 @@ impl AgentConfig {
config.unified_cgroup_hierarchy,
get_bool_value
);

parse_cmdline_param!(
param,
CONTAINER_POLICY_FILE,
config.container_policy_path,
get_container_policy_path_value
);

parse_cmdline_param!(param, AA_KBC_PARAMS, config.aa_kbc_params, get_string_value);
parse_cmdline_param!(param, HTTPS_PROXY, config.https_proxy, get_url_value);
parse_cmdline_param!(param, NO_PROXY, config.no_proxy, get_string_value);
parse_cmdline_param!(
param,
ENABLE_DATA_INTEGRITY,
config.data_integrity,
get_bool_value
);

parse_cmdline_param!(
param,
ENABLE_SIGNATURE_VERIFICATION,
config.enable_signature_verification,
get_bool_value
);
}

if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
@@ -369,9 +297,7 @@ impl AgentConfig {
}

// We did not get a configuration file: allow all endpoints.
if !using_config_file {
config.endpoints.all_allowed = true;
}
config.endpoints.all_allowed = true;

Ok(config)
}
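With the early return, agent.config_file short-circuits the whole scan, and the remaining agent.* parameters, plus the unconditional all_allowed, only apply when no config file is given. A self-contained sketch of this control flow; Config and its fields are stand-ins for the real AgentConfig, not its actual API:

#[derive(Default)]
struct Config {
    dev_mode: bool,
    all_allowed: bool,
}

impl Config {
    fn from_config_file(_path: &str) -> Config {
        // Stand-in: a real implementation would parse the TOML file here.
        Config::default()
    }
}

fn from_cmdline(cmdline: &str) -> Config {
    // A config file, if present, replaces the whole config and wins outright.
    for param in cmdline.split_ascii_whitespace() {
        if let Some(path) = param.strip_prefix("agent.config_file=") {
            return Config::from_config_file(path);
        }
    }

    let mut config = Config::default();
    for param in cmdline.split_ascii_whitespace() {
        if param == "agent.devmode" {
            config.dev_mode = true;
        }
    }
    // No config file given: allow all endpoints.
    config.all_allowed = true;
    config
}

fn main() {
    let c = from_cmdline("quiet agent.devmode root=/dev/vda1");
    assert!(c.dev_mode && c.all_allowed);
}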
@@ -505,35 +431,6 @@ fn get_container_pipe_size(param: &str) -> Result<i32> {
Ok(value)
}

#[instrument]
fn get_container_policy_path_value(param: &str) -> Result<String> {
let fields: Vec<&str> = param.split('=').collect();

ensure!(!fields[0].is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_KEY);
ensure!(fields.len() == 2, ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);

let key = fields[0];
ensure!(
key == CONTAINER_POLICY_FILE,
ERR_INVALID_CONTAINER_POLICY_PATH_KEY
);

let value = String::from(fields[1]);
ensure!(!value.is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
ensure!(
value.starts_with('/'),
ERR_INVALID_CONTAINER_POLICY_ABSOLUTE
);
ensure!(!value.contains(".."), ERR_INVALID_CONTAINER_POLICY_ABSOLUTE);
Ok(value)
}

#[instrument]
fn get_url_value(param: &str) -> Result<String> {
let value = get_string_value(param)?;
Ok(Url::parse(&value)?.to_string())
}

#[cfg(test)]
mod tests {
use test_utils::assert_result;
@@ -552,8 +449,6 @@ mod tests {
assert!(!config.dev_mode);
assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
assert_eq!(config.container_policy_path, "");
assert!(config.enable_signature_verification);
}

#[test]
@@ -572,12 +467,6 @@ mod tests {
server_addr: &'a str,
unified_cgroup_hierarchy: bool,
tracing: bool,
container_policy_path: &'a str,
aa_kbc_params: &'a str,
https_proxy: &'a str,
no_proxy: &'a str,
data_integrity: bool,
enable_signature_verification: bool,
}

impl Default for TestData<'_> {
@@ -593,12 +482,6 @@ mod tests {
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
tracing: false,
container_policy_path: "",
aa_kbc_params: "",
https_proxy: "",
no_proxy: "",
data_integrity: false,
enable_signature_verification: true,
}
}
}
@@ -968,86 +851,6 @@ mod tests {
tracing: true,
..Default::default()
},
TestData {
contents: "agent.container_policy_file=/etc/containers/policy.json",
container_policy_path: "/etc/containers/policy.json",
..Default::default()
},
TestData {
contents: "agent.aa_kbc_params=offline_fs_kbc::null",
aa_kbc_params: "offline_fs_kbc::null",
..Default::default()
},
TestData {
contents: "agent.aa_kbc_params=eaa_kbc::127.0.0.1:50000",
aa_kbc_params: "eaa_kbc::127.0.0.1:50000",
..Default::default()
},
TestData {
contents: "agent.https_proxy=http://proxy.url.com:81/",
https_proxy: "http://proxy.url.com:81/",
..Default::default()
},
TestData {
contents: "agent.https_proxy=http://192.168.1.100:81/",
https_proxy: "http://192.168.1.100:81/",
..Default::default()
},
TestData {
contents: "agent.no_proxy=*.internal.url.com",
no_proxy: "*.internal.url.com",
..Default::default()
},
TestData {
contents: "agent.no_proxy=192.168.1.0/24,172.16.0.0/12",
no_proxy: "192.168.1.0/24,172.16.0.0/12",
..Default::default()
},
TestData {
contents: "",
data_integrity: false,
..Default::default()
},
TestData {
contents: "agent.data_integrity=true",
data_integrity: true,
..Default::default()
},
TestData {
contents: "agent.data_integrity=false",
data_integrity: false,
..Default::default()
},
TestData {
contents: "agent.data_integrity=1",
data_integrity: true,
..Default::default()
},
TestData {
contents: "agent.data_integrity=0",
data_integrity: false,
..Default::default()
},
TestData {
contents: "agent.enable_signature_verification=false",
enable_signature_verification: false,
..Default::default()
},
TestData {
contents: "agent.enable_signature_verification=0",
enable_signature_verification: false,
..Default::default()
},
TestData {
contents: "agent.enable_signature_verification=1",
enable_signature_verification: true,
..Default::default()
},
TestData {
contents: "agent.enable_signature_verification=foo",
enable_signature_verification: false,
..Default::default()
},
];

let dir = tempdir().expect("failed to create tmpdir");
@@ -1095,20 +898,6 @@ mod tests {
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
assert_eq!(d.tracing, config.tracing, "{}", msg);
assert_eq!(
d.container_policy_path, config.container_policy_path,
"{}",
msg
);
assert_eq!(d.aa_kbc_params, config.aa_kbc_params, "{}", msg);
assert_eq!(d.https_proxy, config.https_proxy, "{}", msg);
assert_eq!(d.no_proxy, config.no_proxy, "{}", msg);
assert_eq!(d.data_integrity, config.data_integrity, "{}", msg);
assert_eq!(
d.enable_signature_verification, config.enable_signature_verification,
"{}",
msg
);

for v in vars_to_unset {
env::remove_var(v);
@@ -1580,72 +1369,6 @@ Caused by:
}
}

#[test]
fn test_get_container_policy_path_value() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<String>,
}

let tests = &[
TestData {
param: "",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
},
TestData {
param: "agent.container_policy_file",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
},
TestData {
param: "agent.container_policy_file=",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
},
TestData {
param: "foo=bar",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
},
TestData {
param: "agent.policy_path=/another/absolute/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
},
TestData {
param: "agent.container_policy_file=/etc/container/policy.json",
result: Ok("/etc/container/policy.json".into()),
},
TestData {
param: "agent.container_policy_file=/another/absolute/path.json",
result: Ok("/another/absolute/path.json".into()),
},
TestData {
param: "agent.container_policy_file=./relative/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
TestData {
param: "agent.container_policy_file=./relative/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
TestData {
param: "agent.container_policy_file=../../relative/path.json",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
TestData {
param: "agent.container_policy_file=junk_string",
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
},
];

for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);

let result = get_container_policy_path_value(d.param);

let msg = format!("{}: result: {:?}", msg, result);

assert_result!(d.result, result, msg);
}
}

#[test]
fn test_config_builder_from_string() {
let config = AgentConfig::from_str(
@@ -1676,50 +1399,4 @@ Caused by:
// Verify that the default values are valid
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
}

#[test]
fn test_config_from_cmdline_and_config_file() {
let dir = tempdir().expect("failed to create tmpdir");

let agent_config = r#"
dev_mode = false
server_addr = 'vsock://8:2048'

[endpoints]
allowed = ["CreateContainer", "StartContainer"]
"#;

let config_path = dir.path().join("agent-config.toml");
let config_filename = config_path.to_str().expect("failed to get config filename");

fs::write(config_filename, agent_config).expect("failed to write agent config");

let cmdline = format!("agent.devmode agent.config_file={}", config_filename);

let cmdline_path = dir.path().join("cmdline");
let cmdline_filename = cmdline_path
.to_str()
.expect("failed to get cmdline filename");

fs::write(cmdline_filename, cmdline).expect("failed to write agent config");

let config = AgentConfig::from_cmdline(cmdline_filename, vec![])
.expect("failed to parse command line");

// Should be overwritten by cmdline
assert!(config.dev_mode);

// Should be from agent config
assert_eq!(config.server_addr, "vsock://8:2048");

// Should be from agent config
assert_eq!(
config.endpoints.allowed,
vec!["CreateContainer".to_string(), "StartContainer".to_string()]
.iter()
.cloned()
.collect()
);
assert!(!config.endpoints.all_allowed);
}
}

@@ -571,15 +571,13 @@ fn update_spec_devices(spec: &mut Spec, mut updates: HashMap<&str, DevUpdate>) -

if let Some(resources) = linux.resources.as_mut() {
for r in &mut resources.devices {
if let (Some(host_type), Some(host_major), Some(host_minor)) =
(r.r#type.as_ref(), r.major, r.minor)
{
if let Some(update) = res_updates.get(&(host_type.as_str(), host_major, host_minor))
if let (Some(host_major), Some(host_minor)) = (r.major, r.minor) {
if let Some(update) = res_updates.get(&(r.r#type.as_str(), host_major, host_minor))
{
info!(
sl!(),
"update_spec_devices() updating resource";
"type" => &host_type,
"type" => &r.r#type,
"host_major" => host_major,
"host_minor" => host_minor,
"guest_major" => update.guest_major,
@@ -856,7 +854,7 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
allow: false,
major: Some(major),
minor: Some(minor),
r#type: Some(String::from("b")),
r#type: String::from("b"),
access: String::from("rw"),
});

@@ -1019,13 +1017,13 @@ mod tests {
resources: Some(LinuxResources {
devices: vec![
oci::LinuxDeviceCgroup {
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(host_major_a),
minor: Some(host_minor_a),
..oci::LinuxDeviceCgroup::default()
},
oci::LinuxDeviceCgroup {
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(host_major_b),
minor: Some(host_minor_b),
..oci::LinuxDeviceCgroup::default()
@@ -1118,13 +1116,13 @@ mod tests {
resources: Some(LinuxResources {
devices: vec![
LinuxDeviceCgroup {
r#type: Some("c".to_string()),
r#type: "c".to_string(),
major: Some(host_major),
minor: Some(host_minor),
..LinuxDeviceCgroup::default()
},
LinuxDeviceCgroup {
r#type: Some("b".to_string()),
r#type: "b".to_string(),
major: Some(host_major),
minor: Some(host_minor),
..LinuxDeviceCgroup::default()

@@ -1,427 +0,0 @@
// Copyright (c) 2021 Alibaba Cloud
//
// SPDX-License-Identifier: Apache-2.0
//

use std::env;
use std::fmt::Write as _;
use std::fs;
use std::path::Path;
use std::process::{Command, ExitStatus};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use anyhow::{anyhow, ensure, Result};
use async_trait::async_trait;
use protocols::image;
use tokio::sync::Mutex;
use ttrpc::{self, error::get_rpc_status as ttrpc_error};

use crate::rpc::{verify_cid, CONTAINER_BASE};
use crate::sandbox::Sandbox;
use crate::AGENT_CONFIG;

use image_rs::image::ImageClient;
use std::io::Write;

const SKOPEO_PATH: &str = "/usr/bin/skopeo";
const UMOCI_PATH: &str = "/usr/local/bin/umoci";
const IMAGE_OCI: &str = "image_oci";
const AA_PATH: &str = "/usr/local/bin/attestation-agent";
const AA_KEYPROVIDER_PORT: &str = "127.0.0.1:50000";
const AA_GETRESOURCE_PORT: &str = "127.0.0.1:50001";
const OCICRYPT_CONFIG_PATH: &str = "/tmp/ocicrypt_config.json";
// kata rootfs is readonly, use tmpfs before CC storage is implemented.
const KATA_CC_IMAGE_WORK_DIR: &str = "/run/image/";
const KATA_CC_PAUSE_BUNDLE: &str = "/pause_bundle";
const CONFIG_JSON: &str = "config.json";

// Convenience macro to obtain the scope logger
macro_rules! sl {
() => {
slog_scope::logger()
};
}

pub struct ImageService {
sandbox: Arc<Mutex<Sandbox>>,
attestation_agent_started: AtomicBool,
image_client: Arc<Mutex<ImageClient>>,
}

impl ImageService {
pub fn new(sandbox: Arc<Mutex<Sandbox>>) -> Self {
env::set_var("CC_IMAGE_WORK_DIR", KATA_CC_IMAGE_WORK_DIR);
Self {
sandbox,
attestation_agent_started: AtomicBool::new(false),
image_client: Arc::new(Mutex::new(ImageClient::default())),
}
}

fn pull_image_from_registry(
image: &str,
cid: &str,
source_creds: Option<&str>,
policy_path: Option<&str>,
aa_kbc_params: &str,
) -> Result<()> {
let source_image = format!("{}{}", "docker://", image);

let tmp_cid_path = Path::new("/tmp/").join(cid);
let oci_path = tmp_cid_path.join(IMAGE_OCI);
let target_path_oci = format!("oci://{}:latest", oci_path.to_string_lossy());

fs::create_dir_all(&oci_path)?;

let mut pull_command = Command::new(SKOPEO_PATH);
pull_command
.arg("copy")
.arg(source_image)
.arg(&target_path_oci)
.arg("--remove-signatures"); //umoci requires signatures to be removed

// If source credentials were passed (so not using an anonymous registry), pass them through
if let Some(source_creds) = source_creds {
pull_command.arg("--src-creds").arg(source_creds);
}

// If a policy_path is provided, use it; otherwise fall back to allowing all image registries
if let Some(policy_path) = policy_path {
pull_command.arg("--policy").arg(policy_path);
} else {
info!(
sl!(),
"No policy path was supplied, so revert to allow all images to be pulled."
);
pull_command.arg("--insecure-policy");
}

debug!(sl!(), "skopeo command: {:?}", &pull_command);
if !aa_kbc_params.is_empty() {
// Skopeo will copy an unencrypted image even if the decryption key argument is provided.
// Thus, this does not guarantee that the image was encrypted.
pull_command
.arg("--decryption-key")
.arg(format!("provider:attestation-agent:{}", aa_kbc_params))
.env("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
}

let status: ExitStatus = pull_command.status()?;

if !status.success() {
let mut error_message = format!("failed to pull image: {:?}", status);

if let Err(e) = fs::remove_dir_all(&tmp_cid_path) {
let _ = write!(
error_message,
" and clean up of temporary container directory {:?} failed with error {:?}",
tmp_cid_path, e
);
};
return Err(anyhow!(error_message));
}
Ok(())
}

fn unpack_image(cid: &str) -> Result<()> {
let tmp_cid_path = Path::new("/tmp/").join(cid);
let source_path_oci = tmp_cid_path.join(IMAGE_OCI);

let target_path_bundle = Path::new(CONTAINER_BASE).join(cid);

info!(sl!(), "unpack image {:?} to {:?}", cid, target_path_bundle);

// Unpack image
let status: ExitStatus = Command::new(UMOCI_PATH)
.arg("unpack")
.arg("--image")
.arg(&source_path_oci)
.arg(&target_path_bundle)
.status()?;

ensure!(status.success(), "failed to unpack image: {:?}", status);

// To save space delete the oci image after unpack
fs::remove_dir_all(&tmp_cid_path)?;

Ok(())
}

// pause image is packaged in rootfs for CC
fn unpack_pause_image(cid: &str) -> Result<()> {
let cc_pause_bundle = Path::new(KATA_CC_PAUSE_BUNDLE);
if !cc_pause_bundle.exists() {
return Err(anyhow!("Pause image not present in rootfs"));
}

info!(sl!(), "use guest pause image cid {:?}", cid);
let pause_bundle = Path::new(CONTAINER_BASE).join(cid);
let pause_rootfs = pause_bundle.join("rootfs");
let pause_config = pause_bundle.join(CONFIG_JSON);
let pause_binary = pause_rootfs.join("pause");
fs::create_dir_all(&pause_rootfs)?;
if !pause_config.exists() {
fs::copy(
cc_pause_bundle.join(CONFIG_JSON),
pause_bundle.join(CONFIG_JSON),
)?;
}
if !pause_binary.exists() {
fs::copy(cc_pause_bundle.join("rootfs").join("pause"), pause_binary)?;
}

Ok(())
}

// If we fail to start the AA, Skopeo/ocicrypt won't be able to unwrap keys
// and container decryption will fail.
fn init_attestation_agent() -> Result<()> {
let config_path = OCICRYPT_CONFIG_PATH;

// The image will need to be encrypted using a keyprovider
// that has the same name (at least according to the config).
let ocicrypt_config = serde_json::json!({
"key-providers": {
"attestation-agent":{
"grpc":AA_KEYPROVIDER_PORT
}
}
});

let mut config_file = fs::File::create(config_path)?;
config_file.write_all(ocicrypt_config.to_string().as_bytes())?;

// The Attestation Agent will run for the duration of the guest.
Command::new(AA_PATH)
.arg("--keyprovider_sock")
.arg(AA_KEYPROVIDER_PORT)
.arg("--getresource_sock")
.arg(AA_GETRESOURCE_PORT)
.spawn()?;
Ok(())
}

/// Determines the container id (cid) to use for a given request.
///
/// If the request specifies a non-empty id, use it; otherwise derive it from the image path.
/// In either case, verify that the chosen id is valid.
fn cid_from_request(req: &image::PullImageRequest) -> Result<String> {
let req_cid = req.get_container_id();
let cid = if !req_cid.is_empty() {
req_cid.to_string()
} else if let Some(last) = req.get_image().rsplit('/').next() {
// ':' has a special meaning for umoci during unpack
last.replace(':', "_")
} else {
return Err(anyhow!("Invalid image name. {}", req.get_image()));
};
verify_cid(&cid)?;
Ok(cid)
}
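When the request carries no container id, cid_from_request derives one from the last path segment of the image reference, mapping ':' to '_' so umoci can unpack the result. A standalone sketch of just that derivation:

// Derive a container id from an image reference when none is supplied:
// take the last '/'-separated segment and replace ':' (the tag separator).
fn derive_cid(image: &str) -> Option<String> {
    image
        .rsplit('/')
        .next()
        .filter(|last| !last.is_empty())
        .map(|last| last.replace(':', "_"))
}

fn main() {
    assert_eq!(
        derive_cid("docker.io/library/busybox:latest").as_deref(),
        Some("busybox_latest")
    );
    assert_eq!(derive_cid("prefix/a:b").as_deref(), Some("a_b"));
    assert_eq!(derive_cid("abc/"), None); // empty trailing segment is rejected
}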

async fn pull_image(&self, req: &image::PullImageRequest) -> Result<String> {
env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);

let https_proxy = &AGENT_CONFIG.read().await.https_proxy;
if !https_proxy.is_empty() {
env::set_var("HTTPS_PROXY", https_proxy);
}

let no_proxy = &AGENT_CONFIG.read().await.no_proxy;
if !no_proxy.is_empty() {
env::set_var("NO_PROXY", no_proxy);
}

let cid = Self::cid_from_request(req)?;
let image = req.get_image();
// Can switch to use cid directly when we remove umoci
let v: Vec<&str> = image.rsplit('/').collect();
if !v[0].is_empty() && v[0].starts_with("pause:") {
Self::unpack_pause_image(&cid)?;

let mut sandbox = self.sandbox.lock().await;
sandbox.images.insert(String::from(image), cid);
return Ok(image.to_owned());
}

let aa_kbc_params = &AGENT_CONFIG.read().await.aa_kbc_params;
if !aa_kbc_params.is_empty() {
match self.attestation_agent_started.compare_exchange_weak(
false,
true,
Ordering::SeqCst,
Ordering::SeqCst,
) {
Ok(_) => Self::init_attestation_agent()?,
Err(_) => info!(sl!(), "Attestation Agent already running"),
}
}

let source_creds = (!req.get_source_creds().is_empty()).then(|| req.get_source_creds());

if Path::new(SKOPEO_PATH).exists() {
// Read the policy path from the agent config
let config_policy_path = &AGENT_CONFIG.read().await.container_policy_path;
let policy_path =
(!config_policy_path.is_empty()).then_some(config_policy_path.as_str());
Self::pull_image_from_registry(image, &cid, source_creds, policy_path, aa_kbc_params)?;
Self::unpack_image(&cid)?;
} else {
// Read enable signature verification from the agent config and set it in the image_client
let enable_signature_verification =
&AGENT_CONFIG.read().await.enable_signature_verification;
info!(
sl!(),
"enable_signature_verification set to: {}", enable_signature_verification
);
self.image_client.lock().await.config.security_validate =
*enable_signature_verification;

// If the attestation-agent is being used, then enable the authenticated credentials support
//TODO tidy logic once skopeo is removed to combine with aa_kbc_params check above
info!(
sl!(),
"image_client.config.auth set to: {}",
!aa_kbc_params.is_empty()
);
self.image_client.lock().await.config.auth = !aa_kbc_params.is_empty();

let bundle_path = Path::new(CONTAINER_BASE).join(&cid);
fs::create_dir_all(&bundle_path)?;

let decrypt_config = format!("provider:attestation-agent:{}", aa_kbc_params);

info!(sl!(), "pull image {:?}, bundle path {:?}", cid, bundle_path);
// Image layers will be stored at KATA_CC_IMAGE_WORK_DIR; generated bundles
// with rootfs and config.json will be stored under CONTAINER_BASE/cid.
self.image_client
.lock()
.await
.pull_image(image, &bundle_path, &source_creds, &Some(&decrypt_config))
.await?;

info!(
sl!(),
"pull and unpack image {:?} with image-rs succeeded", cid
);
}

let mut sandbox = self.sandbox.lock().await;
sandbox.images.insert(String::from(image), cid);
Ok(image.to_owned())
}
}

#[async_trait]
impl protocols::image_ttrpc_async::Image for ImageService {
async fn pull_image(
&self,
_ctx: &ttrpc::r#async::TtrpcContext,
req: image::PullImageRequest,
) -> ttrpc::Result<image::PullImageResponse> {
match self.pull_image(&req).await {
Ok(r) => {
let mut resp = image::PullImageResponse::new();
resp.image_ref = r;
return Ok(resp);
}
Err(e) => {
return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
}
}
}
}

#[cfg(test)]
mod tests {
use super::ImageService;
use protocols::image;

#[test]
fn test_cid_from_request() {
struct Case {
cid: &'static str,
image: &'static str,
result: Option<&'static str>,
}

let cases = [
Case {
cid: "",
image: "",
result: None,
},
Case {
cid: "..",
image: "",
result: None,
},
Case {
cid: "",
image: "..",
result: None,
},
Case {
cid: "",
image: "abc/..",
result: None,
},
Case {
cid: "",
image: "abc/",
result: None,
},
Case {
cid: "",
image: "../abc",
result: Some("abc"),
},
Case {
cid: "",
image: "../9abc",
result: Some("9abc"),
},
Case {
cid: "some-string.1_2",
image: "",
result: Some("some-string.1_2"),
},
Case {
cid: "0some-string.1_2",
image: "",
result: Some("0some-string.1_2"),
},
Case {
cid: "a:b",
image: "",
result: None,
},
Case {
cid: "",
image: "prefix/a:b",
result: Some("a_b"),
},
Case {
cid: "",
image: "/a/b/c/d:e",
result: Some("d_e"),
},
];

for case in &cases {
let mut req = image::PullImageRequest::new();
req.set_image(case.image.to_string());
req.set_container_id(case.cid.to_string());
let ret = ImageService::cid_from_request(&req);
match (case.result, ret) {
(Some(expected), Ok(actual)) => assert_eq!(expected, actual),
(None, Err(_)) => (),
(None, Ok(r)) => panic!("Expected an error, got {}", r),
(Some(expected), Err(e)) => {
panic!("Expected {} but got an error ({})", expected, e)
}
}
}
}
}
@@ -71,7 +71,6 @@ use tokio::{
task::JoinHandle,
};

mod image_rpc;
mod rpc;
mod tracer;

@@ -340,7 +339,7 @@ async fn start_sandbox(
sandbox.lock().await.sender = Some(tx);

// vsock:///dev/vsock, port
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str())?;
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode)?;
server.start().await?;

rx.await?;
@@ -437,9 +436,8 @@ mod tests {
let msg = format!("test[{}]: {:?}", i, d);
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC).unwrap();
defer!({
// rfd is closed by the use of PipeStream in the crate_logger_task function,
// but we will attempt to close in case of a failure
let _ = unistd::close(rfd);
// XXX: Never try to close rfd, because it will be closed by PipeStream in
// create_logger_task() and it's not safe to close the same fd twice.
unistd::close(wfd).unwrap();
});


@@ -5,10 +5,11 @@

extern crate procfs;

use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, Opts, Registry, TextEncoder};

use anyhow::Result;
use anyhow::{anyhow, Result};
use slog::warn;
use std::sync::Mutex;
use tracing::instrument;

const NAMESPACE_KATA_AGENT: &str = "kata_agent";
@@ -23,55 +24,70 @@ macro_rules! sl {

lazy_static! {

static ref AGENT_SCRAPE_COUNT: IntCounter =
prometheus::register_int_counter!(format!("{}_{}",NAMESPACE_KATA_AGENT,"scrape_count"), "Metrics scrape count").unwrap();
static ref REGISTERED: Mutex<bool> = Mutex::new(false);

static ref AGENT_THREADS: Gauge =
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"threads"), "Agent process threads").unwrap();
// custom registry
static ref REGISTRY: Registry = Registry::new();

static ref AGENT_TOTAL_TIME: Gauge =
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_time"), "Agent process total time").unwrap();
static ref AGENT_SCRAPE_COUNT: IntCounter =
IntCounter::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"scrape_count"), "Metrics scrape count").unwrap();

static ref AGENT_TOTAL_VM: Gauge =
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_vm"), "Agent process total VM size").unwrap();
// agent metrics
static ref AGENT_THREADS: Gauge =
Gauge::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"threads"), "Agent process threads").unwrap();

static ref AGENT_TOTAL_RSS: Gauge =
prometheus::register_gauge!(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_rss"), "Agent process total RSS size").unwrap();
static ref AGENT_TOTAL_TIME: Gauge =
Gauge::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_time"), "Agent process total time").unwrap();

static ref AGENT_PROC_STATUS: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_status"), "Agent process status.", &["item"]).unwrap();
static ref AGENT_TOTAL_VM: Gauge =
Gauge::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_vm"), "Agent process total VM size").unwrap();

static ref AGENT_IO_STAT: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"io_stat"), "Agent process IO statistics.", &["item"]).unwrap();
static ref AGENT_TOTAL_RSS: Gauge =
Gauge::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"total_rss"), "Agent process total RSS size").unwrap();

static ref AGENT_PROC_STAT: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_stat"), "Agent process statistics.", &["item"]).unwrap();
static ref AGENT_PROC_STATUS: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_status"), "Agent process status."), &["item"]).unwrap();

static ref AGENT_IO_STAT: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"io_stat"), "Agent process IO statistics."), &["item"]).unwrap();

static ref AGENT_PROC_STAT: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_AGENT,"proc_stat"), "Agent process statistics."), &["item"]).unwrap();

// guest os metrics
static ref GUEST_LOAD: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"load") , "Guest system load.", &["item"]).unwrap();
static ref GUEST_LOAD: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"load"), "Guest system load."), &["item"]).unwrap();

static ref GUEST_TASKS: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"tasks") , "Guest system load.", &["item"]).unwrap();
static ref GUEST_TASKS: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"tasks"), "Guest system load."), &["item"]).unwrap();

static ref GUEST_CPU_TIME: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"cpu_time") , "Guest CPU statistics.", &["cpu","item"]).unwrap();
static ref GUEST_CPU_TIME: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"cpu_time"), "Guest CPU statistics."), &["cpu","item"]).unwrap();

static ref GUEST_VM_STAT: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"vm_stat") , "Guest virtual memory statistics.", &["item"]).unwrap();
static ref GUEST_VM_STAT: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"vm_stat"), "Guest virtual memory statistics."), &["item"]).unwrap();

static ref GUEST_NETDEV_STAT: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"netdev_stat") , "Guest net devices statistics.", &["interface","item"]).unwrap();
static ref GUEST_NETDEV_STAT: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"netdev_stat"), "Guest net devices statistics."), &["interface","item"]).unwrap();

static ref GUEST_DISKSTAT: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"diskstat") , "Disks statistics in system.", &["disk","item"]).unwrap();
static ref GUEST_DISKSTAT: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"diskstat"), "Disks statistics in system."), &["disk","item"]).unwrap();

static ref GUEST_MEMINFO: GaugeVec =
prometheus::register_gauge_vec!(format!("{}_{}",NAMESPACE_KATA_GUEST,"meminfo") , "Statistics about memory usage in the system.", &["item"]).unwrap();
static ref GUEST_MEMINFO: GaugeVec =
GaugeVec::new(Opts::new(format!("{}_{}",NAMESPACE_KATA_GUEST,"meminfo"), "Statistics about memory usage in the system."), &["item"]).unwrap();
}

#[instrument]
pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
let mut registered = REGISTERED
.lock()
.map_err(|e| anyhow!("failed to check agent metrics register status {:?}", e))?;

if !(*registered) {
register_metrics()?;
*registered = true;
}

AGENT_SCRAPE_COUNT.inc();

// update agent process metrics
@@ -81,7 +97,7 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
update_guest_metrics();

// gather all metrics and return as a String
let metric_families = prometheus::gather();
let metric_families = REGISTRY.gather();

let mut buffer = Vec::new();
let encoder = TextEncoder::new();
@@ -90,6 +106,31 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
Ok(String::from_utf8(buffer)?)
}

#[instrument]
fn register_metrics() -> Result<()> {
REGISTRY.register(Box::new(AGENT_SCRAPE_COUNT.clone()))?;

// agent metrics
REGISTRY.register(Box::new(AGENT_THREADS.clone()))?;
REGISTRY.register(Box::new(AGENT_TOTAL_TIME.clone()))?;
REGISTRY.register(Box::new(AGENT_TOTAL_VM.clone()))?;
REGISTRY.register(Box::new(AGENT_TOTAL_RSS.clone()))?;
REGISTRY.register(Box::new(AGENT_PROC_STATUS.clone()))?;
REGISTRY.register(Box::new(AGENT_IO_STAT.clone()))?;
REGISTRY.register(Box::new(AGENT_PROC_STAT.clone()))?;

// guest metrics
REGISTRY.register(Box::new(GUEST_LOAD.clone()))?;
REGISTRY.register(Box::new(GUEST_TASKS.clone()))?;
REGISTRY.register(Box::new(GUEST_CPU_TIME.clone()))?;
REGISTRY.register(Box::new(GUEST_VM_STAT.clone()))?;
REGISTRY.register(Box::new(GUEST_NETDEV_STAT.clone()))?;
REGISTRY.register(Box::new(GUEST_DISKSTAT.clone()))?;
REGISTRY.register(Box::new(GUEST_MEMINFO.clone()))?;

Ok(())
}
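The metrics now live in a private Registry, registered lazily on first scrape, rather than in the prometheus crate's global default registry at static-init time. A minimal sketch of the same pattern with the prometheus crate:

use prometheus::{Encoder, IntCounter, Registry, TextEncoder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // A private registry keeps agent metrics isolated from the global one.
    let registry = Registry::new();
    let scrapes = IntCounter::new("kata_agent_scrape_count", "Metrics scrape count")?;
    registry.register(Box::new(scrapes.clone()))?;

    scrapes.inc();

    // Gather from the custom registry and encode in the text exposition format.
    let mut buffer = Vec::new();
    TextEncoder::new().encode(&registry.gather(), &mut buffer)?;
    println!("{}", String::from_utf8(buffer)?);
    Ok(())
}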

#[instrument]
fn update_agent_metrics() -> Result<()> {
let me = procfs::process::Process::myself();

@@ -34,12 +34,9 @@ use protocols::health::{
HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
};
use protocols::types::Interface;
use protocols::{
agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc,
image_ttrpc_async as image_ttrpc,
};
use protocols::{agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc};
use rustjail::cgroups::notifier;
use rustjail::container::{BaseContainer, Container, LinuxContainer};
use rustjail::container::{BaseContainer, Container, LinuxContainer, SYSTEMD_CGROUP_PATH_FORMAT};
use rustjail::process::Process;
use rustjail::specconv::CreateOpts;

@@ -52,7 +49,6 @@ use rustjail::process::ProcessOperations;
use crate::device::{
add_devices, get_virtio_blk_pci_device_name, update_device_cgroup, update_env_pci,
};
use crate::image_rpc;
use crate::linux_abi::*;
use crate::metrics::get_metrics;
use crate::mount::{add_storages, baremount, STORAGE_HANDLER_LIST};
@@ -84,12 +80,8 @@ use std::io::{BufRead, BufReader, Write};
use std::os::unix::fs::FileExt;
use std::path::PathBuf;

pub const CONTAINER_BASE: &str = "/run/kata-containers";
const CONTAINER_BASE: &str = "/run/kata-containers";
const MODPROBE_PATH: &str = "/sbin/modprobe";
const ANNO_K8S_IMAGE_NAME: &str = "io.kubernetes.cri.image-name";
const CONFIG_JSON: &str = "config.json";
const INIT_TRUSTED_STORAGE: &str = "/usr/bin/kata-init-trusted-storage";
const TRUSTED_STORAGE_DEVICE: &str = "/dev/trusted_store";

/// the iptables series binaries could appear either in /sbin
/// or /usr/sbin, we need to check both of them
@@ -145,41 +137,7 @@ macro_rules! is_allowed {
#[derive(Clone, Debug)]
pub struct AgentService {
sandbox: Arc<Mutex<Sandbox>>,
}

// A container ID must match this regex:
//
// ^[a-zA-Z0-9][a-zA-Z0-9_.-]+$
//
pub fn verify_cid(id: &str) -> Result<()> {
let mut chars = id.chars();

let valid = matches!(chars.next(), Some(first) if first.is_alphanumeric()
&& id.len() > 1
&& chars.all(|c| c.is_alphanumeric() || ['.', '-', '_'].contains(&c)));

match valid {
true => Ok(()),
false => Err(anyhow!("invalid container ID: {:?}", id)),
}
}
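verify_cid enforces the documented ^[a-zA-Z0-9][a-zA-Z0-9_.-]+$ shape without a regex engine: an alphanumeric first character, a length of at least two, and only alphanumerics plus '.', '-', '_' afterwards. The same check as above, made standalone with a few behavioral assertions:

use anyhow::{anyhow, Result};

// Same rule as verify_cid above: first char alphanumeric, length > 1,
// remaining chars alphanumeric or one of '.', '-', '_'.
fn verify_cid(id: &str) -> Result<()> {
    let mut chars = id.chars();
    let valid = matches!(chars.next(), Some(first) if first.is_alphanumeric()
        && id.len() > 1
        && chars.all(|c| c.is_alphanumeric() || ['.', '-', '_'].contains(&c)));
    match valid {
        true => Ok(()),
        false => Err(anyhow!("invalid container ID: {:?}", id)),
    }
}

fn main() {
    assert!(verify_cid("0some-string.1_2").is_ok());
    assert!(verify_cid("a").is_err());   // too short
    assert!(verify_cid("a:b").is_err()); // ':' is not allowed
    assert!(verify_cid("..").is_err());  // '.' cannot lead
}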

// Partially merge an OCI process specification into another one.
fn merge_oci_process(target: &mut oci::Process, source: &oci::Process) {
if target.args.is_empty() && !source.args.is_empty() {
target.args.append(&mut source.args.clone());
}

if target.cwd == "/" && source.cwd != "/" {
target.cwd = String::from(&source.cwd);
}

for source_env in &source.env {
let variable_name: Vec<&str> = source_env.split('=').collect();
if !target.env.iter().any(|i| i.contains(variable_name[0])) {
target.env.push(source_env.to_string());
}
}
init_mode: bool,
}

impl AgentService {
@@ -212,9 +170,6 @@ impl AgentService {
"receive createcontainer, storages: {:?}", &req.storages
);

// Merge the image bundle OCI spec into the container creation request OCI spec.
self.merge_bundle_oci(&mut oci).await?;

// Some devices need some extra processing (the ones invoked with
// --device for instance), and that's what this call is doing. It
// updates the devices listed in the OCI spec, so that they actually
@@ -222,30 +177,6 @@ impl AgentService {
// cannot predict everything from the caller.
add_devices(&req.devices.to_vec(), &mut oci, &self.sandbox).await?;

let linux = oci
.linux
.as_mut()
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;

for specdev in &mut linux.devices {
let dev_major_minor = format!("{}:{}", specdev.major, specdev.minor);

if specdev.path == TRUSTED_STORAGE_DEVICE {
let data_integrity = AGENT_CONFIG.read().await.data_integrity;
info!(
sl!(),
"trusted_store device major:minor {}, enable data integrity {}",
dev_major_minor,
data_integrity.to_string()
);

Command::new(INIT_TRUSTED_STORAGE)
.args([&dev_major_minor, &data_integrity.to_string()])
.output()
.expect("Failed to initialize confidential storage");
}
}

// Both rootfs and volumes (invoked with --volume for instance) will
// be processed the same way. The idea is to always mount any provided
// storage to the specified MountPoint, so that it will match what's
@@ -280,9 +211,20 @@ impl AgentService {
// restore the cwd for kata-agent process.
defer!(unistd::chdir(&olddir).unwrap());

// determine which cgroup driver to take and then assign to use_systemd_cgroup
// systemd: "[slice]:[prefix]:[name]"
// fs: "/path_a/path_b"
// If agent is init we can't use systemd cgroup mode, no matter what the host tells us
let cgroups_path = oci.linux.as_ref().map_or("", |linux| &linux.cgroups_path);
let use_systemd_cgroup = if self.init_mode {
false
} else {
SYSTEMD_CGROUP_PATH_FORMAT.is_match(cgroups_path)
};
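Whether systemd drives the container cgroup is now decided from the shape of the OCI cgroups_path, except in init mode where the guest has no systemd. SYSTEMD_CGROUP_PATH_FORMAT itself is not shown in this diff, so the matcher below is a hypothetical stand-in for a "[slice]:[prefix]:[name]" check:

use lazy_static::lazy_static;
use regex::Regex;

lazy_static! {
    // Hypothetical stand-in for SYSTEMD_CGROUP_PATH_FORMAT: three
    // colon-separated fields, none of which may contain '/'.
    static ref SYSTEMD_CGROUP_PATH_FORMAT: Regex =
        Regex::new(r"^[^/:]*:[^/:]*:[^/:]*$").unwrap();
}

fn main() {
    assert!(SYSTEMD_CGROUP_PATH_FORMAT.is_match("system.slice:kata_agent:abc"));
    assert!(SYSTEMD_CGROUP_PATH_FORMAT.is_match("::"));
    assert!(!SYSTEMD_CGROUP_PATH_FORMAT.is_match("/kata/abc"));
}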
|
||||
let opts = CreateOpts {
|
||||
cgroup_name: "".to_string(),
|
||||
use_systemd_cgroup: false,
|
||||
use_systemd_cgroup,
|
||||
no_pivot_root: s.no_pivot_root,
|
||||
no_new_keyring: false,
|
||||
spec: Some(oci.clone()),
|
||||
@@ -696,54 +638,6 @@ impl AgentService {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// When being passed an image name through a container annotation, merge its
|
||||
// corresponding bundle OCI specification into the passed container creation one.
|
||||
async fn merge_bundle_oci(&self, container_oci: &mut oci::Spec) -> Result<()> {
|
||||
if let Some(image_name) = container_oci
|
||||
.annotations
|
||||
.get(&ANNO_K8S_IMAGE_NAME.to_string())
|
||||
{
|
||||
if let Some(container_id) = self.sandbox.clone().lock().await.images.get(image_name) {
|
||||
let image_oci_config_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(CONFIG_JSON);
|
||||
debug!(
|
||||
sl!(),
|
||||
"Image bundle config path: {:?}", image_oci_config_path
|
||||
);
|
||||
|
||||
let image_oci =
|
||||
oci::Spec::load(image_oci_config_path.to_str().ok_or_else(|| {
|
||||
anyhow!(
|
||||
"Invalid container image OCI config path {:?}",
|
||||
image_oci_config_path
|
||||
)
|
||||
})?)
|
||||
.context("load image bundle")?;
|
||||
|
||||
if let Some(container_root) = container_oci.root.as_mut() {
|
||||
if let Some(image_root) = image_oci.root.as_ref() {
|
||||
let root_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(image_root.path.clone());
|
||||
container_root.path =
|
||||
String::from(root_path.to_str().ok_or_else(|| {
|
||||
anyhow!("Invalid container image root path {:?}", root_path)
|
||||
})?);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(container_process) = container_oci.process.as_mut() {
|
||||
if let Some(image_process) = image_oci.process.as_ref() {
|
||||
merge_oci_process(container_process, image_process);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}

#[async_trait]
@@ -1791,28 +1685,25 @@ async fn read_stream(reader: Arc<Mutex<ReadHalf<PipeStream>>>, l: usize) -> Resu
    Ok(content)
}

pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> Result<TtrpcServer> {
    let agent_service = Box::new(AgentService { sandbox: s.clone() })
        as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str, init_mode: bool) -> Result<TtrpcServer> {
    let agent_service = Box::new(AgentService {
        sandbox: s,
        init_mode,
    }) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;

    let agent_worker = Arc::new(agent_service);

    let health_service = Box::new(HealthService {}) as Box<dyn health_ttrpc::Health + Send + Sync>;
    let health_worker = Arc::new(health_service);

    let image_service =
        Box::new(image_rpc::ImageService::new(s)) as Box<dyn image_ttrpc::Image + Send + Sync>;

    let aservice = agent_ttrpc::create_agent_service(agent_worker);

    let hservice = health_ttrpc::create_health(health_worker);

    let iservice = image_ttrpc::create_image(Arc::new(image_service));

    let server = TtrpcServer::new()
        .bind(server_address)?
        .register_service(aservice)
        .register_service(hservice)
        .register_service(iservice);
        .register_service(hservice);

    info!(sl!(), "ttRPC server started"; "address" => server_address);

@@ -2018,38 +1909,6 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {

    std::fs::set_permissions(&dir, std::fs::Permissions::from_mode(req.dir_mode))?;

    let sflag = stat::SFlag::from_bits_truncate(req.file_mode);

    if sflag.contains(stat::SFlag::S_IFDIR) {
        fs::create_dir(path.clone()).or_else(|e| {
            if e.kind() != std::io::ErrorKind::AlreadyExists {
                return Err(e);
            }
            Ok(())
        })?;

        std::fs::set_permissions(path.clone(), std::fs::Permissions::from_mode(req.file_mode))?;

        unistd::chown(
            &path,
            Some(Uid::from_raw(req.uid as u32)),
            Some(Gid::from_raw(req.gid as u32)),
        )?;

        return Ok(());
    }

    if sflag.contains(stat::SFlag::S_IFLNK) {
        let src = PathBuf::from(String::from_utf8(req.data.clone()).unwrap());

        unistd::symlinkat(&src, None, &path)?;
        let path_str = CString::new(path.to_str().unwrap())?;
        let ret = unsafe { libc::lchown(path_str.as_ptr(), req.uid as u32, req.gid as u32) };
        Errno::result(ret).map(drop)?;

        return Ok(());
    }

    let mut tmpfile = path.clone();
    tmpfile.set_extension("tmp");

@@ -2115,26 +1974,18 @@ pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
    let spec_root_path = Path::new(&spec_root.path);

    let bundle_path = Path::new(CONTAINER_BASE).join(cid);
    let config_path = bundle_path.join(CONFIG_JSON);
    let config_path = bundle_path.join("config.json");
    let rootfs_path = bundle_path.join("rootfs");

    let rootfs_exists = Path::new(&rootfs_path).exists();
    info!(
        sl!(),
        "The rootfs_path is {:?} and exists: {}", rootfs_path, rootfs_exists
    );

    if !rootfs_exists {
        fs::create_dir_all(&rootfs_path)?;
        baremount(
            spec_root_path,
            &rootfs_path,
            "bind",
            MsFlags::MS_BIND,
            "",
            &sl!(),
        )?;
    }
    fs::create_dir_all(&rootfs_path)?;
    baremount(
        spec_root_path,
        &rootfs_path,
        "bind",
        MsFlags::MS_BIND,
        "",
        &sl!(),
    )?;

    let rootfs_path_name = rootfs_path
        .to_str()
@@ -2314,6 +2165,7 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let req = protocols::agent::UpdateInterfaceRequest::default();
@@ -2331,6 +2183,7 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let req = protocols::agent::UpdateRoutesRequest::default();
@@ -2348,6 +2201,7 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let req = protocols::agent::AddARPNeighborsRequest::default();
@@ -2481,6 +2335,7 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let result = agent_service
@@ -2961,6 +2816,7 @@ OtherField:other
        let sandbox = Sandbox::new(&logger).unwrap();
        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let ctx = mk_ttrpc_context();
@@ -3110,135 +2966,4 @@ COMMIT
        "We should see the resulting rule"
    );
}

#[tokio::test]
async fn test_merge_cwd() {
    #[derive(Debug)]
    struct TestData<'a> {
        container_process_cwd: &'a str,
        image_process_cwd: &'a str,
        expected: &'a str,
    }

    let tests = &[
        // Image cwd should override blank container cwd
        // TODO - how can we tell the user didn't specifically set it to `/` vs not setting at all? Is that scenario valid?
        TestData {
            container_process_cwd: "/",
            image_process_cwd: "/imageDir",
            expected: "/imageDir",
        },
        // Container cwd should override image cwd
        TestData {
            container_process_cwd: "/containerDir",
            image_process_cwd: "/imageDir",
            expected: "/containerDir",
        },
        // Container cwd should override blank image cwd
        TestData {
            container_process_cwd: "/containerDir",
            image_process_cwd: "/",
            expected: "/containerDir",
        },
    ];

    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let mut container_process = oci::Process {
            cwd: d.container_process_cwd.to_string(),
            ..Default::default()
        };

        let image_process = oci::Process {
            cwd: d.image_process_cwd.to_string(),
            ..Default::default()
        };

        merge_oci_process(&mut container_process, &image_process);

        assert_eq!(d.expected, container_process.cwd, "{}", msg);
    }
}

#[tokio::test]
async fn test_merge_env() {
    #[derive(Debug)]
    struct TestData {
        container_process_env: Vec<String>,
        image_process_env: Vec<String>,
        expected: Vec<String>,
    }

    let tests = &[
        // Test that the pod's environment overrides the image's
        TestData {
            container_process_env: vec!["ISPRODUCTION=true".to_string()],
            image_process_env: vec!["ISPRODUCTION=false".to_string()],
            expected: vec!["ISPRODUCTION=true".to_string()],
        },
        // Test that multiple environment variables can be overridden
        TestData {
            container_process_env: vec![
                "ISPRODUCTION=true".to_string(),
                "ISDEVELOPMENT=false".to_string(),
            ],
            image_process_env: vec![
                "ISPRODUCTION=false".to_string(),
                "ISDEVELOPMENT=true".to_string(),
            ],
            expected: vec![
                "ISPRODUCTION=true".to_string(),
                "ISDEVELOPMENT=false".to_string(),
            ],
        },
        // Test that when none of the variables match, none are overridden
        TestData {
            container_process_env: vec!["ANOTHERENV=TEST".to_string()],
            image_process_env: vec![
                "ISPRODUCTION=false".to_string(),
                "ISDEVELOPMENT=true".to_string(),
            ],
            expected: vec![
                "ANOTHERENV=TEST".to_string(),
                "ISPRODUCTION=false".to_string(),
                "ISDEVELOPMENT=true".to_string(),
            ],
        },
        // Test a mix of both overriding and not
        TestData {
            container_process_env: vec![
                "ANOTHERENV=TEST".to_string(),
                "ISPRODUCTION=true".to_string(),
            ],
            image_process_env: vec![
                "ISPRODUCTION=false".to_string(),
                "ISDEVELOPMENT=true".to_string(),
            ],
            expected: vec![
                "ANOTHERENV=TEST".to_string(),
                "ISPRODUCTION=true".to_string(),
                "ISDEVELOPMENT=true".to_string(),
            ],
        },
    ];

    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let mut container_process = oci::Process {
            env: d.container_process_env.clone(),
            ..Default::default()
        };

        let image_process = oci::Process {
            env: d.image_process_env.clone(),
            ..Default::default()
        };

        merge_oci_process(&mut container_process, &image_process);

        assert_eq!(d.expected, container_process.env, "{}", msg);
    }
}
}

@@ -60,7 +60,6 @@ pub struct Sandbox {
    pub event_tx: Option<Sender<String>>,
    pub bind_watcher: BindWatcher,
    pub pcimap: HashMap<pci::Address, pci::Address>,
    pub images: HashMap<String, String>,
}

impl Sandbox {
@@ -94,7 +93,6 @@ impl Sandbox {
        event_tx: Some(tx),
        bind_watcher: BindWatcher::new(),
        pcimap: HashMap::new(),
        images: HashMap::new(),
    })
}
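
The `images` map added to `Sandbox` above ties a pulled image name to the container id whose bundle holds its OCI config; `merge_bundle_oci()` earlier in this series looks the id up to locate `config.json` under `CONTAINER_BASE`. A stand-alone illustration (the base path literal is an assumption for demonstration purposes):

```rust
use std::collections::HashMap;
use std::path::Path;

fn main() {
    // image name -> container id, as kept in Sandbox::images.
    let mut images: HashMap<String, String> = HashMap::new();
    images.insert("docker.io/library/nginx:latest".into(), "abc123".into());

    if let Some(cid) = images.get("docker.io/library/nginx:latest") {
        // Illustrative base path; the agent uses its CONTAINER_BASE constant.
        let config = Path::new("/run/kata-containers").join(cid).join("config.json");
        println!("bundle config would live at {:?}", config);
    }
}
```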
@@ -24,7 +24,7 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
    loop {
        // Avoid reaping the undesirable child's signal, e.g., execute_hook's
        // The lock should be released immediately.
        let _ = rustjail::container::WAIT_PID_LOCKER.lock().await;
        let _locker = rustjail::container::WAIT_PID_LOCKER.lock().await;
        let result = wait::waitpid(
            Some(Pid::from_raw(-1)),
            Some(WaitPidFlag::WNOHANG | WaitPidFlag::__WALL),
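
The rename above fixes a real bug: in Rust, `let _ = guard` drops the value immediately, so the `WAIT_PID_LOCKER` guard was released before `waitpid()` ran; binding it to `_locker` holds the lock until the end of the scope. A minimal, self-contained demonstration of the difference:

```rust
use std::sync::Mutex;

fn main() {
    let lock = Mutex::new(());

    let _ = lock.lock().unwrap(); // guard dropped right here, lock released
    let second = lock.try_lock(); // succeeds: nothing is holding the lock
    assert!(second.is_ok());
    drop(second);

    let _locker = lock.lock().unwrap(); // guard lives until end of scope
    assert!(lock.try_lock().is_err()); // now the lock is actually held
}
```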
@@ -1291,6 +1291,7 @@ mod tests {

    #[tokio::test]
    #[serial]
    #[cfg(not(target_arch = "aarch64"))]
    async fn create_tmpfs() {
        skip_if_not_root!();

@@ -18,4 +18,4 @@ bincode = "1.3.3"
byteorder = "1.4.3"
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug"] }
async-trait = "0.1.50"
tokio = "1.2.0"
tokio = "1.28.1"

@@ -19,6 +19,7 @@ and configuration process.
Device: [Device Document](docs/device.md)
vCPU: [vCPU Document](docs/vcpu.md)
API: [API Document](docs/api.md)
`Upcall`: [`Upcall` Document](docs/upcall.md)

Currently, the documents are still being actively added.
You can see the [official documentation](docs/) page for more details.

177 src/dragonball/docs/images/upcall-architecture.svg Normal file
@@ -0,0 +1,177 @@

[New image file, 12 KiB: the `upcall` architecture diagram referenced by
docs/upcall.md. It shows three layers separated by dashed lines: Guest User,
Guest Kernel, and Hypervisor. In the guest kernel, an "Upcall Server" runs
through socket, bind, listen, and accept, then spawns a new kthread per
connection and hands it to a service handler. A per-service Port/Conn pair
connects the guest-side "Device Manager Service" (and further services such
as "Service B") over virtio-vsocket to the hypervisor-side virtio-vsocket
backend, where the "Device Manager Backend" (and "Backend B") answer the
requests.]
30 src/dragonball/docs/upcall.md Normal file
@@ -0,0 +1,30 @@
# `Upcall`

## What is `Upcall`?

`Upcall` is a direct communication tool between the VMM and the guest, built on `vsock`. The server side of `upcall` is a driver in the guest kernel (kernel patches are needed for this feature) that starts serving requests once the kernel boots. The client side lives in the Dragonball VMM as a thread that communicates with `vsock` through `uds`; a minimal sketch of this transport follows the diagram below.

We aim to keep the VM lightweight through the `upcall` implementation.

![upcall architecture](images/upcall-architecture.svg)

## What can `upcall` do?

The device manager service (one of the services we developed on top of `upcall`) defines operations for device hotplug / hot-unplug, including vCPU hotplug, `virtio-mmio` device hotplug, and memory hotplug. Driving hotplug directly through `upcall` avoids emulating ACPI and keeps virtual machine overhead to a minimum. Many other uses are possible if additional services are implemented.

## How to enable `upcall`?

`Upcall` needs a server in the guest kernel, delivered as several kernel patches for the `upcall` server itself and for the different services registered with it. It is currently tested on the upstream Linux 5.10 kernel.

To make this easy to use, we have open-sourced the `upcall` guest patches in [Dragonball experimental guest patches](../../../tools/packaging/kernel/patches/5.10.x/dragonball-experimental) and added `upcall` support to the [Kata guest kernel building script](../../../tools/packaging/kernel/build-kernel.sh).

You can use the following command to download the upstream kernel (Dragonball currently uses 5.10.25) and apply the `upcall` patches along with the other Kata patches:

`sh build-kernel.sh -e -t dragonball -f setup`

- `-e` means experimental, mainly because the `upcall` patches are not in the upstream Linux kernel.
- `-t dragonball` specifies the hypervisor type.
- `-f` generates the `.config` file.

After this command, the kernel code with `upcall` and the related `.config` file are set up in the directory `kata-linux-dragonball-experimental-5.10.25-[config version]`. You can either compile the kernel manually with `make` or follow the [build-kernel.sh documentation](../../../tools/packaging/kernel/README.md) to build and use this guest kernel.

A client side is also needed in the VMM. Dragonball's `upcall` client implementation is open source, and compiling Dragonball with the `dbs-upcall` feature enables the client side.
@@ -250,6 +250,11 @@ impl AddressSpaceMgr {
        self.address_space.as_ref()
    }

    /// Get the guest memory.
    pub fn vm_memory(&self) -> Option<<GuestAddressSpaceImpl as GuestAddressSpace>::T> {
        self.get_vm_as().map(|m| m.memory())
    }

    /// Create the address space for a virtual machine.
    ///
    /// This method is designed to be called when starting up a virtual machine instead of at

@@ -35,6 +35,9 @@ pub use crate::device_manager::virtio_net_dev_mgr::{
#[cfg(feature = "virtio-vsock")]
pub use crate::device_manager::vsock_dev_mgr::{VsockDeviceConfigInfo, VsockDeviceError};

#[cfg(feature = "hotplug")]
pub use crate::vcpu::{VcpuResizeError, VcpuResizeInfo};

use super::*;

/// Wrapper for all errors associated with VMM actions.
@@ -44,9 +47,13 @@ pub enum VmmActionError {
    #[error("the virtual machine instance ID is invalid")]
    InvalidVMID,

    /// VM doesn't exist and can't get VM information.
    #[error("VM doesn't exist and can't get VM information")]
    VmNotExist,

    /// Failed to hotplug, due to Upcall not ready.
    #[error("Upcall not ready, can't hotplug device.")]
    UpcallNotReady,
    UpcallServerNotReady,

    /// The action `ConfigureBootSource` failed either because of bad user input or an internal
    /// error.
@@ -85,6 +92,11 @@ pub enum VmmActionError {
    /// The action `InsertFsDevice` failed either because of bad user input or an internal error.
    #[error("virtio-fs device error: {0}")]
    FsDevice(#[source] FsDeviceError),

    #[cfg(feature = "hotplug")]
    /// The action `ResizeVcpu` Failed
    #[error("vcpu resize error : {0}")]
    ResizeVcpu(#[source] VcpuResizeError),
}

/// This enum represents the public interface of the VMM. Each action contains various
@@ -156,6 +168,10 @@ pub enum VmmAction {
    #[cfg(feature = "virtio-fs")]
    /// Update fs rate limiter, after microVM start.
    UpdateFsDevice(FsDeviceConfigUpdateInfo),

    #[cfg(feature = "hotplug")]
    /// Resize Vcpu number in the guest.
    ResizeVcpu(VcpuResizeInfo),
}

/// The enum represents the response sent by the VMM in case of success. The response is either
@@ -256,6 +272,8 @@ impl VmmService {
            VmmAction::UpdateFsDevice(fs_update_cfg) => {
                self.update_fs_rate_limiters(vmm, fs_update_cfg)
            }
            #[cfg(feature = "hotplug")]
            VmmAction::ResizeVcpu(vcpu_resize_cfg) => self.resize_vcpu(vmm, vcpu_resize_cfg),
        };

        debug!("send vmm response: {:?}", response);
@@ -462,8 +480,8 @@ impl VmmService {
        let ctx = vm
            .create_device_op_context(Some(event_mgr.epoll_manager()))
            .map_err(|e| {
                if let StartMicroVmError::UpcallNotReady = e {
                    return VmmActionError::UpcallNotReady;
                if let StartMicroVmError::UpcallServerNotReady = e {
                    return VmmActionError::UpcallServerNotReady;
                }
                VmmActionError::Block(BlockDeviceError::UpdateNotAllowedPostBoot)
            })?;
@@ -518,8 +536,8 @@ impl VmmService {
            .map_err(|e| {
                if let StartMicroVmError::MicroVMAlreadyRunning = e {
                    VmmActionError::VirtioNet(VirtioNetDeviceError::UpdateNotAllowedPostBoot)
                } else if let StartMicroVmError::UpcallNotReady = e {
                    VmmActionError::UpcallNotReady
                } else if let StartMicroVmError::UpcallServerNotReady = e {
                    VmmActionError::UpcallServerNotReady
                } else {
                    VmmActionError::StartMicroVm(e)
                }
@@ -595,6 +613,37 @@ impl VmmService {
            .map(|_| VmmData::Empty)
            .map_err(VmmActionError::FsDevice)
    }

    #[cfg(feature = "hotplug")]
    fn resize_vcpu(&mut self, vmm: &mut Vmm, config: VcpuResizeInfo) -> VmmRequestResult {
        if !cfg!(target_arch = "x86_64") {
            // TODO: Arm need to support vcpu hotplug. issue: #6010
            warn!("This arch do not support vm resize!");
            return Ok(VmmData::Empty);
        }

        if !cfg!(feature = "dbs-upcall") {
            warn!("We only support cpu resize through upcall server in the guest kernel now, please enable dbs-upcall feature.");
            return Ok(VmmData::Empty);
        }

        let vm = vmm.get_vm_mut().ok_or(VmmActionError::VmNotExist)?;

        if !vm.is_vm_initialized() {
            return Err(VmmActionError::ResizeVcpu(
                VcpuResizeError::UpdateNotAllowedPreBoot,
            ));
        }

        vm.resize_vcpu(config, None).map_err(|e| {
            if let VcpuResizeError::UpcallServerNotReady = e {
                return VmmActionError::UpcallServerNotReady;
            }
            VmmActionError::ResizeVcpu(e)
        })?;

        Ok(VmmData::Empty)
    }
}
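
The two `cfg!` guards in `resize_vcpu` above are evaluated at compile time, so on non-x86_64 builds, or builds without `dbs-upcall`, the resize request degrades to a logged no-op rather than an error. A self-contained sketch of the same gating pattern (stand-alone code, not part of Dragonball):

```rust
// cfg!() folds to a constant boolean at compile time; the unsupported
// branches remain in the source but cost nothing at runtime.
fn resize_supported() -> bool {
    cfg!(target_arch = "x86_64") && cfg!(feature = "dbs-upcall")
}

fn main() {
    if !resize_supported() {
        eprintln!("vCPU resize unavailable: needs x86_64 and the dbs-upcall feature");
        return;
    }
    println!("vCPU resize path enabled");
}
```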

fn handle_cpu_topology(
@@ -656,10 +705,10 @@ mod tests {
        let (to_vmm, from_api) = channel();
        let (to_api, from_vmm) = channel();

        let vmm = Arc::new(Mutex::new(create_vmm_instance()));
        let epoll_mgr = EpollManager::default();
        let vmm = Arc::new(Mutex::new(create_vmm_instance(epoll_mgr.clone())));
        let mut vservice = VmmService::new(from_api, to_api);

        let epoll_mgr = EpollManager::default();
        let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
        let mut v = vmm.lock().unwrap();

@@ -681,9 +730,9 @@ mod tests {

        let (_to_vmm, from_api) = channel();
        let (to_api, _from_vmm) = channel();
        let vmm = Arc::new(Mutex::new(create_vmm_instance()));
        let mut vservice = VmmService::new(from_api, to_api);
        let epoll_mgr = EpollManager::default();
        let vmm = Arc::new(Mutex::new(create_vmm_instance(epoll_mgr.clone())));
        let mut vservice = VmmService::new(from_api, to_api);
        let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
        let mut v = vmm.lock().unwrap();

@@ -695,9 +744,9 @@ mod tests {
    fn test_vmm_action_disconnected() {
        let (to_vmm, from_api) = channel();
        let (to_api, _from_vmm) = channel();
        let vmm = Arc::new(Mutex::new(create_vmm_instance()));
        let mut vservice = VmmService::new(from_api, to_api);
        let epoll_mgr = EpollManager::default();
        let vmm = Arc::new(Mutex::new(create_vmm_instance(epoll_mgr.clone())));
        let mut vservice = VmmService::new(from_api, to_api);
        let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
        let mut v = vmm.lock().unwrap();

@@ -1065,7 +1065,9 @@ mod tests {
    use crate::vm::VmConfigInfo;

    let epoll_manager = EpollManager::default();
    let vmm = Arc::new(Mutex::new(crate::vmm::tests::create_vmm_instance()));
    let vmm = Arc::new(Mutex::new(crate::vmm::tests::create_vmm_instance(
        epoll_manager.clone(),
    )));
    let event_mgr = crate::event_manager::EventManager::new(&vmm, epoll_manager).unwrap();
    let mut vm = crate::vm::tests::create_vm_instance();
    let vm_config = VmConfigInfo {

@@ -100,7 +100,7 @@ pub enum StartMicroVmError {

    /// Upcall is not ready
    #[error("the upcall client is not ready")]
    UpcallNotReady,
    UpcallServerNotReady,

    /// Configuration passed in is invalid.
    #[error("invalid virtual machine configuration: {0} ")]

@@ -10,7 +10,10 @@ mod vcpu_manager;
#[cfg(target_arch = "x86_64")]
use dbs_arch::cpuid::VpmuFeatureLevel;

pub use vcpu_manager::{VcpuManager, VcpuManagerError};
pub use vcpu_manager::{VcpuManager, VcpuManagerError, VcpuResizeInfo};

#[cfg(feature = "hotplug")]
pub use vcpu_manager::VcpuResizeError;

/// vcpu config collection
pub struct VcpuConfig {

@@ -214,10 +214,20 @@ pub enum VcpuResponse {
    CacheRevalidated,
}

#[derive(Debug, PartialEq)]
/// Vcpu Hotplug Result returned from the guest
pub enum VcpuResizeResult {
    /// All vCPU hotplug / hot-unplug operations are successful
    Success = 0,
    /// vCPU hotplug / hot-unplug failed
    Failed = 1,
}

/// List of events that the vcpu_state_sender can send.
pub enum VcpuStateEvent {
    /// (result, response) for hotplug, result 0 means failure, 1 means success.
    Hotplug((i32, u32)),
    /// (result, response) for hotplug / hot-unplug.
    /// response records how many cpus have successfully been hotplugged / hot-unplugged.
    Hotplug((VcpuResizeResult, u32)),
}
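
A self-contained sketch of the new event shape, using local stand-ins for the types above: the sender now reports a typed `VcpuResizeResult` instead of a raw integer, so the receiver matches on the enum rather than comparing against zero.

```rust
use std::sync::mpsc::channel;

#[derive(Debug, PartialEq)]
enum VcpuResizeResult {
    Success,
    Failed,
}

enum VcpuStateEvent {
    /// (result, number of vCPUs successfully hotplugged / hot-unplugged)
    Hotplug((VcpuResizeResult, u32)),
}

fn main() {
    let (tx, rx) = channel();
    tx.send(VcpuStateEvent::Hotplug((VcpuResizeResult::Success, 2)))
        .unwrap();

    match rx.recv().unwrap() {
        VcpuStateEvent::Hotplug((result, cpu_count)) => {
            assert_ne!(result, VcpuResizeResult::Failed);
            println!("hotplugged {} vCPUs", cpu_count);
        }
    }
}
```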

/// Wrapper over vCPU that hides the underlying interactions with the vCPU thread.

@@ -29,7 +29,7 @@ use crate::address_space_manager::GuestAddressSpaceImpl;
use crate::api::v1::InstanceInfo;
use crate::kvm_context::KvmContext;
use crate::vcpu::vcpu_impl::{
    Vcpu, VcpuError, VcpuEvent, VcpuHandle, VcpuResponse, VcpuStateEvent,
    Vcpu, VcpuError, VcpuEvent, VcpuHandle, VcpuResizeResult, VcpuResponse, VcpuStateEvent,
};
use crate::vcpu::VcpuConfig;
use crate::vm::VmConfigInfo;
@@ -113,11 +113,6 @@ pub enum VcpuManagerError {
    #[error("vcpu internal error: {0}")]
    Vcpu(#[source] VcpuError),

    #[cfg(feature = "hotplug")]
    /// vCPU resize error
    #[error("resize vcpu error: {0}")]
    VcpuResize(#[source] VcpuResizeError),

    /// Kvm Ioctl Error
    #[error("failure in issuing KVM ioctl command: {0}")]
    Kvm(#[source] kvm_ioctls::Error),
@@ -132,6 +127,10 @@ pub enum VcpuResizeError {
    VcpuIsHotplugging,

    /// Cannot update the configuration of the microvm pre boot.
    #[error("resize vcpu operation is not allowed pre boot")]
    UpdateNotAllowedPreBoot,

    /// Cannot update the configuration of the microvm post boot.
    #[error("resize vcpu operation is not allowed after boot")]
    UpdateNotAllowedPostBoot,

@@ -147,10 +146,24 @@ pub enum VcpuResizeError {
    #[error("Removable vcpu not enough, removable vcpu num: {0}, number to remove: {1}, present vcpu count {2}")]
    LackRemovableVcpus(u16, u16, u16),

    #[cfg(all(feature = "hotplug", feature = "dbs-upcall"))]
    #[cfg(feature = "dbs-upcall")]
    /// Cannot update the configuration by upcall channel.
    #[error("cannot update the configuration by upcall channel: {0}")]
    Upcall(#[source] dbs_upcall::UpcallClientError),

    #[cfg(feature = "dbs-upcall")]
    /// Cannot find upcall client
    #[error("Cannot find upcall client")]
    UpcallClientMissing,

    #[cfg(feature = "dbs-upcall")]
    /// Upcall server is not ready
    #[error("Upcall server is not ready")]
    UpcallServerNotReady,

    /// Vcpu manager error
    #[error("Vcpu manager error : {0}")]
    Vcpu(#[source] VcpuManagerError),
}

/// Result for vCPU manager operations
@@ -163,6 +176,13 @@ enum VcpuAction {
    Hotunplug,
}

/// VcpuResizeInfo describes the information for vcpu hotplug / hot-unplug
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct VcpuResizeInfo {
    /// The desired vcpu count to resize.
    pub vcpu_count: Option<u8>,
}

/// Infos related to per vcpu
#[derive(Default)]
pub(crate) struct VcpuInfo {
@@ -810,11 +830,9 @@ mod hotplug {
        &mut self,
        vcpu_count: u8,
        sync_tx: Option<Sender<bool>>,
    ) -> std::result::Result<(), VcpuManagerError> {
    ) -> std::result::Result<(), VcpuResizeError> {
        if self.get_vcpus_action() != VcpuAction::None {
            return Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::VcpuIsHotplugging,
            ));
            return Err(VcpuResizeError::VcpuIsHotplugging);
        }
        self.action_sycn_tx = sync_tx;

@@ -831,9 +849,7 @@ mod hotplug {
            Ordering::Less => self.do_del_vcpu(vcpu_count, upcall),
            }
        } else {
            Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::UpdateNotAllowedPostBoot,
            ))
            Err(VcpuResizeError::UpdateNotAllowedPostBoot)
        }
    }

@@ -841,28 +857,31 @@ mod hotplug {
        &mut self,
        vcpu_count: u8,
        upcall_client: Arc<UpcallClient<DevMgrService>>,
    ) -> std::result::Result<(), VcpuManagerError> {
    ) -> std::result::Result<(), VcpuResizeError> {
        info!("resize vcpu: add");
        if vcpu_count > self.vcpu_config.max_vcpu_count {
            return Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::ExpectedVcpuExceedMax,
            ));
            return Err(VcpuResizeError::ExpectedVcpuExceedMax);
        }

        let created_vcpus = self.create_vcpus(vcpu_count, None, None)?;
        let cpu_ids = self.activate_vcpus(vcpu_count, true).map_err(|e| {
            // we need to rollback when activate vcpu error
            error!("activate vcpu error, rollback! {:?}", e);
            let activated_vcpus: Vec<u8> = created_vcpus
                .iter()
                .filter(|&cpu_id| self.vcpu_infos[*cpu_id as usize].handle.is_some())
                .copied()
                .collect();
            if let Err(e) = self.exit_vcpus(&activated_vcpus) {
                error!("try to rollback error, stop_vcpu: {:?}", e);
            }
            e
        })?;
        let created_vcpus = self
            .create_vcpus(vcpu_count, None, None)
            .map_err(VcpuResizeError::Vcpu)?;
        let cpu_ids = self
            .activate_vcpus(vcpu_count, true)
            .map_err(|e| {
                // we need to rollback when activate vcpu error
                error!("activate vcpu error, rollback! {:?}", e);
                let activated_vcpus: Vec<u8> = created_vcpus
                    .iter()
                    .filter(|&cpu_id| self.vcpu_infos[*cpu_id as usize].handle.is_some())
                    .copied()
                    .collect();
                if let Err(e) = self.exit_vcpus(&activated_vcpus) {
                    error!("try to rollback error, stop_vcpu: {:?}", e);
                }
                e
            })
            .map_err(VcpuResizeError::Vcpu)?;

        let mut cpu_ids_array = [0u8; (u8::MAX as usize) + 1];
        cpu_ids_array[..cpu_ids.len()].copy_from_slice(&cpu_ids[..cpu_ids.len()]);
@@ -882,23 +901,19 @@ mod hotplug {
        &mut self,
        vcpu_count: u8,
        upcall_client: Arc<UpcallClient<DevMgrService>>,
    ) -> std::result::Result<(), VcpuManagerError> {
    ) -> std::result::Result<(), VcpuResizeError> {
        info!("resize vcpu: delete");
        if vcpu_count == 0 {
            return Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::Vcpu0CanNotBeRemoved,
            ));
            return Err(VcpuResizeError::Vcpu0CanNotBeRemoved);
        }

        let mut cpu_ids = self.calculate_removable_vcpus();
        let cpu_num_to_be_del = (self.present_vcpus_count() - vcpu_count) as usize;
        if cpu_num_to_be_del >= cpu_ids.len() {
            return Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::LackRemovableVcpus(
                    cpu_ids.len() as u16,
                    cpu_num_to_be_del as u16,
                    self.present_vcpus_count() as u16,
                ),
            return Err(VcpuResizeError::LackRemovableVcpus(
                cpu_ids.len() as u16,
                cpu_num_to_be_del as u16,
                self.present_vcpus_count() as u16,
            ));
        }

@@ -924,7 +939,7 @@ mod hotplug {
        &self,
        _upcall_client: Arc<UpcallClient<DevMgrService>>,
        _request: DevMgrRequest,
    ) -> std::result::Result<(), VcpuManagerError> {
    ) -> std::result::Result<(), VcpuResizeError> {
        Ok(())
    }

@@ -933,7 +948,7 @@ mod hotplug {
        &self,
        upcall_client: Arc<UpcallClient<DevMgrService>>,
        request: DevMgrRequest,
    ) -> std::result::Result<(), VcpuManagerError> {
    ) -> std::result::Result<(), VcpuResizeError> {
        // This is used to fix clippy warnings.
        use dbs_upcall::{DevMgrResponse, UpcallClientRequest, UpcallClientResponse};

@@ -946,9 +961,14 @@ mod hotplug {
            Box::new(move |result| match result {
                UpcallClientResponse::DevMgr(response) => {
                    if let DevMgrResponse::CpuDev(resp) = response {
                        let result: VcpuResizeResult = if resp.result == 0 {
                            VcpuResizeResult::Success
                        } else {
                            VcpuResizeResult::Failed
                        };
                        vcpu_state_sender
                            .send(VcpuStateEvent::Hotplug((
                                resp.result,
                                result,
                                resp.info.apic_id_index,
                            )))
                            .unwrap();
@@ -957,7 +977,7 @@ mod hotplug {
                }
                UpcallClientResponse::UpcallReset => {
                    vcpu_state_sender
                        .send(VcpuStateEvent::Hotplug((0, 0)))
                        .send(VcpuStateEvent::Hotplug((VcpuResizeResult::Success, 0)))
                        .unwrap();
                    vcpu_state_event.write(1).unwrap();
                }
@@ -968,7 +988,6 @@ mod hotplug {
            }),
        )
        .map_err(VcpuResizeError::Upcall)
        .map_err(VcpuManagerError::VcpuResize)
    }

    /// Get removable vcpus.
@@ -993,16 +1012,19 @@ impl VcpuEpollHandler {
        while let Ok(event) = self.rx.try_recv() {
            match event {
                VcpuStateEvent::Hotplug((success, cpu_count)) => {
                    info!("get vcpu event, cpu_index {}", cpu_count);
                    self.process_cpu_action(success != 0, cpu_count);
                    info!(
                        "get vcpu event, cpu_index {} success {:?}",
                        cpu_count, success
                    );
                    self.process_cpu_action(success, cpu_count);
                }
            }
        }
    }

    fn process_cpu_action(&self, success: bool, _cpu_index: u32) {
    fn process_cpu_action(&self, result: VcpuResizeResult, _cpu_index: u32) {
        let mut vcpu_manager = self.vcpu_manager.lock().unwrap();
        if success {
        if result == VcpuResizeResult::Success {
            match vcpu_manager.get_vcpus_action() {
                VcpuAction::Hotplug => {
                    // Notify hotplug success
@@ -1362,12 +1384,7 @@ mod tests {

        // vcpu is already in hotplug process
        let res = vcpu_manager.resize_vcpu(1, None);
        assert!(matches!(
            res,
            Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::VcpuIsHotplugging
            ))
        ));
        assert!(matches!(res, Err(VcpuResizeError::VcpuIsHotplugging)));

        // clear vcpus action
        let cpu_ids = vec![0];
@@ -1377,9 +1394,7 @@ mod tests {
        let res = vcpu_manager.resize_vcpu(1, None);
        assert!(matches!(
            res,
            Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::UpdateNotAllowedPostBoot
            ))
            Err(VcpuResizeError::UpdateNotAllowedPostBoot)
        ));

        // init upcall channel
@@ -1397,20 +1412,10 @@ mod tests {

        // exceed max vcpu count
        let res = vcpu_manager.resize_vcpu(4, None);
        assert!(matches!(
            res,
            Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::ExpectedVcpuExceedMax
            ))
        ));
        assert!(matches!(res, Err(VcpuResizeError::ExpectedVcpuExceedMax)));

        // remove vcpu 0
        let res = vcpu_manager.resize_vcpu(0, None);
        assert!(matches!(
            res,
            Err(VcpuManagerError::VcpuResize(
                VcpuResizeError::Vcpu0CanNotBeRemoved
            ))
        ));
        assert!(matches!(res, Err(VcpuResizeError::Vcpu0CanNotBeRemoved)));
    }
}

@@ -4,6 +4,7 @@
use std::io::{self, Read, Seek, SeekFrom};
use std::ops::Deref;
use std::os::unix::io::RawFd;

use std::sync::{Arc, Mutex, RwLock};

use dbs_address_space::AddressSpace;
@@ -22,6 +23,8 @@ use vmm_sys_util::eventfd::EventFd;

#[cfg(all(feature = "hotplug", feature = "dbs-upcall"))]
use dbs_upcall::{DevMgrService, UpcallClient};
#[cfg(feature = "hotplug")]
use std::sync::mpsc::Sender;

use crate::address_space_manager::{
    AddressManagerError, AddressSpaceMgr, AddressSpaceMgrBuilder, GuestAddressSpaceImpl,
@@ -35,6 +38,8 @@ use crate::event_manager::EventManager;
use crate::kvm_context::KvmContext;
use crate::resource_manager::ResourceManager;
use crate::vcpu::{VcpuManager, VcpuManagerError};
#[cfg(feature = "hotplug")]
use crate::vcpu::{VcpuResizeError, VcpuResizeInfo};
#[cfg(target_arch = "aarch64")]
use dbs_arch::gic::Error as GICError;

@@ -795,7 +800,30 @@ impl Vm {
        } else if self.is_upcall_client_ready() {
            Ok(DeviceOpContext::create_hotplug_ctx(self, epoll_mgr))
        } else {
            Err(StartMicroVmError::UpcallNotReady)
            Err(StartMicroVmError::UpcallServerNotReady)
        }
    }

    /// Resize MicroVM vCPU number
    #[cfg(feature = "hotplug")]
    pub fn resize_vcpu(
        &mut self,
        config: VcpuResizeInfo,
        sync_tx: Option<Sender<bool>>,
    ) -> std::result::Result<(), VcpuResizeError> {
        if self.upcall_client().is_none() {
            Err(VcpuResizeError::UpcallClientMissing)
        } else if self.is_upcall_client_ready() {
            if let Some(vcpu_count) = config.vcpu_count {
                self.vcpu_manager()
                    .map_err(VcpuResizeError::Vcpu)?
                    .resize_vcpu(vcpu_count, sync_tx)?;

                self.vm_config.vcpu_count = vcpu_count;
            }
            Ok(())
        } else {
            Err(VcpuResizeError::UpcallServerNotReady)
        }
    }
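
A stand-alone sketch of the guard ordering in `Vm::resize_vcpu` above, with local stand-ins for the Dragonball types: a missing upcall client and an unready upcall server are reported before any resize work happens, and `None` leaves the vCPU count unchanged.

```rust
#[derive(Debug, PartialEq)]
enum ResizeError {
    UpcallClientMissing,
    UpcallServerNotReady,
}

struct Vm {
    has_upcall_client: bool,
    upcall_server_ready: bool,
    vcpu_count: u8,
}

impl Vm {
    fn resize_vcpu(&mut self, requested: Option<u8>) -> Result<(), ResizeError> {
        if !self.has_upcall_client {
            Err(ResizeError::UpcallClientMissing)
        } else if self.upcall_server_ready {
            if let Some(count) = requested {
                self.vcpu_count = count; // the real code drives hotplug here
            }
            Ok(())
        } else {
            Err(ResizeError::UpcallServerNotReady)
        }
    }
}

fn main() {
    let mut vm = Vm { has_upcall_client: true, upcall_server_ready: false, vcpu_count: 1 };
    assert_eq!(vm.resize_vcpu(Some(2)), Err(ResizeError::UpcallServerNotReady));
    vm.upcall_server_ready = true;
    assert!(vm.resize_vcpu(Some(2)).is_ok());
    assert_eq!(vm.vcpu_count, 2);
}
```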

@@ -825,7 +853,14 @@ impl Vm {

#[cfg(test)]
pub mod tests {
    use kvm_ioctls::VcpuExit;
    use linux_loader::cmdline::Cmdline;
    use test_utils::skip_if_not_root;
    use vm_memory::GuestMemory;
    use vmm_sys_util::tempfile::TempFile;

    use super::*;
    use crate::test_utils::tests::create_vm_for_test;

    impl Vm {
        pub fn set_instance_state(&mut self, mstate: InstanceState) {
@@ -841,4 +876,204 @@ pub mod tests {
        let epoll_manager = EpollManager::default();
        Vm::new(None, instance_info, epoll_manager).unwrap()
    }

    #[test]
    fn test_create_vm_instance() {
        skip_if_not_root!();
        let vm = create_vm_instance();
        assert!(vm.check_health().is_err());
        assert!(vm.kernel_config.is_none());
        assert!(vm.get_reset_eventfd().is_none());
        assert!(!vm.is_vm_initialized());
        assert!(!vm.is_vm_running());
        assert!(vm.reset_console().is_ok());
    }

    #[test]
    fn test_vm_init_guest_memory() {
        skip_if_not_root!();
        let vm_config = VmConfigInfo {
            vcpu_count: 1,
            max_vcpu_count: 3,
            cpu_pm: "off".to_string(),
            mem_type: "shmem".to_string(),
            mem_file_path: "".to_string(),
            mem_size_mib: 16,
            serial_path: None,
            cpu_topology: CpuTopology {
                threads_per_core: 1,
                cores_per_die: 1,
                dies_per_socket: 1,
                sockets: 1,
            },
            vpmu_feature: 0,
        };

        let mut vm = create_vm_instance();
        vm.set_vm_config(vm_config);
        assert!(vm.init_guest_memory().is_ok());
        let vm_memory = vm.address_space.vm_memory().unwrap();

        assert_eq!(vm_memory.num_regions(), 1);
        assert_eq!(vm_memory.last_addr(), GuestAddress(0xffffff));

        // Reconfiguring an already configured vm is ignored and just returns Ok.
        let vm_config = VmConfigInfo {
            vcpu_count: 1,
            max_vcpu_count: 3,
            cpu_pm: "off".to_string(),
            mem_type: "shmem".to_string(),
            mem_file_path: "".to_string(),
            mem_size_mib: 16,
            serial_path: None,
            cpu_topology: CpuTopology {
                threads_per_core: 1,
                cores_per_die: 1,
                dies_per_socket: 1,
                sockets: 1,
            },
            vpmu_feature: 0,
        };
        vm.set_vm_config(vm_config);
        assert!(vm.init_guest_memory().is_ok());
        let vm_memory = vm.address_space.vm_memory().unwrap();
        assert_eq!(vm_memory.num_regions(), 1);
        assert_eq!(vm_memory.last_addr(), GuestAddress(0xffffff));

        let obj_addr = GuestAddress(0xf0);
        vm_memory.write_obj(67u8, obj_addr).unwrap();
        let read_val: u8 = vm_memory.read_obj(obj_addr).unwrap();
        assert_eq!(read_val, 67u8);
    }

    #[test]
    fn test_vm_create_devices() {
        skip_if_not_root!();
        let epoll_mgr = EpollManager::default();
        let vmm = Arc::new(Mutex::new(crate::vmm::tests::create_vmm_instance(
            epoll_mgr.clone(),
        )));

        let mut guard = vmm.lock().unwrap();
        let vm = guard.get_vm_mut().unwrap();

        let vm_config = VmConfigInfo {
            vcpu_count: 1,
            max_vcpu_count: 3,
            cpu_pm: "off".to_string(),
            mem_type: "shmem".to_string(),
            mem_file_path: "".to_string(),
            mem_size_mib: 16,
            serial_path: None,
            cpu_topology: CpuTopology {
                threads_per_core: 1,
                cores_per_die: 1,
                dies_per_socket: 1,
                sockets: 1,
            },
            vpmu_feature: 0,
        };

        vm.set_vm_config(vm_config);
        assert!(vm.init_guest_memory().is_ok());
        assert!(vm.setup_interrupt_controller().is_ok());

        let vm_memory = vm.address_space.vm_memory().unwrap();
        assert_eq!(vm_memory.num_regions(), 1);
        assert_eq!(vm_memory.last_addr(), GuestAddress(0xffffff));

        let kernel_file = TempFile::new().unwrap();
        let cmd_line = Cmdline::new(64);

        vm.set_kernel_config(KernelConfigInfo::new(
            kernel_file.into_file(),
            None,
            cmd_line,
        ));

        vm.init_devices(epoll_mgr).unwrap();
    }

    #[test]
    fn test_vm_delete_devices() {
        skip_if_not_root!();
        let mut vm = create_vm_for_test();
        let epoll_mgr = EpollManager::default();

        vm.setup_interrupt_controller().unwrap();
        vm.init_devices(epoll_mgr).unwrap();
        assert!(vm.remove_devices().is_ok());
    }

    #[test]
    fn test_run_code() {
        skip_if_not_root!();

        use std::io::{self, Write};
        // This example is based on https://lwn.net/Articles/658511/
        let code = [
            0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */
            0x00, 0xd8, /* add %bl, %al */
            0x04, b'0', /* add $'0', %al */
            0xee, /* out %al, (%dx) */
            0xb0, b'\n', /* mov $'\n', %al */
            0xee, /* out %al, (%dx) */
            0xf4, /* hlt */
        ];
        let load_addr = GuestAddress(0x1000);
        let instance_info = Arc::new(RwLock::new(InstanceInfo::default()));
        let epoll_manager = EpollManager::default();
        let mut vm = Vm::new(None, instance_info, epoll_manager).unwrap();

        let vcpu_count = 1;
        let vm_config = VmConfigInfo {
            vcpu_count,
            max_vcpu_count: 1,
            cpu_pm: "off".to_string(),
            mem_type: "shmem".to_string(),
            mem_file_path: "".to_string(),
            mem_size_mib: 10,
            serial_path: None,
            cpu_topology: CpuTopology {
                threads_per_core: 1,
                cores_per_die: 1,
                dies_per_socket: 1,
                sockets: 1,
            },
            vpmu_feature: 0,
        };

        vm.set_vm_config(vm_config);
        vm.init_guest_memory().unwrap();

        let vm_memory = vm.address_space.vm_memory().unwrap();
        vm_memory.write_obj(code, load_addr).unwrap();

        let vcpu_fd = vm.vm_fd().create_vcpu(0).unwrap();
        let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
        assert_ne!(vcpu_sregs.cs.base, 0);
        assert_ne!(vcpu_sregs.cs.selector, 0);
        vcpu_sregs.cs.base = 0;
        vcpu_sregs.cs.selector = 0;
        vcpu_fd.set_sregs(&vcpu_sregs).unwrap();

        let mut vcpu_regs = vcpu_fd.get_regs().unwrap();

        vcpu_regs.rip = 0x1000;
        vcpu_regs.rax = 2;
        vcpu_regs.rbx = 3;
        vcpu_regs.rflags = 2;
        vcpu_fd.set_regs(&vcpu_regs).unwrap();

        match vcpu_fd.run().expect("run failed") {
            VcpuExit::IoOut(0x3f8, data) => {
                assert_eq!(data.len(), 1);
                io::stdout().write_all(data).unwrap();
            }
            VcpuExit::Hlt => {
                io::stdout().write_all(b"KVM_EXIT_HLT\n").unwrap();
            }
            r => panic!("unexpected exit reason: {:?}", r),
        }
    }
}

@@ -200,11 +200,10 @@ pub(crate) mod tests {

    use super::*;

    pub fn create_vmm_instance() -> Vmm {
    pub fn create_vmm_instance(epoll_manager: EpollManager) -> Vmm {
        let info = Arc::new(RwLock::new(InstanceInfo::default()));
        let event_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
        let seccomp_filter: BpfProgram = Vec::new();
        let epoll_manager = EpollManager::default();

        Vmm::new_with_epoll_manager(
            info,
@@ -221,6 +220,6 @@ pub(crate) mod tests {
    fn test_create_vmm_instance() {
        skip_if_not_root!();

        create_vmm_instance();
        create_vmm_instance(EpollManager::default());
    }
}

23 src/libs/Cargo.lock generated
@@ -110,14 +110,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "cgroups-rs"
version = "0.2.8"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b827f9d9f6c2fff719d25f5d44cbc8d2ef6df1ef00d055c5c14d5dc25529579"
checksum = "5b098e7c3a70d03c288fa0a96ccf13e770eb3d78c4cc0e1549b3c13215d5f965"
dependencies = [
 "libc",
 "log",
 "nix 0.23.1",
 "nix 0.25.1",
 "regex",
 "thiserror",
]

[[package]]
@@ -536,9 +537,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.124"
version = "0.2.139"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50"
checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"

[[package]]
name = "lock_api"
@@ -640,6 +641,18 @@ dependencies = [
 "memoffset",
]

[[package]]
name = "nix"
version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
dependencies = [
 "autocfg",
 "bitflags",
 "cfg-if",
 "libc",
]

[[package]]
name = "ntapi"
version = "0.3.7"

@@ -12,7 +12,7 @@ edition = "2018"

[dependencies]
byteorder = "1.4.3"
cgroups = { package = "cgroups-rs", version = "0.2.7" }
cgroups = { package = "cgroups-rs", version = "0.3.2" }
chrono = "0.4.0"
common-path = "=1.0.0"
fail = "0.5.0"

@@ -68,7 +68,7 @@ pub fn get_container_type(spec: &oci::Spec) -> Result<ContainerType, Error> {
pub fn get_shim_id_info() -> Result<ShimIdInfo, Error> {
    let spec = load_oci_spec()?;
    match get_container_type(&spec)? {
        ContainerType::PodSandbox => Ok(ShimIdInfo::Sandbox),
        ContainerType::PodSandbox | ContainerType::SingleContainer => Ok(ShimIdInfo::Sandbox),
        ContainerType::PodContainer => {
            for k in CRI_SANDBOX_ID_KEY_LIST {
                if let Some(sandbox_id) = spec.annotations.get(*k) {

@@ -266,7 +266,7 @@ pub const KATA_ANNO_CFG_HYPERVISOR_SHARED_FS: &str =
/// A sandbox annotation to specify the virtio-fs vhost-user daemon path.
pub const KATA_ANNO_CFG_HYPERVISOR_VIRTIO_FS_DAEMON: &str =
    "io.katacontainers.config.hypervisor.virtio_fs_daemon";
-/// A sandbox annotation to specify the cache mode for fs version cache or "none".
+/// A sandbox annotation to specify the cache mode for fs version cache.
pub const KATA_ANNO_CFG_HYPERVISOR_VIRTIO_FS_CACHE: &str =
    "io.katacontainers.config.hypervisor.virtio_fs_cache";
/// A sandbox annotation to specify the DAX cache size in MiB.

@@ -32,7 +32,7 @@ use regex::RegexSet;

use super::{default, ConfigOps, ConfigPlugin, TomlConfig};
use crate::annotations::KATA_ANNO_CFG_HYPERVISOR_PREFIX;
-use crate::{eother, resolve_path, validate_path};
+use crate::{eother, resolve_path, sl, validate_path};

mod dragonball;
pub use self::dragonball::{DragonballConfig, HYPERVISOR_NAME_DRAGONBALL};
@@ -210,6 +210,9 @@ pub struct BootInfo {
    /// Path to root device on host
    #[serde(default)]
    pub image: String,
+    /// Rootfs filesystem type.
+    #[serde(default)]
+    pub rootfs_type: String,
    /// Path to the firmware.
    ///
    /// If you want qemu to use the default firmware, leave this option empty.
@@ -767,7 +770,7 @@ pub struct SharedFsInfo {
    pub virtio_fs_extra_args: Vec<String>,

    /// Cache mode:
-    /// - none: Metadata, data, and pathname lookup are not cached in guest. They are always
+    /// - never: Metadata, data, and pathname lookup are not cached in guest. They are always
    ///   fetched from host and any changes are immediately pushed to host.
    /// - auto: Metadata and pathname lookup cache expires after a configured amount of time
    ///   (default is 1 second). Data is cached while the file is open (close to open consistency).
@@ -779,6 +782,10 @@ pub struct SharedFsInfo {
    #[serde(default)]
    pub virtio_fs_cache_size: u32,

+    /// Default size of virtqueues
+    #[serde(default)]
+    pub virtio_fs_queue_size: u32,
+
    /// Enable virtio-fs DAX window if true.
    #[serde(default)]
    pub virtio_fs_is_dax: bool,
@@ -846,6 +853,10 @@ impl SharedFsInfo {
        if self.virtio_fs_cache.is_empty() {
            self.virtio_fs_cache = default::DEFAULT_VIRTIO_FS_CACHE_MODE.to_string();
        }
+        if self.virtio_fs_cache == *"none" {
+            warn!(sl!(), "virtio-fs cache mode `none` is deprecated since Kata Containers 2.5.0 and will be removed in a future release, please use `never` instead. For more details please refer to https://github.com/kata-containers/kata-containers/issues/4234.");
+            self.virtio_fs_cache = default::DEFAULT_VIRTIO_FS_CACHE_MODE.to_string();
+        }
        if self.virtio_fs_is_dax && self.virtio_fs_cache_size == 0 {
            self.virtio_fs_cache_size = default::DEFAULT_VIRTIO_FS_DAX_SIZE_MB;
        }

@@ -19,6 +19,8 @@ pub(crate) const SANDBOX: &str = "sandbox";
// docker: a sandbox container
pub(crate) const PODSANDBOX: &str = "podsandbox";

+pub(crate) const SINGLE_CONTAINER: &str = "single_container";
+
const STATE_READY: &str = "ready";
const STATE_RUNNING: &str = "running";
const STATE_STOPPED: &str = "stopped";
@@ -45,6 +47,8 @@ pub enum ContainerType {
    PodContainer,
    /// A pod sandbox.
    PodSandbox,
+    /// A single container.
+    SingleContainer,
}

impl ContainerType {
@@ -64,6 +68,7 @@ impl Display for ContainerType {
        match self {
            ContainerType::PodContainer => write!(f, "{}", POD_CONTAINER),
            ContainerType::PodSandbox => write!(f, "{}", POD_SANDBOX),
+            ContainerType::SingleContainer => write!(f, "{}", SINGLE_CONTAINER),
        }
    }
}
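
As a quick check of the new arm (a sketch, not part of the change): formatting the variant yields the SINGLE_CONTAINER constant defined above.

    // Sketch: Display output for the new variant.
    assert_eq!(ContainerType::SingleContainer.to_string(), "single_container");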

@@ -69,7 +69,7 @@ pub fn container_type(spec: &oci::Spec) -> ContainerType {
        }
    }

-    ContainerType::PodSandbox
+    ContainerType::SingleContainer
}

/// Determine the k8s sandbox ID from OCI annotations.
@@ -269,7 +269,7 @@ mod tests {
        // default
        assert_eq!(
            container_type_with_id(&spec),
-            (ContainerType::PodSandbox, None)
+            (ContainerType::SingleContainer, None)
        );

        // crio sandbox

@@ -494,8 +494,8 @@ pub struct LinuxDevice {
pub struct LinuxDeviceCgroup {
    #[serde(default)]
    pub allow: bool,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub r#type: Option<String>,
+    #[serde(default, skip_serializing_if = "String::is_empty")]
+    pub r#type: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub major: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
@@ -1431,21 +1431,21 @@ mod tests {
            devices: vec![
                crate::LinuxDeviceCgroup {
                    allow: false,
-                    r#type: None,
+                    r#type: "".to_string(),
                    major: None,
                    minor: None,
                    access: "rwm".to_string(),
                },
                crate::LinuxDeviceCgroup {
                    allow: true,
-                    r#type: Some("c".to_string()),
+                    r#type: "c".to_string(),
                    major: Some(10),
                    minor: Some(229),
                    access: "rw".to_string(),
                },
                crate::LinuxDeviceCgroup {
                    allow: true,
-                    r#type: Some("b".to_string()),
+                    r#type: "b".to_string(),
                    major: Some(8),
                    minor: Some(0),
                    access: "r".to_string(),
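
The switch from Option<String> to String keeps the serialized form stable, because skip_serializing_if = "String::is_empty" omits the field exactly where Option::is_none used to. A minimal standalone sketch of that behavior (hypothetical struct, assuming serde and serde_json):

    use serde::Serialize;

    #[derive(Serialize)]
    struct DeviceCgroup {
        allow: bool,
        // An empty string is omitted from the JSON, matching the old
        // Option::is_none behavior of the `type` field.
        #[serde(skip_serializing_if = "String::is_empty")]
        r#type: String,
    }

    fn main() {
        let dc = DeviceCgroup { allow: false, r#type: String::new() };
        // Prints {"allow":false}; the empty `type` field is skipped.
        println!("{}", serde_json::to_string(&dc).unwrap());
    }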

src/libs/protocols/.gitignore (vendored, 3 lines changed)
@@ -7,8 +7,5 @@ src/empty.rs
src/health.rs
src/health_ttrpc.rs
src/health_ttrpc_async.rs
-src/image.rs
-src/image_ttrpc.rs
-src/image_ttrpc_async.rs
src/oci.rs
src/types.rs

@@ -157,30 +157,13 @@ fn real_main() -> Result<(), std::io::Error> {
    // generate async
    #[cfg(feature = "async")]
    {
-        codegen(
-            "src",
-            &[
-                "protos/agent.proto",
-                "protos/health.proto",
-                "protos/image.proto",
-            ],
-            true,
-        )?;
+        codegen("src", &["protos/agent.proto", "protos/health.proto"], true)?;

        fs::rename("src/agent_ttrpc.rs", "src/agent_ttrpc_async.rs")?;
        fs::rename("src/health_ttrpc.rs", "src/health_ttrpc_async.rs")?;
-        fs::rename("src/image_ttrpc.rs", "src/image_ttrpc_async.rs")?;
    }

-    codegen(
-        "src",
-        &[
-            "protos/agent.proto",
-            "protos/health.proto",
-            "protos/image.proto",
-        ],
-        false,
-    )?;
+    codegen("src", &["protos/agent.proto", "protos/health.proto"], false)?;

    // There is a message named 'Box' in oci.proto,
    // so there is a struct named 'Box'; we should replace Box<Self> with ::std::boxed::Box<Self>

@@ -1,31 +0,0 @@
-//
-// Copyright (c) 2021 Alibaba Inc.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-syntax = "proto3";
-
-option go_package = "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc";
-
-package grpc;
-
-// Image defines the public APIs for managing images.
-service Image {
-    // PullImage pulls an image with authentication config.
-    rpc PullImage(PullImageRequest) returns (PullImageResponse) {}
-}
-
-message PullImageRequest {
-    // Image name (e.g. docker.io/library/busybox:latest).
-    string image = 1;
-    // Unique image identifier, used to avoid duplication when unpacking the image layers.
-    string container_id = 2;
-    // Use USERNAME[:PASSWORD] for accessing the registry
-    string source_creds = 3;
-}
-
-message PullImageResponse {
-    // Reference to the image in use. For most runtimes, this should be an
-    // image ID or digest.
-    string image_ref = 1;
-}
@@ -15,10 +15,6 @@ pub mod health;
pub mod health_ttrpc;
#[cfg(feature = "async")]
pub mod health_ttrpc_async;
-pub mod image;
-pub mod image_ttrpc;
-#[cfg(feature = "async")]
-pub mod image_ttrpc_async;
pub mod oci;
pub mod trans;
pub mod types;

@@ -105,7 +105,7 @@ impl From<oci::LinuxDeviceCgroup> for crate::oci::LinuxDeviceCgroup {
    fn from(from: oci::LinuxDeviceCgroup) -> Self {
        crate::oci::LinuxDeviceCgroup {
            Allow: from.allow,
-            Type: from.r#type.map_or("".to_string(), |t| t as String),
+            Type: from.r#type,
            Major: from.major.map_or(0, |t| t),
            Minor: from.minor.map_or(0, |t| t),
            Access: from.access,
@@ -478,7 +478,7 @@ impl From<crate::oci::LinuxDeviceCgroup> for oci::LinuxDeviceCgroup {

        oci::LinuxDeviceCgroup {
            allow: from.get_Allow(),
-            r#type: Some(from.take_Type()),
+            r#type: from.take_Type(),
            major,
            minor,
            access: from.take_Access(),

src/runtime-rs/Cargo.lock (generated, 239 lines changed)
@@ -9,7 +9,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6"
dependencies = [
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -213,7 +213,7 @@ checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -268,7 +268,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2"
dependencies = [
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -401,14 +401,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "cgroups-rs"
-version = "0.2.9"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdae996d9638ba03253ffa1c93345a585974a97abbdeab9176c77922f3efc1e8"
+checksum = "5b098e7c3a70d03c288fa0a96ccf13e770eb3d78c4cc0e1549b3c13215d5f965"
dependencies = [
 "libc",
 "log",
- "nix 0.23.1",
+ "nix 0.25.1",
 "regex",
 "thiserror",
]

[[package]]
@@ -549,7 +550,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f877be4f7c9f246b183111634f75baa039715e3f46ce860677d3b19a69fb229c"
dependencies = [
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -731,7 +732,7 @@ checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -1002,7 +1003,7 @@ checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -1438,9 +1439,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
-version = "0.2.126"
+version = "0.2.139"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
+checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"

[[package]]
name = "linux-loader"
@@ -1550,14 +1551,14 @@ dependencies = [

[[package]]
name = "mio"
-version = "0.8.3"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799"
+checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
dependencies = [
 "libc",
 "log",
 "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys",
+ "windows-sys 0.45.0",
]

[[package]]
@@ -1657,6 +1658,18 @@ dependencies = [
 "memoffset",
]

+[[package]]
+name = "nix"
+version = "0.25.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
+dependencies = [
+ "autocfg",
+ "bitflags",
+ "cfg-if 1.0.0",
+ "libc",
+]
+
[[package]]
name = "no-std-compat"
version = "0.4.1"
@@ -1918,7 +1931,7 @@ dependencies = [
 "libc",
 "redox_syscall",
 "smallvec",
- "windows-sys",
+ "windows-sys 0.36.1",
]

[[package]]
@@ -1975,7 +1988,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -2012,9 +2025,9 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"

[[package]]
name = "proc-macro2"
-version = "1.0.39"
+version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f"
+checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
dependencies = [
 "unicode-ident",
]
@@ -2057,7 +2070,7 @@ dependencies = [
 "itertools",
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -2128,9 +2141,9 @@ dependencies = [

[[package]]
name = "quote"
-version = "1.0.18"
+version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
+checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500"
dependencies = [
 "proc-macro2",
]
@@ -2457,7 +2470,7 @@ checksum = "d3d8e8de557aee63c26b85b947f5e59b690d0454c753f3adeb5cd7835ab88391"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -2490,7 +2503,7 @@ checksum = "b2acd6defeddb41eb60bb468f8825d0cfd0c2a76bc03bfd235b6a1dc4f6a1ad5"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -2662,9 +2675,9 @@ checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"

[[package]]
name = "socket2"
-version = "0.4.4"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
+checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662"
dependencies = [
 "libc",
 "winapi",
@@ -2695,7 +2708,7 @@ dependencies = [
 "proc-macro2",
 "quote",
 "rustversion",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -2725,6 +2738,17 @@ dependencies = [
 "unicode-ident",
]

+[[package]]
+name = "syn"
+version = "2.0.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
[[package]]
name = "take_mut"
version = "0.2.2"
@@ -2776,7 +2800,7 @@ checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
]

[[package]]
@@ -2844,33 +2868,32 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"

[[package]]
name = "tokio"
-version = "1.19.1"
+version = "1.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95eec79ea28c00a365f539f1961e9278fbcaf81c0ff6aaf0e93c181352446948"
+checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105"
dependencies = [
 "autocfg",
 "bytes 1.1.0",
 "libc",
 "memchr",
 "mio",
 "num_cpus",
- "once_cell",
 "parking_lot 0.12.1",
 "pin-project-lite",
 "signal-hook-registry",
 "socket2",
 "tokio-macros",
- "winapi",
+ "windows-sys 0.48.0",
]

[[package]]
name = "tokio-macros"
-version = "1.8.0"
+version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484"
+checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 2.0.16",
]

[[package]]
@@ -3239,7 +3262,7 @@ dependencies = [
 "once_cell",
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
 "wasm-bindgen-shared",
]

@@ -3273,7 +3296,7 @@ checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da"
dependencies = [
 "proc-macro2",
 "quote",
- "syn",
+ "syn 1.0.96",
 "wasm-bindgen-backend",
 "wasm-bindgen-shared",
]
@@ -3353,43 +3376,175 @@ version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
dependencies = [
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_msvc",
+ "windows_aarch64_msvc 0.36.1",
+ "windows_i686_gnu 0.36.1",
+ "windows_i686_msvc 0.36.1",
+ "windows_x86_64_gnu 0.36.1",
+ "windows_x86_64_msvc 0.36.1",
]

+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets 0.42.2",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets 0.48.0",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
+dependencies = [
+ "windows_aarch64_gnullvm 0.42.2",
+ "windows_aarch64_msvc 0.42.2",
+ "windows_i686_gnu 0.42.2",
+ "windows_i686_msvc 0.42.2",
+ "windows_x86_64_gnu 0.42.2",
+ "windows_x86_64_gnullvm 0.42.2",
+ "windows_x86_64_msvc 0.42.2",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5"
+dependencies = [
+ "windows_aarch64_gnullvm 0.48.0",
+ "windows_aarch64_msvc 0.48.0",
+ "windows_i686_gnu 0.48.0",
+ "windows_i686_msvc 0.48.0",
+ "windows_x86_64_gnu 0.48.0",
+ "windows_x86_64_gnullvm 0.48.0",
+ "windows_x86_64_msvc 0.48.0",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"

+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
+
[[package]]
name = "windows_i686_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"

+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
+
[[package]]
name = "windows_i686_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"

+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"

+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.36.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"

+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
+
[[package]]
name = "zstd"
version = "0.11.2+zstd.1.5.2"

@@ -23,10 +23,23 @@ ARCH_FILE = $(ARCH_DIR)/$(ARCH)$(ARCH_FILE_SUFFIX)

ifeq ($(ARCH), s390x)
default:
-	@echo "s390x not support currently"
+	@echo "s390x is not currently supported"
	exit 0
test:
-	@echo "s390x not support currently"
+	@echo "s390x is not currently supported"
	exit 0
+install:
+	@echo "s390x is not currently supported"
+	exit 0
+else ifeq ($(ARCH), powerpc64le)
+default:
+	@echo "PowerPC 64 LE is not currently supported"
+	exit 0
+test:
+	@echo "PowerPC 64 LE is not currently supported"
+	exit 0
+install:
+	@echo "PowerPC 64 LE is not currently supported"
+	exit 0
else
##TARGET default: build code
@@ -34,6 +47,7 @@ default: runtime show-header
##TARGET test: run cargo tests
test:
	@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
+install: install-runtime install-configs
endif

ifeq (,$(realpath $(ARCH_FILE)))
@@ -79,6 +93,12 @@ DBVALIDHYPERVISORPATHS := []
PKGDATADIR := $(PREFIXDEPS)/share/$(PROJECT_DIR)
KERNELDIR := $(PKGDATADIR)
IMAGEPATH := $(PKGDATADIR)/$(IMAGENAME)

+ROOTFSTYPE_EXT4 := \"ext4\"
+ROOTFSTYPE_XFS := \"xfs\"
+ROOTFSTYPE_EROFS := \"erofs\"
+DEFROOTFSTYPE := $(ROOTFSTYPE_EXT4)
+
PKGLIBEXECDIR := $(LIBEXECDIR)/$(PROJECT_DIR)
FIRMWAREPATH :=
FIRMWAREVOLUMEPATH :=
@@ -119,6 +139,7 @@ DEFVALIDVIRTIOFSDAEMONPATHS := [\"$(DEFVIRTIOFSDAEMON)\"]
# if value is 0, DAX is not enabled
DEFVIRTIOFSCACHESIZE ?= 0
DEFVIRTIOFSCACHE ?= auto
+DEFVIRTIOFSQUEUESIZE ?= 1024
# Format example:
#   [\"-o\", \"arg1=xxx,arg2\", \"-o\", \"hello world\", \"--arg3=yyy\"]
#
@@ -209,6 +230,7 @@ USER_VARS += DBVALIDCTLPATHS
USER_VARS += SYSCONFIG
USER_VARS += IMAGENAME
USER_VARS += IMAGEPATH
+USER_VARS += DEFROOTFSTYPE
USER_VARS += MACHINETYPE
USER_VARS += KERNELDIR
USER_VARS += KERNELTYPE
@@ -256,6 +278,7 @@ USER_VARS += DEFVIRTIOFSDAEMON
USER_VARS += DEFVALIDVIRTIOFSDAEMONPATHS
USER_VARS += DEFVIRTIOFSCACHESIZE
USER_VARS += DEFVIRTIOFSCACHE
+USER_VARS += DEFVIRTIOFSQUEUESIZE
USER_VARS += DEFVIRTIOFSEXTRAARGS
USER_VARS += DEFENABLEANNOTATIONS
USER_VARS += DEFENABLEIOTHREADS
@@ -482,8 +505,6 @@ codecov: check_tarpaulin
codecov-html: check_tarpaulin
	cargo tarpaulin $(TARPAULIN_ARGS) -o Html

-install: install-runtime install-configs
-
install-runtime: runtime
	install -D $(TARGET_PATH) $(DESTDIR)$(BINDIR)/$(notdir $(TARGET_PATH))

src/runtime-rs/arch/powerpc64le-options.mk (new file, 15 lines)
@@ -0,0 +1,15 @@
+# Copyright (c) 2019-2022 Alibaba Cloud
+# Copyright (c) 2019-2022 Ant Group
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+MACHINETYPE := pseries
+KERNELPARAMS :=
+MACHINEACCELERATORS := "cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-large-decr=off,cap-ccf-assist=off"
+CPUFEATURES := pmu=off
+
+QEMUCMD := qemu-system-ppc64
+
+# dragonball binary name
+DBCMD := dragonball
@@ -17,6 +17,12 @@ ctlpath = "@DBCTLPATH@"
kernel = "@KERNELPATH_DB@"
image = "@IMAGEPATH@"

+# rootfs filesystem type:
+#   - ext4 (default)
+#   - xfs
+#   - erofs
+rootfs_type=@DEFROOTFSTYPE@
+
# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path"
@@ -136,6 +142,34 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_DB@"
# of shim, does not need an external virtiofsd process.
shared_fs = "@DBSHAREDFS@"

+# Default size of DAX cache in MiB
+virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
+
+# Extra args for virtiofsd daemon
+#
+# Format example:
+#   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
+# Examples:
+#   Set virtiofsd log level to debug : ["-o", "log_level=debug"] or ["-d"]
+#
+# see `virtiofsd -h` for possible options.
+virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
+
+# Cache mode:
+#
+#  - never
+#    Metadata, data, and pathname lookup are not cached in guest. They are
+#    always fetched from host and any changes are immediately pushed to host.
+#
+#  - auto
+#    Metadata and pathname lookup cache expires after a configured amount of
+#    time (default is 1 second). Data is cached while the file is open (close
+#    to open consistency).
+#
+#  - always
+#    Metadata, data, and pathname lookup are cached in guest and never expire.
+virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
+
# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.

@@ -18,7 +18,7 @@ serde_json = ">=1.0.9"
slog = "2.5.2"
slog-scope = "4.4.0"
ttrpc = { version = "0.6.1" }
-tokio = { version = "1.8.0", features = ["fs", "rt"] }
+tokio = { version = "1.28.1", features = ["fs", "rt"] }
url = "2.2.2"
nix = "0.24.2"

@@ -21,7 +21,7 @@ serde_json = ">=1.0.9"
slog = "2.5.2"
slog-scope = "4.4.0"
thiserror = "1.0"
-tokio = { version = "1.8.0", features = ["sync"] }
+tokio = { version = "1.28.1", features = ["sync", "fs"] }
vmm-sys-util = "0.11.0"
rand = "0.8.4"

@@ -101,7 +101,10 @@ impl DragonballInner {

        // get kernel params
        let mut kernel_params = KernelParams::new(self.config.debug_info.enable_debug);
-        kernel_params.append(&mut KernelParams::new_rootfs_kernel_params(&rootfs_driver));
+        kernel_params.append(&mut KernelParams::new_rootfs_kernel_params(
+            &rootfs_driver,
+            &self.config.boot_info.rootfs_type,
+        )?);
        kernel_params.append(&mut KernelParams::from_string(
            &self.config.boot_info.kernel_params,
        ));

@@ -314,7 +314,7 @@ impl VmmInstance {
                    return Ok(vmm_data);
                }
                Err(vmm_action_error) => {
-                    if let VmmActionError::UpcallNotReady = vmm_action_error {
+                    if let VmmActionError::UpcallServerNotReady = vmm_action_error {
                        std::thread::sleep(std::time::Duration::from_millis(10));
                        continue;
                    } else {

@@ -6,7 +6,10 @@

use anyhow::{anyhow, Result};

-use crate::{VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM};
+use crate::{
+    VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM, VM_ROOTFS_FILESYSTEM_EROFS,
+    VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS, VM_ROOTFS_ROOT_BLK, VM_ROOTFS_ROOT_PMEM,
+};
use kata_types::config::LOG_VPORT_OPTION;

// Port where the agent will send the logs. Logs are sent through the vsock in cases
// Port where the agent will send the logs. Logs are sent through the vsock in cases
|
||||
@@ -67,43 +70,49 @@ impl KernelParams {
|
||||
Self { params }
|
||||
}
|
||||
|
||||
pub(crate) fn new_rootfs_kernel_params(rootfs_driver: &str) -> Self {
|
||||
let params = match rootfs_driver {
|
||||
VM_ROOTFS_DRIVER_BLK => {
|
||||
vec![
|
||||
Param {
|
||||
key: "root".to_string(),
|
||||
value: "/dev/vda1".to_string(),
|
||||
},
|
||||
Param {
|
||||
key: "rootflags".to_string(),
|
||||
value: "data=ordered,errors=remount-ro ro".to_string(),
|
||||
},
|
||||
Param {
|
||||
key: "rootfstype".to_string(),
|
||||
value: "ext4".to_string(),
|
||||
},
|
||||
]
|
||||
}
|
||||
pub(crate) fn new_rootfs_kernel_params(rootfs_driver: &str, rootfs_type: &str) -> Result<Self> {
|
||||
let mut params = vec![];
|
||||
|
||||
match rootfs_driver {
|
||||
VM_ROOTFS_DRIVER_PMEM => {
|
||||
vec![
|
||||
Param {
|
||||
key: "root".to_string(),
|
||||
value: "/dev/pmem0p1".to_string(),
|
||||
},
|
||||
Param {
|
||||
key: "rootflags".to_string(),
|
||||
value: "data=ordered,errors=remount-ro,dax ro".to_string(),
|
||||
},
|
||||
Param {
|
||||
key: "rootfstype".to_string(),
|
||||
value: "ext4".to_string(),
|
||||
},
|
||||
]
|
||||
params.push(Param::new("root", VM_ROOTFS_ROOT_PMEM));
|
||||
match rootfs_type {
|
||||
VM_ROOTFS_FILESYSTEM_EXT4 | VM_ROOTFS_FILESYSTEM_XFS => {
|
||||
params.push(Param::new(
|
||||
"rootflags",
|
||||
"dax,data=ordered,errors=remount-ro ro",
|
||||
));
|
||||
}
|
||||
VM_ROOTFS_FILESYSTEM_EROFS => {
|
||||
params.push(Param::new("rootflags", "dax ro"));
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!("Unsupported rootfs type"));
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => vec![],
|
||||
};
|
||||
Self { params }
|
||||
VM_ROOTFS_DRIVER_BLK => {
|
||||
params.push(Param::new("root", VM_ROOTFS_ROOT_BLK));
|
||||
match rootfs_type {
|
||||
VM_ROOTFS_FILESYSTEM_EXT4 | VM_ROOTFS_FILESYSTEM_XFS => {
|
||||
params.push(Param::new("rootflags", "data=ordered,errors=remount-ro ro"));
|
||||
}
|
||||
VM_ROOTFS_FILESYSTEM_EROFS => {
|
||||
params.push(Param::new("rootflags", "ro"));
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!("Unsupported rootfs type"));
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!("Unsupported rootfs driver"));
|
||||
}
|
||||
}
|
||||
|
||||
params.push(Param::new("rootfstype", rootfs_type));
|
||||
|
||||
Ok(Self { params })
|
||||
}
|
||||
|
||||
pub(crate) fn append(&mut self, params: &mut KernelParams) {
|
||||
@@ -155,6 +164,12 @@ mod tests {

    use super::*;

+    use crate::{
+        VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM, VM_ROOTFS_FILESYSTEM_EROFS,
+        VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS, VM_ROOTFS_ROOT_BLK,
+        VM_ROOTFS_ROOT_PMEM,
+    };
+
    #[test]
    fn test_params() {
        let param1 = Param::new("", "");
@@ -190,4 +205,142 @@ mod tests {

        Ok(())
    }
+
+    #[derive(Debug)]
+    struct TestData<'a> {
+        rootfs_driver: &'a str,
+        rootfs_type: &'a str,
+        expect_params: KernelParams,
+        result: Result<()>,
+    }
+
+    #[test]
+    fn test_rootfs_kernel_params() {
+        let tests = &[
+            // EXT4
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
+                rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_PMEM),
+                        Param::new("rootflags", "dax,data=ordered,errors=remount-ro ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_EXT4),
+                    ]
+                    .to_vec(),
+                },
+                result: Ok(()),
+            },
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_BLK,
+                rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_BLK),
+                        Param::new("rootflags", "data=ordered,errors=remount-ro ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_EXT4),
+                    ]
+                    .to_vec(),
+                },
+                result: Ok(()),
+            },
+            // XFS
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
+                rootfs_type: VM_ROOTFS_FILESYSTEM_XFS,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_PMEM),
+                        Param::new("rootflags", "dax,data=ordered,errors=remount-ro ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_XFS),
+                    ]
+                    .to_vec(),
+                },
+                result: Ok(()),
+            },
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_BLK,
+                rootfs_type: VM_ROOTFS_FILESYSTEM_XFS,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_BLK),
+                        Param::new("rootflags", "data=ordered,errors=remount-ro ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_XFS),
+                    ]
+                    .to_vec(),
+                },
+                result: Ok(()),
+            },
+            // EROFS
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
+                rootfs_type: VM_ROOTFS_FILESYSTEM_EROFS,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_PMEM),
+                        Param::new("rootflags", "dax ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_EROFS),
+                    ]
+                    .to_vec(),
+                },
+                result: Ok(()),
+            },
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_BLK,
+                rootfs_type: VM_ROOTFS_FILESYSTEM_EROFS,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_BLK),
+                        Param::new("rootflags", "ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_EROFS),
+                    ]
+                    .to_vec(),
+                },
+                result: Ok(()),
+            },
+            // Unsupported rootfs driver
+            TestData {
+                rootfs_driver: "foo",
+                rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_BLK),
+                        Param::new("rootflags", "data=ordered,errors=remount-ro ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_EXT4),
+                    ]
+                    .to_vec(),
+                },
+                result: Err(anyhow!("Unsupported rootfs driver")),
+            },
+            // Unsupported rootfs type
+            TestData {
+                rootfs_driver: VM_ROOTFS_DRIVER_BLK,
+                rootfs_type: "foo",
+                expect_params: KernelParams {
+                    params: [
+                        Param::new("root", VM_ROOTFS_ROOT_BLK),
+                        Param::new("rootflags", "data=ordered,errors=remount-ro ro"),
+                        Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_EXT4),
+                    ]
+                    .to_vec(),
+                },
+                result: Err(anyhow!("Unsupported rootfs type")),
+            },
+        ];
+
+        for (i, t) in tests.iter().enumerate() {
+            let msg = format!("test[{}]: {:?}", i, t);
+            let result = KernelParams::new_rootfs_kernel_params(t.rootfs_driver, t.rootfs_type);
+            let msg = format!("{}, result: {:?}", msg, result);
+
+            if t.result.is_ok() {
+                assert!(result.is_ok(), "{}", msg);
+                assert_eq!(t.expect_params, result.unwrap());
+            } else {
+                let expected_error = format!("{}", t.result.as_ref().unwrap_err());
+                let actual_error = format!("{}", result.unwrap_err());
+                assert!(actual_error == expected_error, "{}", msg);
+            }
+        }
+    }
}
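
Taken together, the rewritten constructor yields kernel command lines like these (a worked sketch derived from the cases in the test table above):

    // virtio-blk + erofs:
    //   root=/dev/vda1 rootflags=ro rootfstype=erofs
    let blk_erofs = KernelParams::new_rootfs_kernel_params("virtio-blk", "erofs")?;

    // virtio-pmem + ext4:
    //   root=/dev/pmem0p1 rootflags=dax,data=ordered,errors=remount-ro ro rootfstype=ext4
    let pmem_ext4 = KernelParams::new_rootfs_kernel_params("virtio-pmem", "ext4")?;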

@@ -27,6 +27,16 @@ use kata_types::config::hypervisor::Hypervisor as HypervisorConfig;
// Config which driver to use as vm root dev
const VM_ROOTFS_DRIVER_BLK: &str = "virtio-blk";
const VM_ROOTFS_DRIVER_PMEM: &str = "virtio-pmem";

+// Configure the root device corresponding to the driver
+const VM_ROOTFS_ROOT_BLK: &str = "/dev/vda1";
+const VM_ROOTFS_ROOT_PMEM: &str = "/dev/pmem0p1";
+
+// Config which filesystem to use as rootfs type
+const VM_ROOTFS_FILESYSTEM_EXT4: &str = "ext4";
+const VM_ROOTFS_FILESYSTEM_XFS: &str = "xfs";
+const VM_ROOTFS_FILESYSTEM_EROFS: &str = "erofs";
+
// before using hugepages for VM, we need to mount hugetlbfs
// /dev/hugepages will be the mount point
// mkdir -p /dev/hugepages

@@ -14,7 +14,7 @@ anyhow = "^1.0"
async-trait = "0.1.48"
bitflags = "1.2.1"
byte-unit = "4.0.14"
-cgroups-rs = "0.2.9"
+cgroups-rs = "0.3.2"
futures = "0.3.11"
lazy_static = "1.4.0"
libc = ">=0.2.39"
@@ -28,7 +28,7 @@ serde = { version = "1.0.138", features = ["derive"] }
serde_json = "1.0.82"
slog = "2.5.2"
slog-scope = "4.4.0"
-tokio = { version = "1.8.0", features = ["process"] }
+tokio = { version = "1.28.1", features = ["process"] }
uuid = { version = "0.4", features = ["v4"] }

agent = { path = "../agent" }

@@ -9,6 +9,8 @@ mod utils;

use std::{
    collections::{HashMap, HashSet},
+    error::Error,
+    io,
    iter::FromIterator,
    sync::Arc,
};
@@ -24,6 +26,8 @@ use oci::LinuxResources;
use persist::sandbox_persist::Persist;
use tokio::sync::RwLock;

+const OS_ERROR_NO_SUCH_PROCESS: i32 = 3;
+
pub struct CgroupArgs {
    pub sid: String,
    pub config: TomlConfig,
@@ -69,7 +73,7 @@ impl CgroupsResource {
        // will either hold all the pod threads (sandbox_cgroup_only is true)
        // or only the virtual CPU ones (sandbox_cgroup_only is false).
        let hier = cgroups_rs::hierarchies::auto();
-        let cgroup_manager = CgroupBuilder::new(&config.path).build(hier);
+        let cgroup_manager = CgroupBuilder::new(&config.path).build(hier)?;

        // The shim configuration is requesting that we do not put all threads
        // into the sandbox resource controller.
@@ -77,7 +81,7 @@ impl CgroupsResource {
        // the vCPU threads will eventually make it there.
        let overhead_cgroup_manager = if !config.sandbox_cgroup_only {
            let hier = cgroups_rs::hierarchies::auto();
-            Some(CgroupBuilder::new(&config.overhead_path).build(hier))
+            Some(CgroupBuilder::new(&config.overhead_path).build(hier)?)
        } else {
            None
        };
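
The new `?` operators reflect an API change in cgroups-rs 0.3: building a cgroup is now fallible. A standalone sketch of the new shape (assuming cgroups-rs 0.3.2; the helper name is hypothetical):

    use cgroups_rs::{cgroup_builder::CgroupBuilder, Cgroup};

    fn build_sandbox_cgroup(path: &str) -> anyhow::Result<Cgroup> {
        let hier = cgroups_rs::hierarchies::auto();
        // In 0.3.x, build() returns Result<Cgroup> rather than Cgroup, so
        // hierarchy-creation failures surface here instead of panicking later.
        Ok(CgroupBuilder::new(path).build(hier)?)
    }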
@@ -109,7 +113,22 @@ impl CgroupsResource {
    /// overhead_cgroup_manager to the parent and then delete the cgroups.
    pub async fn delete(&self) -> Result<()> {
        for cg_pid in self.cgroup_manager.tasks() {
-            self.cgroup_manager.remove_task(cg_pid);
+            // For now, we can't guarantee that the thread in cgroup_manager still
+            // exists. Once it exits, we should ignore the error returned by
+            // remove_task and let it go.
+            if let Err(error) = self.cgroup_manager.remove_task(cg_pid) {
+                match error.source() {
+                    Some(err) => match err.downcast_ref::<io::Error>() {
+                        Some(e) => {
+                            if e.raw_os_error() != Some(OS_ERROR_NO_SUCH_PROCESS) {
+                                return Err(error.into());
+                            }
+                        }
+                        None => return Err(error.into()),
+                    },
+                    None => return Err(error.into()),
+                }
+            }
        }

        self.cgroup_manager
@@ -118,7 +137,7 @@ impl CgroupsResource {

        if let Some(overhead) = self.overhead_cgroup_manager.as_ref() {
            for cg_pid in overhead.tasks() {
-                overhead.remove_task(cg_pid);
+                overhead.remove_task(cg_pid)?;
            }
            overhead.delete().context("delete overhead")?;
        }
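
The ESRCH filter above follows a general pattern: walk the error's source() chain and downcast to io::Error to read the raw OS code. A condensed standalone sketch (hypothetical helper, not part of the change):

    use std::{error::Error as StdError, io};

    const ESRCH: i32 = 3; // "No such process"

    // True when `err`'s source is an io::Error carrying ESRCH, i.e. the task
    // exited before it could be moved out of the cgroup.
    fn is_no_such_process(err: &(dyn StdError + 'static)) -> bool {
        err.source()
            .and_then(|src| src.downcast_ref::<io::Error>())
            .and_then(io::Error::raw_os_error)
            == Some(ESRCH)
    }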

@@ -35,7 +35,7 @@ pub struct ShareVirtioFsStandaloneConfig {

    // virtio_fs_daemon is the virtio-fs vhost-user daemon path
    pub virtio_fs_daemon: String,
-    // virtio_fs_cache cache mode for fs version cache or "none"
+    // virtio_fs_cache cache mode for fs version cache
    pub virtio_fs_cache: String,
    // virtio_fs_extra_args passes options to virtiofsd daemon
    pub virtio_fs_extra_args: Vec<String>,

@@ -10,7 +10,7 @@ anyhow = "^1.0"
lazy_static = "1.4.0"
slog = "2.5.2"
slog-scope = "4.4.0"
-tokio = { version = "1.8.0", features = ["rt-multi-thread"] }
+tokio = { version = "1.28.1", features = ["rt-multi-thread"] }
hyper = { version = "0.14.20", features = ["stream", "server", "http1"] }
hyperlocal = "0.8"

@@ -19,7 +19,7 @@ slog = "2.5.2"
slog-scope = "4.4.0"
strum = { version = "0.24.0", features = ["derive"] }
thiserror = "^1.0"
-tokio = { version = "1.8.0", features = ["rt-multi-thread", "process", "fs"] }
+tokio = { version = "1.28.1", features = ["rt-multi-thread", "process", "fs"] }
ttrpc = { version = "0.6.1" }
persist = {path = "../../persist"}
agent = { path = "../../agent" }

@@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
anyhow = "^1.0"
async-trait = "0.1.48"
-tokio = { version = "1.8.0" }
+tokio = { version = "1.28.1" }

common = { path = "../common" }
kata-types = { path = "../../../../libs/kata-types" }

@@ -20,7 +20,7 @@ serde_derive = "1.0.27"
serde_json = "1.0.82"
slog = "2.5.2"
slog-scope = "4.4.0"
-tokio = { version = "1.8.0" }
+tokio = { version = "1.28.1" }
toml = "0.4.2"
url = "2.1.1"
async-std = "1.12.0"

@@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
anyhow = "^1.0"
async-trait = "0.1.48"
-tokio = { version = "1.8.0" }
+tokio = { version = "1.28.1" }

common = { path = "../common" }
kata-types = { path = "../../../../libs/kata-types" }

@@ -10,7 +10,7 @@ anyhow = "^1.0"
async-trait = "0.1.48"
slog = "2.5.2"
slog-scope = "4.4.0"
-tokio = { version = "1.8.0", features = ["rt-multi-thread"] }
+tokio = { version = "1.28.1", features = ["rt-multi-thread"] }
ttrpc = { version = "0.6.1" }

common = { path = "../runtimes/common" }

@@ -10,5 +10,5 @@ anyhow = "^1.0"
common = { path = "../runtimes/common" }
logging = { path = "../../../libs/logging"}
runtimes = { path = "../runtimes" }
-tokio = { version = "1.8.0", features = [ "rt", "rt-multi-thread" ] }
+tokio = { version = "1.28.1", features = [ "rt", "rt-multi-thread" ] }

@@ -27,7 +27,7 @@ slog-async = "2.5.2"
slog-scope = "4.4.0"
slog-stdlog = "4.1.0"
thiserror = "1.0.30"
-tokio = { version = "1.8.0", features = [ "rt", "rt-multi-thread" ] }
+tokio = { version = "1.28.1", features = [ "rt", "rt-multi-thread" ] }
unix_socket2 = "0.5.4"

kata-types = { path = "../../../libs/kata-types"}

@@ -38,7 +38,7 @@ impl ShimExecutor {
        let (container_type, id) = k8s::container_type_with_id(&spec);

        match container_type {
-            ContainerType::PodSandbox => {
+            ContainerType::PodSandbox | ContainerType::SingleContainer => {
                let address = self.socket_address(&self.args.id)?;
                let socket = new_listener(&address)?;
                let child_pid = self.create_shim_process(socket)?;

src/runtime/.gitignore (vendored, 1 line changed)
@@ -18,3 +18,4 @@ config-generated.go
/virtcontainers/hook/mock/hook
/virtcontainers/profile.cov
/virtcontainers/utils/supportfiles
+/virtcontainers/cpu_affinity_idx

@@ -56,9 +56,7 @@ BINLIBEXECLIST :=
BIN_PREFIX = $(PROJECT_TYPE)
PROJECT_DIR = $(PROJECT_TAG)
IMAGENAME = $(PROJECT_TAG).img
-IMAGETDXNAME = $(PROJECT_TAG)-tdx.img
INITRDNAME = $(PROJECT_TAG)-initrd.img
-INITRDSEVNAME = $(PROJECT_TAG)-initrd-sev.img

TARGET = $(BIN_PREFIX)-runtime
RUNTIME_OUTPUT = $(CURDIR)/$(TARGET)
@@ -99,9 +97,6 @@ GENERATED_VARS = \
		CONFIG_QEMU_IN \
		CONFIG_CLH_IN \
		CONFIG_FC_IN \
-		CONFIG_CLH_TDX_IN \
-		CONFIG_QEMU_TDX_IN \
-		CONFIG_QEMU_SEV_IN \
		$(USER_VARS)
SCRIPTS += $(COLLECT_SCRIPT)
SCRIPTS_DIR := $(BINDIR)
@@ -117,28 +112,14 @@ KERNELDIR := $(PKGDATADIR)

IMAGEPATH := $(PKGDATADIR)/$(IMAGENAME)
INITRDPATH := $(PKGDATADIR)/$(INITRDNAME)
-INITRDSEVPATH := $(PKGDATADIR)/$(INITRDSEVNAME)
-IMAGETDXPATH := $(PKGDATADIR)/$(IMAGETDXNAME)

+ROOTFSTYPE_EXT4 := \"ext4\"
+ROOTFSTYPE_XFS := \"xfs\"
+ROOTFSTYPE_EROFS := \"erofs\"
+DEFROOTFSTYPE := $(ROOTFSTYPE_EXT4)
+
FIRMWAREPATH :=
FIRMWAREVOLUMEPATH :=
-TDVFFIRMWAREPATH := $(PREFIXDEPS)/share/tdvf/OVMF_CODE.fd
-TDVFFIRMWAREVOLUMEPATH := $(PREFIXDEPS)/share/tdvf/OVMF_VARS.fd
-TDSHIMFIRMWAREPATH := ${PREFIXDEPS}/share/td-shim/td-shim.bin
-SEVFIRMWAREPATH := $(PREFIXDEPS)/share/ovmf/OVMF.fd
-
-AGENTCONFIGFILEPATH := /etc/agent-config.toml
-AGENTCONFIGFILEKERNELPARAM := agent.config_file=$(AGENTCONFIGFILEPATH)
-
-ROOTMEASURECONFIG ?= ""
-ROOTMEASURECONFIGTDX ?= ""
-AGENT_AA_KBC_PARAMS ?= ""
-AGENT_AA_KBC_PARAMS_TDX ?= ""
-AGENT_AA_KBC_PARAMS_SEV ?= ""
-TDXKERNELPARAMS := tdx_disable_filter agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS_TDX)
-TDXKERNELPARAMS_QEMU += $(TDXKERNELPARAMS) $(ROOTMEASURECONFIGTDX)
-TDXKERNELPARAMS_CLH += $(TDXKERNELPARAMS) $(ROOTMEASURECONFIG)
-SEVKERNELPARAMS := $(AGENTCONFIGFILEKERNELPARAM) agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS_SEV)
-KERNELPARAMS += $(ROOTMEASURECONFIG) agent.enable_signature_verification=false $(AGENT_AA_KBC_PARAMS)

# Name of default configuration file the runtime will use.
CONFIG_FILE = configuration.toml
@@ -157,9 +138,6 @@ HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVIS
QEMUPATH := $(QEMUBINDIR)/$(QEMUCMD)
QEMUVALIDHYPERVISORPATHS := [\"$(QEMUPATH)\"]

-QEMUTDXPATH := $(QEMUBINDIR)/$(QEMUTDXCMD)
-QEMUTDXVALIDHYPERVISORPATHS := [\"$(QEMUTDXPATH)\"]
-
QEMUVIRTIOFSPATH := $(QEMUBINDIR)/$(QEMUVIRTIOFSCMD)

CLHPATH := $(CLHBINDIR)/$(CLHCMD)
@@ -249,18 +227,6 @@ DEFSTATICRESOURCEMGMT ?= false

DEFBINDMOUNTS := []

-# Image Service Offload
-DEFSERVICEOFFLOAD ?= false
-
-# SEV Guest Pre-Attestation
-DEFGUESTPREATTESTATION ?= false
-DEFGUESTPREATTESTATIONPROXY ?= localhost:44444
-DEFGUESTPREATTESTATIONKEYSET ?= KEYSET-1
-DEFGUESTPREATTESTATIONSECRETGUID ?= 1ee27366-0c87-43a6-af48-28543eaf7cb0
-DEFGUESTPREATTESTATIONSECRETTYPE ?= connection
-DEFSEVCERTCHAIN ?= /opt/sev/cert_chain.cert
-DEFSEVGUESTPOLICY ?= 0
-
SED = sed

CLI_DIR = cmd
@@ -299,30 +265,6 @@ ifneq (,$(QEMUCMD))

	CONFIGS += $(CONFIG_QEMU)

-	CONFIG_FILE_QEMU_TDX = configuration-qemu-tdx.toml
-	CONFIG_QEMU_TDX = config/$(CONFIG_FILE_QEMU_TDX)
-	CONFIG_QEMU_TDX_IN = $(CONFIG_QEMU_TDX).in
-
-	CONFIG_PATH_QEMU_TDX = $(abspath $(CONFDIR)/$(CONFIG_FILE_QEMU_TDX))
-	CONFIG_PATHS += $(CONFIG_PATH_QEMU_TDX)
-
-	SYSCONFIG_QEMU_TDX = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_QEMU_TDX))
-	SYSCONFIG_PATHS += $(SYSCONFIG_QEMU_TDX)
-
-	CONFIGS += $(CONFIG_QEMU_TDX)
-
-	CONFIG_FILE_QEMU_SEV = configuration-qemu-sev.toml
-	CONFIG_QEMU_SEV = config/$(CONFIG_FILE_QEMU_SEV)
-	CONFIG_QEMU_SEV_IN = $(CONFIG_QEMU_SEV).in
-
-	CONFIG_PATH_QEMU_SEV = $(abspath $(CONFDIR)/$(CONFIG_FILE_QEMU_SEV))
-	CONFIG_PATHS += $(CONFIG_PATH_QEMU_SEV)
-
-	SYSCONFIG_QEMU_SEV = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_QEMU_SEV))
-	SYSCONFIG_PATHS += $(SYSCONFIG_QEMU_SEV)
-
-	CONFIGS += $(CONFIG_QEMU_SEV)
-
	# qemu-specific options (all should be suffixed by "_QEMU")
	DEFBLOCKSTORAGEDRIVER_QEMU := virtio-scsi
	DEFBLOCKDEVICEAIO_QEMU := io_uring
@@ -330,14 +272,6 @@ ifneq (,$(QEMUCMD))
	KERNELTYPE = uncompressed
	KERNELNAME = $(call MAKE_KERNEL_NAME,$(KERNELTYPE))
	KERNELPATH = $(KERNELDIR)/$(KERNELNAME)
-
-	KERNELTDXTYPE = compressed
-	KERNELTDXNAME = $(call MAKE_KERNEL_TDX_NAME,$(KERNELTDXTYPE))
-	KERNELTDXPATH = $(KERNELDIR)/$(KERNELTDXNAME)
-
-	KERNELSEVTYPE = compressed
-	KERNELSEVNAME = $(call MAKE_KERNEL_SEV_NAME,$(KERNELSEVTYPE))
-	KERNELSEVPATH = $(KERNELDIR)/$(KERNELSEVNAME)
endif

ifneq (,$(CLHCMD))
@@ -355,28 +289,12 @@ ifneq (,$(CLHCMD))

	CONFIGS += $(CONFIG_CLH)

-	CONFIG_FILE_CLH_TDX = configuration-clh-tdx.toml
-	CONFIG_CLH_TDX = config/$(CONFIG_FILE_CLH_TDX)
-	CONFIG_CLH_TDX_IN = $(CONFIG_CLH_TDX).in
-
-	CONFIG_PATH_CLH_TDX = $(abspath $(CONFDIR)/$(CONFIG_FILE_CLH_TDX))
-	CONFIG_PATHS += $(CONFIG_PATH_CLH_TDX)
-
-	SYSCONFIG_CLH_TDX = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_CLH_TDX))
-	SYSCONFIG_PATHS += $(SYSCONFIG_CLH_TDX)
-
-	CONFIGS += $(CONFIG_CLH_TDX)
-
	# CLH-specific options (all should be suffixed by "_CLH")
	# currently, huge pages are required for virtiofsd support
	DEFNETWORKMODEL_CLH := tcfilter
	KERNELTYPE_CLH = uncompressed
	KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH))
	KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
-
-	KERNELTDXTYPE_CLH = compressed
-	KERNELTDXNAME_CLH = $(call MAKE_KERNEL_TDX_NAME,$(KERNELTDXTYPE_CLH))
-	KERNELTDXPATH_CLH = $(KERNELDIR)/$(KERNELTDXNAME_CLH)
endif

ifneq (,$(FCCMD))
@@ -498,12 +416,9 @@ USER_VARS += FCVALIDJAILERPATHS
USER_VARS += SYSCONFIG
USER_VARS += IMAGENAME
USER_VARS += IMAGEPATH
-USER_VARS += IMAGETDXNAME
-USER_VARS += IMAGETDXPATH
USER_VARS += INITRDNAME
USER_VARS += INITRDPATH
-USER_VARS += INITRDSEVNAME
-USER_VARS += INITRDSEVPATH
+USER_VARS += DEFROOTFSTYPE
USER_VARS += MACHINETYPE
USER_VARS += KERNELDIR
USER_VARS += KERNELTYPE
@@ -512,27 +427,15 @@ USER_VARS += KERNELTYPE_ACRN
USER_VARS += KERNELTYPE_CLH
USER_VARS += KERNELPATH_ACRN
USER_VARS += KERNELPATH
-USER_VARS += KERNELTDXPATH
-USER_VARS += KERNELSEVPATH
USER_VARS += KERNELPATH_CLH
-USER_VARS += KERNELTDXPATH_CLH
USER_VARS += KERNELPATH_FC
USER_VARS += KERNELVIRTIOFSPATH
USER_VARS += FIRMWAREPATH
USER_VARS += FIRMWAREVOLUMEPATH
-USER_VARS += TDSHIMFIRMWAREPATH
-USER_VARS += TDVFFIRMWAREPATH
-USER_VARS += TDVFFIRMWAREVOLUMEPATH
-USER_VARS += SEVFIRMWAREPATH
USER_VARS += MACHINEACCELERATORS
USER_VARS += CPUFEATURES
-USER_VARS += TDXCPUFEATURES
USER_VARS += DEFMACHINETYPE_CLH
USER_VARS += KERNELPARAMS
-USER_VARS += TDXKERNELPARAMS
-USER_VARS += TDXKERNELPARAMS_QEMU
-USER_VARS += TDXKERNELPARAMS_CLH
-USER_VARS += SEVKERNELPARAMS
USER_VARS += LIBEXECDIR
USER_VARS += LOCALSTATEDIR
USER_VARS += PKGDATADIR
@@ -552,8 +455,6 @@ USER_VARS += QEMUPATH
USER_VARS += QEMUVALIDHYPERVISORPATHS
USER_VARS += QEMUVIRTIOFSCMD
USER_VARS += QEMUVIRTIOFSPATH
-USER_VARS += QEMUTDXPATH
-USER_VARS += QEMUTDXVALIDHYPERVISORPATHS
USER_VARS += RUNTIME_NAME
USER_VARS += SHAREDIR
USER_VARS += SYSCONFDIR
@@ -604,14 +505,6 @@ USER_VARS += DEFSTATICRESOURCEMGMT_FC
USER_VARS += DEFBINDMOUNTS
USER_VARS += DEFVFIOMODE
USER_VARS += BUILDFLAGS
-USER_VARS += DEFSERVICEOFFLOAD
-USER_VARS += DEFGUESTPREATTESTATION
-USER_VARS += DEFGUESTPREATTESTATIONPROXY
-USER_VARS += DEFGUESTPREATTESTATIONKEYSET
-USER_VARS += DEFGUESTPREATTESTATIONSECRETGUID
-USER_VARS += DEFGUESTPREATTESTATIONSECRETTYPE
-USER_VARS += DEFSEVCERTCHAIN
-USER_VARS += DEFSEVGUESTPOLICY


V = @
@@ -649,7 +542,7 @@ endef

all: runtime containerd-shim-v2 monitor

-# Targets that depend on .git-commit can use $(shell cat .git-commit) to get a
+# Targets that depend on .git-commit can use $(shell git rev-parse HEAD) to get a
# git revision string. They will only be rebuilt if the revision string
# actually changes.
.PHONY: .git-commit.tmp
@@ -690,14 +583,6 @@ define MAKE_KERNEL_NAME
$(if $(findstring uncompressed,$1),vmlinux.container,vmlinuz.container)
endef

-define MAKE_KERNEL_TDX_NAME
-$(if $(findstring uncompressed,$1),vmlinux-tdx.container,vmlinuz-tdx.container)
-endef
-
-define MAKE_KERNEL_SEV_NAME
-$(if $(findstring uncompressed,$1),vmlinux-sev.container,vmlinuz-sev.container)
-endef
-
define MAKE_KERNEL_VIRTIOFS_NAME
$(if $(findstring uncompressed,$1),vmlinux-virtiofs.container,vmlinuz-virtiofs.container)
endef
@@ -712,7 +597,7 @@ $(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)

$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
	$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && go build \
-		--ldflags "-X main.GitCommit=$(shell cat .git-commit)" $(BUILDFLAGS) -o $@ .)
+		--ldflags "-X main.GitCommit=$(shell git rev-parse HEAD)" $(BUILDFLAGS) -o $@ .)

.PHONY: \
	check \
@@ -733,7 +618,7 @@ GENERATED_FILES += $(CONFIGS)

$(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit
	$(QUIET_GENERATE)$(SED) \
-		-e "s|@COMMIT@|$(shell cat .git-commit)|g" \
+		-e "s|@COMMIT@|$(shell git rev-parse HEAD)|g" \
		$(foreach v,$(GENERATED_VARS),-e "s|@$v@|$($v)|g") \
		$< > $@
@@ -860,7 +745,7 @@ show-variables:
	@printf "\n"

show-header: .git-commit
-	@printf "%s - version %s (commit %s)\n\n" $(TARGET) $(VERSION) $(shell cat .git-commit)
+	@printf "%s - version %s (commit %s)\n\n" $(TARGET) $(VERSION) $(shell git rev-parse HEAD)

show-arches: show-header
	@printf "Supported architectures (possible values for ARCH variable):\n\n"

@@ -9,10 +9,8 @@ MACHINETYPE := q35
KERNELPARAMS :=
MACHINEACCELERATORS :=
CPUFEATURES := pmu=off
-TDXCPUFEATURES := -vmx-rdseed-exit,pmu=off

QEMUCMD := qemu-system-x86_64
-QEMUTDXCMD := qemu-system-x86_64-tdx

# Firecracker binary name
FCCMD := firecracker

Some files were not shown because too many files have changed in this diff.