mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-02-28 01:32:13 +00:00
Compare commits
262 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6aff5f300a | ||
|
|
e5d5284761 | ||
|
|
6f7ab31860 | ||
|
|
f31c1b121e | ||
|
|
449103c7bf | ||
|
|
b7051890af | ||
|
|
5ce2c1010a | ||
|
|
65f2bfb8c4 | ||
|
|
0e86a96157 | ||
|
|
567b4d5788 | ||
|
|
0015c8ef51 | ||
|
|
eb07f5ef5e | ||
|
|
7cc81ce867 | ||
|
|
265322990a | ||
|
|
064b45a2fa | ||
|
|
d2866081d2 | ||
|
|
2206e2dd5c | ||
|
|
66c600f8d8 | ||
|
|
a98985fab8 | ||
|
|
af49252c69 | ||
|
|
0b3d193730 | ||
|
|
e4045ff29a | ||
|
|
bcaf7fc3b4 | ||
|
|
9f738f0d05 | ||
|
|
648265d80e | ||
|
|
02b9fd6e95 | ||
|
|
b060fb5b31 | ||
|
|
2cf94ae717 | ||
|
|
a56b15112a | ||
|
|
1072658219 | ||
|
|
3b72e9ffab | ||
|
|
83b3a681f4 | ||
|
|
203d9e7803 | ||
|
|
08d2f6bfe4 | ||
|
|
a7179be31d | ||
|
|
dded329d26 | ||
|
|
7040fb8c50 | ||
|
|
f087044ecb | ||
|
|
5282701b5b | ||
|
|
3c0171df3d | ||
|
|
646d7ea4fb | ||
|
|
ca80301b4b | ||
|
|
4477b4c9dc | ||
|
|
09c5ca8032 | ||
|
|
c1247cc254 | ||
|
|
3b62eb4695 | ||
|
|
eaedd21277 | ||
|
|
2056eda5f0 | ||
|
|
32c3e55cde | ||
|
|
b83d4e1528 | ||
|
|
dac07239f5 | ||
|
|
3827b5f9f2 | ||
|
|
deb4627558 | ||
|
|
c40b3b4ce7 | ||
|
|
be9385342e | ||
|
|
8260ce8d15 | ||
|
|
25e0e2fb35 | ||
|
|
b6b8524ab7 | ||
|
|
e8a3f8571e | ||
|
|
ff04154fdb | ||
|
|
d94b285189 | ||
|
|
5d0f74cd70 | ||
|
|
bf2f0ea2ca | ||
|
|
4025ef7193 | ||
|
|
856a1f72c6 | ||
|
|
7f71eac6de | ||
|
|
dafff26f01 | ||
|
|
aa487307e8 | ||
|
|
78bbc51ff0 | ||
|
|
29413021e5 | ||
|
|
171d298dea | ||
|
|
489afffd8c | ||
|
|
e874c8fa2e | ||
|
|
806e959b01 | ||
|
|
27947cbb0b | ||
|
|
fa4af09658 | ||
|
|
e4997760f1 | ||
|
|
09f523c815 | ||
|
|
e77d44614b | ||
|
|
7061272b4e | ||
|
|
de848c1458 | ||
|
|
28601b51d2 | ||
|
|
f2b8c6619d | ||
|
|
4161fa3792 | ||
|
|
7506d1ec29 | ||
|
|
647dad2a00 | ||
|
|
e7b4e5e386 | ||
|
|
1a1e62b968 | ||
|
|
eb0bc5007c | ||
|
|
04df85a44f | ||
|
|
a554541495 | ||
|
|
c7cf26fa32 | ||
|
|
37b907dfbc | ||
|
|
ff498c55d1 | ||
|
|
529660fafb | ||
|
|
704da86e9b | ||
|
|
8801554889 | ||
|
|
1c2d69ded7 | ||
|
|
b4d61f887b | ||
|
|
6003608fe6 | ||
|
|
cf2d5ff4c1 | ||
|
|
d53eb61856 | ||
|
|
8a79b1449e | ||
|
|
9d44abb406 | ||
|
|
f2ed8dc568 | ||
|
|
ff06ef0bbc | ||
|
|
cb0fb91bdd | ||
|
|
e9d6179b28 | ||
|
|
e8836fafaa | ||
|
|
67ba0ad0ad | ||
|
|
724b2c612c | ||
|
|
1d6c1d1621 | ||
|
|
d511820974 | ||
|
|
543c90f145 | ||
|
|
65dc12d791 | ||
|
|
2ea521db5e | ||
|
|
93453c37d6 | ||
|
|
6c5e053dd5 | ||
|
|
85979021b3 | ||
|
|
e71c7ab932 | ||
|
|
c9d1a758cd | ||
|
|
05cd1cc7a0 | ||
|
|
7990d3a154 | ||
|
|
cfbca4fe0d | ||
|
|
26446d1edb | ||
|
|
ad574b7e10 | ||
|
|
757f37d956 | ||
|
|
6d56abbdad | ||
|
|
3e6c32c3c8 | ||
|
|
ddb8a94677 | ||
|
|
6c1a2f01f8 | ||
|
|
49696bbdf2 | ||
|
|
db75b5f3c4 | ||
|
|
9250858c3e | ||
|
|
ada53744ea | ||
|
|
f18e35014f | ||
|
|
c0919d6f45 | ||
|
|
743a765525 | ||
|
|
09cce86cc7 | ||
|
|
9e1b6064dc | ||
|
|
6a0e403acf | ||
|
|
2d19f3fbd7 | ||
|
|
e3e3873857 | ||
|
|
0590aab3e6 | ||
|
|
33d08a8417 | ||
|
|
078a1147a6 | ||
|
|
b7da1291ea | ||
|
|
ec695f67e1 | ||
|
|
ef3f6515cf | ||
|
|
dd12089e0d | ||
|
|
f3f3caa80a | ||
|
|
75aee526a9 | ||
|
|
c270df7a9c | ||
|
|
e94490232e | ||
|
|
e3318a04f7 | ||
|
|
05848d0c34 | ||
|
|
4fd820abd2 | ||
|
|
0b83c8549a | ||
|
|
795c5dc0ff | ||
|
|
5bda197e9d | ||
|
|
99690ab202 | ||
|
|
259ec408b5 | ||
|
|
16130e473c | ||
|
|
f0187ff969 | ||
|
|
f2bfc306a2 | ||
|
|
4b2e725d03 | ||
|
|
c605fff4c1 | ||
|
|
dd23beeb05 | ||
|
|
f2c5f18952 | ||
|
|
0e20f60534 | ||
|
|
a32b21bd32 | ||
|
|
25e3cab028 | ||
|
|
ad92d73e43 | ||
|
|
d66c214ae7 | ||
|
|
afc1c1a782 | ||
|
|
29bb9de864 | ||
|
|
4ec355fb78 | ||
|
|
2c89828749 | ||
|
|
893fd2b59c | ||
|
|
fe7ef878d2 | ||
|
|
30ec78b19a | ||
|
|
e0aa54301f | ||
|
|
ac33a389c0 | ||
|
|
db7b2f7aaa | ||
|
|
dd8605917b | ||
|
|
81d23a1865 | ||
|
|
a8432880f8 | ||
|
|
c4fb6fbda2 | ||
|
|
fb44edc22f | ||
|
|
c9df743dab | ||
|
|
ce19419d72 | ||
|
|
0582a9c75b | ||
|
|
d60b548d61 | ||
|
|
7610b34426 | ||
|
|
d574d37c4b | ||
|
|
d8961cbd4a | ||
|
|
2c5b3a5c20 | ||
|
|
27d75f93e2 | ||
|
|
b0cdf4eb0d | ||
|
|
bcdc4fde10 | ||
|
|
6a3ed38140 | ||
|
|
3adf9e250f | ||
|
|
f7e0d6313b | ||
|
|
2d552800f2 | ||
|
|
44afb4aa5f | ||
|
|
7aeaf2502a | ||
|
|
9320c2e484 | ||
|
|
959a277dc5 | ||
|
|
d5b4da7331 | ||
|
|
9cb12dfa88 | ||
|
|
b08c019003 | ||
|
|
0f2a4d202e | ||
|
|
02075f73e9 | ||
|
|
2bab0f31d7 | ||
|
|
81972f6ffc | ||
|
|
779754dcf6 | ||
|
|
f9862e054c | ||
|
|
6a4919eeb9 | ||
|
|
af5492e773 | ||
|
|
5fbbff9e5e | ||
|
|
7127178acc | ||
|
|
beab17f765 | ||
|
|
d783ddaf03 | ||
|
|
5bc37e39d5 | ||
|
|
c341234c0b | ||
|
|
3beb460a97 | ||
|
|
445b389b16 | ||
|
|
6ec7971f7a | ||
|
|
4df66568cf | ||
|
|
cf372f41bf | ||
|
|
671d9af456 | ||
|
|
eeb467bdc2 | ||
|
|
b1909e940e | ||
|
|
36093e86e0 | ||
|
|
587f4d45de | ||
|
|
a28bf266f9 | ||
|
|
388cd7dde4 | ||
|
|
275c498dc9 | ||
|
|
d3fb6bfd35 | ||
|
|
bdbee78517 | ||
|
|
baa8d9d99c | ||
|
|
b8d7a8c546 | ||
|
|
6b065f5609 | ||
|
|
08eaa60b59 | ||
|
|
e517167825 | ||
|
|
53659f1ede | ||
|
|
35f6be97df | ||
|
|
3bb1a67d80 | ||
|
|
d2d35d2dcc | ||
|
|
adda401a8c | ||
|
|
b7928f465e | ||
|
|
d4f664b73b | ||
|
|
cbf0b7ca7b | ||
|
|
562820449e | ||
|
|
380f8ad03f | ||
|
|
8231c6c4a3 | ||
|
|
6fdb262dca | ||
|
|
62fd84dfd8 | ||
|
|
0430794952 | ||
|
|
4cb4e44234 | ||
|
|
61687992f4 | ||
|
|
857d2bbc8e |
@@ -8,7 +8,7 @@
|
||||
script_dir=$(dirname "$(readlink -f "$0")")
|
||||
parent_dir=$(realpath "${script_dir}/../..")
|
||||
cidir="${parent_dir}/ci"
|
||||
source "${cidir}/lib.sh"
|
||||
source "${cidir}/../tests/common.bash"
|
||||
|
||||
cargo_deny_file="${script_dir}/action.yaml"
|
||||
|
||||
|
||||
56
.github/workflows/basic-ci-amd64.yaml
vendored
56
.github/workflows/basic-ci-amd64.yaml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'dragonball', 'qemu', 'stratovirt', 'cloud-hypervisor', 'qemu-runtime-rs']
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
@@ -62,7 +62,7 @@ jobs:
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'cloud-hypervisor', 'dragonball', 'qemu', 'stratovirt']
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
@@ -104,7 +104,7 @@ jobs:
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'qemu', 'dragonball', 'stratovirt']
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
@@ -138,7 +138,7 @@ jobs:
|
||||
run: bash tests/integration/nydus/gha-run.sh run
|
||||
|
||||
run-runk:
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
steps:
|
||||
@@ -168,6 +168,37 @@ jobs:
|
||||
- name: Run runk tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/runk/gha-run.sh run
|
||||
run-stdio:
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/stdio/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/stdio/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run stdio tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/stdio/gha-run.sh
|
||||
|
||||
run-tracing:
|
||||
strategy:
|
||||
@@ -177,6 +208,7 @@ jobs:
|
||||
- clh # cloud-hypervisor
|
||||
- qemu
|
||||
# TODO: enable me when https://github.com/kata-containers/kata-containers/issues/9763 is fixed
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
env:
|
||||
@@ -216,9 +248,10 @@ jobs:
|
||||
vmm:
|
||||
- clh
|
||||
- qemu
|
||||
exclude:
|
||||
# TODO: enable with clh when https://github.com/kata-containers/kata-containers/issues/9764 is fixed
|
||||
- vmm: clh
|
||||
# TODO: enable with clh when https://github.com/kata-containers/kata-containers/issues/9764 is fixed
|
||||
# TODO: enable with qemu when https://github.com/kata-containers/kata-containers/issues/9851 is fixed
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2304
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
@@ -258,7 +291,7 @@ jobs:
|
||||
vmm:
|
||||
- clh
|
||||
- qemu
|
||||
runs-on: garm-ubuntu-2304-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
@@ -301,7 +334,10 @@ jobs:
|
||||
- dragonball
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
runs-on: garm-ubuntu-2304-smaller
|
||||
# TODO: enable with clh when https://github.com/kata-containers/kata-containers/issues/9852 is fixed
|
||||
exclude:
|
||||
- vmm: clh
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
@@ -333,7 +369,9 @@ jobs:
|
||||
run: bash tests/integration/nerdctl/gha-run.sh run
|
||||
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/nerdctl/gha-run.sh collect-artifacts
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@v4
|
||||
|
||||
@@ -40,10 +40,6 @@ jobs:
|
||||
- shim-v2
|
||||
- virtiofsd
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R $USER:$USER $GITHUB_WORKSPACE
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@v3
|
||||
@@ -68,7 +64,7 @@ jobs:
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
@@ -36,16 +36,11 @@ jobs:
|
||||
stage:
|
||||
- ${{ inputs.stage }}
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R $USER:$USER $GITHUB_WORKSPACE
|
||||
|
||||
- name: Prepare the self-hosted runner
|
||||
run: |
|
||||
${HOME}/scripts/prepare_runner.sh
|
||||
sudo rm -rf $GITHUB_WORKSPACE/*
|
||||
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@v3
|
||||
@@ -70,8 +65,7 @@ jobs:
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
sudo chown -R $(id -u):$(id -g) "kata-build"
|
||||
mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
@@ -67,8 +67,7 @@ jobs:
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
sudo chown -R $(id -u):$(id -g) "kata-build"
|
||||
mkdir -p kata-build && cp "${build_dir}"/kata-static-${KATA_ASSET}*.tar.* kata-build/.
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
11
.github/workflows/ci.yaml
vendored
11
.github/workflows/ci.yaml
vendored
@@ -43,7 +43,7 @@ jobs:
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
|
||||
build-kata-static-tarball-ppc64le:
|
||||
uses: ./.github/workflows/build-kata-static-tarball-ppc64le.yaml
|
||||
with:
|
||||
@@ -62,7 +62,7 @@ jobs:
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
|
||||
publish-kata-deploy-payload-ppc64le:
|
||||
needs: build-kata-static-tarball-ppc64le
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-ppc64le.yaml
|
||||
@@ -113,6 +113,8 @@ jobs:
|
||||
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
|
||||
|
||||
run-kata-deploy-tests-on-aks:
|
||||
# TODO: Reenable when Azure CI budget is secured (see #9939).
|
||||
if: false
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-kata-deploy-tests-on-aks.yaml
|
||||
with:
|
||||
@@ -125,6 +127,8 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
run-kata-deploy-tests-on-garm:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-kata-deploy-tests-on-garm.yaml
|
||||
with:
|
||||
@@ -203,7 +207,8 @@ jobs:
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
secrets: inherit
|
||||
|
||||
run-k8s-tests-on-ppc64le:
|
||||
needs: publish-kata-deploy-payload-ppc64le
|
||||
uses: ./.github/workflows/run-k8s-tests-on-ppc64le.yaml
|
||||
|
||||
31
.github/workflows/cleanup-resources.yaml
vendored
Normal file
31
.github/workflows/cleanup-resources.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Cleanup dangling Azure resources
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 */6 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
cleanup-resources:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Log into Azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh login-azure
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
pip3 install --user --upgrade \
|
||||
azure-identity==1.16.0 \
|
||||
azure-mgmt-resource==23.0.1
|
||||
|
||||
- name: Cleanup resources
|
||||
env:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
CLEANUP_AFTER_HOURS: 6 # Clean up resources created more than this many hours ago.
|
||||
run: python3 tests/cleanup_resources.py
|
||||
5
.github/workflows/docs-url-alive-check.yaml
vendored
5
.github/workflows/docs-url-alive-check.yaml
vendored
@@ -26,11 +26,6 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Setup
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
# docs url alive check
|
||||
- name: Docs URL Alive Check
|
||||
run: |
|
||||
|
||||
2
.github/workflows/run-k8s-tests-on-garm.yaml
vendored
2
.github/workflows/run-k8s-tests-on-garm.yaml
vendored
@@ -86,7 +86,9 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Collect artifacts ${{ matrix.vmm }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh collect-artifacts
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@v4
|
||||
|
||||
7
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
7
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
@@ -57,9 +57,12 @@ jobs:
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: ${{ matrix.using-nfd }}
|
||||
TARGET_ARCH: "s390x"
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
run: |
|
||||
"${HOME}/script/pre_action.sh" ubuntu-2204
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -90,4 +93,4 @@ jobs:
|
||||
if: always()
|
||||
run: |
|
||||
bash tests/integration/kubernetes/gha-run.sh cleanup-zvsi || true
|
||||
${HOME}/script/post_action.sh ubuntu-2204
|
||||
"${HOME}/script/post_action.sh" ubuntu-2204
|
||||
|
||||
30
.github/workflows/run-kata-coco-tests.yaml
vendored
30
.github/workflows/run-kata-coco-tests.yaml
vendored
@@ -40,13 +40,15 @@ jobs:
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: "k3s"
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "true"
|
||||
KBS: "true"
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
KBS_INGRESS: "nodeport"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -80,7 +82,7 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
timeout-minutes: 50
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
@@ -118,6 +120,8 @@ jobs:
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -170,9 +174,13 @@ jobs:
|
||||
KUBECONFIG: /home/kata/.kube/config
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "false"
|
||||
KBS: "true"
|
||||
KBS_INGRESS: "nodeport"
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -193,6 +201,18 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
|
||||
- name: Install `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
@@ -205,6 +225,10 @@ jobs:
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
# Generate jobs for testing CoCo on non-TEE environments
|
||||
run-k8s-tests-coco-nontee:
|
||||
strategy:
|
||||
@@ -230,6 +254,8 @@ jobs:
|
||||
KBS_INGRESS: "aks"
|
||||
KUBERNETES: "vanilla"
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
|
||||
@@ -34,6 +34,10 @@ jobs:
|
||||
- k0s
|
||||
- k3s
|
||||
- rke2
|
||||
# TODO: There are a couple of vmm/k8s combination failing (https://github.com/kata-containers/kata-containers/issues/9854)
|
||||
# and we will put the entire kata-deploy-tests on GARM on maintenance.
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2004-smaller
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
|
||||
11
.github/workflows/run-kata-monitor-tests.yaml
vendored
11
.github/workflows/run-kata-monitor-tests.yaml
vendored
@@ -15,6 +15,8 @@ on:
|
||||
|
||||
jobs:
|
||||
run-monitor:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -23,9 +25,10 @@ jobs:
|
||||
container_engine:
|
||||
- crio
|
||||
- containerd
|
||||
include:
|
||||
- container_engine: containerd
|
||||
containerd_version: lts
|
||||
# TODO: enable when https://github.com/kata-containers/kata-containers/issues/9853 is fixed
|
||||
#include:
|
||||
# - container_engine: containerd
|
||||
# containerd_version: lts
|
||||
exclude:
|
||||
# TODO: enable with containerd when https://github.com/kata-containers/kata-containers/issues/9761 is fixed
|
||||
- container_engine: containerd
|
||||
@@ -33,7 +36,7 @@ jobs:
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
env:
|
||||
CONTAINER_ENGINE: ${{ matrix.container_engine }}
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
#CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
2
.github/workflows/run-runk-tests.yaml
vendored
2
.github/workflows/run-runk-tests.yaml
vendored
@@ -15,6 +15,8 @@ on:
|
||||
|
||||
jobs:
|
||||
run-runk:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
|
||||
2
.github/workflows/static-checks.yaml
vendored
2
.github/workflows/static-checks.yaml
vendored
@@ -40,6 +40,8 @@ jobs:
|
||||
instance: ubuntu-20.04
|
||||
|
||||
build-checks-depending-on-kvm:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2004-smaller
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
||||
@@ -7,6 +7,6 @@
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
source "${cidir}/../tests/common.bash"
|
||||
|
||||
run_docs_url_alive_check
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
new_goroot=/usr/local/go
|
||||
|
||||
pushd "${tests_repo_dir}"
|
||||
# Force overwrite the current version of golang
|
||||
[ -z "${GOROOT}" ] || rm -rf "${GOROOT}"
|
||||
.ci/install_go.sh -p -f -d "$(dirname ${new_goroot})"
|
||||
[ -z "${GOROOT}" ] || sudo ln -sf "${new_goroot}" "${GOROOT}"
|
||||
go version
|
||||
popd
|
||||
@@ -1,16 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd ${tests_repo_dir}
|
||||
.ci/install_rust.sh ${1:-}
|
||||
popd
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
vcdir="${cidir}/../src/runtime/virtcontainers/"
|
||||
source "${cidir}/lib.sh"
|
||||
export CI_JOB="${CI_JOB:-default}"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
if [ "${CI_JOB}" != "PODMAN" ]; then
|
||||
echo "Install virtcontainers"
|
||||
make -C "${vcdir}" && chronic sudo make -C "${vcdir}" install
|
||||
fi
|
||||
81
ci/lib.sh
81
ci/lib.sh
@@ -1,81 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -o nounset
|
||||
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
export kata_repo="github.com/kata-containers/kata-containers"
|
||||
export kata_repo_dir="$GOPATH/src/$kata_repo"
|
||||
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
|
||||
export tests_repo_dir="$GOPATH/src/$tests_repo"
|
||||
export branch="${target_branch:-main}"
|
||||
|
||||
# Clones the tests repository and checkout to the branch pointed out by
|
||||
# the global $branch variable.
|
||||
# If the clone exists and `CI` is exported then it does nothing. Otherwise
|
||||
# it will clone the repository or `git pull` the latest code.
|
||||
#
|
||||
clone_tests_repo()
|
||||
{
|
||||
if [ -d "$tests_repo_dir" ]; then
|
||||
[ -n "${CI:-}" ] && return
|
||||
|
||||
pushd "${tests_repo_dir}"
|
||||
git checkout "${branch}"
|
||||
git pull
|
||||
popd
|
||||
else
|
||||
git clone -q "https://${tests_repo}" "$tests_repo_dir"
|
||||
pushd "${tests_repo_dir}"
|
||||
git checkout "${branch}"
|
||||
popd
|
||||
fi
|
||||
}
|
||||
|
||||
run_static_checks()
|
||||
{
|
||||
# Make sure we have the targeting branch
|
||||
git remote set-branches --add origin "${branch}"
|
||||
git fetch -a
|
||||
bash "$kata_repo_dir/tests/static-checks.sh" "$@"
|
||||
}
|
||||
|
||||
run_docs_url_alive_check()
|
||||
{
|
||||
# Make sure we have the targeting branch
|
||||
git remote set-branches --add origin "${branch}"
|
||||
git fetch -a
|
||||
bash "$kata_repo_dir/tests/static-checks.sh" --docs --all "$kata_repo"
|
||||
}
|
||||
|
||||
run_get_pr_changed_file_details()
|
||||
{
|
||||
# Make sure we have the targeting branch
|
||||
git remote set-branches --add origin "${branch}"
|
||||
git fetch -a
|
||||
source "$kata_repo_dir/tests/common.bash"
|
||||
get_pr_changed_file_details
|
||||
}
|
||||
|
||||
# Check if the 1st argument version is greater than and equal to 2nd one
|
||||
# Version format: [0-9]+ separated by period (e.g. 2.4.6, 1.11.3 and etc.)
|
||||
#
|
||||
# Parameters:
|
||||
# $1 - a version to be tested
|
||||
# $2 - a target version
|
||||
#
|
||||
# Return:
|
||||
# 0 if $1 is greater than and equal to $2
|
||||
# 1 otherwise
|
||||
version_greater_than_equal() {
|
||||
local current_version=$1
|
||||
local target_version=$2
|
||||
smaller_version=$(echo -e "$current_version\n$target_version" | sort -V | head -1)
|
||||
if [ "${smaller_version}" = "${target_version}" ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
@@ -15,7 +15,9 @@ pod='http-server'
|
||||
# Create a pod.
|
||||
#
|
||||
info "Creating the ${pod} pod"
|
||||
oc apply -f ${script_dir}/smoke/${pod}.yaml || \
|
||||
[ -z "$KATA_RUNTIME" ] && die "Please set the KATA_RUNTIME first"
|
||||
envsubst < "${script_dir}/smoke/${pod}.yaml.in" | \
|
||||
oc apply -f - || \
|
||||
die "failed to create ${pod} pod"
|
||||
|
||||
# Check it eventually goes to 'running'
|
||||
|
||||
@@ -27,4 +27,4 @@ spec:
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
runtimeClassName: kata-qemu
|
||||
runtimeClassName: ${KATA_RUNTIME}
|
||||
@@ -5,6 +5,9 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# The kata shim to be used
|
||||
export KATA_RUNTIME=${KATA_RUNTIME:-kata-qemu}
|
||||
|
||||
script_dir=$(dirname $0)
|
||||
source ${script_dir}/lib.sh
|
||||
|
||||
|
||||
21
ci/run.sh
21
ci/run.sh
@@ -1,21 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -e
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
export CI_JOB="${CI_JOB:-}"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd ${tests_repo_dir}
|
||||
.ci/run.sh
|
||||
# temporary fix, see https://github.com/kata-containers/tests/issues/3878
|
||||
if [ "$(uname -m)" != "s390x" ] && [ "$CI_JOB" == "CRI_CONTAINERD_K8S_MINIMAL" ]; then
|
||||
tracing/test-agent-shutdown.sh
|
||||
fi
|
||||
popd
|
||||
16
ci/setup.sh
16
ci/setup.sh
@@ -1,16 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd "${tests_repo_dir}"
|
||||
.ci/setup.sh
|
||||
popd
|
||||
@@ -7,6 +7,6 @@
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
source "${cidir}/../tests/common.bash"
|
||||
|
||||
run_static_checks "${@:-github.com/kata-containers/kata-containers}"
|
||||
|
||||
@@ -32,7 +32,7 @@ For virtio-fs, the [runtime](README.md#runtime) starts one `virtiofsd` daemon
|
||||
## Devicemapper
|
||||
|
||||
The
|
||||
[devicemapper `snapshotter`](https://github.com/containerd/containerd/tree/main/snapshots/devmapper)
|
||||
[devicemapper `snapshotter`](https://github.com/containerd/containerd/blob/main/docs/snapshotters/devmapper.md)
|
||||
is a special case. The `snapshotter` uses dedicated block devices
|
||||
rather than formatted filesystems, and operates at the block level
|
||||
rather than the file level. This knowledge is used to directly use the
|
||||
|
||||
@@ -40,7 +40,7 @@ use `RuntimeClass` instead of the deprecated annotations.
|
||||
### Containerd Runtime V2 API: Shim V2 API
|
||||
|
||||
The [`containerd-shim-kata-v2` (short as `shimv2` in this documentation)](../../src/runtime/cmd/containerd-shim-kata-v2/)
|
||||
implements the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/runtime/v2) for Kata.
|
||||
implements the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/core/runtime/v2) for Kata.
|
||||
With `shimv2`, Kubernetes can launch Pod and OCI-compatible containers with one shim per Pod. Prior to `shimv2`, `2N+1`
|
||||
shims (i.e. a `containerd-shim` and a `kata-shim` for each container and the Pod sandbox itself) and no standalone `kata-proxy`
|
||||
process were used, even with VSOCK not available.
|
||||
@@ -62,7 +62,7 @@ Follow the instructions to [install Kata Containers](../install/README.md).
|
||||
> You do not need to install `cri` if you have containerd 1.1 or above. Just remove the `cri` plugin from the list of
|
||||
> `disabled_plugins` in the containerd configuration file (`/etc/containerd/config.toml`).
|
||||
|
||||
Follow the instructions from the [CRI installation guide](https://github.com/containerd/containerd/blob/main/docs/cri/installation.md).
|
||||
Follow the instructions from the [CRI installation guide](https://github.com/containerd/containerd/blob/main/docs/cri/crictl.md#install-crictl).
|
||||
|
||||
Then, check if `containerd` is now available:
|
||||
|
||||
@@ -132,9 +132,9 @@ The `RuntimeClass` is suggested.
|
||||
|
||||
The following configuration includes two runtime classes:
|
||||
- `plugins.cri.containerd.runtimes.runc`: the runc, and it is the default runtime.
|
||||
- `plugins.cri.containerd.runtimes.kata`: The function in containerd (reference [the document here](https://github.com/containerd/containerd/tree/main/runtime/v2#binary-naming))
|
||||
- `plugins.cri.containerd.runtimes.kata`: The function in containerd (reference [the document here](https://github.com/containerd/containerd/tree/main/core/runtime/v2))
|
||||
where the dot-connected string `io.containerd.kata.v2` is translated to `containerd-shim-kata-v2` (i.e. the
|
||||
binary name of the Kata implementation of [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/runtime/v2)).
|
||||
binary name of the Kata implementation of [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/core/runtime/v2)).
|
||||
|
||||
```toml
|
||||
[plugins.cri.containerd]
|
||||
|
||||
@@ -53,7 +53,14 @@ $ ./configure --enable-virtfs --target-list=x86_64-softmmu --enable-debug
|
||||
$ make -j "$(nproc)"
|
||||
$ popd
|
||||
```
|
||||
|
||||
- Create cert-chain for SNP attestation ( using [snphost](https://github.com/virtee/snphost/blob/main/docs/snphost.1.adoc) )
|
||||
```bash
|
||||
$ git clone https://github.com/virtee/snphost.git && cd snphost/
|
||||
$ cargo build
|
||||
$ mkdir /tmp/certs
|
||||
$ ./target/debug/snphost fetch vcek der /tmp/certs
|
||||
$ ./target/debug/snphost import /tmp/certs /opt/snp/cert_chain.cert
|
||||
```
|
||||
### Kata Containers Configuration for SNP
|
||||
|
||||
The configuration file located at `/etc/kata-containers/configuration.toml` must be adapted as follows to support SNP-VMs:
|
||||
@@ -100,6 +107,10 @@ sev_snp_guest = true
|
||||
- Configure an OVMF (add path)
|
||||
```toml
|
||||
firmware = "/path/to/kata-containers/tools/packaging/static-build/ovmf/opt/kata/share/ovmf/OVMF.fd"
|
||||
```
|
||||
- SNP attestation (add cert-chain to default path or add the path with cert-chain)
|
||||
```toml
|
||||
snp_certs_path = "/path/to/cert-chain"
|
||||
```
|
||||
|
||||
## Test Kata Containers with Containerd
|
||||
|
||||
@@ -202,11 +202,6 @@ attributes of each environment (local and CI):
|
||||
- The hardware architecture.
|
||||
- Number (and spec) of the CPUs.
|
||||
|
||||
## Gotchas (part 3)
|
||||
|
||||
If in doubt, look at the
|
||||
["test artifacts" attached to the failing CI test](http://jenkins.katacontainers.io).
|
||||
|
||||
## Before raising a PR
|
||||
|
||||
- Remember to check that the test runs locally:
|
||||
|
||||
@@ -279,8 +279,8 @@ $ export KERNEL_EXTRAVERSION=$(awk '/^EXTRAVERSION =/{print $NF}' $GOPATH/$LINUX
|
||||
$ export KERNEL_ROOTFS_DIR=${KERNEL_MAJOR_VERSION}.${KERNEL_PATHLEVEL}.${KERNEL_SUBLEVEL}${KERNEL_EXTRAVERSION}
|
||||
$ cd $QAT_SRC
|
||||
$ KERNEL_SOURCE_ROOT=$GOPATH/$LINUX_VER ./configure --enable-icp-sriov=guest
|
||||
$ sudo -E make all -j $($(nproc ${CI:+--ignore 1}))
|
||||
$ sudo -E make INSTALL_MOD_PATH=$ROOTFS_DIR qat-driver-install -j $($(nproc ${CI:+--ignore 1}))
|
||||
$ sudo -E make all -j $(nproc)
|
||||
$ sudo -E make INSTALL_MOD_PATH=$ROOTFS_DIR qat-driver-install -j $(nproc)
|
||||
```
|
||||
|
||||
The `usdm_drv` module also needs to be copied into the rootfs modules path and
|
||||
|
||||
3741
src/agent/Cargo.lock
generated
3741
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -21,8 +21,8 @@ scopeguard = "1.0.0"
|
||||
thiserror = "1.0.26"
|
||||
regex = "1.10.4"
|
||||
serial_test = "0.5.1"
|
||||
oci-distribution = "0.10.0"
|
||||
url = "2.5.0"
|
||||
derivative = "2.2.0"
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types" }
|
||||
safe-path = { path = "../libs/safe-path" }
|
||||
@@ -34,8 +34,8 @@ async-recursion = "0.3.2"
|
||||
futures = "0.3.30"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.28.1", features = ["full"] }
|
||||
tokio-vsock = "0.3.1"
|
||||
tokio = { version = "1.38.0", features = ["full"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
|
||||
rtnetlink = "0.8.0"
|
||||
@@ -57,12 +57,7 @@ cfg-if = "1.0.0"
|
||||
prometheus = { version = "0.13.0", features = ["process"] }
|
||||
procfs = "0.12.0"
|
||||
|
||||
# anyhow is currently locked at 1.0.58 because:
|
||||
# - Versions between 1.0.59 - 1.0.76 have not been tested yet using Kata CI.
|
||||
# However, those versions are passing "make test" for the Kata Agent.
|
||||
# - Versions 1.0.77 or newer fail during "make test" - see
|
||||
# https://github.com/kata-containers/kata-containers/issues/9538
|
||||
anyhow = "=1.0.58"
|
||||
anyhow = "1"
|
||||
|
||||
cgroups = { package = "cgroups-rs", version = "0.3.3" }
|
||||
|
||||
@@ -81,11 +76,13 @@ strum = "0.26.2"
|
||||
strum_macros = "0.26.2"
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "ca6b438", default-features = true, optional = true }
|
||||
openssl = { version = "0.10.54", features = ["vendored"], optional = true }
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "2c5ac6b01aafcb0be3875f5743c77d654a548146", default-features = false, optional = true }
|
||||
|
||||
# Agent Policy
|
||||
regorus = { version = "0.1.4", default-features = false, features = ["arc", "regex"], optional = true }
|
||||
regorus = { version = "0.1.4", default-features = false, features = [
|
||||
"arc",
|
||||
"regex",
|
||||
], optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
@@ -106,7 +103,7 @@ default-pull = ["guest-pull"]
|
||||
seccomp = ["rustjail/seccomp"]
|
||||
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
|
||||
agent-policy = ["regorus"]
|
||||
guest-pull = ["image-rs", "openssl"]
|
||||
guest-pull = ["image-rs/kata-cc-rustls-tls"]
|
||||
|
||||
[[bin]]
|
||||
name = "kata-agent"
|
||||
|
||||
@@ -15,7 +15,7 @@ PROJECT_COMPONENT = kata-agent
|
||||
TARGET = $(PROJECT_COMPONENT)
|
||||
|
||||
VERSION_FILE := ./VERSION
|
||||
VERSION := $(shell grep -v ^\# $(VERSION_FILE))
|
||||
VERSION := $(shell grep -v ^\# $(VERSION_FILE) 2>/dev/null || echo "unknown")
|
||||
COMMIT_NO := $(shell git rev-parse HEAD 2>/dev/null || true)
|
||||
COMMIT := $(if $(shell git status --porcelain --untracked-files=no 2>/dev/null || true),${COMMIT_NO}-dirty,${COMMIT_NO})
|
||||
COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)
|
||||
@@ -159,7 +159,7 @@ vendor:
|
||||
|
||||
#TARGET test: run cargo tests
|
||||
test: $(GENERATED_FILES)
|
||||
@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
@RUST_LIB_BACKTRACE=0 cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
|
||||
##TARGET check: run test
|
||||
check: $(GENERATED_FILES) standard_rust_check
|
||||
|
||||
@@ -125,10 +125,11 @@ The kata agent has the ability to configure agent options in guest kernel comman
|
||||
| `agent.debug_console` | Debug console flag | Allow to connect guest OS running inside hypervisor Connect using `kata-runtime exec <sandbox-id>` | boolean | `false` |
|
||||
| `agent.debug_console_vport` | Debug console port | Allow to specify the `vsock` port to connect the debugging console | integer | `0` |
|
||||
| `agent.devmode` | Developer mode | Allow the agent process to coredump | boolean | `false` |
|
||||
| `agent.hotplug_timeout` | Hotplug timeout | Allow to configure hotplug timeout(seconds) of block devices | integer | `3` |
|
||||
| `agent.guest_components_rest_api` | `api-server-rest` configuration | Select the features that the API Server Rest attestation component will run with. Valid values are `all`, `attestation`, `resource` | string | `resource` |
|
||||
| `agent.guest_components_procs` | guest-components processes | Attestation-related processes that should be spawned as children of the guest. Valid values are `none`, `attestation-agent`, `confidential-data-hub` (implies `attestation-agent`), `api-server-rest` (implies `attestation-agent` and `confidential-data-hub`) | string | `api-server-rest` |
|
||||
| `agent.hotplug_timeout` | Hotplug timeout | Allow to configure hotplug timeout(seconds) of block devices | integer | `3` |
|
||||
| `agent.https_proxy` | HTTPS proxy | Allow to configure `https_proxy` in the guest | string | `""` |
|
||||
| `agent.image_registry_auth` | Image registry credential URI | The URI to where image-rs can find the credentials for pulling images from private registries e.g. `file:///root/.docker/config.json` to read from a file in the guest image, or `kbs:///default/credentials/test` to get the file from the KBS| string | `""` |
|
||||
| `agent.log` | Log level | Allow the agent log level to be changed (produces more or less output) | string | `"info"` |
|
||||
| `agent.log_vport` | Log port | Allow to specify the `vsock` port to read logs | integer | `0` |
|
||||
| `agent.no_proxy` | NO proxy | Allow to configure `no_proxy` in the guest | string | `""` |
|
||||
|
||||
@@ -30,8 +30,8 @@ cgroups = { package = "cgroups-rs", version = "0.3.3" }
|
||||
rlimit = "0.5.3"
|
||||
cfg-if = "0.1.0"
|
||||
|
||||
tokio = { version = "1.28.1", features = ["sync", "io-util", "process", "time", "macros", "rt", "fs"] }
|
||||
tokio-vsock = "0.3.1"
|
||||
tokio = { version = "1.38.0", features = ["sync", "io-util", "process", "time", "macros", "rt", "fs"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
futures = "0.3.17"
|
||||
async-trait = "0.1.31"
|
||||
inotify = "0.9.2"
|
||||
|
||||
@@ -200,15 +200,8 @@ impl Process {
|
||||
}
|
||||
|
||||
pub async fn close_stdin(&mut self) {
|
||||
// stdin will be closed automatically in passfd-io senario
|
||||
if self.proc_io.is_some() {
|
||||
return;
|
||||
}
|
||||
|
||||
close_process_stream!(self, term_master, TermMaster);
|
||||
close_process_stream!(self, parent_stdin, ParentStdin);
|
||||
|
||||
self.notify_term_close();
|
||||
}
|
||||
|
||||
pub fn cleanup_process_stream(&mut self) {
|
||||
|
||||
150
src/agent/src/cdh.rs
Normal file
150
src/agent/src/cdh.rs
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright (c) 2023 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// Confidential Data Hub client wrapper.
|
||||
// Confidential Data Hub is a service running inside guest to provide resource related APIs.
|
||||
// https://github.com/confidential-containers/guest-components/tree/main/confidential-data-hub
|
||||
|
||||
use anyhow::Result;
|
||||
use derivative::Derivative;
|
||||
use protocols::{
|
||||
sealed_secret, sealed_secret_ttrpc_async, sealed_secret_ttrpc_async::SealedSecretServiceClient,
|
||||
};
|
||||
|
||||
use crate::CDH_SOCKET_URI;
|
||||
|
||||
// Nanoseconds
|
||||
const CDH_UNSEAL_TIMEOUT: i64 = 50 * 1000 * 1000 * 1000;
|
||||
const SEALED_SECRET_PREFIX: &str = "sealed.";
|
||||
|
||||
#[derive(Derivative)]
|
||||
#[derivative(Clone, Debug)]
|
||||
pub struct CDHClient {
|
||||
#[derivative(Debug = "ignore")]
|
||||
sealed_secret_client: SealedSecretServiceClient,
|
||||
}
|
||||
|
||||
impl CDHClient {
|
||||
pub fn new() -> Result<Self> {
|
||||
let client = ttrpc::asynchronous::Client::connect(CDH_SOCKET_URI)?;
|
||||
let sealed_secret_client =
|
||||
sealed_secret_ttrpc_async::SealedSecretServiceClient::new(client);
|
||||
|
||||
Ok(CDHClient {
|
||||
sealed_secret_client,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn unseal_secret_async(&self, sealed_secret: &str) -> Result<Vec<u8>> {
|
||||
let mut input = sealed_secret::UnsealSecretInput::new();
|
||||
input.set_secret(sealed_secret.into());
|
||||
|
||||
let unsealed_secret = self
|
||||
.sealed_secret_client
|
||||
.unseal_secret(ttrpc::context::with_timeout(CDH_UNSEAL_TIMEOUT), &input)
|
||||
.await?;
|
||||
Ok(unsealed_secret.plaintext)
|
||||
}
|
||||
|
||||
pub async fn unseal_env(&self, env: &str) -> Result<String> {
|
||||
if let Some((key, value)) = env.split_once('=') {
|
||||
if value.starts_with(SEALED_SECRET_PREFIX) {
|
||||
let unsealed_value = self.unseal_secret_async(value).await?;
|
||||
let unsealed_env = format!("{}={}", key, std::str::from_utf8(&unsealed_value)?);
|
||||
|
||||
return Ok(unsealed_env);
|
||||
}
|
||||
}
|
||||
|
||||
Ok((*env.to_owned()).to_string())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(feature = "sealed-secret")]
|
||||
mod tests {
|
||||
use crate::cdh::CDHClient;
|
||||
use crate::cdh::CDH_ADDR;
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use protocols::{sealed_secret, sealed_secret_ttrpc_async};
|
||||
use std::sync::Arc;
|
||||
use test_utils::skip_if_not_root;
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
|
||||
struct TestService;
|
||||
|
||||
#[async_trait]
|
||||
impl sealed_secret_ttrpc_async::SealedSecretService for TestService {
|
||||
async fn unseal_secret(
|
||||
&self,
|
||||
_ctx: &::ttrpc::asynchronous::TtrpcContext,
|
||||
_req: sealed_secret::UnsealSecretInput,
|
||||
) -> ttrpc::error::Result<sealed_secret::UnsealSecretOutput> {
|
||||
let mut output = sealed_secret::UnsealSecretOutput::new();
|
||||
output.set_plaintext("unsealed".into());
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_if_sock_exist(sock_addr: &str) -> std::io::Result<()> {
|
||||
let path = sock_addr
|
||||
.strip_prefix("unix://")
|
||||
.expect("socket address does not have the expected format.");
|
||||
|
||||
if std::path::Path::new(path).exists() {
|
||||
std::fs::remove_file(path)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn start_ttrpc_server() {
|
||||
tokio::spawn(async move {
|
||||
let ss = Box::new(TestService {})
|
||||
as Box<dyn sealed_secret_ttrpc_async::SealedSecretService + Send + Sync>;
|
||||
let ss = Arc::new(ss);
|
||||
let ss_service = sealed_secret_ttrpc_async::create_sealed_secret_service(ss);
|
||||
|
||||
remove_if_sock_exist(CDH_ADDR).unwrap();
|
||||
|
||||
let mut server = ttrpc::asynchronous::Server::new()
|
||||
.bind(CDH_ADDR)
|
||||
.unwrap()
|
||||
.register_service(ss_service);
|
||||
|
||||
server.start().await.unwrap();
|
||||
|
||||
let mut interrupt = signal(SignalKind::interrupt()).unwrap();
|
||||
tokio::select! {
|
||||
_ = interrupt.recv() => {
|
||||
server.shutdown().await.unwrap();
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_unseal_env() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
let _guard = rt.enter();
|
||||
start_ttrpc_server();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
|
||||
let cc = Some(CDHClient::new().unwrap());
|
||||
let cdh_client = cc.as_ref().ok_or(anyhow!("get cdh_client failed")).unwrap();
|
||||
let sealed_env = String::from("key=sealed.testdata");
|
||||
let unsealed_env = cdh_client.unseal_env(&sealed_env).await.unwrap();
|
||||
assert_eq!(unsealed_env, String::from("key=unsealed"));
|
||||
let normal_env = String::from("key=testdata");
|
||||
let unchanged_env = cdh_client.unseal_env(&normal_env).await.unwrap();
|
||||
assert_eq!(unchanged_env, String::from("key=testdata"));
|
||||
|
||||
rt.shutdown_background();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
}
|
||||
}
|
||||
@@ -29,6 +29,8 @@ const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "systemd.unified_cgroup_hierarchy"
|
||||
const CONFIG_FILE: &str = "agent.config_file";
|
||||
const GUEST_COMPONENTS_REST_API_OPTION: &str = "agent.guest_components_rest_api";
|
||||
const GUEST_COMPONENTS_PROCS_OPTION: &str = "agent.guest_components_procs";
|
||||
#[cfg(feature = "guest-pull")]
|
||||
const IMAGE_REGISTRY_AUTH_OPTION: &str = "agent.image_registry_auth";
|
||||
|
||||
// Configure the proxy settings for HTTPS requests in the guest,
|
||||
// to solve the problem of not being able to access the specified image in some cases.
|
||||
@@ -106,6 +108,8 @@ pub struct AgentConfig {
|
||||
pub no_proxy: String,
|
||||
pub guest_components_rest_api: GuestComponentsFeatures,
|
||||
pub guest_components_procs: GuestComponentsProcs,
|
||||
#[cfg(feature = "guest-pull")]
|
||||
pub image_registry_auth: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -125,6 +129,8 @@ pub struct AgentConfigBuilder {
|
||||
pub no_proxy: Option<String>,
|
||||
pub guest_components_rest_api: Option<GuestComponentsFeatures>,
|
||||
pub guest_components_procs: Option<GuestComponentsProcs>,
|
||||
#[cfg(feature = "guest-pull")]
|
||||
pub image_registry_auth: Option<String>,
|
||||
}
|
||||
|
||||
macro_rules! config_override {
|
||||
@@ -190,6 +196,8 @@ impl Default for AgentConfig {
|
||||
no_proxy: String::from(""),
|
||||
guest_components_rest_api: GuestComponentsFeatures::default(),
|
||||
guest_components_procs: GuestComponentsProcs::default(),
|
||||
#[cfg(feature = "guest-pull")]
|
||||
image_registry_auth: String::from(""),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -227,6 +235,8 @@ impl FromStr for AgentConfig {
|
||||
guest_components_rest_api
|
||||
);
|
||||
config_override!(agent_config_builder, agent_config, guest_components_procs);
|
||||
#[cfg(feature = "guest-pull")]
|
||||
config_override!(agent_config_builder, agent_config, image_registry_auth);
|
||||
|
||||
Ok(agent_config)
|
||||
}
|
||||
@@ -316,7 +326,6 @@ impl AgentConfig {
|
||||
get_vsock_port,
|
||||
|port| port > 0
|
||||
);
|
||||
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
CONTAINER_PIPE_SIZE_OPTION,
|
||||
@@ -343,6 +352,13 @@ impl AgentConfig {
|
||||
config.guest_components_procs,
|
||||
get_guest_components_procs_value
|
||||
);
|
||||
#[cfg(feature = "guest-pull")]
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
IMAGE_REGISTRY_AUTH_OPTION,
|
||||
config.image_registry_auth,
|
||||
get_string_value
|
||||
);
|
||||
}
|
||||
|
||||
config.override_config_from_envs();
|
||||
@@ -505,10 +521,8 @@ fn get_url_value(param: &str) -> Result<String> {
|
||||
fn get_guest_components_features_value(param: &str) -> Result<GuestComponentsFeatures> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
ensure!(fields.len() >= 2, ERR_INVALID_GET_VALUE_PARAM);
|
||||
|
||||
// We need name (but the value can be blank)
|
||||
ensure!(!fields[0].is_empty(), ERR_INVALID_GET_VALUE_NO_NAME);
|
||||
|
||||
let value = fields[1..].join("=");
|
||||
GuestComponentsFeatures::from_str(&value)
|
||||
.map_err(|_| anyhow!(ERR_INVALID_GUEST_COMPONENTS_REST_API_VALUE))
|
||||
@@ -570,6 +584,8 @@ mod tests {
|
||||
no_proxy: &'a str,
|
||||
guest_components_rest_api: GuestComponentsFeatures,
|
||||
guest_components_procs: GuestComponentsProcs,
|
||||
#[cfg(feature = "guest-pull")]
|
||||
image_registry_auth: &'a str,
|
||||
}
|
||||
|
||||
impl Default for TestData<'_> {
|
||||
@@ -589,6 +605,8 @@ mod tests {
|
||||
no_proxy: "",
|
||||
guest_components_rest_api: GuestComponentsFeatures::default(),
|
||||
guest_components_procs: GuestComponentsProcs::default(),
|
||||
#[cfg(feature = "guest-pull")]
|
||||
image_registry_auth: "",
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1020,6 +1038,18 @@ mod tests {
|
||||
guest_components_procs: GuestComponentsProcs::None,
|
||||
..Default::default()
|
||||
},
|
||||
#[cfg(feature = "guest-pull")]
|
||||
TestData {
|
||||
contents: "agent.image_registry_auth=file:///root/.docker/config.json",
|
||||
image_registry_auth: "file:///root/.docker/config.json",
|
||||
..Default::default()
|
||||
},
|
||||
#[cfg(feature = "guest-pull")]
|
||||
TestData {
|
||||
contents: "agent.image_registry_auth=kbs:///default/credentials/test",
|
||||
image_registry_auth: "kbs:///default/credentials/test",
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
@@ -1079,6 +1109,8 @@ mod tests {
|
||||
"{}",
|
||||
msg
|
||||
);
|
||||
#[cfg(feature = "guest-pull")]
|
||||
assert_eq!(d.image_registry_auth, config.image_registry_auth, "{}", msg);
|
||||
|
||||
for v in vars_to_unset {
|
||||
env::remove_var(v);
|
||||
|
||||
@@ -933,7 +933,11 @@ async fn vfio_ap_device_handler(
|
||||
for apqn in device.options.iter() {
|
||||
wait_for_ap_device(sandbox, ap::Address::from_str(apqn)?).await?;
|
||||
}
|
||||
Ok(Default::default())
|
||||
let dev_update = Some(DevUpdate::new(Z9_CRYPT_DEV_PATH, Z9_CRYPT_DEV_PATH)?);
|
||||
Ok(SpecUpdate {
|
||||
dev: dev_update,
|
||||
pci: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
|
||||
@@ -20,8 +20,6 @@ use tokio::sync::Mutex;
|
||||
use crate::rpc::CONTAINER_BASE;
|
||||
use crate::AGENT_CONFIG;
|
||||
|
||||
// A marker to merge container spec for images pulled inside guest.
|
||||
const ANNO_K8S_IMAGE_NAME: &str = "io.kubernetes.cri.image-name";
|
||||
const KATA_IMAGE_WORK_DIR: &str = "/run/kata-containers/image/";
|
||||
const CONFIG_JSON: &str = "config.json";
|
||||
const KATA_PAUSE_BUNDLE: &str = "/pause_bundle";
|
||||
@@ -52,23 +50,24 @@ fn copy_if_not_exists(src: &Path, dst: &Path) -> Result<()> {
|
||||
|
||||
pub struct ImageService {
|
||||
image_client: ImageClient,
|
||||
images: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl ImageService {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
image_client: ImageClient::new(PathBuf::from(KATA_IMAGE_WORK_DIR)),
|
||||
images: HashMap::new(),
|
||||
let mut image_client = ImageClient::new(PathBuf::from(KATA_IMAGE_WORK_DIR));
|
||||
#[cfg(feature = "guest-pull")]
|
||||
if !AGENT_CONFIG.image_registry_auth.is_empty() {
|
||||
let registry_auth = &AGENT_CONFIG.image_registry_auth;
|
||||
debug!(sl(), "Set registry auth file {:?}", registry_auth);
|
||||
image_client.config.file_paths.auth_file = registry_auth.clone();
|
||||
image_client.config.auth = true;
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_image(&mut self, image: String, cid: String) {
|
||||
self.images.insert(image, cid);
|
||||
Self { image_client }
|
||||
}
|
||||
|
||||
/// pause image is packaged in rootfs
|
||||
fn unpack_pause_image(cid: &str, target_subpath: &str) -> Result<String> {
|
||||
fn unpack_pause_image(cid: &str) -> Result<String> {
|
||||
verify_id(cid).context("The guest pause image cid contains invalid characters.")?;
|
||||
|
||||
let guest_pause_bundle = Path::new(KATA_PAUSE_BUNDLE);
|
||||
@@ -102,9 +101,7 @@ impl ImageService {
|
||||
bail!("The number of args should be greater than or equal to one! Please check the pause image.");
|
||||
}
|
||||
|
||||
let container_bundle = scoped_join(CONTAINER_BASE, cid)?;
|
||||
fs::create_dir_all(&container_bundle)?;
|
||||
let pause_bundle = scoped_join(&container_bundle, target_subpath)?;
|
||||
let pause_bundle = scoped_join(CONTAINER_BASE, cid)?;
|
||||
fs::create_dir_all(&pause_bundle)?;
|
||||
let pause_rootfs = scoped_join(&pause_bundle, "rootfs")?;
|
||||
fs::create_dir_all(&pause_rootfs)?;
|
||||
@@ -125,7 +122,7 @@ impl ImageService {
|
||||
/// - `cid`: Container id
|
||||
/// - `image_metadata`: Annotations about the image (exp: "containerd.io/snapshot/cri.layer-digest": "sha256:24fb2886d6f6c5d16481dd7608b47e78a8e92a13d6e64d87d57cb16d5f766d63")
|
||||
/// # Returns
|
||||
/// - The image rootfs bundle path. (exp. /run/kata-containers/cb0b47276ea66ee9f44cc53afa94d7980b57a52c3f306f68cb034e58d9fbd3c6/images/rootfs)
|
||||
/// - The image rootfs bundle path. (exp. /run/kata-containers/cb0b47276ea66ee9f44cc53afa94d7980b57a52c3f306f68cb034e58d9fbd3c6/rootfs)
|
||||
pub async fn pull_image(
|
||||
&mut self,
|
||||
image: &str,
|
||||
@@ -146,16 +143,13 @@ impl ImageService {
|
||||
}
|
||||
|
||||
if is_sandbox {
|
||||
let mount_path = Self::unpack_pause_image(cid, "pause")?;
|
||||
self.add_image(String::from(image), String::from(cid)).await;
|
||||
let mount_path = Self::unpack_pause_image(cid)?;
|
||||
return Ok(mount_path);
|
||||
}
|
||||
|
||||
// Image layers will store at KATA_IMAGE_WORK_DIR, generated bundles
|
||||
// with rootfs and config.json will store under CONTAINER_BASE/cid/images.
|
||||
let bundle_base_dir = scoped_join(CONTAINER_BASE, cid)?;
|
||||
fs::create_dir_all(&bundle_base_dir)?;
|
||||
let bundle_path = scoped_join(&bundle_base_dir, "images")?;
|
||||
let bundle_path = scoped_join(CONTAINER_BASE, cid)?;
|
||||
fs::create_dir_all(&bundle_path)?;
|
||||
info!(sl(), "pull image {image:?}, bundle path {bundle_path:?}");
|
||||
|
||||
@@ -179,35 +173,9 @@ impl ImageService {
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
self.add_image(String::from(image), String::from(cid)).await;
|
||||
let image_bundle_path = scoped_join(&bundle_path, "rootfs")?;
|
||||
Ok(image_bundle_path.as_path().display().to_string())
|
||||
}
|
||||
|
||||
/// Partially merge an OCI process specification into another one.
|
||||
fn merge_oci_process(&self, target: &mut oci::Process, source: &oci::Process) {
|
||||
// Override the target args only when the target args is empty and source.args is not empty
|
||||
if target.args.is_empty() && !source.args.is_empty() {
|
||||
target.args.append(&mut source.args.clone());
|
||||
}
|
||||
|
||||
// Override the target cwd only when the target cwd is blank and source.cwd is not blank
|
||||
if target.cwd == "/" && source.cwd != "/" {
|
||||
target.cwd = String::from(&source.cwd);
|
||||
}
|
||||
|
||||
for source_env in &source.env {
|
||||
if let Some((variable_name, variable_value)) = source_env.split_once('=') {
|
||||
debug!(
|
||||
sl(),
|
||||
"source spec environment variable: {variable_name:?} : {variable_value:?}"
|
||||
);
|
||||
if !target.env.iter().any(|i| i.contains(variable_name)) {
|
||||
target.env.push(source_env.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set proxy environment from AGENT_CONFIG
|
||||
@@ -237,55 +205,6 @@ pub async fn set_proxy_env_vars() {
|
||||
};
|
||||
}
|
||||
|
||||
/// When being passed an image name through a container annotation, merge its
|
||||
/// corresponding bundle OCI specification into the passed container creation one.
|
||||
pub async fn merge_bundle_oci(container_oci: &mut oci::Spec) -> Result<()> {
|
||||
let image_service = IMAGE_SERVICE.clone();
|
||||
let mut image_service = image_service.lock().await;
|
||||
let image_service = image_service
|
||||
.as_mut()
|
||||
.expect("Image Service not initialized");
|
||||
if let Some(image_name) = container_oci.annotations.get(ANNO_K8S_IMAGE_NAME) {
|
||||
if let Some(container_id) = image_service.images.get(image_name) {
|
||||
let image_oci_config_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(CONFIG_JSON);
|
||||
debug!(
|
||||
sl(),
|
||||
"Image bundle config path: {:?}", image_oci_config_path
|
||||
);
|
||||
|
||||
let image_oci = oci::Spec::load(image_oci_config_path.to_str().ok_or_else(|| {
|
||||
anyhow!(
|
||||
"Invalid container image OCI config path {:?}",
|
||||
image_oci_config_path
|
||||
)
|
||||
})?)
|
||||
.context("load image bundle")?;
|
||||
|
||||
if let (Some(container_root), Some(image_root)) =
|
||||
(container_oci.root.as_mut(), image_oci.root.as_ref())
|
||||
{
|
||||
let root_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(image_root.path.clone());
|
||||
container_root.path =
|
||||
String::from(root_path.to_str().ok_or_else(|| {
|
||||
anyhow!("Invalid container image root path {:?}", root_path)
|
||||
})?);
|
||||
}
|
||||
|
||||
if let (Some(container_process), Some(image_process)) =
|
||||
(container_oci.process.as_mut(), image_oci.process.as_ref())
|
||||
{
|
||||
image_service.merge_oci_process(container_process, image_process);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Init the image service
|
||||
pub async fn init_image_service() {
|
||||
let image_service = ImageService::new();
|
||||
@@ -305,71 +224,3 @@ pub async fn pull_image(
|
||||
|
||||
image_service.pull_image(image, cid, image_metadata).await
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::ImageService;
|
||||
use rstest::rstest;
|
||||
|
||||
#[rstest]
|
||||
// TODO - how can we tell the user didn't specifically set it to `/` vs not setting at all? Is that scenario valid?
|
||||
#[case::image_cwd_should_override_blank_container_cwd("/", "/imageDir", "/imageDir")]
|
||||
#[case::container_cwd_should_override_image_cwd("/containerDir", "/imageDir", "/containerDir")]
|
||||
#[case::container_cwd_should_override_blank_image_cwd("/containerDir", "/", "/containerDir")]
|
||||
async fn test_merge_cwd(
|
||||
#[case] container_process_cwd: &str,
|
||||
#[case] image_process_cwd: &str,
|
||||
#[case] expected: &str,
|
||||
) {
|
||||
let image_service = ImageService::new();
|
||||
let mut container_process = oci::Process {
|
||||
cwd: container_process_cwd.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
let image_process = oci::Process {
|
||||
cwd: image_process_cwd.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
image_service.merge_oci_process(&mut container_process, &image_process);
|
||||
assert_eq!(expected, container_process.cwd);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::pods_environment_overrides_images(
|
||||
vec!["ISPRODUCTION=true".to_string()],
|
||||
vec!["ISPRODUCTION=false".to_string()],
|
||||
vec!["ISPRODUCTION=true".to_string()]
|
||||
)]
|
||||
#[case::multiple_environment_variables_can_be_overrided(
|
||||
vec!["ISPRODUCTION=true".to_string(), "ISDEVELOPMENT=false".to_string()],
|
||||
vec!["ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()],
|
||||
vec!["ISPRODUCTION=true".to_string(), "ISDEVELOPMENT=false".to_string()]
|
||||
)]
|
||||
#[case::not_override_them_when_none_of_variables_match(
|
||||
vec!["ANOTHERENV=TEST".to_string()],
|
||||
vec!["ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()],
|
||||
vec!["ANOTHERENV=TEST".to_string(), "ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()]
|
||||
)]
|
||||
#[case::a_mix_of_both_overriding_and_not(
|
||||
vec!["ANOTHERENV=TEST".to_string(), "ISPRODUCTION=true".to_string()],
|
||||
vec!["ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()],
|
||||
vec!["ANOTHERENV=TEST".to_string(), "ISPRODUCTION=true".to_string(), "ISDEVELOPMENT=true".to_string()]
|
||||
)]
|
||||
async fn test_merge_env(
|
||||
#[case] container_process_env: Vec<String>,
|
||||
#[case] image_process_env: Vec<String>,
|
||||
#[case] expected: Vec<String>,
|
||||
) {
|
||||
let image_service = ImageService::new();
|
||||
let mut container_process = oci::Process {
|
||||
env: container_process_env,
|
||||
..Default::default()
|
||||
};
|
||||
let image_process = oci::Process {
|
||||
env: image_process_env,
|
||||
..Default::default()
|
||||
};
|
||||
image_service.merge_oci_process(&mut container_process, &image_process);
|
||||
assert_eq!(expected, container_process.env);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,6 +71,7 @@ cfg_if! {
|
||||
pub const CCW_ROOT_BUS_PATH: &str = "/devices/css0";
|
||||
pub const AP_ROOT_BUS_PATH: &str = "/devices/ap";
|
||||
pub const AP_SCANS_PATH: &str = "/sys/bus/ap/scans";
|
||||
pub const Z9_CRYPT_DEV_PATH: &str = "/dev/z90crypt";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -38,6 +38,7 @@ use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use tracing::{instrument, span};
|
||||
|
||||
mod cdh;
|
||||
mod config;
|
||||
mod console;
|
||||
mod device;
|
||||
@@ -59,6 +60,7 @@ mod util;
|
||||
mod version;
|
||||
mod watcher;
|
||||
|
||||
use cdh::CDHClient;
|
||||
use config::GuestComponentsProcs;
|
||||
use mount::{cgroups_mount, general_mount};
|
||||
use sandbox::Sandbox;
|
||||
@@ -104,9 +106,13 @@ const AA_ATTESTATION_URI: &str = concatcp!(UNIX_SOCKET_PREFIX, AA_ATTESTATION_SO
|
||||
|
||||
const CDH_PATH: &str = "/usr/local/bin/confidential-data-hub";
|
||||
const CDH_SOCKET: &str = "/run/confidential-containers/cdh.sock";
|
||||
const CDH_SOCKET_URI: &str = concatcp!(UNIX_SOCKET_PREFIX, CDH_SOCKET);
|
||||
|
||||
const API_SERVER_PATH: &str = "/usr/local/bin/api-server-rest";
|
||||
|
||||
/// Path of ocicrypt config file. This is used by image-rs when decrypting image.
|
||||
const OCICRYPT_CONFIG_PATH: &str = "/tmp/ocicrypt_config.json";
|
||||
|
||||
const DEFAULT_LAUNCH_PROCESS_TIMEOUT: i32 = 6;
|
||||
|
||||
lazy_static! {
|
||||
@@ -403,6 +409,7 @@ async fn start_sandbox(
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
sandbox.lock().await.sender = Some(tx);
|
||||
|
||||
let mut cdh_client = None;
|
||||
let gc_procs = config.guest_components_procs;
|
||||
if gc_procs != GuestComponentsProcs::None {
|
||||
if !attestation_binaries_available(logger, &gc_procs) {
|
||||
@@ -411,12 +418,19 @@ async fn start_sandbox(
|
||||
"attestation binaries requested for launch not available"
|
||||
);
|
||||
} else {
|
||||
init_attestation_components(logger, config)?;
|
||||
cdh_client = init_attestation_components(logger, config)?;
|
||||
}
|
||||
}
|
||||
|
||||
// vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode).await?;
|
||||
let mut server = rpc::start(
|
||||
sandbox.clone(),
|
||||
config.server_addr.as_str(),
|
||||
init_mode,
|
||||
cdh_client,
|
||||
)
|
||||
.await?;
|
||||
|
||||
server.start().await?;
|
||||
|
||||
rx.await?;
|
||||
@@ -445,10 +459,11 @@ fn attestation_binaries_available(logger: &Logger, procs: &GuestComponentsProcs)
|
||||
// Start-up attestation-agent, CDH and api-server-rest if they are packaged in the rootfs
|
||||
// and the corresponding procs are enabled in the agent configuration. the process will be
|
||||
// launched in the background and the function will return immediately.
|
||||
fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<()> {
|
||||
// If the CDH is started, a CDH client will be instantiated and returned.
|
||||
fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<Option<CDHClient>> {
|
||||
// skip launch of any guest-component
|
||||
if config.guest_components_procs == GuestComponentsProcs::None {
|
||||
return Ok(());
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
debug!(logger, "spawning attestation-agent process {}", AA_PATH);
|
||||
@@ -463,13 +478,25 @@ fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<
|
||||
|
||||
// skip launch of confidential-data-hub and api-server-rest
|
||||
if config.guest_components_procs == GuestComponentsProcs::AttestationAgent {
|
||||
return Ok(());
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let ocicrypt_config = serde_json::json!({
|
||||
"key-providers": {
|
||||
"attestation-agent":{
|
||||
"ttrpc":CDH_SOCKET_URI
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
fs::write(OCICRYPT_CONFIG_PATH, ocicrypt_config.to_string().as_bytes())?;
|
||||
env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
|
||||
|
||||
debug!(
|
||||
logger,
|
||||
"spawning confidential-data-hub process {}", CDH_PATH
|
||||
);
|
||||
|
||||
launch_process(
|
||||
logger,
|
||||
CDH_PATH,
|
||||
@@ -479,9 +506,11 @@ fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<
|
||||
)
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", CDH_PATH, e))?;
|
||||
|
||||
let cdh_client = CDHClient::new().context("Failed to create CDH Client")?;
|
||||
|
||||
// skip launch of api-server-rest
|
||||
if config.guest_components_procs == GuestComponentsProcs::ConfidentialDataHub {
|
||||
return Ok(());
|
||||
return Ok(Some(cdh_client));
|
||||
}
|
||||
|
||||
let features = config.guest_components_rest_api;
|
||||
@@ -498,7 +527,7 @@ fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<
|
||||
)
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", API_SERVER_PATH, e))?;
|
||||
|
||||
Ok(())
|
||||
Ok(Some(cdh_client))
|
||||
}
|
||||
|
||||
fn wait_for_path_to_exist(logger: &Logger, path: &str, timeout_secs: i32) -> Result<()> {
|
||||
|
||||
@@ -76,6 +76,8 @@ use crate::policy::{do_set_policy, is_allowed};
|
||||
#[cfg(feature = "guest-pull")]
|
||||
use crate::image;
|
||||
|
||||
use crate::cdh::CDHClient;
|
||||
|
||||
use opentelemetry::global;
|
||||
use tracing::span;
|
||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
@@ -171,6 +173,7 @@ impl<T> OptionToTtrpcResult<T> for Option<T> {
|
||||
pub struct AgentService {
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
init_mode: bool,
|
||||
cdh_client: Option<CDHClient>,
|
||||
}
|
||||
|
||||
impl AgentService {
|
||||
@@ -210,11 +213,6 @@ impl AgentService {
|
||||
"receive createcontainer, storages: {:?}", &req.storages
|
||||
);
|
||||
|
||||
// In case of pulling image inside guest, we need to merge the image bundle OCI spec
|
||||
// into the container creation request OCI spec.
|
||||
#[cfg(feature = "guest-pull")]
|
||||
image::merge_bundle_oci(&mut oci).await?;
|
||||
|
||||
// Some devices need some extra processing (the ones invoked with
|
||||
// --device for instance), and that's what this call is doing. It
|
||||
// updates the devices listed in the OCI spec, so that they actually
|
||||
@@ -222,6 +220,22 @@ impl AgentService {
|
||||
// cannot predict everything from the caller.
|
||||
add_devices(&req.devices, &mut oci, &self.sandbox).await?;
|
||||
|
||||
if let Some(cdh) = self.cdh_client.as_ref() {
|
||||
let process = oci
|
||||
.process
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain process field"))?;
|
||||
|
||||
for env in process.env.iter_mut() {
|
||||
match cdh.unseal_env(env).await {
|
||||
Ok(unsealed_env) => *env = unsealed_env.to_string(),
|
||||
Err(e) => {
|
||||
warn!(sl(), "Failed to unseal secret: {}", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Both rootfs and volumes (invoked with --volume for instance) will
|
||||
// be processed the same way. The idea is to always mount any provided
|
||||
// storage to the specified MountPoint, so that it will match what's
|
||||
@@ -584,25 +598,32 @@ impl AgentService {
|
||||
let cid = req.container_id;
|
||||
let eid = req.exec_id;
|
||||
|
||||
let writer = {
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;
|
||||
|
||||
// use ptmx io
|
||||
if p.term_master.is_some() {
|
||||
p.get_writer(StreamType::TermMaster)
|
||||
} else {
|
||||
// use piped io
|
||||
p.get_writer(StreamType::ParentStdin)
|
||||
}
|
||||
};
|
||||
|
||||
let writer = writer.ok_or_else(|| anyhow!(ERR_CANNOT_GET_WRITER))?;
|
||||
writer.lock().await.write_all(req.data.as_slice()).await?;
|
||||
|
||||
let mut resp = WriteStreamResponse::new();
|
||||
resp.set_len(req.data.len() as u32);
|
||||
|
||||
// EOF of stdin
|
||||
if req.data.is_empty() {
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;
|
||||
p.close_stdin().await;
|
||||
} else {
|
||||
let writer = {
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;
|
||||
|
||||
// use ptmx io
|
||||
if p.term_master.is_some() {
|
||||
p.get_writer(StreamType::TermMaster)
|
||||
} else {
|
||||
// use piped io
|
||||
p.get_writer(StreamType::ParentStdin)
|
||||
}
|
||||
};
|
||||
|
||||
let writer = writer.ok_or_else(|| anyhow!(ERR_CANNOT_GET_WRITER))?;
|
||||
writer.lock().await.write_all(req.data.as_slice()).await?;
|
||||
}
|
||||
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
@@ -645,6 +666,7 @@ impl AgentService {
|
||||
biased;
|
||||
v = read_stream(&reader, req.len as usize) => {
|
||||
let vector = v?;
|
||||
|
||||
let mut resp = ReadStreamResponse::new();
|
||||
resp.set_data(vector);
|
||||
|
||||
@@ -845,6 +867,9 @@ impl agent_ttrpc::AgentService for AgentService {
|
||||
ctx: &TtrpcContext,
|
||||
req: protocols::agent::CloseStdinRequest,
|
||||
) -> ttrpc::Result<Empty> {
|
||||
// The stdin will be closed when EOF is got in rpc `write_stdin`[runtime-rs]
|
||||
// so this rpc will not be called anymore by runtime-rs.
|
||||
|
||||
trace_rpc_call!(ctx, "close_stdin", req);
|
||||
is_allowed(&req).await?;
|
||||
|
||||
@@ -1601,10 +1626,12 @@ pub async fn start(
|
||||
s: Arc<Mutex<Sandbox>>,
|
||||
server_address: &str,
|
||||
init_mode: bool,
|
||||
cdh_client: Option<CDHClient>,
|
||||
) -> Result<TtrpcServer> {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: s,
|
||||
init_mode,
|
||||
cdh_client,
|
||||
}) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
|
||||
let aservice = agent_ttrpc::create_agent_service(Arc::new(agent_service));
|
||||
|
||||
@@ -1920,21 +1947,28 @@ pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
|
||||
return Err(anyhow!(nix::Error::EINVAL));
|
||||
};
|
||||
|
||||
let spec_root_path = Path::new(&spec_root.path);
|
||||
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(cid);
|
||||
let config_path = bundle_path.join("config.json");
|
||||
let rootfs_path = bundle_path.join("rootfs");
|
||||
let spec_root_path = Path::new(&spec_root.path);
|
||||
|
||||
fs::create_dir_all(&rootfs_path)?;
|
||||
baremount(
|
||||
spec_root_path,
|
||||
&rootfs_path,
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"",
|
||||
&sl(),
|
||||
)?;
|
||||
let rootfs_exists = Path::new(&rootfs_path).exists();
|
||||
info!(
|
||||
sl(),
|
||||
"The rootfs_path is {:?} and exists: {}", rootfs_path, rootfs_exists
|
||||
);
|
||||
|
||||
if !rootfs_exists {
|
||||
fs::create_dir_all(&rootfs_path)?;
|
||||
baremount(
|
||||
spec_root_path,
|
||||
&rootfs_path,
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"",
|
||||
&sl(),
|
||||
)?;
|
||||
}
|
||||
|
||||
let rootfs_path_name = rootfs_path
|
||||
.to_str()
|
||||
@@ -2148,6 +2182,7 @@ mod tests {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let req = protocols::agent::UpdateInterfaceRequest::default();
|
||||
@@ -2162,10 +2197,10 @@ mod tests {
|
||||
async fn test_update_routes() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let sandbox = Sandbox::new(&logger).unwrap();
|
||||
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let req = protocols::agent::UpdateRoutesRequest::default();
|
||||
@@ -2180,10 +2215,10 @@ mod tests {
|
||||
async fn test_add_arp_neighbors() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let sandbox = Sandbox::new(&logger).unwrap();
|
||||
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let req = protocols::agent::AddARPNeighborsRequest::default();
|
||||
@@ -2322,6 +2357,7 @@ mod tests {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let result = agent_service
|
||||
@@ -2811,6 +2847,7 @@ OtherField:other
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let ctx = mk_ttrpc_context();
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use super::new_device;
|
||||
use crate::image;
|
||||
use crate::storage::{StorageContext, StorageHandler};
|
||||
use anyhow::{anyhow, Result};
|
||||
@@ -12,8 +13,6 @@ use protocols::agent::Storage;
|
||||
use std::sync::Arc;
|
||||
use tracing::instrument;
|
||||
|
||||
use super::{common_storage_handler, new_device};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ImagePullHandler {}
|
||||
|
||||
@@ -36,7 +35,7 @@ impl StorageHandler for ImagePullHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
//Currently the image metadata is not used to pulling image in the guest.
|
||||
@@ -51,12 +50,7 @@ impl StorageHandler for ImagePullHandler {
|
||||
.ok_or_else(|| anyhow!("failed to get container id"))?;
|
||||
let bundle_path = image::pull_image(image_name, &cid, &image_pull_volume.metadata).await?;
|
||||
|
||||
storage.source = bundle_path;
|
||||
storage.options = vec!["bind".to_string(), "ro".to_string()];
|
||||
|
||||
common_storage_handler(ctx.logger, &storage)?;
|
||||
|
||||
new_device(storage.mount_point)
|
||||
new_device(bundle_path)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -281,6 +281,8 @@ pub enum VmmData {
|
||||
MachineConfiguration(Box<VmConfigInfo>),
|
||||
/// Prometheus Metrics represented by String.
|
||||
HypervisorMetrics(String),
|
||||
/// Return vfio device's slot number in guest.
|
||||
VfioDeviceData(Option<u8>),
|
||||
/// Sync Hotplug
|
||||
SyncHotplug((Sender<Option<i32>>, Receiver<Option<i32>>)),
|
||||
}
|
||||
@@ -398,7 +400,9 @@ impl VmmService {
|
||||
self.add_balloon_device(vmm, event_mgr, balloon_cfg)
|
||||
}
|
||||
#[cfg(feature = "host-device")]
|
||||
VmmAction::InsertHostDevice(hostdev_cfg) => self.add_vfio_device(vmm, hostdev_cfg),
|
||||
VmmAction::InsertHostDevice(mut hostdev_cfg) => {
|
||||
self.add_vfio_device(vmm, &mut hostdev_cfg)
|
||||
}
|
||||
#[cfg(feature = "host-device")]
|
||||
VmmAction::PrepareRemoveHostDevice(hostdev_id) => {
|
||||
self.prepare_remove_vfio_device(vmm, &hostdev_id)
|
||||
@@ -850,7 +854,7 @@ impl VmmService {
|
||||
}
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
fn add_vfio_device(&self, vmm: &mut Vmm, config: HostDeviceConfig) -> VmmRequestResult {
|
||||
fn add_vfio_device(&self, vmm: &mut Vmm, config: &mut HostDeviceConfig) -> VmmRequestResult {
|
||||
let vm = vmm.get_vm_mut().ok_or(VmmActionError::HostDeviceConfig(
|
||||
VfioDeviceError::InvalidVMID,
|
||||
))?;
|
||||
@@ -873,7 +877,8 @@ impl VmmService {
|
||||
.unwrap()
|
||||
.insert_device(&mut ctx, config)
|
||||
.map_err(VmmActionError::HostDeviceConfig)?;
|
||||
Ok(VmmData::Empty)
|
||||
|
||||
Ok(VmmData::VfioDeviceData(config.dev_config.guest_dev_id))
|
||||
}
|
||||
|
||||
// using upcall to unplug the pci device in the guest
|
||||
|
||||
@@ -553,7 +553,7 @@ impl DeviceOpContext {
|
||||
&self,
|
||||
dev: &Arc<dyn DeviceIo>,
|
||||
callback: Option<Box<dyn Fn(UpcallClientResponse) + Send>>,
|
||||
) -> Result<()> {
|
||||
) -> Result<u8> {
|
||||
if !self.is_hotplug || !self.pci_hotplug_enabled {
|
||||
return Err(DeviceMgrError::InvalidOperation);
|
||||
}
|
||||
@@ -561,7 +561,12 @@ impl DeviceOpContext {
|
||||
let (busno, devfn) = DeviceManager::get_pci_device_info(dev)?;
|
||||
let req = DevMgrRequest::AddPciDev(PciDevRequest { busno, devfn });
|
||||
|
||||
self.call_hotplug_device(req, callback)
|
||||
self.call_hotplug_device(req, callback)?;
|
||||
|
||||
// Extract the slot number from devfn
|
||||
// Right shift by 3 to remove function bits (2:0) and
|
||||
// align slot bits (7:3) to the least significant position
|
||||
Ok(devfn >> 3)
|
||||
}
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
|
||||
@@ -255,7 +255,7 @@ impl VfioDeviceMgr {
|
||||
pub fn insert_device(
|
||||
&mut self,
|
||||
ctx: &mut DeviceOpContext,
|
||||
config: HostDeviceConfig,
|
||||
config: &mut HostDeviceConfig,
|
||||
) -> Result<()> {
|
||||
if !cfg!(feature = "hotplug") && ctx.is_hotplug {
|
||||
return Err(VfioDeviceError::UpdateNotAllowedPostBoot);
|
||||
@@ -267,7 +267,7 @@ impl VfioDeviceMgr {
|
||||
"hostdev_id" => &config.hostdev_id,
|
||||
"bdf" => &config.dev_config.bus_slot_func,
|
||||
);
|
||||
let device_index = self.info_list.insert_or_update(&config)?;
|
||||
let device_index = self.info_list.insert_or_update(config)?;
|
||||
// Handle device hotplug case
|
||||
if ctx.is_hotplug {
|
||||
slog::info!(
|
||||
@@ -277,7 +277,7 @@ impl VfioDeviceMgr {
|
||||
"hostdev_id" => &config.hostdev_id,
|
||||
"bdf" => &config.dev_config.bus_slot_func,
|
||||
);
|
||||
self.add_device(ctx, &config, device_index)?;
|
||||
self.add_device(ctx, config, device_index)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -438,7 +438,7 @@ impl VfioDeviceMgr {
|
||||
fn add_device(
|
||||
&mut self,
|
||||
ctx: &mut DeviceOpContext,
|
||||
cfg: &HostDeviceConfig,
|
||||
cfg: &mut HostDeviceConfig,
|
||||
idx: usize,
|
||||
) -> Result<()> {
|
||||
let dev = self.create_device(cfg, ctx, idx)?;
|
||||
@@ -450,8 +450,13 @@ impl VfioDeviceMgr {
|
||||
|
||||
self.register_memory(vm_memory.deref())?;
|
||||
}
|
||||
ctx.insert_hotplug_pci_device(&dev, None)
|
||||
.map_err(VfioDeviceError::VfioDeviceMgr)
|
||||
let slot = ctx
|
||||
.insert_hotplug_pci_device(&dev, None)
|
||||
.map_err(VfioDeviceError::VfioDeviceMgr)?;
|
||||
|
||||
cfg.dev_config.guest_dev_id = Some(slot);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets the index of the device with the specified `hostdev_id` if it exists in the list.
|
||||
|
||||
203
src/libs/Cargo.lock
generated
203
src/libs/Cargo.lock
generated
@@ -2,6 +2,21 @@
|
||||
# It is not intended for manual editing.
|
||||
version = 3
|
||||
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.22.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.7.7"
|
||||
@@ -48,7 +63,7 @@ checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -68,6 +83,21 @@ version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.73"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"miniz_oxide",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.13.1"
|
||||
@@ -87,7 +117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -122,7 +152,7 @@ dependencies = [
|
||||
"borsh-schema-derive-internal",
|
||||
"proc-macro-crate",
|
||||
"proc-macro2",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -133,7 +163,7 @@ checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -144,7 +174,7 @@ checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -183,7 +213,7 @@ checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -198,6 +228,12 @@ version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.99"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
@@ -328,7 +364,7 @@ dependencies = [
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -339,7 +375,7 @@ checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -350,7 +386,7 @@ checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -474,7 +510,7 @@ checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -518,6 +554,12 @@ dependencies = [
|
||||
"wasi 0.10.2+wasi-snapshot-preview1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.29.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
|
||||
|
||||
[[package]]
|
||||
name = "glob"
|
||||
version = "0.3.0"
|
||||
@@ -613,7 +655,7 @@ dependencies = [
|
||||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"socket2 0.4.7",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
@@ -811,26 +853,23 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.8.2"
|
||||
name = "miniz_oxide"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9"
|
||||
checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
"miow",
|
||||
"ntapi 0.3.7",
|
||||
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||
"winapi",
|
||||
"adler",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miow"
|
||||
version = "0.3.7"
|
||||
name = "mio"
|
||||
version = "0.8.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21"
|
||||
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
"libc",
|
||||
"wasi 0.11.0+wasi-snapshot-preview1",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -876,15 +915,6 @@ dependencies = [
|
||||
"pin-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ntapi"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ntapi"
|
||||
version = "0.4.1"
|
||||
@@ -932,6 +962,15 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "oci"
|
||||
version = "0.1.0"
|
||||
@@ -1000,14 +1039,14 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.8"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c"
|
||||
checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
@@ -1032,11 +1071,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.37"
|
||||
version = "1.0.85"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1"
|
||||
checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23"
|
||||
dependencies = [
|
||||
"unicode-xid",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1077,7 +1116,7 @@ dependencies = [
|
||||
"itertools",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1186,14 +1225,14 @@ checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.18"
|
||||
version = "1.0.36"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
|
||||
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
@@ -1334,7 +1373,7 @@ checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1353,6 +1392,12 @@ dependencies = [
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
|
||||
|
||||
[[package]]
|
||||
name = "rustversion"
|
||||
version = "1.0.12"
|
||||
@@ -1402,7 +1447,7 @@ checksum = "6eb8ec7724e4e524b2492b510e66957fe1a2c76c26a6975ec80823f2439da685"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"serde-rename-rule",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1415,7 +1460,7 @@ dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"serde-attributes",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1432,7 +1477,7 @@ checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1465,7 +1510,7 @@ checksum = "b2acd6defeddb41eb60bb468f8825d0cfd0c2a76bc03bfd235b6a1dc4f6a1ad5"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1565,6 +1610,16 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "subprocess"
|
||||
version = "0.2.9"
|
||||
@@ -1586,6 +1641,17 @@ dependencies = [
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.66"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sysinfo"
|
||||
version = "0.30.12"
|
||||
@@ -1595,7 +1661,7 @@ dependencies = [
|
||||
"cfg-if",
|
||||
"core-foundation-sys",
|
||||
"libc",
|
||||
"ntapi 0.4.1",
|
||||
"ntapi",
|
||||
"once_cell",
|
||||
"rayon",
|
||||
"windows",
|
||||
@@ -1662,7 +1728,7 @@ checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1730,30 +1796,30 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.17.0"
|
||||
version = "1.38.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
|
||||
checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"bytes",
|
||||
"libc",
|
||||
"memchr",
|
||||
"mio",
|
||||
"num_cpus",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"socket2 0.5.7",
|
||||
"tokio-macros",
|
||||
"winapi",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "1.7.0"
|
||||
version = "2.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7"
|
||||
checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 2.0.66",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1828,7 +1894,7 @@ dependencies = [
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-vsock",
|
||||
"windows-sys",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1858,6 +1924,12 @@ dependencies = [
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-segmentation"
|
||||
version = "1.9.0"
|
||||
@@ -1941,7 +2013,7 @@ dependencies = [
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
@@ -1963,7 +2035,7 @@ checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"syn 1.0.91",
|
||||
"wasm-bindgen-backend",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
@@ -2035,6 +2107,15 @@ dependencies = [
|
||||
"windows-targets 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.52.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
|
||||
dependencies = [
|
||||
"windows-targets 0.52.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.48.5"
|
||||
|
||||
@@ -88,3 +88,13 @@ pub const DEFAULT_CH_PCI_BRIDGES: u32 = 2;
|
||||
pub const MAX_CH_PCI_BRIDGES: u32 = 5;
|
||||
pub const MAX_CH_VCPUS: u32 = 256;
|
||||
pub const MIN_CH_MEMORY_SIZE_MB: u32 = 64;
|
||||
|
||||
//Default configuration for firecracker
|
||||
pub const DEFAULT_FIRECRACKER_ENTROPY_SOURCE: &str = "/dev/urandom";
|
||||
pub const DEFAULT_FIRECRACKER_MEMORY_SIZE_MB: u32 = 128;
|
||||
pub const DEFAULT_FIRECRACKER_MEMORY_SLOTS: u32 = 128;
|
||||
pub const DEFAULT_FIRECRACKER_VCPUS: u32 = 1;
|
||||
pub const DEFAULT_FIRECRACKER_GUEST_KERNEL_IMAGE: &str = "vmlinux";
|
||||
pub const DEFAULT_FIRECRACKER_GUEST_KERNEL_PARAMS: &str = "";
|
||||
pub const MAX_FIRECRACKER_VCPUS: u32 = 32;
|
||||
pub const MIN_FIRECRACKER_MEMORY_SIZE_MB: u32 = 128;
|
||||
|
||||
116
src/libs/kata-types/src/config/hypervisor/firecracker.rs
Normal file
116
src/libs/kata-types/src/config/hypervisor/firecracker.rs
Normal file
@@ -0,0 +1,116 @@
|
||||
// Copyright (c) 2019-2021 Alibaba Cloud
|
||||
// Copyright (c) 2022-2023 Nubificus LTD
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::io::Result;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::{default, register_hypervisor_plugin};
|
||||
|
||||
use crate::config::default::MAX_FIRECRACKER_VCPUS;
|
||||
use crate::config::default::MIN_FIRECRACKER_MEMORY_SIZE_MB;
|
||||
|
||||
use crate::config::{ConfigPlugin, TomlConfig};
|
||||
use crate::{eother, validate_path};
|
||||
|
||||
/// Hypervisor name for firecracker, used to index `TomlConfig::hypervisor`.
|
||||
pub const HYPERVISOR_NAME_FIRECRACKER: &str = "firecracker";
|
||||
|
||||
/// Configuration information for firecracker.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct FirecrackerConfig {}
|
||||
|
||||
impl FirecrackerConfig {
|
||||
/// Create a new instance of `FirecrackerConfig`.
|
||||
pub fn new() -> Self {
|
||||
FirecrackerConfig {}
|
||||
}
|
||||
|
||||
/// Register the firecracker plugin.
|
||||
pub fn register(self) {
|
||||
let plugin = Arc::new(self);
|
||||
register_hypervisor_plugin(HYPERVISOR_NAME_FIRECRACKER, plugin);
|
||||
}
|
||||
}
|
||||
|
||||
impl ConfigPlugin for FirecrackerConfig {
|
||||
fn get_max_cpus(&self) -> u32 {
|
||||
MAX_FIRECRACKER_VCPUS
|
||||
}
|
||||
|
||||
fn get_min_memory(&self) -> u32 {
|
||||
MIN_FIRECRACKER_MEMORY_SIZE_MB
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
HYPERVISOR_NAME_FIRECRACKER
|
||||
}
|
||||
|
||||
/// Adjust the configuration information after loading from configuration file.
|
||||
fn adjust_config(&self, conf: &mut TomlConfig) -> Result<()> {
|
||||
if let Some(firecracker) = conf.hypervisor.get_mut(HYPERVISOR_NAME_FIRECRACKER) {
|
||||
if firecracker.boot_info.kernel.is_empty() {
|
||||
firecracker.boot_info.kernel =
|
||||
default::DEFAULT_FIRECRACKER_GUEST_KERNEL_IMAGE.to_string();
|
||||
}
|
||||
if firecracker.boot_info.kernel_params.is_empty() {
|
||||
firecracker.boot_info.kernel_params =
|
||||
default::DEFAULT_FIRECRACKER_GUEST_KERNEL_PARAMS.to_string();
|
||||
}
|
||||
if firecracker.machine_info.entropy_source.is_empty() {
|
||||
firecracker.machine_info.entropy_source =
|
||||
default::DEFAULT_FIRECRACKER_ENTROPY_SOURCE.to_string();
|
||||
}
|
||||
|
||||
if firecracker.memory_info.default_memory == 0 {
|
||||
firecracker.memory_info.default_memory =
|
||||
default::DEFAULT_FIRECRACKER_MEMORY_SIZE_MB;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate the configuration information.
|
||||
fn validate(&self, conf: &TomlConfig) -> Result<()> {
|
||||
if let Some(firecracker) = conf.hypervisor.get(HYPERVISOR_NAME_FIRECRACKER) {
|
||||
if firecracker.path.is_empty() {
|
||||
return Err(eother!("Firecracker path is empty"));
|
||||
}
|
||||
validate_path!(
|
||||
firecracker.path,
|
||||
"FIRECRACKER binary path `{}` is invalid: {}"
|
||||
)?;
|
||||
if firecracker.boot_info.kernel.is_empty() {
|
||||
return Err(eother!("Guest kernel image for firecracker is empty"));
|
||||
}
|
||||
if firecracker.boot_info.image.is_empty() {
|
||||
return Err(eother!(
|
||||
"Both guest boot image and initrd for firecracker are empty"
|
||||
));
|
||||
}
|
||||
|
||||
if (firecracker.cpu_info.default_vcpus > 0
|
||||
&& firecracker.cpu_info.default_vcpus as u32 > default::MAX_FIRECRACKER_VCPUS)
|
||||
|| firecracker.cpu_info.default_maxvcpus > default::MAX_FIRECRACKER_VCPUS
|
||||
{
|
||||
return Err(eother!(
|
||||
"Firecracker hypervisor can not support {} vCPUs",
|
||||
firecracker.cpu_info.default_maxvcpus
|
||||
));
|
||||
}
|
||||
|
||||
if firecracker.memory_info.default_memory < MIN_FIRECRACKER_MEMORY_SIZE_MB {
|
||||
return Err(eother!(
|
||||
"Firecracker hypervisor has minimal memory limitation {}",
|
||||
MIN_FIRECRACKER_MEMORY_SIZE_MB
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -59,6 +59,9 @@ pub const VIRTIO_SCSI: &str = "virtio-scsi";
|
||||
/// Virtual PMEM device driver.
|
||||
pub const VIRTIO_PMEM: &str = "virtio-pmem";
|
||||
|
||||
mod firecracker;
|
||||
pub use self::firecracker::{FirecrackerConfig, HYPERVISOR_NAME_FIRECRACKER};
|
||||
|
||||
const VIRTIO_9P: &str = "virtio-9p";
|
||||
const VIRTIO_FS: &str = "virtio-fs";
|
||||
const VIRTIO_FS_INLINE: &str = "inline-virtio-fs";
|
||||
@@ -530,6 +533,7 @@ impl TopologyConfigInfo {
|
||||
HYPERVISOR_NAME_QEMU,
|
||||
HYPERVISOR_NAME_CH,
|
||||
HYPERVISOR_NAME_DRAGONBALL,
|
||||
HYPERVISOR_NAME_FIRECRACKER,
|
||||
];
|
||||
let hypervisor_name = toml_config.runtime.hypervisor_name.as_str();
|
||||
if !hypervisor_names.contains(&hypervisor_name) {
|
||||
|
||||
@@ -25,8 +25,8 @@ pub mod hypervisor;
|
||||
pub use self::agent::Agent;
|
||||
use self::default::DEFAULT_AGENT_DBG_CONSOLE_PORT;
|
||||
pub use self::hypervisor::{
|
||||
BootInfo, CloudHypervisorConfig, DragonballConfig, Hypervisor, QemuConfig,
|
||||
HYPERVISOR_NAME_DRAGONBALL, HYPERVISOR_NAME_QEMU,
|
||||
BootInfo, CloudHypervisorConfig, DragonballConfig, FirecrackerConfig, Hypervisor, QemuConfig,
|
||||
HYPERVISOR_NAME_DRAGONBALL, HYPERVISOR_NAME_FIRECRACKER, HYPERVISOR_NAME_QEMU,
|
||||
};
|
||||
|
||||
mod runtime;
|
||||
|
||||
@@ -130,7 +130,11 @@ fn count_files<P: AsRef<Path>>(path: P, limit: i32) -> std::io::Result<i32> {
|
||||
let file = entry?;
|
||||
let p = file.path();
|
||||
if p.is_dir() {
|
||||
num_files += count_files(&p, limit)?;
|
||||
let inc = count_files(&p, limit - num_files)?;
|
||||
if inc == -1 {
|
||||
return Ok(-1);
|
||||
}
|
||||
num_files += inc;
|
||||
} else {
|
||||
num_files += 1;
|
||||
}
|
||||
@@ -165,6 +169,40 @@ mod tests {
|
||||
use std::fs;
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
#[test]
|
||||
fn test_count_files() {
|
||||
let limit = 8;
|
||||
let test_tmp_dir = tempfile::tempdir().expect("failed to create tempdir");
|
||||
let work_path = test_tmp_dir.path().join("work");
|
||||
|
||||
let result = fs::create_dir_all(&work_path);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let origin_dir = work_path.join("origin_dir");
|
||||
let result = fs::create_dir_all(&origin_dir);
|
||||
assert!(result.is_ok());
|
||||
for n in 0..limit {
|
||||
let tmp_file = origin_dir.join(format!("file{}", n));
|
||||
let res = fs::File::create(tmp_file);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
let symlink_origin_dir = work_path.join("symlink_origin_dir");
|
||||
let result = std::os::unix::fs::symlink(&origin_dir, &symlink_origin_dir);
|
||||
assert!(result.is_ok());
|
||||
for n in 0..2 {
|
||||
let tmp_file = work_path.join(format!("file{}", n));
|
||||
let res = fs::File::create(tmp_file);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
let count = count_files(&work_path, limit).unwrap_or(0);
|
||||
assert_eq!(count, -1);
|
||||
|
||||
let count = count_files(&origin_dir, limit).unwrap_or(0);
|
||||
assert_eq!(count, limit);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_watchable_mount() {
|
||||
skip_if_not_root!();
|
||||
|
||||
@@ -198,13 +198,34 @@ fn real_main() -> Result<(), std::io::Error> {
|
||||
// generate async
|
||||
#[cfg(feature = "async")]
|
||||
{
|
||||
codegen("src", &["protos/agent.proto", "protos/health.proto"], true)?;
|
||||
|
||||
codegen(
|
||||
"src",
|
||||
&[
|
||||
"protos/agent.proto",
|
||||
"protos/health.proto",
|
||||
"protos/sealed_secret.proto",
|
||||
],
|
||||
true,
|
||||
)?;
|
||||
|
||||
fs::rename("src/agent_ttrpc.rs", "src/agent_ttrpc_async.rs")?;
|
||||
fs::rename("src/health_ttrpc.rs", "src/health_ttrpc_async.rs")?;
|
||||
fs::rename(
|
||||
"src/sealed_secret_ttrpc.rs",
|
||||
"src/sealed_secret_ttrpc_async.rs",
|
||||
)?;
|
||||
}
|
||||
|
||||
codegen("src", &["protos/agent.proto", "protos/health.proto"], false)?;
|
||||
codegen(
|
||||
"src",
|
||||
&[
|
||||
"protos/agent.proto",
|
||||
"protos/health.proto",
|
||||
"protos/sealed_secret.proto",
|
||||
],
|
||||
false,
|
||||
)?;
|
||||
|
||||
// There is a message named 'Box' in oci.proto
|
||||
// so there is a struct named 'Box', we should replace Box<Self> to ::std::boxed::Box<Self>
|
||||
|
||||
21
src/libs/protocols/protos/sealed_secret.proto
Normal file
21
src/libs/protocols/protos/sealed_secret.proto
Normal file
@@ -0,0 +1,21 @@
|
||||
//
|
||||
// Copyright (c) 2024 IBM
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package api;
|
||||
|
||||
message UnsealSecretInput {
|
||||
bytes secret = 1;
|
||||
}
|
||||
|
||||
message UnsealSecretOutput {
|
||||
bytes plaintext = 1;
|
||||
}
|
||||
|
||||
service SealedSecretService {
|
||||
rpc UnsealSecret(UnsealSecretInput) returns (UnsealSecretOutput) {};
|
||||
}
|
||||
@@ -27,3 +27,9 @@ pub use serde_config::{
|
||||
deserialize_enum_or_unknown, deserialize_message_field, serialize_enum_or_unknown,
|
||||
serialize_message_field,
|
||||
};
|
||||
|
||||
pub mod sealed_secret;
|
||||
pub mod sealed_secret_ttrpc;
|
||||
|
||||
#[cfg(feature = "async")]
|
||||
pub mod sealed_secret_ttrpc_async;
|
||||
|
||||
@@ -14,12 +14,12 @@ edition = "2018"
|
||||
[dependencies]
|
||||
anyhow = "^1.0"
|
||||
nix = "0.24.0"
|
||||
tokio = { version = "1.8.0", features = ["rt-multi-thread"] }
|
||||
tokio = { version = "1.38.0", features = ["rt-multi-thread"] }
|
||||
hyper = { version = "0.14.20", features = ["stream", "server", "http1"] }
|
||||
hyperlocal = "0.8"
|
||||
kata-types = { path = "../kata-types" }
|
||||
kata-sys-util = {path = "../kata-sys-util" }
|
||||
kata-sys-util = { path = "../kata-sys-util" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.2.0"
|
||||
test-utils = {path = "../test-utils"}
|
||||
test-utils = { path = "../test-utils" }
|
||||
|
||||
@@ -37,6 +37,9 @@ fn get_uds_with_sid(short_id: &str, path: &str) -> Result<String> {
|
||||
return Ok(format!("unix://{}", p.display()));
|
||||
}
|
||||
|
||||
let _ = fs::create_dir_all(kata_run_path.join(short_id))
|
||||
.context(format!("failed to create directory {:?}", kata_run_path.join(short_id)));
|
||||
|
||||
let target_ids: Vec<String> = fs::read_dir(&kata_run_path)?
|
||||
.filter_map(|e| {
|
||||
let x = e.ok()?.file_name().to_string_lossy().into_owned();
|
||||
|
||||
97
src/runtime-rs/Cargo.lock
generated
97
src/runtime-rs/Cargo.lock
generated
@@ -185,7 +185,7 @@ dependencies = [
|
||||
"polling",
|
||||
"rustix 0.37.23",
|
||||
"slab",
|
||||
"socket2",
|
||||
"socket2 0.4.9",
|
||||
"waker-fn",
|
||||
]
|
||||
|
||||
@@ -1589,7 +1589,7 @@ dependencies = [
|
||||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"socket2 0.4.9",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
@@ -1635,6 +1635,8 @@ dependencies = [
|
||||
"dragonball",
|
||||
"futures 0.3.28",
|
||||
"go-flag",
|
||||
"hyper",
|
||||
"hyperlocal",
|
||||
"hypervisor",
|
||||
"kata-sys-util",
|
||||
"kata-types",
|
||||
@@ -1644,6 +1646,9 @@ dependencies = [
|
||||
"nix 0.24.3",
|
||||
"path-clean",
|
||||
"persist",
|
||||
"qapi",
|
||||
"qapi-qmp",
|
||||
"qapi-spec",
|
||||
"rand 0.8.5",
|
||||
"rust-ini",
|
||||
"safe-path 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
@@ -2034,9 +2039,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.8.8"
|
||||
version = "0.8.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
|
||||
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
@@ -2699,9 +2704,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.10"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57"
|
||||
checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
@@ -2959,6 +2964,65 @@ dependencies = [
|
||||
"ttrpc-codegen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qapi"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c6412bdd014ebee03ddbbe79ac03a0b622cce4d80ba45254f6357c847f06fa38"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures 0.3.28",
|
||||
"log",
|
||||
"memchr",
|
||||
"qapi-qmp",
|
||||
"qapi-spec",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qapi-codegen"
|
||||
version = "0.11.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ba4de731473de4c8bd508ddb38a9049e999b8a7429f3c052ba8735a178ff68c"
|
||||
dependencies = [
|
||||
"qapi-parser",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qapi-parser"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "80044db145aa2953ef5803d0376dcbca50f2763242547e856b7f37507adca677"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qapi-qmp"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8b944db7e544d2fa97595e9a000a6ba5c62c426fa185e7e00aabe4b5640b538"
|
||||
dependencies = [
|
||||
"qapi-codegen",
|
||||
"qapi-spec",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qapi-spec"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b360919a24ea5fc02fa762cb01bd8f43b643fee51c585f763257773b4dc5a9e8"
|
||||
dependencies = [
|
||||
"base64 0.13.1",
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.35"
|
||||
@@ -3825,6 +3889,16 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "static_assertions"
|
||||
version = "1.1.0"
|
||||
@@ -4099,11 +4173,10 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.29.1"
|
||||
version = "1.38.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da"
|
||||
checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"backtrace",
|
||||
"bytes",
|
||||
"libc",
|
||||
@@ -4112,16 +4185,16 @@ dependencies = [
|
||||
"parking_lot 0.12.1",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2",
|
||||
"socket2 0.5.7",
|
||||
"tokio-macros",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.1.0"
|
||||
version = "2.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e"
|
||||
checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
||||
@@ -109,6 +109,12 @@ ROOTFSTYPE_XFS := \"xfs\"
|
||||
ROOTFSTYPE_EROFS := \"erofs\"
|
||||
DEFROOTFSTYPE := $(ROOTFSTYPE_EXT4)
|
||||
|
||||
FCBINDIR := $(PREFIXDEPS)/bin
|
||||
FCPATH = $(FCBINDIR)/$(FCCMD)
|
||||
FCVALIDHYPERVISORPATHS := [\"$(FCPATH)\"]
|
||||
FCJAILERPATH = $(FCBINDIR)/$(FCJAILERCMD)
|
||||
FCVALIDJAILERPATHS = [\"$(FCJAILERPATH)\"]
|
||||
|
||||
PKGLIBEXECDIR := $(LIBEXECDIR)/$(PROJECT_DIR)
|
||||
FIRMWAREPATH :=
|
||||
FIRMWAREVOLUMEPATH :=
|
||||
@@ -164,8 +170,11 @@ DEFMSIZE9P := 8192
|
||||
DEFVFIOMODE := guest-kernel
|
||||
##VAR DEFSANDBOXCGROUPONLY=<bool> Default cgroup model
|
||||
DEFSANDBOXCGROUPONLY ?= false
|
||||
DEFSANDBOXCGROUPONLY_DB ?= true
|
||||
DEFSANDBOXCGROUPONLY_FC ?= true
|
||||
DEFSTATICRESOURCEMGMT ?= false
|
||||
DEFSTATICRESOURCEMGMT_DB ?= false
|
||||
DEFSTATICRESOURCEMGMT_FC ?= true
|
||||
DEFBINDMOUNTS := []
|
||||
DEFDANCONF := /run/kata-containers/dans
|
||||
SED = sed
|
||||
@@ -216,7 +225,7 @@ ifneq (,$(DBCMD))
|
||||
KERNELTYPE_DB = uncompressed
|
||||
KERNEL_NAME_DB = $(call MAKE_KERNEL_NAME_DB,$(KERNELTYPE_DB))
|
||||
KERNELPATH_DB = $(KERNELDIR)/$(KERNEL_NAME_DB)
|
||||
DEFSANDBOXCGROUPONLY = true
|
||||
DEFSANDBOXCGROUPONLY_DB = true
|
||||
RUNTIMENAME := virt_container
|
||||
PIPESIZE := 1
|
||||
DBSHAREDFS := inline-virtio-fs
|
||||
@@ -244,6 +253,9 @@ ifneq (,$(CLHCMD))
|
||||
KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH))
|
||||
KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
|
||||
VMROOTFSDRIVER_CLH := virtio-pmem
|
||||
|
||||
DEFSTATICRESOURCEMGMT = true
|
||||
DEFSANDBOXCGROUPONLY = true
|
||||
endif
|
||||
|
||||
ifneq (,$(QEMUCMD))
|
||||
@@ -288,6 +300,28 @@ endif
|
||||
DEFSECCOMPSANDBOXPARAM := on,obsolete=deny,spawn=deny,resourcecontrol=deny
|
||||
DEFGUESTSELINUXLABEL := system_u:system_r:container_t
|
||||
endif
|
||||
ifneq (,$(FCCMD))
|
||||
KNOWN_HYPERVISORS += $(HYPERVISOR_FC)
|
||||
CONFIG_FILE_FC = configuration-rs-fc.toml
|
||||
CONFIG_FC = config/$(CONFIG_FILE_FC)
|
||||
CONFIG_FC_IN = $(CONFIG_FC).in
|
||||
CONFIG_PATH_FC = $(abspath $(CONFDIR)/$(CONFIG_FILE_FC))
|
||||
CONFIG_PATHS += $(CONFIG_PATH_FC)
|
||||
SYSCONFIG_FC = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_FC))
|
||||
SYSCONFIG_PATHS += $(SYSCONFIG_FC)
|
||||
CONFIGS += $(CONFIG_FC)
|
||||
# firecracker-specific options (all should be suffixed by "_FC")
|
||||
DEFBLOCKSTORAGEDRIVER_FC := virtio-blk-mmio
|
||||
DEFMAXMEMSZ_FC := 2048
|
||||
DEFNETWORKMODEL_FC := tcfilter
|
||||
KERNELPARAMS = console=ttyS0 agent.log_vport=1025
|
||||
KERNELTYPE_FC = uncompressed
|
||||
KERNEL_NAME_FC = $(call MAKE_KERNEL_NAME_FC,$(KERNELTYPE_FC))
|
||||
KERNELPATH_FC = $(KERNELDIR)/$(KERNEL_NAME_FC)
|
||||
DEFSANDBOXCGROUPONLY_FC = true
|
||||
RUNTIMENAME := virt_container
|
||||
DEFSTATICRESOURCEMGMT_FC ?= true
|
||||
endif
|
||||
|
||||
ifeq ($(DEFAULT_HYPERVISOR),$(HYPERVISOR_DB))
|
||||
DEFAULT_HYPERVISOR_CONFIG = $(CONFIG_FILE_DB)
|
||||
@@ -296,16 +330,21 @@ endif
|
||||
ifeq ($(DEFAULT_HYPERVISOR),$(HYPERVISOR_QEMU))
|
||||
DEFAULT_HYPERVISOR_CONFIG = $(CONFIG_FILE_QEMU)
|
||||
endif
|
||||
ifeq ($(DEFAULT_HYPERVISOR),$(HYPERVISOR_FC))
|
||||
DEFAULT_HYPERVISOR_CONFIG = $(CONFIG_FILE_FC)
|
||||
endif
|
||||
# list of variables the user may wish to override
|
||||
USER_VARS += ARCH
|
||||
USER_VARS += BINDIR
|
||||
USER_VARS += CONFIG_DB_IN
|
||||
USER_VARS += CONFIG_FC_IN
|
||||
USER_VARS += CONFIG_PATH
|
||||
USER_VARS += CONFIG_QEMU_IN
|
||||
USER_VARS += DESTDIR
|
||||
USER_VARS += DEFAULT_HYPERVISOR
|
||||
USER_VARS += DBCMD
|
||||
USER_VARS += DBCTLCMD
|
||||
USER_VARS += FCCTLCMD
|
||||
USER_VARS += DBPATH
|
||||
USER_VARS += DBVALIDHYPERVISORPATHS
|
||||
USER_VARS += DBCTLPATH
|
||||
@@ -316,6 +355,13 @@ USER_VARS += QEMUPATH
|
||||
USER_VARS += QEMUVALIDHYPERVISORPATHS
|
||||
USER_VARS += FIRMWAREPATH_CLH
|
||||
USER_VARS += KERNELPATH_CLH
|
||||
USER_VARS += FCCMD
|
||||
USER_VARS += FCPATH
|
||||
USER_VARS += FCVALIDHYPERVISORPATHS
|
||||
USER_VARS += FCJAILERPATH
|
||||
USER_VARS += FCVALIDJAILERPATHS
|
||||
USER_VARS += FCVALIDJAILERPATHS
|
||||
USER_VARS += DEFMAXMEMSZ_FC
|
||||
USER_VARS += SYSCONFIG
|
||||
USER_VARS += IMAGENAME
|
||||
USER_VARS += IMAGEPATH
|
||||
@@ -329,6 +375,8 @@ USER_VARS += KERNELDIR
|
||||
USER_VARS += KERNELTYPE
|
||||
USER_VARS += KERNELPATH_DB
|
||||
USER_VARS += KERNELPATH_QEMU
|
||||
USER_VARS += KERNELPATH_FC
|
||||
USER_VARS += KERNELPATH
|
||||
USER_VARS += KERNELVIRTIOFSPATH
|
||||
USER_VARS += FIRMWAREPATH
|
||||
USER_VARS += FIRMWAREVOLUMEPATH
|
||||
@@ -365,6 +413,7 @@ USER_VARS += DEFBRIDGES
|
||||
USER_VARS += DEFNETWORKMODEL_DB
|
||||
USER_VARS += DEFNETWORKMODEL_CLH
|
||||
USER_VARS += DEFNETWORKMODEL_QEMU
|
||||
USER_VARS += DEFNETWORKMODEL_FC
|
||||
USER_VARS += DEFDISABLEGUESTEMPTYDIR
|
||||
USER_VARS += DEFDISABLEGUESTSECCOMP
|
||||
USER_VARS += DEFDISABLESELINUX
|
||||
@@ -374,6 +423,7 @@ USER_VARS += DEFDISABLEBLOCK
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_DB
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU
|
||||
USER_VARS += DEFBLOCKDEVICEAIO_QEMU
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_FC
|
||||
USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS
|
||||
USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS
|
||||
USER_VARS += DEFVIRTIOFSDAEMON
|
||||
@@ -396,8 +446,11 @@ USER_VARS += DEFENTROPYSOURCE
|
||||
USER_VARS += DEFVALIDENTROPYSOURCES
|
||||
USER_VARS += DEFSANDBOXCGROUPONLY
|
||||
USER_VARS += DEFSANDBOXCGROUPONLY_QEMU
|
||||
USER_VARS += DEFSANDBOXCGROUPONLY_DB
|
||||
USER_VARS += DEFSANDBOXCGROUPONLY_FC
|
||||
USER_VARS += DEFSTATICRESOURCEMGMT
|
||||
USER_VARS += DEFSTATICRESOURCEMGMT_DB
|
||||
USER_VARS += DEFSTATICRESOURCEMGMT_FC
|
||||
USER_VARS += DEFBINDMOUNTS
|
||||
USER_VARS += DEFVFIOMODE
|
||||
USER_VARS += BUILDFLAGS
|
||||
@@ -405,6 +458,7 @@ USER_VARS += RUNTIMENAME
|
||||
USER_VARS += HYPERVISOR_DB
|
||||
USER_VARS += HYPERVISOR_CLH
|
||||
USER_VARS += HYPERVISOR_QEMU
|
||||
USER_VARS += HYPERVISOR_FC
|
||||
USER_VARS += PIPESIZE
|
||||
USER_VARS += DBSHAREDFS
|
||||
USER_VARS += KATA_INSTALL_GROUP
|
||||
@@ -417,7 +471,7 @@ SOURCES := \
|
||||
Cargo.toml
|
||||
|
||||
VERSION_FILE := ./VERSION
|
||||
VERSION := $(shell grep -v ^\# $(VERSION_FILE))
|
||||
VERSION := $(shell grep -v ^\# $(VERSION_FILE) 2>/dev/null || echo "unknown")
|
||||
COMMIT_NO := $(shell git rev-parse HEAD 2>/dev/null || true)
|
||||
COMMIT := $(if $(shell git status --porcelain --untracked-files=no 2>/dev/null || true),${COMMIT_NO}-dirty,${COMMIT_NO})
|
||||
COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)
|
||||
@@ -442,6 +496,7 @@ RUNTIME_VERSION=$(VERSION)
|
||||
GENERATED_VARS = \
|
||||
VERSION \
|
||||
CONFIG_DB_IN \
|
||||
CONFIG_FC_IN \
|
||||
$(USER_VARS)
|
||||
|
||||
|
||||
@@ -483,6 +538,9 @@ endef
|
||||
define MAKE_KERNEL_NAME_DB
|
||||
$(if $(findstring uncompressed,$1),vmlinux-dragonball-experimental.container,vmlinuz-dragonball-experimental.container)
|
||||
endef
|
||||
define MAKE_KERNEL_NAME_FC
|
||||
$(if $(findstring uncompressed,$1),vmlinux.container,vmlinuz.container)
|
||||
endef
|
||||
|
||||
# Returns the name of the kernel file to use based on the provided KERNELTYPE.
|
||||
# # $1 : KERNELTYPE (compressed or uncompressed)
|
||||
|
||||
@@ -13,3 +13,5 @@ QEMUCMD := qemu-system-aarch64
|
||||
|
||||
# dragonball binary name
|
||||
DBCMD := dragonball
|
||||
FCCMD := firecracker
|
||||
FCJAILERCMD := jailer
|
||||
|
||||
@@ -16,3 +16,7 @@ DBCMD := dragonball
|
||||
|
||||
# cloud-hypervisor binary name
|
||||
CLHCMD := cloud-hypervisor
|
||||
|
||||
# firecracker binary (vmm and jailer)
|
||||
FCCMD := firecracker
|
||||
FCJAILERCMD := jailer
|
||||
|
||||
@@ -341,7 +341,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_DB@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
|
||||
373
src/runtime-rs/config/configuration-rs-fc.toml.in
Normal file
373
src/runtime-rs/config/configuration-rs-fc.toml.in
Normal file
@@ -0,0 +1,373 @@
|
||||
# Copyright (c) 2017-2023 Intel Corporation
|
||||
# Copyright (c) Adobe Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# XXX: WARNING: this file is auto-generated.
|
||||
# XXX:
|
||||
# XXX: Source file: "@CONFIG_FC_IN@"
|
||||
# XXX: Project:
|
||||
# XXX: Name: @PROJECT_NAME@
|
||||
# XXX: Type: @PROJECT_TYPE@
|
||||
|
||||
[hypervisor.firecracker]
|
||||
path = "@FCPATH@"
|
||||
kernel = "@KERNELPATH_FC@"
|
||||
image = "@IMAGEPATH@"
|
||||
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
|
||||
enable_annotations = @DEFENABLEANNOTATIONS@
|
||||
|
||||
# List of valid annotations values for the hypervisor
|
||||
# Each member of the list is a path pattern as described by glob(3).
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @FCVALIDHYPERVISORPATHS@
|
||||
valid_hypervisor_paths = @FCVALIDHYPERVISORPATHS@
|
||||
|
||||
# Path for the jailer specific to firecracker
|
||||
# If the jailer path is not set kata will launch firecracker
|
||||
# without a jail. If the jailer is set firecracker will be
|
||||
# launched in a jailed enviornment created by the jailer
|
||||
#jailer_path = "@FCJAILERPATH@"
|
||||
|
||||
# List of valid jailer path values for the hypervisor
|
||||
# Each member of the list can be a regular expression
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @FCVALIDJAILERPATHS@
|
||||
valid_jailer_paths = @FCVALIDJAILERPATHS@
|
||||
|
||||
|
||||
# Optional space-separated list of options to pass to the guest kernel.
|
||||
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
|
||||
# trouble running pre-2.15 glibc.
|
||||
#
|
||||
# WARNING: - any parameter specified here will take priority over the default
|
||||
# parameter value of the same name used to start the virtual machine.
|
||||
# Do not set values here unless you understand the impact of doing so as you
|
||||
# may stop the virtual machine from booting.
|
||||
# To see the list of default parameters, enable hypervisor debug, create a
|
||||
# container and look for 'default-kernel-parameters' log entries.
|
||||
kernel_params = "@KERNELPARAMS@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
# < 0 --> will be set to the actual number of physical cores
|
||||
# > 0 <= number of physical cores --> will be set to the specified number
|
||||
# > number of physical cores --> will be set to the actual number of physical cores
|
||||
default_vcpus = 1
|
||||
|
||||
# Default maximum number of vCPUs per SB/VM:
|
||||
# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
|
||||
# of vCPUs supported by KVM if that number is exceeded
|
||||
# > 0 <= number of physical cores --> will be set to the specified number
|
||||
# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number
|
||||
# of vCPUs supported by KVM if that number is exceeded
|
||||
# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when
|
||||
# the actual number of physical cores is greater than it.
|
||||
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
|
||||
# the hotplug functionality. For example, `default_maxvcpus = 240` specifies that until 240 vCPUs
|
||||
# can be added to a SB/VM, but the memory footprint will be big. Another example, with
|
||||
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
|
||||
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
|
||||
# unless you know what are you doing.
|
||||
# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
|
||||
default_maxvcpus = @DEFMAXVCPUS@
|
||||
|
||||
# Bridges can be used to hot plug devices.
|
||||
# Limitations:
|
||||
# * Currently only pci bridges are supported
|
||||
# * Until 30 devices per bridge can be hot plugged.
|
||||
# * Until 5 PCI bridges can be cold plugged per VM.
|
||||
# This limitation could be a bug in the kernel
|
||||
# Default number of bridges per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFBRIDGES@
|
||||
# > 1 <= 5 --> will be set to the specified number
|
||||
# > 5 --> will be set to 5
|
||||
default_bridges = @DEFBRIDGES@
|
||||
|
||||
# Default memory size in MiB for SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSZ@ MiB.
|
||||
default_memory = @DEFMEMSZ@
|
||||
|
||||
#
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This is will determine the times that memory will be hotadded to sandbox/VM.
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# The size in MiB will be plused to max memory of hypervisor.
|
||||
# It is the memory address space for the NVDIMM devie.
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
# > 0 <= amount of physical RAM --> will be set to the specified number
|
||||
# > amount of physical RAM --> will be set to the actual amount of physical RAM
|
||||
default_maxmemory = @DEFMAXMEMSZ_FC@
|
||||
|
||||
# Block storage driver to be used for the hypervisor in case the container
|
||||
# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
|
||||
# or nvdimm.
|
||||
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
|
||||
# Enable pre allocation of VM RAM, default false
|
||||
# Enabling this will result in lower container density
|
||||
# as all of the memory will be allocated and locked
|
||||
# This is useful when you want to reserve all the memory
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
# being allocated using huge pages.
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
|
||||
# Enable vIOMMU, default false
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# VFIO devices are hotplugged on a bridge by default.
|
||||
# Enable hotplugging on root bus. This may be required for devices with
|
||||
# a large PCI bar, as this is a current limitation with hotplugging on
|
||||
# a bridge.
|
||||
# Default false
|
||||
#hotplug_vfio_on_root_bus = true
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
# The path to a host source of entropy (including a real hardware RNG)
|
||||
# /dev/urandom and /dev/random are two main options.
|
||||
# Be aware that /dev/random is a blocking source of entropy. If the host
|
||||
# runs out of entropy, the VMs boot time will increase leading to get startup
|
||||
# timeouts.
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @DEFVALIDENTROPYSOURCES@
|
||||
valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
|
||||
# Path to OCI hook binaries in the *guest rootfs*.
|
||||
# This does not affect host-side hooks which must instead be added to
|
||||
# the OCI spec passed to the runtime.
|
||||
#
|
||||
# You can create a rootfs with hooks by customizing the osbuilder scripts:
|
||||
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
|
||||
#
|
||||
# Hooks must be stored in a subdirectory of guest_hook_path according to their
|
||||
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
|
||||
# The agent will scan these directories for executable files and add them, in
|
||||
# lexicographical order, to the lifecycle of the guest container.
|
||||
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered will scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
#
|
||||
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Firecracker, it provides a built-in rate limiter, which is based on TBF(Token Bucket Filter)
|
||||
# queueing discipline.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#rx_rate_limiter_max_rate = 0
|
||||
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Firecracker, it provides a built-in rate limiter, which is based on TBF(Token Bucket Filter)
|
||||
# queueing discipline.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#tx_rate_limiter_max_rate = 0
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
|
||||
[factory]
|
||||
# VM templating support. Once enabled, new VMs are created from template
|
||||
# using vm cloning. They will share the same initial kernel, initramfs and
|
||||
# agent memory by mapping it readonly. It helps speeding up new container
|
||||
# creation and saves a lot of memory if there are many kata containers running
|
||||
# on the same host.
|
||||
#
|
||||
# When disabled, new VMs are created from scratch.
|
||||
#
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
# If enabled, the agent will generate OpenTelemetry trace spans.
|
||||
#
|
||||
# Notes:
|
||||
#
|
||||
# - If the runtime also has tracing enabled, the agent spans will be
|
||||
# associated with the appropriate runtime parent span.
|
||||
# - If enabled, the runtime will wait for the container to shutdown,
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
# The following example can be used to load two kernel modules with parameters
|
||||
# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
|
||||
# The first word is considered as the module name and the rest as its parameters.
|
||||
# Container will not be started when:
|
||||
# * A kernel module is specified and the modprobe command is not installed in the guest
|
||||
# or it fails loading the module.
|
||||
# * The module is not available in the guest or it doesn't met the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 45)
|
||||
dial_timeout = 45
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
# the container network interface
|
||||
# Options:
|
||||
#
|
||||
# - macvtap
|
||||
# Used when the Container network interface can be bridged using
|
||||
# macvtap.
|
||||
#
|
||||
# - none
|
||||
# Used when customize network. Only creates a tap device. No veth pair.
|
||||
#
|
||||
# - tcfilter
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_FC@"
|
||||
|
||||
name="@RUNTIMENAME@"
|
||||
hypervisor_name="@HYPERVISOR_FC@"
|
||||
agent_name="@PROJECT_TYPE@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_FC@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
# when a hardware architecture or hypervisor solutions is utilized which does not support CPU and/or memory hotplug.
|
||||
# Compatibility for determining appropriate sandbox (VM) size:
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_FC@
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
@@ -18,15 +18,15 @@ serde_json = ">=1.0.9"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
ttrpc = "0.8"
|
||||
tokio = { version = "1.28.1", features = ["fs", "rt"] }
|
||||
tokio = { version = "1.38.0", features = ["fs", "rt"] }
|
||||
tracing = "0.1.36"
|
||||
url = "2.2.2"
|
||||
nix = "0.24.2"
|
||||
|
||||
kata-types = { path = "../../../libs/kata-types"}
|
||||
logging = { path = "../../../libs/logging"}
|
||||
kata-types = { path = "../../../libs/kata-types" }
|
||||
logging = { path = "../../../libs/logging" }
|
||||
oci = { path = "../../../libs/oci" }
|
||||
protocols = { path = "../../../libs/protocols", features=["async"] }
|
||||
protocols = { path = "../../../libs/protocols", features = ["async"] }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
@@ -22,7 +22,7 @@ serde_json = ">=1.0.9"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1.28.1", features = ["sync", "fs", "process", "io-util"] }
|
||||
tokio = { version = "1.38.0", features = ["sync", "fs", "process", "io-util"] }
|
||||
vmm-sys-util = "0.11.0"
|
||||
rand = "0.8.4"
|
||||
path-clean = "1.0.1"
|
||||
@@ -43,8 +43,15 @@ safe-path = "0.1.0"
|
||||
crossbeam-channel = "0.5.6"
|
||||
tempdir = "0.3.7"
|
||||
|
||||
qapi = { version = "0.14", features = [ "qmp", "async-tokio-all" ] }
|
||||
qapi-spec = "0.3.1"
|
||||
qapi-qmp = "0.14.0"
|
||||
|
||||
[target.'cfg(not(target_arch = "s390x"))'.dependencies]
|
||||
dragonball = { path = "../../../dragonball", features = ["atomic-guest-memory", "virtio-vsock", "hotplug", "virtio-blk", "virtio-net", "virtio-fs", "vhost-net", "dbs-upcall", "virtio-mem", "virtio-balloon", "vhost-user-net", "host-device"] }
|
||||
dbs-utils = { path = "../../../dragonball/src/dbs_utils" }
|
||||
hyperlocal = "0.8.0"
|
||||
hyper = {version = "0.14.18", features = ["client"]}
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
@@ -13,7 +13,7 @@ edition = "2021"
|
||||
anyhow = "1.0.68"
|
||||
serde = { version = "1.0.145", features = ["rc", "derive"] }
|
||||
serde_json = "1.0.91"
|
||||
tokio = { version = "1.28.1", features = ["sync", "rt"] }
|
||||
tokio = { version = "1.38.0", features = ["sync", "rt"] }
|
||||
|
||||
# Cloud Hypervisor public HTTP API functions
|
||||
# Note that the version specified is not necessarily the version of CH
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
@@ -19,6 +20,7 @@ use dragonball::device_manager::{
|
||||
};
|
||||
|
||||
use super::{build_dragonball_network_config, DragonballInner};
|
||||
use crate::device::pci_path::PciPath;
|
||||
use crate::VhostUserConfig;
|
||||
use crate::{
|
||||
device::DeviceType, HybridVsockConfig, NetworkConfig, ShareFsConfig, ShareFsMountConfig,
|
||||
@@ -37,46 +39,64 @@ pub(crate) fn drive_index_to_id(index: u64) -> String {
|
||||
}
|
||||
|
||||
impl DragonballInner {
|
||||
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<()> {
|
||||
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<DeviceType> {
|
||||
if self.state == VmmState::NotReady {
|
||||
info!(sl!(), "VMM not ready, queueing device {}", device);
|
||||
|
||||
// add the pending device by reverse order, thus the
|
||||
// start_vm would pop the devices in an right order
|
||||
// to add the devices.
|
||||
self.pending_devices.insert(0, device);
|
||||
return Ok(());
|
||||
self.pending_devices.insert(0, device.clone());
|
||||
return Ok(device);
|
||||
}
|
||||
|
||||
info!(sl!(), "dragonball add device {:?}", &device);
|
||||
match device {
|
||||
DeviceType::Network(network) => self
|
||||
.add_net_device(&network.config)
|
||||
.context("add net device"),
|
||||
DeviceType::Vfio(hostdev) => self.add_vfio_device(&hostdev).context("add vfio device"),
|
||||
DeviceType::Block(block) => self
|
||||
.add_block_device(
|
||||
DeviceType::Network(network) => {
|
||||
self.add_net_device(&network.config)
|
||||
.context("add net device")?;
|
||||
Ok(DeviceType::Network(network))
|
||||
}
|
||||
DeviceType::Vfio(mut hostdev) => {
|
||||
self.add_vfio_device(&mut hostdev)
|
||||
.context("add vfio device")?;
|
||||
|
||||
Ok(DeviceType::Vfio(hostdev))
|
||||
}
|
||||
DeviceType::Block(block) => {
|
||||
self.add_block_device(
|
||||
block.config.path_on_host.as_str(),
|
||||
block.device_id.as_str(),
|
||||
block.config.is_readonly,
|
||||
block.config.no_drop,
|
||||
)
|
||||
.context("add block device"),
|
||||
DeviceType::VhostUserBlk(block) => self
|
||||
.add_block_device(
|
||||
.context("add block device")?;
|
||||
Ok(DeviceType::Block(block))
|
||||
}
|
||||
DeviceType::VhostUserBlk(block) => {
|
||||
self.add_block_device(
|
||||
block.config.socket_path.as_str(),
|
||||
block.device_id.as_str(),
|
||||
block.is_readonly,
|
||||
block.no_drop,
|
||||
)
|
||||
.context("add vhost user based block device"),
|
||||
DeviceType::HybridVsock(hvsock) => self.add_hvsock(&hvsock.config).context("add vsock"),
|
||||
DeviceType::ShareFs(sharefs) => self
|
||||
.add_share_fs_device(&sharefs.config)
|
||||
.context("add share fs device"),
|
||||
DeviceType::VhostUserNetwork(dev) => self
|
||||
.add_vhost_user_net_device(&dev.config)
|
||||
.context("add vhost-user-net device"),
|
||||
.context("add vhost user based block device")?;
|
||||
Ok(DeviceType::VhostUserBlk(block))
|
||||
}
|
||||
DeviceType::HybridVsock(hvsock) => {
|
||||
self.add_hvsock(&hvsock.config).context("add vsock")?;
|
||||
Ok(DeviceType::HybridVsock(hvsock))
|
||||
}
|
||||
DeviceType::ShareFs(sharefs) => {
|
||||
self.add_share_fs_device(&sharefs.config)
|
||||
.context("add share fs device")?;
|
||||
Ok(DeviceType::ShareFs(sharefs))
|
||||
}
|
||||
DeviceType::VhostUserNetwork(dev) => {
|
||||
self.add_vhost_user_net_device(&dev.config)
|
||||
.context("add vhost-user-net device")?;
|
||||
Ok(DeviceType::VhostUserNetwork(dev))
|
||||
}
|
||||
DeviceType::Vsock(_) => todo!(),
|
||||
}
|
||||
}
|
||||
@@ -121,56 +141,49 @@ impl DragonballInner {
|
||||
}
|
||||
}
|
||||
|
||||
fn add_vfio_device(&mut self, device: &VfioDevice) -> Result<()> {
|
||||
let vfio_device = device.clone();
|
||||
|
||||
fn add_vfio_device(&mut self, device: &mut VfioDevice) -> Result<()> {
|
||||
// FIXME:
|
||||
// A device with multi-funtions, or a IOMMU group with one more
|
||||
// devices, the Primary device is selected to be passed to VM.
|
||||
// And the the first one is Primary device.
|
||||
// safe here, devices is not empty.
|
||||
let primary_device = vfio_device.devices.first().unwrap().clone();
|
||||
|
||||
let vendor_device_id = if let Some(vd) = primary_device.device_vendor {
|
||||
let primary_device = device.devices.first_mut().unwrap();
|
||||
let vendor_device_id = if let Some(vd) = primary_device.device_vendor.as_ref() {
|
||||
vd.get_device_vendor_id()?
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
// It's safe to unwrap the guest_pci_path and get device slot,
|
||||
// As it has been assigned in vfio device manager.
|
||||
let pci_path = primary_device.guest_pci_path.unwrap();
|
||||
let guest_dev_id = pci_path.get_device_slot().unwrap().0;
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"insert host device.
|
||||
host device id: {:?},
|
||||
bus_slot_func: {:?},
|
||||
guest device id: {:?},
|
||||
vendor/device id: {:?}",
|
||||
primary_device.hostdev_id,
|
||||
primary_device.bus_slot_func,
|
||||
guest_dev_id,
|
||||
vendor_device_id,
|
||||
);
|
||||
|
||||
let vfio_dev_config = VfioPciDeviceConfig {
|
||||
bus_slot_func: primary_device.bus_slot_func,
|
||||
bus_slot_func: primary_device.bus_slot_func.clone(),
|
||||
vendor_device_id,
|
||||
guest_dev_id: Some(guest_dev_id),
|
||||
..Default::default()
|
||||
};
|
||||
let host_dev_config = HostDeviceConfig {
|
||||
hostdev_id: primary_device.hostdev_id,
|
||||
hostdev_id: primary_device.hostdev_id.clone(),
|
||||
sysfs_path: primary_device.sysfs_path.clone(),
|
||||
dev_config: vfio_dev_config,
|
||||
};
|
||||
|
||||
self.vmm_instance
|
||||
let guest_device_id = self
|
||||
.vmm_instance
|
||||
.insert_host_device(host_dev_config)
|
||||
.context("insert host device failed")?;
|
||||
|
||||
// It's safe to unwrap guest_device_id as we can get a guest device id here.
|
||||
primary_device.guest_pci_path = Some(PciPath::try_from(guest_device_id.unwrap() as u32)?);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -104,10 +104,7 @@ impl Hypervisor for Dragonball {
|
||||
|
||||
async fn add_device(&self, device: DeviceType) -> Result<DeviceType> {
|
||||
let mut inner = self.inner.write().await;
|
||||
match inner.add_device(device.clone()).await {
|
||||
Ok(_) => Ok(device),
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
inner.add_device(device.clone()).await
|
||||
}
|
||||
|
||||
async fn remove_device(&self, device: DeviceType) -> Result<()> {
|
||||
|
||||
@@ -197,12 +197,17 @@ impl VmmInstance {
|
||||
Err(anyhow!("Failed to get machine info"))
|
||||
}
|
||||
|
||||
pub fn insert_host_device(&self, device_cfg: HostDeviceConfig) -> Result<()> {
|
||||
self.handle_request_with_retry(Request::Sync(VmmAction::InsertHostDevice(
|
||||
device_cfg.clone(),
|
||||
)))
|
||||
.with_context(|| format!("Failed to insert host device {:?}", device_cfg))?;
|
||||
Ok(())
|
||||
pub fn insert_host_device(&self, device_cfg: HostDeviceConfig) -> Result<Option<u8>> {
|
||||
if let VmmData::VfioDeviceData(guest_dev_id) = self.handle_request_with_retry(
|
||||
Request::Sync(VmmAction::InsertHostDevice(device_cfg.clone())),
|
||||
)? {
|
||||
Ok(guest_dev_id)
|
||||
} else {
|
||||
Err(anyhow!(format!(
|
||||
"Failed to insert host device {:?}",
|
||||
device_cfg
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn prepare_remove_host_device(&self, id: &str) -> Result<()> {
|
||||
|
||||
324
src/runtime-rs/crates/hypervisor/src/firecracker/fc_api.rs
Normal file
324
src/runtime-rs/crates/hypervisor/src/firecracker/fc_api.rs
Normal file
@@ -0,0 +1,324 @@
|
||||
//Copyright (c) 2019-2022 Alibaba Cloud
|
||||
//Copyright (c) 2023 Nubificus Ltd
|
||||
//
|
||||
//SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use crate::{
|
||||
firecracker::{
|
||||
inner_hypervisor::{FC_AGENT_SOCKET_NAME, ROOT},
|
||||
sl, FcInner,
|
||||
},
|
||||
kernel_param::KernelParams,
|
||||
NetworkConfig, Param,
|
||||
};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use dbs_utils::net::MacAddr;
|
||||
use hyper::{Body, Method, Request, Response};
|
||||
use hyperlocal::Uri;
|
||||
use kata_sys_util::mount;
|
||||
use nix::mount::MsFlags;
|
||||
use serde_json::json;
|
||||
use tokio::{fs, fs::File};
|
||||
|
||||
const REQUEST_RETRY: u32 = 500;
|
||||
const FC_KERNEL: &str = "vmlinux";
|
||||
const FC_ROOT_FS: &str = "rootfs";
|
||||
const DRIVE_PREFIX: &str = "drive";
|
||||
const DISK_POOL_SIZE: u32 = 6;
|
||||
|
||||
impl FcInner {
|
||||
pub(crate) fn get_resource(&self, src: &str, dst: &str) -> Result<String> {
|
||||
if self.jailed {
|
||||
self.jail_resource(src, dst)
|
||||
} else {
|
||||
Ok(src.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
fn jail_resource(&self, src: &str, dst: &str) -> Result<String> {
|
||||
if src.is_empty() || dst.is_empty() {
|
||||
return Err(anyhow!("invalid param src {} dst {}", src, dst));
|
||||
}
|
||||
|
||||
let jailed_location = [self.vm_path.as_str(), ROOT, dst].join("/");
|
||||
mount::bind_mount_unchecked(src, jailed_location.as_str(), false, MsFlags::MS_SLAVE)
|
||||
.context("bind_mount ERROR")?;
|
||||
|
||||
let mut abs_path = String::from("/");
|
||||
abs_path.push_str(dst);
|
||||
Ok(abs_path)
|
||||
}
|
||||
|
||||
// Remounting jailer root to ensure it has exec permissions, since firecracker binary will
|
||||
// execute from there
|
||||
pub(crate) async fn remount_jailer_with_exec(&self) -> Result<()> {
|
||||
let localpath = [self.vm_path.clone(), ROOT.to_string()].join("/");
|
||||
let _ = fs::create_dir_all(&localpath)
|
||||
.await
|
||||
.context(format!("failed to create directory {:?}", &localpath));
|
||||
mount::bind_mount_unchecked(&localpath, &localpath, false, MsFlags::MS_SHARED)
|
||||
.context("bind mount jailer root")?;
|
||||
|
||||
mount::bind_remount(&localpath, false).context("rebind mount jailer root")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn prepare_hvsock(&mut self) -> Result<()> {
|
||||
let rel_uds_path = match self.jailed {
|
||||
false => [self.vm_path.as_str(), FC_AGENT_SOCKET_NAME].join("/"),
|
||||
true => FC_AGENT_SOCKET_NAME.to_string(),
|
||||
};
|
||||
|
||||
let body_vsock: String = json!({
|
||||
"guest_cid": 3,
|
||||
"uds_path": rel_uds_path,
|
||||
"vsock_id": ROOT,
|
||||
})
|
||||
.to_string();
|
||||
|
||||
self.request_with_retry(Method::PUT, "/vsock", body_vsock)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn prepare_vmm_resources(&mut self) -> Result<()> {
|
||||
let mut kernel_params = KernelParams::new(self.config.debug_info.enable_debug);
|
||||
kernel_params.push(Param::new("pci", "off"));
|
||||
kernel_params.push(Param::new("iommu", "off"));
|
||||
let rootfs_driver = self.config.blockdev_info.block_device_driver.clone();
|
||||
|
||||
kernel_params.append(&mut KernelParams::new_rootfs_kernel_params(
|
||||
&rootfs_driver,
|
||||
&self.config.boot_info.rootfs_type,
|
||||
)?);
|
||||
kernel_params.append(&mut KernelParams::from_string(
|
||||
&self.config.boot_info.kernel_params,
|
||||
));
|
||||
let mut parameters = String::new().to_owned();
|
||||
|
||||
for param in &kernel_params.to_string() {
|
||||
parameters.push_str(¶m.to_string());
|
||||
}
|
||||
|
||||
let kernel = self
|
||||
.get_resource(&self.config.boot_info.kernel, FC_KERNEL)
|
||||
.context("get resource KERNEL")?;
|
||||
let rootfs = self
|
||||
.get_resource(&self.config.boot_info.image, FC_ROOT_FS)
|
||||
.context("get resource ROOTFS")?;
|
||||
|
||||
let body_kernel: String = json!({
|
||||
"kernel_image_path": kernel,
|
||||
"boot_args": parameters,
|
||||
})
|
||||
.to_string();
|
||||
|
||||
let body_rootfs: String = json!({
|
||||
"drive_id": "rootfs",
|
||||
"path_on_host": rootfs,
|
||||
"is_root_device": false,
|
||||
"is_read_only": true
|
||||
})
|
||||
.to_string();
|
||||
|
||||
info!(sl(), "Before first request");
|
||||
self.request_with_retry(Method::PUT, "/boot-source", body_kernel)
|
||||
.await?;
|
||||
self.request_with_retry(Method::PUT, "/drives/rootfs", body_rootfs)
|
||||
.await?;
|
||||
|
||||
let abs_path = [&self.vm_path, ROOT].join("/");
|
||||
|
||||
let rel_path = "/".to_string();
|
||||
let _ = fs::create_dir_all(&abs_path)
|
||||
.await
|
||||
.context(format!("failed to create directory {:?}", &abs_path));
|
||||
|
||||
// We create some placeholder drives to be used for patching block devices while the vmm is
|
||||
// running, as firecracker does not support device hotplug.
|
||||
for i in 1..DISK_POOL_SIZE {
|
||||
let full_path_name = format!("{}/drive{}", abs_path, i);
|
||||
|
||||
let _ = File::create(&full_path_name)
|
||||
.await
|
||||
.context(format!("failed to create file {:?}", &full_path_name));
|
||||
|
||||
let path_on_host = match self.jailed {
|
||||
false => abs_path.clone(),
|
||||
true => rel_path.clone(),
|
||||
};
|
||||
let body: String = json!({
|
||||
"drive_id": format!("drive{}",i),
|
||||
"path_on_host": format!("{}/drive{}", path_on_host, i),
|
||||
"is_root_device": false,
|
||||
"is_read_only": false
|
||||
})
|
||||
.to_string();
|
||||
|
||||
self.request_with_retry(Method::PUT, &format!("/drives/drive{}", i), body)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
pub(crate) async fn patch_container_rootfs(
|
||||
&mut self,
|
||||
drive_id: &str,
|
||||
drive_path: &str,
|
||||
) -> Result<()> {
|
||||
let new_drive_id = &[DRIVE_PREFIX, drive_id].concat();
|
||||
let new_drive_path = self
|
||||
.get_resource(drive_path, new_drive_id)
|
||||
.context("get resource CONTAINER ROOTFS")?;
|
||||
let body: String = json!({
|
||||
"drive_id": format!("drive{drive_id}"),
|
||||
"path_on_host": new_drive_path
|
||||
})
|
||||
.to_string();
|
||||
self.request_with_retry(
|
||||
Method::PATCH,
|
||||
&["/drives/", &format!("drive{drive_id}")].concat(),
|
||||
body,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn add_net_device(
|
||||
&mut self,
|
||||
config: &NetworkConfig,
|
||||
device_id: String,
|
||||
) -> Result<()> {
|
||||
let g_mac = match &config.guest_mac {
|
||||
Some(mac) => MacAddr::from_bytes(&mac.0).ok(),
|
||||
None => None,
|
||||
};
|
||||
let body: String = json!({
|
||||
"iface_id": &device_id,
|
||||
"guest_mac": g_mac,
|
||||
"host_dev_name": &config.host_dev_name
|
||||
|
||||
})
|
||||
.to_string();
|
||||
self.request_with_retry(
|
||||
Method::PUT,
|
||||
&["/network-interfaces/", &device_id].concat(),
|
||||
body,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn request_with_retry(
|
||||
&self,
|
||||
method: Method,
|
||||
uri: &str,
|
||||
data: String,
|
||||
) -> Result<()> {
|
||||
let url: hyper::Uri = Uri::new(&self.asock_path, uri).into();
|
||||
self.send_request_with_retry(method, url, data).await
|
||||
}
|
||||
|
||||
pub(crate) async fn send_request_with_retry(
|
||||
&self,
|
||||
method: Method,
|
||||
uri: hyper::Uri,
|
||||
data: String,
|
||||
) -> Result<()> {
|
||||
debug!(sl(), "METHOD: {:?}", method.clone());
|
||||
debug!(sl(), "URI: {:?}", uri.clone());
|
||||
debug!(sl(), "DATA: {:?}", data.clone());
|
||||
for _count in 0..REQUEST_RETRY {
|
||||
let req = Request::builder()
|
||||
.method(method.clone())
|
||||
.uri(uri.clone())
|
||||
.header("Accept", "application/json")
|
||||
.header("Content-Type", "application/json")
|
||||
.body(Body::from(data.clone()))?;
|
||||
|
||||
match self.send_request(req).await {
|
||||
Ok(resp) => {
|
||||
debug!(sl(), "Request sent, resp: {:?}", resp);
|
||||
return Ok(());
|
||||
}
|
||||
Err(resp) => {
|
||||
debug!(sl(), "Request sent with error, resp: {:?}", resp);
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(anyhow::anyhow!(
|
||||
"After {} attempts, it still doesn't work.",
|
||||
REQUEST_RETRY
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) async fn send_request(&self, req: Request<Body>) -> Result<Response<Body>> {
|
||||
let resp = self.client.request(req).await?;
|
||||
|
||||
let status = resp.status();
|
||||
debug!(sl(), "Request RESPONSE {:?} {:?}", &status, resp);
|
||||
if status.is_success() {
|
||||
return Ok(resp);
|
||||
} else {
|
||||
let body = hyper::body::to_bytes(resp.into_body()).await?;
|
||||
if body.is_empty() {
|
||||
debug!(sl(), "Request FAILED WITH STATUS: {:?}", status);
|
||||
None
|
||||
} else {
|
||||
let body = String::from_utf8_lossy(&body).into_owned();
|
||||
debug!(
|
||||
sl(),
|
||||
"Request FAILED WITH STATUS: {:?} and BODY: {:?}", status, body
|
||||
);
|
||||
Some(body)
|
||||
};
|
||||
}
|
||||
|
||||
Err(anyhow::anyhow!(
|
||||
"After {} attempts, it
|
||||
still doesn't work.",
|
||||
REQUEST_RETRY
|
||||
))
|
||||
}
|
||||
pub(crate) fn cleanup_resource(&self) {
|
||||
if self.jailed {
|
||||
self.umount_jail_resource(FC_KERNEL).ok();
|
||||
self.umount_jail_resource(FC_ROOT_FS).ok();
|
||||
|
||||
for i in 1..DISK_POOL_SIZE {
|
||||
self.umount_jail_resource(&[DRIVE_PREFIX, &i.to_string()].concat())
|
||||
.ok();
|
||||
}
|
||||
|
||||
self.umount_jail_resource("").ok();
|
||||
}
|
||||
std::fs::remove_dir_all(self.vm_path.as_str())
|
||||
.map_err(|err| {
|
||||
error!(
|
||||
sl(),
|
||||
"failed to remove dir all for {} with error: {:?}", &self.vm_path, &err
|
||||
);
|
||||
err
|
||||
})
|
||||
.ok();
|
||||
}
|
||||
|
||||
pub(crate) fn umount_jail_resource(&self, jailed_path: &str) -> Result<()> {
|
||||
let path = match jailed_path {
|
||||
// Handle final case to umount the bind-mounted `/run/kata/firecracker/{id}/root` dir
|
||||
"" => [self.vm_path.clone(), ROOT.to_string()].join("/"),
|
||||
// Handle generic case to umount the bind-mounted
|
||||
// `/run/kata/firecracker/{id}/root/asset` file/dir
|
||||
_ => [
|
||||
self.vm_path.clone(),
|
||||
ROOT.to_string(),
|
||||
jailed_path.to_string(),
|
||||
]
|
||||
.join("/"),
|
||||
};
|
||||
nix::mount::umount2(path.as_str(), nix::mount::MntFlags::MNT_DETACH)
|
||||
.with_context(|| format!("umount path {}", &path))
|
||||
}
|
||||
}
|
||||
208
src/runtime-rs/crates/hypervisor/src/firecracker/inner.rs
Normal file
208
src/runtime-rs/crates/hypervisor/src/firecracker/inner.rs
Normal file
@@ -0,0 +1,208 @@
|
||||
//Copyright (c) 2019-2022 Alibaba Cloud
|
||||
//Copyright (c) 2023 Nubificus Ltd
|
||||
//
|
||||
//SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use crate::firecracker::{inner_hypervisor::FC_API_SOCKET_NAME, sl};
|
||||
use crate::HypervisorState;
|
||||
use crate::MemoryConfig;
|
||||
use crate::HYPERVISOR_FIRECRACKER;
|
||||
use crate::{device::DeviceType, VmmState};
|
||||
use anyhow::{Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use hyper::Client;
|
||||
use hyperlocal::{UnixClientExt, UnixConnector};
|
||||
use kata_types::{
|
||||
capabilities::{Capabilities, CapabilityBits},
|
||||
config::hypervisor::Hypervisor as HypervisorConfig,
|
||||
};
|
||||
use nix::sched::{setns, CloneFlags};
|
||||
use persist::sandbox_persist::Persist;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use tokio::process::Command;
|
||||
|
||||
// SAFETY: FcInner is only accessed behind Arc<RwLock<FcInner>> (see the
// Firecracker wrapper in mod.rs), so cross-thread access is serialized.
// NOTE(review): these blanket impls assert Send/Sync for every field,
// including the hyper Client — confirm all members really are thread-safe.
unsafe impl Send for FcInner {}
unsafe impl Sync for FcInner {}

// Internal, lock-protected state of the Firecracker hypervisor driver.
#[derive(Debug)]
pub struct FcInner {
    // Sandbox/VM identifier.
    pub(crate) id: String,
    // Path of the Firecracker API unix socket.
    pub(crate) asock_path: String,
    // Lifecycle state (NotReady -> VmmServerReady -> VmRunning; see
    // prepare_vm()/start_vm()).
    pub(crate) state: VmmState,
    // Hypervisor section of the kata configuration.
    pub(crate) config: HypervisorConfig,
    // PID of the spawned firecracker/jailer process, None until spawned.
    pub(crate) pid: Option<u32>,
    // Base directory of this VM's on-disk resources.
    pub(crate) vm_path: String,
    // Network namespace path the VMM is started in, if any.
    pub(crate) netns: Option<String>,
    // HTTP-over-unix-socket client used to drive the Firecracker API.
    pub(crate) client: Client<UnixConnector>,
    // Chroot base directory handed to the jailer.
    pub(crate) jailer_root: String,
    // Whether the VMM runs under the jailer.
    pub(crate) jailed: bool,
    // Directory holding runtime artifacts such as the API socket.
    pub(crate) run_dir: String,
    // Devices queued by add_device() while the VMM is not ready yet.
    pub(crate) pending_devices: Vec<DeviceType>,
    // Capabilities advertised to the runtime.
    pub(crate) capabilities: Capabilities,
}
|
||||
|
||||
impl FcInner {
|
||||
pub fn new() -> FcInner {
|
||||
let mut capabilities = Capabilities::new();
|
||||
capabilities.set(CapabilityBits::BlockDeviceSupport);
|
||||
FcInner {
|
||||
id: String::default(),
|
||||
asock_path: String::default(),
|
||||
state: VmmState::NotReady,
|
||||
config: Default::default(),
|
||||
pid: None,
|
||||
netns: None,
|
||||
vm_path: String::default(),
|
||||
client: Client::unix(),
|
||||
jailer_root: String::default(),
|
||||
jailed: false,
|
||||
run_dir: String::default(),
|
||||
pending_devices: vec![],
|
||||
capabilities,
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn the VMM process — via the jailer when `self.jailed` is set,
/// plain firecracker otherwise — entering `netns` first when given.
/// On success the child's PID is recorded in `self.pid`.
pub(crate) async fn prepare_vmm(&mut self, netns: Option<String>) -> Result<()> {
    let mut cmd: Command;
    self.netns = netns.clone();
    match self.jailed {
        true => {
            debug!(sl(), "Running Jailed");
            cmd = Command::new(&self.config.jailer_path);
            // Socket path is relative to the jailer chroot. Note:
            // joining "/run/" with "/" yields "/run//fc.sock" — a
            // harmless double slash.
            let api_socket = ["/run/", FC_API_SOCKET_NAME].join("/");
            // Everything after "--" is forwarded to firecracker itself.
            let args = [
                "--id",
                &self.id,
                "--gid",
                "0",
                "--uid",
                "0",
                "--exec-file",
                &self.config.path,
                "--chroot-base-dir",
                &self.jailer_root,
                "--",
                "--api-sock",
                &api_socket,
            ];
            cmd.args(args);
        }
        false => {
            debug!(sl(), "Running non-Jailed");
            cmd = Command::new(&self.config.path);
            cmd.args(["--api-sock", &self.asock_path]);
        }
    }
    debug!(sl(), "Exec: {:?}", cmd);

    // Make sure we're in the correct Network Namespace
    // NOTE(review): the setns() Result is discarded (`let _ =`), so a
    // failed namespace switch does not abort the spawn — confirm intended.
    unsafe {
        let _pre = cmd.pre_exec(move || {
            if let Some(netns_path) = &netns {
                debug!(sl(), "set netns for vmm master {:?}", &netns_path);
                let netns_fd = std::fs::File::open(netns_path);
                let _ = setns(netns_fd?.as_raw_fd(), CloneFlags::CLONE_NEWNET)
                    .context("set netns failed");
            }
            Ok(())
        });
    }

    let mut child = cmd.spawn()?;

    // id() is None when the child has already been reaped; in that case
    // log its exit status instead of recording a PID.
    match child.id() {
        Some(id) => {
            let cur_tid = nix::unistd::gettid().as_raw() as u32;
            info!(
                sl(),
                "VMM spawned successfully: PID: {:?}, current TID: {:?}", id, cur_tid
            );
            self.pid = Some(id);
        }
        None => {
            let exit_status = child.wait().await?;
            error!(sl(), "Process exited, status: {:?}", exit_status);
        }
    };
    Ok(())
}
|
||||
|
||||
pub(crate) fn hypervisor_config(&self) -> HypervisorConfig {
|
||||
debug!(sl(), "[Firecracker]: Hypervisor config");
|
||||
self.config.clone()
|
||||
}
|
||||
|
||||
pub(crate) fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
|
||||
debug!(sl(), "[Firecracker]: Set Hypervisor config");
|
||||
self.config = config;
|
||||
}
|
||||
|
||||
pub(crate) fn resize_memory(&mut self, new_mem_mb: u32) -> Result<(u32, MemoryConfig)> {
|
||||
warn!(
|
||||
sl(),
|
||||
"memory size unchanged, requested: {:?} Not implemented", new_mem_mb
|
||||
);
|
||||
Ok((
|
||||
0,
|
||||
MemoryConfig {
|
||||
..Default::default()
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) fn set_capabilities(&mut self, flag: CapabilityBits) {
|
||||
self.capabilities.add(flag);
|
||||
}
|
||||
|
||||
pub(crate) fn set_guest_memory_block_size(&mut self, size: u32) {
|
||||
warn!(
|
||||
sl(),
|
||||
"guest memory block size unchanged, requested: {:?}, Not implemented", size
|
||||
);
|
||||
}
|
||||
|
||||
pub(crate) fn guest_memory_block_size_mb(&self) -> u32 {
|
||||
warn!(sl(), "guest memory block size Not implemented");
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Persist for FcInner {
|
||||
type State = HypervisorState;
|
||||
type ConstructorArgs = ();
|
||||
|
||||
async fn save(&self) -> Result<Self::State> {
|
||||
Ok(HypervisorState {
|
||||
hypervisor_type: HYPERVISOR_FIRECRACKER.to_string(),
|
||||
id: self.id.clone(),
|
||||
vm_path: self.vm_path.clone(),
|
||||
config: self.hypervisor_config(),
|
||||
jailed: self.jailed,
|
||||
jailer_root: self.jailer_root.clone(),
|
||||
run_dir: self.run_dir.clone(),
|
||||
netns: self.netns.clone(),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
async fn restore(
|
||||
_hypervisor_args: Self::ConstructorArgs,
|
||||
hypervisor_state: Self::State,
|
||||
) -> Result<Self> {
|
||||
Ok(FcInner {
|
||||
id: hypervisor_state.id,
|
||||
asock_path: String::default(),
|
||||
state: VmmState::NotReady,
|
||||
vm_path: hypervisor_state.vm_path,
|
||||
config: hypervisor_state.config,
|
||||
netns: hypervisor_state.netns,
|
||||
pid: None,
|
||||
jailed: hypervisor_state.jailed,
|
||||
jailer_root: hypervisor_state.jailer_root,
|
||||
client: Client::unix(),
|
||||
pending_devices: vec![],
|
||||
run_dir: hypervisor_state.run_dir,
|
||||
capabilities: Capabilities::new(),
|
||||
})
|
||||
}
|
||||
}
|
||||
102
src/runtime-rs/crates/hypervisor/src/firecracker/inner_device.rs
Normal file
102
src/runtime-rs/crates/hypervisor/src/firecracker/inner_device.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
//Copyright (c) 2019-2022 Alibaba Cloud
|
||||
//Copyright (c) 2019-2022 Ant Group
|
||||
//Copyright (c) 2023 Nubificus Ltd
|
||||
//
|
||||
//SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use super::FcInner;
|
||||
use crate::firecracker::{
|
||||
inner_hypervisor::{FC_AGENT_SOCKET_NAME, ROOT},
|
||||
sl,
|
||||
};
|
||||
use crate::VmmState;
|
||||
use crate::{device::DeviceType, HybridVsockConfig, VsockConfig};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use serde_json::json;
|
||||
|
||||
impl FcInner {
|
||||
/// Attach a device to the VM, or queue it if the VMM is not up yet.
/// Queued devices are expected to be flushed once the VMM is ready.
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<()> {
    if self.state == VmmState::NotReady {
        info!(sl(), "VMM not ready, queueing device {}", device);

        // Prepend so earlier requests end up later in the Vec; the drain
        // order is determined by whoever consumes pending_devices.
        self.pending_devices.insert(0, device);

        return Ok(());
    }

    debug!(sl(), "Add Device {} ", &device);

    // Dispatch on device kind; anything not handled below is rejected.
    match device {
        DeviceType::Block(block) => self
            .hotplug_block_device(block.config.path_on_host.as_str(), block.config.index)
            .await
            .context("add block device"),
        DeviceType::Network(network) => self
            .add_net_device(&network.config, network.device_id)
            .await
            .context("add net device"),
        DeviceType::HybridVsock(hvsock) => {
            self.add_hvsock(&hvsock.config).await.context("add vsock")
        }
        DeviceType::Vsock(vsock) => self.add_vsock(&vsock.config).await.context("add vsock"),
        _ => Err(anyhow!("unhandled device: {:?}", device)),
    }
}
|
||||
|
||||
// Since Firecracker doesn't support sharefs, we patch block devices on pre-start inserted
// dummy drives
pub(crate) async fn hotplug_block_device(&mut self, path: &str, id: u64) -> Result<()> {
    // Index 0 is presumably the VM rootfs drive and must not be
    // re-patched — TODO confirm against prepare_vmm_resources().
    if id > 0 {
        self.patch_container_rootfs(&id.to_string(), path).await?;
    }
    Ok(())
}
|
||||
|
||||
pub(crate) async fn remove_device(&mut self, device: DeviceType) -> Result<()> {
|
||||
info!(sl(), "Remove Device {} ", device);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn update_device(&mut self, device: DeviceType) -> Result<()> {
|
||||
info!(sl(), "update device {:?}", &device);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn add_hvsock(&mut self, config: &HybridVsockConfig) -> Result<()> {
|
||||
let rel_uds_path = match self.jailed {
|
||||
false => [self.vm_path.as_str(), FC_AGENT_SOCKET_NAME].join("/"),
|
||||
true => FC_AGENT_SOCKET_NAME.to_string(),
|
||||
};
|
||||
let body_vsock: String = json!({
|
||||
"vsock_id": String::from(ROOT),
|
||||
"guest_cid": config.guest_cid,
|
||||
"uds_path": rel_uds_path,
|
||||
})
|
||||
.to_string();
|
||||
|
||||
info!(sl(), "HybridVsock configure: {:?}", &body_vsock);
|
||||
|
||||
self.request_with_retry(hyper::Method::PUT, "/vsock", body_vsock)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn add_vsock(&mut self, config: &VsockConfig) -> Result<()> {
|
||||
let rel_uds_path = match self.jailed {
|
||||
false => [self.vm_path.as_str(), FC_AGENT_SOCKET_NAME].join("/"),
|
||||
true => FC_AGENT_SOCKET_NAME.to_string(),
|
||||
};
|
||||
let body_vsock: String = json!({
|
||||
"vsock_id": String::from(ROOT),
|
||||
"guest_cid": config.guest_cid,
|
||||
"uds_path": rel_uds_path,
|
||||
})
|
||||
.to_string();
|
||||
|
||||
info!(sl(), "HybridVsock configure: {:?}", &body_vsock);
|
||||
|
||||
self.request_with_retry(hyper::Method::PUT, "/vsock", body_vsock)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,192 @@
|
||||
//Copyright (c) 2019-2022 Alibaba Cloud
|
||||
//Copyright (c) 2023 Nubificus Ltd
|
||||
//
|
||||
//SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use crate::firecracker::{sl, FcInner};
|
||||
use crate::{VcpuThreadIds, VmmState, HYPERVISOR_FIRECRACKER};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::capabilities::Capabilities;
|
||||
use kata_types::config::KATA_PATH;
|
||||
use std::collections::HashSet;
|
||||
use std::iter::FromIterator;
|
||||
use tokio::fs;
|
||||
|
||||
pub const FC_API_SOCKET_NAME: &str = "fc.sock";
|
||||
pub const FC_AGENT_SOCKET_NAME: &str = "kata.hvsock";
|
||||
pub const ROOT: &str = "root";
|
||||
|
||||
const HYBRID_VSOCK_SCHEME: &str = "hvsock";
|
||||
|
||||
impl FcInner {
|
||||
pub(crate) async fn prepare_vm(&mut self, id: &str, _netns: Option<String>) -> Result<()> {
|
||||
debug!(sl(), "Preparing Firecracker");
|
||||
|
||||
self.id = id.to_string();
|
||||
|
||||
if !self.config.jailer_path.is_empty() {
|
||||
debug!(sl(), "Running jailed");
|
||||
self.jailed = true;
|
||||
self.jailer_root = KATA_PATH.to_string();
|
||||
debug!(sl(), "jailer_root: {:?}", self.jailer_root);
|
||||
self.vm_path = [
|
||||
self.jailer_root.clone(),
|
||||
HYPERVISOR_FIRECRACKER.to_string(),
|
||||
id.to_string(),
|
||||
]
|
||||
.join("/");
|
||||
debug!(sl(), "VM Path: {:?}", self.vm_path);
|
||||
self.run_dir = [self.vm_path.clone(), "root".to_string(), "run".to_string()].join("/");
|
||||
debug!(sl(), "Rundir: {:?}", self.run_dir);
|
||||
let _ = self.remount_jailer_with_exec().await;
|
||||
} else {
|
||||
self.vm_path = [KATA_PATH.to_string(), id.to_string()].join("/");
|
||||
debug!(sl(), "VM Path: {:?}", self.vm_path);
|
||||
self.run_dir = [self.vm_path.clone(), "run".to_string()].join("/");
|
||||
debug!(sl(), "Rundir: {:?}", self.run_dir);
|
||||
}
|
||||
// We construct the FC API socket path based on the run_dir variable (jailed or
|
||||
// non-jailed).
|
||||
self.asock_path = [self.run_dir.as_str(), "fc.sock"].join("/");
|
||||
debug!(sl(), "Socket Path: {:?}", self.asock_path);
|
||||
|
||||
let _ = fs::create_dir_all(self.run_dir.as_str())
|
||||
.await
|
||||
.context(format!("failed to create directory {:?}", self.vm_path));
|
||||
|
||||
self.netns = _netns.clone();
|
||||
self.prepare_vmm(self.netns.clone()).await?;
|
||||
self.state = VmmState::VmmServerReady;
|
||||
self.prepare_vmm_resources().await?;
|
||||
self.prepare_hvsock().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn start_vm(&mut self, _timeout: i32) -> Result<()> {
|
||||
debug!(sl(), "Starting sandbox");
|
||||
let body: String = serde_json::json!({
|
||||
"action_type": "InstanceStart"
|
||||
})
|
||||
.to_string();
|
||||
self.request_with_retry(hyper::Method::PUT, "/actions", body)
|
||||
.await?;
|
||||
self.state = VmmState::VmRunning;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Stop the sandbox by killing the VMM process. Best effort: never
/// returns an error, even if the process is already gone.
pub(crate) async fn stop_vm(&mut self) -> Result<()> {
    debug!(sl(), "Stopping sandbox");
    if self.state != VmmState::VmRunning {
        debug!(sl(), "VM not running!");
    } else if let Some(pid_to_kill) = &self.pid {
        let pid = ::nix::unistd::Pid::from_raw(*pid_to_kill as i32);
        // SIGKILL the VMM directly; ESRCH just means it already exited,
        // so only other errors are worth logging.
        if let Err(err) = ::nix::sys::signal::kill(pid, nix::sys::signal::SIGKILL) {
            if err != ::nix::Error::ESRCH {
                debug!(sl(), "Failed to kill VMM with pid {} {:?}", pid, err);
            }
        }
    }
    Ok(())
}
|
||||
|
||||
pub(crate) fn pause_vm(&self) -> Result<()> {
|
||||
warn!(sl(), "Pause VM: Not implemented");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn save_vm(&self) -> Result<()> {
|
||||
warn!(sl(), "Save VM: Not implemented");
|
||||
Ok(())
|
||||
}
|
||||
pub(crate) fn resume_vm(&self) -> Result<()> {
|
||||
warn!(sl(), "Resume VM: Not implemented");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn get_agent_socket(&self) -> Result<String> {
|
||||
debug!(sl(), "Get kata-agent socket");
|
||||
let vsock_path = match self.jailed {
|
||||
false => [self.vm_path.as_str(), FC_AGENT_SOCKET_NAME].join("/"),
|
||||
true => [self.vm_path.as_str(), ROOT, FC_AGENT_SOCKET_NAME].join("/"),
|
||||
};
|
||||
Ok(format!("{}://{}", HYBRID_VSOCK_SCHEME, vsock_path))
|
||||
}
|
||||
|
||||
pub(crate) async fn disconnect(&mut self) {
|
||||
warn!(sl(), "Disconnect: Not implemented");
|
||||
}
|
||||
pub(crate) async fn get_thread_ids(&self) -> Result<VcpuThreadIds> {
|
||||
debug!(sl(), "Get Thread IDs");
|
||||
Ok(VcpuThreadIds::default())
|
||||
}
|
||||
|
||||
pub(crate) async fn get_pids(&self) -> Result<Vec<u32>> {
|
||||
debug!(sl(), "Get PIDs");
|
||||
let mut pids = HashSet::new();
|
||||
// get shim thread ids
|
||||
pids.insert(self.pid.unwrap());
|
||||
|
||||
debug!(sl(), "PIDs: {:?}", pids);
|
||||
Ok(Vec::from_iter(pids.into_iter()))
|
||||
}
|
||||
|
||||
pub(crate) async fn get_vmm_master_tid(&self) -> Result<u32> {
|
||||
debug!(sl(), "Get VMM master TID");
|
||||
if let Some(pid) = self.pid {
|
||||
Ok(pid)
|
||||
} else {
|
||||
Err(anyhow!("could not get vmm master tid"))
|
||||
}
|
||||
}
|
||||
pub(crate) async fn get_ns_path(&self) -> Result<String> {
|
||||
debug!(sl(), "Get NS path");
|
||||
if let Some(pid) = self.pid {
|
||||
let ns_path = format!("/proc/{}/ns", pid);
|
||||
Ok(ns_path)
|
||||
} else {
|
||||
Err(anyhow!("could not get ns path"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove all host-side resources belonging to this sandbox.
pub(crate) async fn cleanup(&self) -> Result<()> {
    debug!(sl(), "Cleanup");
    // cleanup_resource() already unmounts the jail assets AND removes
    // self.vm_path via remove_dir_all (see FcInner::cleanup_resource),
    // so the second remove_dir_all that used to follow here was
    // redundant — it only ever hit NotFound and was .ok()'d away.
    self.cleanup_resource();

    Ok(())
}
|
||||
|
||||
pub(crate) async fn resize_vcpu(&self, old_vcpu: u32, new_vcpu: u32) -> Result<(u32, u32)> {
|
||||
warn!(sl(), "Resize vCPU: Not implemented");
|
||||
Ok((old_vcpu, new_vcpu))
|
||||
}
|
||||
|
||||
pub(crate) async fn check(&self) -> Result<()> {
|
||||
warn!(sl(), "Check: Not implemented");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn get_jailer_root(&self) -> Result<String> {
|
||||
debug!(sl(), "Get Jailer Root");
|
||||
Ok(self.jailer_root.clone())
|
||||
}
|
||||
|
||||
pub(crate) async fn capabilities(&self) -> Result<Capabilities> {
|
||||
debug!(sl(), "Capabilities");
|
||||
Ok(self.capabilities.clone())
|
||||
}
|
||||
|
||||
pub(crate) async fn get_hypervisor_metrics(&self) -> Result<String> {
|
||||
warn!(sl(), "Get Hypervisor Metrics: Not implemented");
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
215
src/runtime-rs/crates/hypervisor/src/firecracker/mod.rs
Normal file
215
src/runtime-rs/crates/hypervisor/src/firecracker/mod.rs
Normal file
@@ -0,0 +1,215 @@
|
||||
//Copyright (c) 2019-2022 Alibaba Cloud
|
||||
//Copyright (c) 2023 Nubificus Ltd
|
||||
//
|
||||
//SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
mod fc_api;
|
||||
mod inner;
|
||||
mod inner_device;
|
||||
mod inner_hypervisor;
|
||||
|
||||
use super::HypervisorState;
|
||||
use crate::MemoryConfig;
|
||||
use crate::{device::DeviceType, Hypervisor, HypervisorConfig, VcpuThreadIds};
|
||||
use anyhow::Context;
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use inner::FcInner;
|
||||
use kata_types::capabilities::Capabilities;
|
||||
use kata_types::capabilities::CapabilityBits;
|
||||
use persist::sandbox_persist::Persist;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Firecracker {
|
||||
inner: Arc<RwLock<FcInner>>,
|
||||
}
|
||||
|
||||
// Convenience function to set the scope.
|
||||
pub fn sl() -> slog::Logger {
|
||||
slog_scope::logger().new(o!("subsystem" => "firecracker"))
|
||||
}
|
||||
|
||||
impl Default for Firecracker {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Firecracker {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(RwLock::new(FcInner::new())),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.set_hypervisor_config(config)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Hypervisor for Firecracker {
|
||||
async fn prepare_vm(&self, id: &str, netns: Option<String>) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.prepare_vm(id, netns).await
|
||||
}
|
||||
|
||||
async fn start_vm(&self, timeout: i32) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.start_vm(timeout).await
|
||||
}
|
||||
|
||||
async fn stop_vm(&self) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.stop_vm().await
|
||||
}
|
||||
|
||||
async fn pause_vm(&self) -> Result<()> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.pause_vm()
|
||||
}
|
||||
|
||||
async fn resume_vm(&self) -> Result<()> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.resume_vm()
|
||||
}
|
||||
|
||||
async fn save_vm(&self) -> Result<()> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.save_vm().await
|
||||
}
|
||||
|
||||
async fn add_device(&self, device: DeviceType) -> Result<DeviceType> {
|
||||
let mut inner = self.inner.write().await;
|
||||
match inner.add_device(device.clone()).await {
|
||||
Ok(_) => Ok(device),
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
async fn remove_device(&self, device: DeviceType) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.remove_device(device).await
|
||||
}
|
||||
|
||||
async fn update_device(&self, device: DeviceType) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.update_device(device).await
|
||||
}
|
||||
|
||||
async fn get_agent_socket(&self) -> Result<String> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_agent_socket().await
|
||||
}
|
||||
|
||||
async fn disconnect(&self) {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.disconnect().await
|
||||
}
|
||||
|
||||
async fn hypervisor_config(&self) -> HypervisorConfig {
|
||||
let inner = self.inner.read().await;
|
||||
inner.hypervisor_config()
|
||||
}
|
||||
|
||||
async fn get_thread_ids(&self) -> Result<VcpuThreadIds> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_thread_ids().await
|
||||
}
|
||||
|
||||
async fn cleanup(&self) -> Result<()> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.cleanup().await
|
||||
}
|
||||
|
||||
async fn resize_vcpu(&self, old_vcpu: u32, new_vcpu: u32) -> Result<(u32, u32)> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.resize_vcpu(old_vcpu, new_vcpu).await
|
||||
}
|
||||
|
||||
async fn get_pids(&self) -> Result<Vec<u32>> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_pids().await
|
||||
}
|
||||
|
||||
async fn get_vmm_master_tid(&self) -> Result<u32> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_vmm_master_tid().await
|
||||
}
|
||||
|
||||
async fn get_ns_path(&self) -> Result<String> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_ns_path().await
|
||||
}
|
||||
|
||||
async fn check(&self) -> Result<()> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.check().await
|
||||
}
|
||||
|
||||
async fn get_jailer_root(&self) -> Result<String> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_jailer_root().await
|
||||
}
|
||||
|
||||
async fn save_state(&self) -> Result<HypervisorState> {
|
||||
self.save().await
|
||||
}
|
||||
|
||||
async fn capabilities(&self) -> Result<Capabilities> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.capabilities().await
|
||||
}
|
||||
|
||||
async fn get_hypervisor_metrics(&self) -> Result<String> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.get_hypervisor_metrics().await
|
||||
}
|
||||
|
||||
async fn set_capabilities(&self, flag: CapabilityBits) {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.set_capabilities(flag)
|
||||
}
|
||||
|
||||
async fn set_guest_memory_block_size(&self, size: u32) {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.set_guest_memory_block_size(size);
|
||||
}
|
||||
|
||||
async fn guest_memory_block_size(&self) -> u32 {
|
||||
let inner = self.inner.read().await;
|
||||
inner.guest_memory_block_size_mb()
|
||||
}
|
||||
|
||||
async fn resize_memory(&self, new_mem_mb: u32) -> Result<(u32, MemoryConfig)> {
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.resize_memory(new_mem_mb)
|
||||
}
|
||||
|
||||
async fn get_passfd_listener_addr(&self) -> Result<(String, u32)> {
|
||||
Err(anyhow::anyhow!("Not yet supported"))
|
||||
}
|
||||
}
|
||||
#[async_trait]
|
||||
impl Persist for Firecracker {
|
||||
type State = HypervisorState;
|
||||
type ConstructorArgs = ();
|
||||
/// Save a state of the component.
|
||||
async fn save(&self) -> Result<Self::State> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.save().await.context("save hypervisor state")
|
||||
}
|
||||
/// Restore a component from a specified state.
|
||||
async fn restore(
|
||||
hypervisor_args: Self::ConstructorArgs,
|
||||
hypervisor_state: Self::State,
|
||||
) -> Result<Self> {
|
||||
let inner = FcInner::restore(hypervisor_args, hypervisor_state).await?;
|
||||
Ok(Self {
|
||||
inner: Arc::new(RwLock::new(inner)),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -118,6 +118,11 @@ impl KernelParams {
|
||||
self.params.append(&mut params.params);
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
pub(crate) fn push(&mut self, new_param: Param) {
|
||||
self.params.push(new_param);
|
||||
}
|
||||
|
||||
pub(crate) fn from_string(params_string: &str) -> Self {
|
||||
let mut params = vec![];
|
||||
|
||||
|
||||
@@ -15,6 +15,8 @@ pub use device::driver::*;
|
||||
use device::DeviceType;
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
pub mod dragonball;
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
pub mod firecracker;
|
||||
mod kernel_param;
|
||||
pub mod qemu;
|
||||
pub use kernel_param::Param;
|
||||
@@ -61,6 +63,7 @@ const HUGE_SHMEM: &str = "hugeshmem";
|
||||
|
||||
pub const HYPERVISOR_DRAGONBALL: &str = "dragonball";
|
||||
pub const HYPERVISOR_QEMU: &str = "qemu";
|
||||
pub const HYPERVISOR_FIRECRACKER: &str = "firecracker";
|
||||
|
||||
pub const DEFAULT_HYBRID_VSOCK_NAME: &str = "kata.hvsock";
|
||||
pub const JAILER_ROOT: &str = "root";
|
||||
|
||||
@@ -21,7 +21,7 @@ use tokio;
|
||||
const MI_B: u64 = 1024 * 1024;
|
||||
const GI_B: u64 = 1024 * MI_B;
|
||||
|
||||
const QMP_SOCKET_FILE: &str = "qmp.sock";
|
||||
pub const QMP_SOCKET_FILE: &str = "qmp.sock";
|
||||
const DEBUG_MONITOR_SOCKET: &str = "debug-monitor.sock";
|
||||
|
||||
// The approach taken here is inspired by govmm. We build structs, each
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use super::cmdline_generator::QemuCmdLine;
|
||||
use super::cmdline_generator::{QemuCmdLine, QMP_SOCKET_FILE};
|
||||
use super::qmp::Qmp;
|
||||
use crate::{
|
||||
hypervisor_persist::HypervisorState, utils::enter_netns, HypervisorConfig, MemoryConfig,
|
||||
VcpuThreadIds, VsockDevice, HYPERVISOR_QEMU,
|
||||
@@ -16,6 +17,7 @@ use kata_types::{
|
||||
config::KATA_PATH,
|
||||
};
|
||||
use persist::sandbox_persist::Persist;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::process::Stdio;
|
||||
@@ -32,6 +34,7 @@ pub struct QemuInner {
|
||||
id: String,
|
||||
|
||||
qemu_process: Option<Child>,
|
||||
qmp: Option<Qmp>,
|
||||
|
||||
config: HypervisorConfig,
|
||||
devices: Vec<DeviceType>,
|
||||
@@ -43,6 +46,7 @@ impl QemuInner {
|
||||
QemuInner {
|
||||
id: "".to_string(),
|
||||
qemu_process: None,
|
||||
qmp: None,
|
||||
config: Default::default(),
|
||||
devices: Vec::new(),
|
||||
netns: None,
|
||||
@@ -149,16 +153,33 @@ impl QemuInner {
|
||||
tokio::spawn(log_qemu_stderr(qemu_process.stderr.take().unwrap()));
|
||||
}
|
||||
|
||||
match Qmp::new(QMP_SOCKET_FILE) {
|
||||
Ok(qmp) => self.qmp = Some(qmp),
|
||||
Err(e) => {
|
||||
error!(sl!(), "couldn't initialise QMP: {:?}", e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn stop_vm(&mut self) -> Result<()> {
|
||||
info!(sl!(), "Stopping QEMU VM");
|
||||
if let Some(ref mut qemu_process) = &mut self.qemu_process {
|
||||
info!(sl!(), "QemuInner::stop_vm(): kill()'ing qemu");
|
||||
qemu_process.kill().await.map_err(anyhow::Error::from)
|
||||
let is_qemu_running = qemu_process.id().is_some();
|
||||
if is_qemu_running {
|
||||
info!(sl!(), "QemuInner::stop_vm(): kill()'ing qemu");
|
||||
qemu_process.kill().await.map_err(anyhow::Error::from)
|
||||
} else {
|
||||
info!(
|
||||
sl!(),
|
||||
"QemuInner::stop_vm(): qemu process isn't running (likely stopped already)"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
} else {
|
||||
Err(anyhow!("qemu process not running"))
|
||||
Err(anyhow!("qemu process has not been started yet"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -210,7 +231,7 @@ impl QemuInner {
|
||||
);
|
||||
Ok(qemu_pid)
|
||||
} else {
|
||||
Err(anyhow!("cannot get qemu pid (though it seems running)"))
|
||||
Err(anyhow!("QemuInner::get_vmm_master_tid(): qemu process isn't running (likely stopped already)"))
|
||||
}
|
||||
} else {
|
||||
Err(anyhow!("qemu process not running"))
|
||||
@@ -233,15 +254,48 @@ impl QemuInner {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn resize_vcpu(&self, old_vcpus: u32, new_vcpus: u32) -> Result<(u32, u32)> {
|
||||
pub(crate) async fn resize_vcpu(
|
||||
&mut self,
|
||||
old_vcpus: u32,
|
||||
mut new_vcpus: u32,
|
||||
) -> Result<(u32, u32)> {
|
||||
info!(
|
||||
sl!(),
|
||||
"QemuInner::resize_vcpu(): {} -> {}", old_vcpus, new_vcpus
|
||||
);
|
||||
|
||||
// TODO The following sanity checks apparently have to be performed by
|
||||
// any hypervisor - wouldn't it make sense to move them to the caller?
|
||||
if new_vcpus == old_vcpus {
|
||||
return Ok((old_vcpus, new_vcpus));
|
||||
}
|
||||
todo!()
|
||||
|
||||
if new_vcpus == 0 {
|
||||
return Err(anyhow!("resize to 0 vcpus requested"));
|
||||
}
|
||||
|
||||
if new_vcpus > self.config.cpu_info.default_maxvcpus {
|
||||
warn!(
|
||||
sl!(),
|
||||
"Cannot allocate more vcpus than the max allowed number of vcpus. The maximum allowed amount of vcpus will be used instead.");
|
||||
new_vcpus = self.config.cpu_info.default_maxvcpus;
|
||||
}
|
||||
|
||||
if let Some(ref mut qmp) = self.qmp {
|
||||
match new_vcpus.cmp(&old_vcpus) {
|
||||
Ordering::Greater => {
|
||||
let hotplugged = qmp.hotplug_vcpus(new_vcpus - old_vcpus)?;
|
||||
new_vcpus = old_vcpus + hotplugged;
|
||||
}
|
||||
Ordering::Less => {
|
||||
let hotunplugged = qmp.hotunplug_vcpus(old_vcpus - new_vcpus)?;
|
||||
new_vcpus = old_vcpus - hotunplugged;
|
||||
}
|
||||
Ordering::Equal => {}
|
||||
}
|
||||
}
|
||||
|
||||
Ok((old_vcpus, new_vcpus))
|
||||
}
|
||||
|
||||
pub(crate) async fn get_pids(&self) -> Result<Vec<u32>> {
|
||||
@@ -385,6 +439,7 @@ impl Persist for QemuInner {
|
||||
Ok(QemuInner {
|
||||
id: hypervisor_state.id,
|
||||
qemu_process: None,
|
||||
qmp: None,
|
||||
config: hypervisor_state.config,
|
||||
devices: Vec::new(),
|
||||
netns: None,
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
|
||||
mod cmdline_generator;
|
||||
mod inner;
|
||||
mod qmp;
|
||||
|
||||
use crate::device::DeviceType;
|
||||
use crate::hypervisor_persist::HypervisorState;
|
||||
@@ -127,7 +128,7 @@ impl Hypervisor for Qemu {
|
||||
}
|
||||
|
||||
async fn resize_vcpu(&self, old_vcpus: u32, new_vcpus: u32) -> Result<(u32, u32)> {
|
||||
let inner = self.inner.read().await;
|
||||
let mut inner = self.inner.write().await;
|
||||
inner.resize_vcpu(old_vcpus, new_vcpus).await
|
||||
}
|
||||
|
||||
|
||||
135
src/runtime-rs/crates/hypervisor/src/qemu/qmp.rs
Normal file
135
src/runtime-rs/crates/hypervisor/src/qemu/qmp.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
// Copyright (c) 2024 Red Hat
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::Result;
|
||||
use std::fmt::{Debug, Error, Formatter};
|
||||
use std::io::BufReader;
|
||||
use std::os::unix::net::UnixStream;
|
||||
use std::time::Duration;
|
||||
|
||||
use qapi::qmp;
|
||||
use qapi_spec::Dictionary;
|
||||
|
||||
pub struct Qmp {
|
||||
qmp: qapi::Qmp<qapi::Stream<BufReader<UnixStream>, UnixStream>>,
|
||||
}
|
||||
|
||||
// We have to implement Debug since the Hypervisor trait requires it and Qmp
|
||||
// is ultimately stored in one of Hypervisor's implementations (Qemu).
|
||||
// We can't do it automatically since the type of Qmp::qmp isn't Debug.
|
||||
impl Debug for Qmp {
|
||||
fn fmt(&self, _f: &mut Formatter<'_>) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Qmp {
|
||||
pub fn new(qmp_sock_path: &str) -> Result<Self> {
|
||||
let stream = UnixStream::connect(qmp_sock_path)?;
|
||||
|
||||
// Set the read timeout to protect runtime-rs from blocking forever
|
||||
// trying to set up QMP connection if qemu fails to launch. The exact
|
||||
// value is a matter of judegement. Setting it too long would risk
|
||||
// being ineffective since container runtime would timeout first anyway
|
||||
// (containerd's task creation timeout is 2 s by default). OTOH
|
||||
// setting it too short would risk interfering with a normal launch,
|
||||
// perhaps just seeing some delay due to a heavily loaded host.
|
||||
stream.set_read_timeout(Some(Duration::from_millis(250)))?;
|
||||
|
||||
let mut qmp = Qmp {
|
||||
qmp: qapi::Qmp::new(qapi::Stream::new(
|
||||
BufReader::new(stream.try_clone()?),
|
||||
stream,
|
||||
)),
|
||||
};
|
||||
|
||||
let info = qmp.qmp.handshake()?;
|
||||
info!(sl!(), "QMP initialized: {:#?}", info);
|
||||
|
||||
Ok(qmp)
|
||||
}
|
||||
|
||||
pub fn hotplug_vcpus(&mut self, vcpu_cnt: u32) -> Result<u32> {
|
||||
let hotpluggable_cpus = self.qmp.execute(&qmp::query_hotpluggable_cpus {})?;
|
||||
//info!(sl!(), "hotpluggable CPUs: {:#?}", hotpluggable_cpus);
|
||||
|
||||
let mut hotplugged = 0;
|
||||
for vcpu in &hotpluggable_cpus {
|
||||
if hotplugged >= vcpu_cnt {
|
||||
break;
|
||||
}
|
||||
let core_id = match vcpu.props.core_id {
|
||||
Some(id) => id,
|
||||
None => continue,
|
||||
};
|
||||
if vcpu.qom_path.is_some() {
|
||||
info!(sl!(), "hotpluggable vcpu {} hotplugged already", core_id);
|
||||
continue;
|
||||
}
|
||||
let socket_id = match vcpu.props.socket_id {
|
||||
Some(id) => id,
|
||||
None => continue,
|
||||
};
|
||||
let thread_id = match vcpu.props.thread_id {
|
||||
Some(id) => id,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let mut cpu_args = Dictionary::new();
|
||||
cpu_args.insert("socket-id".to_owned(), socket_id.into());
|
||||
cpu_args.insert("core-id".to_owned(), core_id.into());
|
||||
cpu_args.insert("thread-id".to_owned(), thread_id.into());
|
||||
self.qmp.execute(&qmp::device_add {
|
||||
bus: None,
|
||||
id: Some(vcpu_id_from_core_id(core_id)),
|
||||
driver: hotpluggable_cpus[0].type_.clone(),
|
||||
arguments: cpu_args,
|
||||
})?;
|
||||
|
||||
hotplugged += 1;
|
||||
}
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"Qmp::hotplug_vcpus(): hotplugged {}/{} vcpus", hotplugged, vcpu_cnt
|
||||
);
|
||||
|
||||
Ok(hotplugged)
|
||||
}
|
||||
|
||||
pub fn hotunplug_vcpus(&mut self, vcpu_cnt: u32) -> Result<u32> {
|
||||
let hotpluggable_cpus = self.qmp.execute(&qmp::query_hotpluggable_cpus {})?;
|
||||
|
||||
let mut hotunplugged = 0;
|
||||
for vcpu in &hotpluggable_cpus {
|
||||
if hotunplugged >= vcpu_cnt {
|
||||
break;
|
||||
}
|
||||
let core_id = match vcpu.props.core_id {
|
||||
Some(id) => id,
|
||||
None => continue,
|
||||
};
|
||||
if vcpu.qom_path.is_none() {
|
||||
info!(sl!(), "hotpluggable vcpu {} not hotplugged yet", core_id);
|
||||
continue;
|
||||
}
|
||||
self.qmp.execute(&qmp::device_del {
|
||||
id: vcpu_id_from_core_id(core_id),
|
||||
})?;
|
||||
hotunplugged += 1;
|
||||
}
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"Qmp::hotunplug_vcpus(): hotunplugged {}/{} vcpus", hotunplugged, vcpu_cnt
|
||||
);
|
||||
|
||||
Ok(hotunplugged)
|
||||
}
|
||||
}
|
||||
|
||||
/// Derive the stable qemu device id for a hotplugged vcpu from its core id,
/// e.g. core 2 -> "cpu-2".
fn vcpu_id_from_core_id(core_id: i64) -> String {
    let mut id = String::from("cpu-");
    id.push_str(&core_id.to_string());
    id
}
|
||||
@@ -14,9 +14,14 @@ pub const PERSIST_FILE: &str = "state.json";
|
||||
use kata_sys_util::validate::verify_id;
|
||||
use safe_path::scoped_join;
|
||||
|
||||
pub fn to_disk<T: serde::Serialize>(value: &T, sid: &str) -> Result<()> {
|
||||
pub fn to_disk<T: serde::Serialize>(value: &T, sid: &str, jailer_path: &str) -> Result<()> {
|
||||
verify_id(sid).context("failed to verify sid")?;
|
||||
let mut path = scoped_join(KATA_PATH, sid)?;
|
||||
// FIXME: handle jailed case
|
||||
let mut path = match jailer_path {
|
||||
"" => scoped_join(KATA_PATH, sid)?,
|
||||
_ => scoped_join(jailer_path, "root")?,
|
||||
};
|
||||
//let mut path = scoped_join(KATA_PATH, sid)?;
|
||||
if path.exists() {
|
||||
path.push(PERSIST_FILE);
|
||||
let f = File::create(path)
|
||||
@@ -62,10 +67,10 @@ mod tests {
|
||||
key: 1,
|
||||
};
|
||||
// invalid sid
|
||||
assert!(to_disk(&data, "..3").is_err());
|
||||
assert!(to_disk(&data, "../../../3").is_err());
|
||||
assert!(to_disk(&data, "a/b/c").is_err());
|
||||
assert!(to_disk(&data, ".#cdscd.").is_err());
|
||||
assert!(to_disk(&data, "..3", "").is_err());
|
||||
assert!(to_disk(&data, "../../../3", "").is_err());
|
||||
assert!(to_disk(&data, "a/b/c", "").is_err());
|
||||
assert!(to_disk(&data, ".#cdscd.", "").is_err());
|
||||
|
||||
let sid = "aadede";
|
||||
let sandbox_dir = [KATA_PATH, sid].join("/");
|
||||
@@ -74,7 +79,7 @@ mod tests {
|
||||
.create(&sandbox_dir)
|
||||
.is_ok()
|
||||
{
|
||||
assert!(to_disk(&data, sid).is_ok());
|
||||
assert!(to_disk(&data, sid, "").is_ok());
|
||||
if let Ok(result) = from_disk::<Kata>(sid) {
|
||||
assert_eq!(result.name, data.name);
|
||||
assert_eq!(result.key, data.key);
|
||||
|
||||
@@ -30,7 +30,7 @@ serde = { version = "1.0.138", features = ["derive"] }
|
||||
serde_json = "1.0.82"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
tokio = { version = "1.28.1", features = ["process"] }
|
||||
tokio = { version = "1.38.0", features = ["process"] }
|
||||
tracing = "0.1.36"
|
||||
uuid = { version = "0.4", features = ["v4"] }
|
||||
|
||||
@@ -40,7 +40,7 @@ kata-types = { path = "../../../libs/kata-types" }
|
||||
kata-sys-util = { path = "../../../libs/kata-sys-util" }
|
||||
logging = { path = "../../../libs/logging" }
|
||||
oci = { path = "../../../libs/oci" }
|
||||
persist = { path = "../persist"}
|
||||
persist = { path = "../persist" }
|
||||
tests_utils = { path = "../../tests/utils" }
|
||||
|
||||
[features]
|
||||
|
||||
@@ -11,7 +11,7 @@ lazy_static = "1.4.0"
|
||||
netns-rs = "0.1.0"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
tokio = { version = "1.28.1", features = ["rt-multi-thread"] }
|
||||
tokio = { version = "1.38.0", features = ["rt-multi-thread"] }
|
||||
tracing = "0.1.36"
|
||||
tracing-opentelemetry = "0.18.0"
|
||||
opentelemetry = { version = "0.18.0", features = ["rt-tokio-current-thread", "trace", "rt-tokio"] }
|
||||
|
||||
@@ -10,7 +10,7 @@ license = "Apache-2.0"
|
||||
[dependencies]
|
||||
anyhow = "^1.0"
|
||||
async-trait = "0.1.48"
|
||||
containerd-shim-protos = { version = "0.6.0", features = ["async"]}
|
||||
containerd-shim-protos = { version = "0.6.0", features = ["async"] }
|
||||
lazy_static = "1.4.0"
|
||||
nix = "0.24.2"
|
||||
protobuf = "3.2.0"
|
||||
@@ -19,12 +19,11 @@ slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
strum = { version = "0.24.0", features = ["derive"] }
|
||||
thiserror = "^1.0"
|
||||
tokio = { version = "1.28.1", features = ["rt-multi-thread", "process", "fs"] }
|
||||
tokio = { version = "1.38.0", features = ["rt-multi-thread", "process", "fs"] }
|
||||
ttrpc = "0.8"
|
||||
persist = {path = "../../persist"}
|
||||
persist = { path = "../../persist" }
|
||||
agent = { path = "../../agent" }
|
||||
kata-sys-util = { path = "../../../../libs/kata-sys-util" }
|
||||
kata-types = { path = "../../../../libs/kata-types" }
|
||||
oci = { path = "../../../../libs/oci" }
|
||||
resource = { path = "../../resource" }
|
||||
|
||||
|
||||
@@ -7,8 +7,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
anyhow = "^1.0"
|
||||
async-trait = "0.1.48"
|
||||
tokio = { version = "1.28.1" }
|
||||
tokio = { version = "1.38.0" }
|
||||
|
||||
common = { path = "../common" }
|
||||
kata-types = { path = "../../../../libs/kata-types" }
|
||||
resource = { path = "../../resource" }
|
||||
resource = { path = "../../resource" }
|
||||
|
||||
@@ -9,7 +9,7 @@ license = "Apache-2.0"
|
||||
anyhow = "^1.0"
|
||||
async-trait = "0.1.48"
|
||||
awaitgroup = "0.6.0"
|
||||
containerd-shim-protos = { version = "0.6.0", features = ["async"]}
|
||||
containerd-shim-protos = { version = "0.6.0", features = ["async"] }
|
||||
futures = "0.3.19"
|
||||
lazy_static = "1.4.0"
|
||||
libc = ">=0.2.39"
|
||||
@@ -21,7 +21,7 @@ serde_derive = "1.0.27"
|
||||
serde_json = "1.0.82"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
tokio = { version = "1.28.1" }
|
||||
tokio = { version = "1.38.0" }
|
||||
toml = "0.4.2"
|
||||
url = "2.1.1"
|
||||
async-std = "1.12.0"
|
||||
@@ -32,9 +32,9 @@ common = { path = "../common" }
|
||||
hypervisor = { path = "../../hypervisor", features = ["cloud-hypervisor"] }
|
||||
kata-sys-util = { path = "../../../../libs/kata-sys-util" }
|
||||
kata-types = { path = "../../../../libs/kata-types" }
|
||||
logging = { path = "../../../../libs/logging"}
|
||||
logging = { path = "../../../../libs/logging" }
|
||||
oci = { path = "../../../../libs/oci" }
|
||||
persist = { path = "../../persist"}
|
||||
persist = { path = "../../persist" }
|
||||
resource = { path = "../../resource" }
|
||||
|
||||
[features]
|
||||
|
||||
@@ -44,6 +44,8 @@ struct ContainerIoWrite<'inner> {
|
||||
pub info: Arc<ContainerIoInfo>,
|
||||
write_future:
|
||||
Option<Pin<Box<dyn Future<Output = Result<agent::WriteStreamResponse>> + Send + 'inner>>>,
|
||||
shutdown_future:
|
||||
Option<Pin<Box<dyn Future<Output = Result<agent::WriteStreamResponse>> + Send + 'inner>>>,
|
||||
}
|
||||
|
||||
impl<'inner> ContainerIoWrite<'inner> {
|
||||
@@ -51,6 +53,7 @@ impl<'inner> ContainerIoWrite<'inner> {
|
||||
Self {
|
||||
info,
|
||||
write_future: Default::default(),
|
||||
shutdown_future: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,6 +83,30 @@ impl<'inner> ContainerIoWrite<'inner> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Call rpc agent.write_stdin() with empty data to tell agent to close stdin of the process
|
||||
fn poll_shutdown_inner(&'inner mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
let mut shutdown_future = self.shutdown_future.take();
|
||||
if shutdown_future.is_none() {
|
||||
let req = agent::WriteStreamRequest {
|
||||
process_id: self.info.process.clone().into(),
|
||||
data: Vec::with_capacity(0),
|
||||
};
|
||||
shutdown_future = Some(Box::pin(self.info.agent.write_stdin(req)));
|
||||
}
|
||||
|
||||
let mut shutdown_future = shutdown_future.unwrap();
|
||||
match shutdown_future.as_mut().poll(cx) {
|
||||
Poll::Ready(v) => match v {
|
||||
Ok(_) => Poll::Ready(Ok(())),
|
||||
Err(err) => Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::Other, err))),
|
||||
},
|
||||
Poll::Pending => {
|
||||
self.shutdown_future = Some(shutdown_future);
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'inner> AsyncWrite for ContainerIoWrite<'inner> {
|
||||
@@ -100,8 +127,13 @@ impl<'inner> AsyncWrite for ContainerIoWrite<'inner> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Poll::Ready(Ok(()))
|
||||
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
let me = unsafe {
|
||||
std::mem::transmute::<&mut ContainerIoWrite<'_>, &mut ContainerIoWrite<'inner>>(
|
||||
&mut *self,
|
||||
)
|
||||
};
|
||||
me.poll_shutdown_inner(cx)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -139,12 +139,11 @@ impl ContainerManager for VirtContainerManager {
|
||||
// * should be run after the container is deleted but before delete operation returns
|
||||
// * spec details: https://github.com/opencontainers/runtime-spec/blob/c1662686cff159595277b79322d0272f5182941b/config.md#poststop
|
||||
let c_spec = c.spec().await;
|
||||
let vmm_master_tid = self.hypervisor.get_vmm_master_tid().await?;
|
||||
let state = oci::State {
|
||||
version: c_spec.version.clone(),
|
||||
id: c.container_id.to_string(),
|
||||
status: oci::ContainerState::Stopped,
|
||||
pid: vmm_master_tid as i32,
|
||||
pid: self.pid as i32,
|
||||
bundle: c.config().await.bundle,
|
||||
annotations: c_spec.annotations.clone(),
|
||||
};
|
||||
|
||||
@@ -11,7 +11,7 @@ use agent::Agent;
|
||||
use anyhow::{Context, Result};
|
||||
use awaitgroup::{WaitGroup, Worker as WaitGroupWorker};
|
||||
use common::types::{ContainerProcess, ProcessExitStatus, ProcessStateInfo, ProcessStatus, PID};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::sync::{watch, RwLock};
|
||||
|
||||
use super::container::Container;
|
||||
@@ -23,6 +23,13 @@ pub type ProcessWatcher = (
|
||||
Arc<RwLock<ProcessExitStatus>>,
|
||||
);
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum StdIoType {
|
||||
Stdin,
|
||||
Stdout,
|
||||
Stderr,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Process {
|
||||
pub process: ContainerProcess,
|
||||
@@ -62,10 +69,6 @@ pub struct Process {
|
||||
pub exit_status: Arc<RwLock<ProcessExitStatus>>,
|
||||
pub exit_watcher_rx: Option<watch::Receiver<bool>>,
|
||||
pub exit_watcher_tx: Option<watch::Sender<bool>>,
|
||||
// used to sync between stdin io copy thread(tokio) and the close it call.
|
||||
// close io call should wait until the stdin io copy finished to
|
||||
// prevent stdin data lost.
|
||||
pub wg_stdin: WaitGroup,
|
||||
|
||||
// io streams using vsock fd passthrough feature
|
||||
pub passfd_io: Option<PassfdIo>,
|
||||
@@ -119,7 +122,6 @@ impl Process {
|
||||
exit_status: Arc::new(RwLock::new(ProcessExitStatus::new())),
|
||||
exit_watcher_rx: Some(receiver),
|
||||
exit_watcher_tx: Some(sender),
|
||||
wg_stdin: WaitGroup::new(),
|
||||
passfd_io: None,
|
||||
}
|
||||
}
|
||||
@@ -246,9 +248,8 @@ impl Process {
|
||||
self.post_fifos_open()?;
|
||||
|
||||
// start io copy for stdin
|
||||
let wgw_stdin = self.wg_stdin.worker();
|
||||
if let Some(stdin) = shim_io.stdin {
|
||||
self.run_io_copy("stdin", wgw_stdin, stdin, container_io.stdin)
|
||||
self.run_io_copy(StdIoType::Stdin, None, stdin, container_io.stdin)
|
||||
.await?;
|
||||
}
|
||||
|
||||
@@ -258,14 +259,19 @@ impl Process {
|
||||
|
||||
// start io copy for stdout
|
||||
if let Some(stdout) = shim_io.stdout {
|
||||
self.run_io_copy("stdout", wgw.clone(), container_io.stdout, stdout)
|
||||
.await?;
|
||||
self.run_io_copy(
|
||||
StdIoType::Stdout,
|
||||
Some(wgw.clone()),
|
||||
container_io.stdout,
|
||||
stdout,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// start io copy for stderr
|
||||
if !self.terminal {
|
||||
if let Some(stderr) = shim_io.stderr {
|
||||
self.run_io_copy("stderr", wgw, container_io.stderr, stderr)
|
||||
self.run_io_copy(StdIoType::Stderr, Some(wgw), container_io.stderr, stderr)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
@@ -276,27 +282,51 @@ impl Process {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_io_copy<'a>(
|
||||
&'a self,
|
||||
io_name: &'a str,
|
||||
wgw: WaitGroupWorker,
|
||||
async fn run_io_copy(
|
||||
&self,
|
||||
io_type: StdIoType,
|
||||
wgw: Option<WaitGroupWorker>,
|
||||
mut reader: Box<dyn AsyncRead + Send + Unpin>,
|
||||
mut writer: Box<dyn AsyncWrite + Send + Unpin>,
|
||||
) -> Result<()> {
|
||||
info!(self.logger, "run io copy for {}", io_name);
|
||||
let io_name = io_name.to_string();
|
||||
let logger = self.logger.new(o!("io_name" => io_name));
|
||||
let io_name = format!("{:?}", io_type);
|
||||
|
||||
info!(self.logger, "run_io_copy[{}] starts", io_name);
|
||||
let logger = self.logger.new(o!("io_name" => io_name.clone()));
|
||||
|
||||
tokio::spawn(async move {
|
||||
match tokio::io::copy(&mut reader, &mut writer).await {
|
||||
Err(e) => {
|
||||
warn!(logger, "run_io_copy: failed to copy stream: {}", e);
|
||||
warn!(
|
||||
logger,
|
||||
"run_io_copy[{}]: failed to copy stream: {}", io_name, e
|
||||
);
|
||||
}
|
||||
Ok(length) => {
|
||||
info!(logger, "run_io_copy: stop to copy stream length {}", length)
|
||||
info!(
|
||||
logger,
|
||||
"run_io_copy[{}]: stop to copy stream length {}", io_name, length
|
||||
);
|
||||
// Send EOF to agent by calling rpc write_stdin with 0 length data
|
||||
if io_type == StdIoType::Stdin {
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(
|
||||
logger,
|
||||
"run_io_copy[{}]: failed to shutdown: {:?}", io_name, e
|
||||
);
|
||||
e
|
||||
})
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
wgw.done();
|
||||
if let Some(w) = wgw {
|
||||
w.done()
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
@@ -400,24 +430,13 @@ impl Process {
|
||||
}
|
||||
|
||||
/// Close the stdin of the process in container.
|
||||
pub async fn close_io(&mut self, agent: Arc<dyn Agent>) {
|
||||
pub async fn close_io(&mut self, _agent: Arc<dyn Agent>) {
|
||||
// Close the stdin writer keeper so that
|
||||
// the end signal could be received in the read side
|
||||
self.stdin_w.take();
|
||||
|
||||
// In passfd io mode, the stdin close and sync logic is handled
|
||||
// in the agent side.
|
||||
if self.passfd_io.is_none() {
|
||||
self.wg_stdin.wait().await;
|
||||
}
|
||||
|
||||
let req = agent::CloseStdinRequest {
|
||||
process_id: self.process.clone().into(),
|
||||
};
|
||||
|
||||
if let Err(e) = agent.close_stdin(req).await {
|
||||
warn!(self.logger, "failed close process io: {:?}", e);
|
||||
}
|
||||
// The stdin will be closed when EOF is got in rpc `read_stdout` of agent
|
||||
// so we will not call agent.close_stdin anymore.
|
||||
}
|
||||
|
||||
pub async fn get_status(&self) -> ProcessStatus {
|
||||
|
||||
@@ -23,9 +23,13 @@ use common::{message::Message, RuntimeHandler, RuntimeInstance};
|
||||
use hypervisor::Hypervisor;
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use hypervisor::{dragonball::Dragonball, HYPERVISOR_DRAGONBALL};
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use hypervisor::{firecracker::Firecracker, HYPERVISOR_FIRECRACKER};
|
||||
use hypervisor::{qemu::Qemu, HYPERVISOR_QEMU};
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use kata_types::config::DragonballConfig;
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use kata_types::config::FirecrackerConfig;
|
||||
use kata_types::config::{hypervisor::register_hypervisor_plugin, QemuConfig, TomlConfig};
|
||||
|
||||
#[cfg(all(feature = "cloud-hypervisor", not(target_arch = "s390x")))]
|
||||
@@ -55,6 +59,9 @@ impl RuntimeHandler for VirtContainer {
|
||||
{
|
||||
let dragonball_config = Arc::new(DragonballConfig::new());
|
||||
register_hypervisor_plugin("dragonball", dragonball_config);
|
||||
|
||||
let firecracker_config = Arc::new(FirecrackerConfig::new());
|
||||
register_hypervisor_plugin("firecracker", firecracker_config);
|
||||
}
|
||||
|
||||
let qemu_config = Arc::new(QemuConfig::new());
|
||||
@@ -160,6 +167,14 @@ async fn new_hypervisor(toml_config: &TomlConfig) -> Result<Arc<dyn Hypervisor>>
|
||||
.await;
|
||||
Ok(Arc::new(hypervisor))
|
||||
}
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
HYPERVISOR_FIRECRACKER => {
|
||||
let mut hypervisor = Firecracker::new();
|
||||
hypervisor
|
||||
.set_hypervisor_config(hypervisor_config.clone())
|
||||
.await;
|
||||
Ok(Arc::new(hypervisor))
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "cloud-hypervisor", not(target_arch = "s390x")))]
|
||||
HYPERVISOR_NAME_CH => {
|
||||
|
||||
@@ -18,12 +18,14 @@ use common::{Sandbox, SandboxNetworkEnv};
|
||||
use containerd_shim_protos::events::task::TaskOOM;
|
||||
use hypervisor::VsockConfig;
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use hypervisor::{dragonball::Dragonball, HYPERVISOR_DRAGONBALL};
|
||||
use hypervisor::{dragonball::Dragonball, HYPERVISOR_DRAGONBALL, HYPERVISOR_FIRECRACKER};
|
||||
use hypervisor::{qemu::Qemu, HYPERVISOR_QEMU};
|
||||
use hypervisor::{utils::get_hvsock_path, HybridVsockConfig, DEFAULT_GUEST_VSOCK_CID};
|
||||
use hypervisor::{BlockConfig, Hypervisor};
|
||||
use kata_sys_util::hooks::HookStates;
|
||||
use kata_types::capabilities::CapabilityBits;
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
use kata_types::config::hypervisor::HYPERVISOR_NAME_CH;
|
||||
use kata_types::config::TomlConfig;
|
||||
use persist::{self, sandbox_persist::Persist};
|
||||
use resource::manager::ManagerArgs;
|
||||
@@ -570,12 +572,39 @@ impl Persist for VirtSandbox {
|
||||
|
||||
/// Save a state of Sandbox
|
||||
async fn save(&self) -> Result<Self::State> {
|
||||
let hypervisor_state = self.hypervisor.save_state().await?;
|
||||
let sandbox_state = crate::sandbox_persist::SandboxState {
|
||||
sandbox_type: VIRTCONTAINER.to_string(),
|
||||
resource: Some(self.resource_manager.save().await?),
|
||||
hypervisor: Some(self.hypervisor.save_state().await?),
|
||||
hypervisor: match hypervisor_state.hypervisor_type.as_str() {
|
||||
// TODO support other hypervisors
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
HYPERVISOR_DRAGONBALL => Ok(Some(hypervisor_state)),
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
HYPERVISOR_NAME_CH => Ok(Some(hypervisor_state)),
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
HYPERVISOR_FIRECRACKER => Ok(Some(hypervisor_state)),
|
||||
HYPERVISOR_QEMU => Ok(Some(hypervisor_state)),
|
||||
_ => Err(anyhow!(
|
||||
"Unsupported hypervisor {}",
|
||||
hypervisor_state.hypervisor_type
|
||||
)),
|
||||
}?,
|
||||
};
|
||||
persist::to_disk(&sandbox_state, &self.sid)?;
|
||||
// FIXME: properly handle jailed case
|
||||
// eg: Determine if we are running jailed:
|
||||
// let h = sandbox_state.hypervisor.clone().unwrap_or_default();
|
||||
// Figure out the jailed path:
|
||||
// jailed_path = h.<>
|
||||
// and somehow store the sandbox state into the jail:
|
||||
// persist::to_disk(&sandbox_state, &self.sid, jailed_path)?;
|
||||
// Issue is, how to handle restore.
|
||||
let h = sandbox_state.hypervisor.as_ref().unwrap();
|
||||
let vmpath = match h.jailed {
|
||||
true => h.vm_path.clone(),
|
||||
false => "".to_string(),
|
||||
};
|
||||
persist::to_disk(&sandbox_state, &self.sid, vmpath.as_str())?;
|
||||
Ok(sandbox_state)
|
||||
}
|
||||
/// Restore Sandbox
|
||||
|
||||
@@ -7,8 +7,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
anyhow = "^1.0"
|
||||
async-trait = "0.1.48"
|
||||
tokio = { version = "1.28.1" }
|
||||
tokio = { version = "1.38.0" }
|
||||
|
||||
common = { path = "../common" }
|
||||
kata-types = { path = "../../../../libs/kata-types" }
|
||||
resource = { path = "../../resource" }
|
||||
resource = { path = "../../resource" }
|
||||
|
||||
@@ -10,14 +10,14 @@ anyhow = "^1.0"
|
||||
async-trait = "0.1.48"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
tokio = { version = "1.28.1", features = ["rt-multi-thread"] }
|
||||
tokio = { version = "1.38.0", features = ["rt-multi-thread"] }
|
||||
tracing = "0.1.36"
|
||||
ttrpc = "0.8"
|
||||
|
||||
common = { path = "../runtimes/common" }
|
||||
containerd-shim-protos = { version = "0.6.0", features = ["async"]}
|
||||
containerd-shim-protos = { version = "0.6.0", features = ["async"] }
|
||||
containerd-shim = { version = "0.6.0", features = ["async"] }
|
||||
logging = { path = "../../../libs/logging"}
|
||||
logging = { path = "../../../libs/logging" }
|
||||
kata-types = { path = "../../../libs/kata-types" }
|
||||
runtimes = { path = "../runtimes" }
|
||||
persist = { path = "../persist" }
|
||||
|
||||
@@ -10,5 +10,5 @@ anyhow = "^1.0"
|
||||
common = { path = "../runtimes/common" }
|
||||
logging = { path = "../../../libs/logging"}
|
||||
runtimes = { path = "../runtimes" }
|
||||
tokio = { version = "1.28.1", features = [ "rt", "rt-multi-thread" ] }
|
||||
tokio = { version = "1.38.0", features = [ "rt", "rt-multi-thread" ] }
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ slog-async = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
slog-stdlog = "4.1.0"
|
||||
thiserror = "1.0.30"
|
||||
tokio = { version = "1.28.1", features = [ "rt", "rt-multi-thread" ] }
|
||||
tokio = { version = "1.38.0", features = [ "rt", "rt-multi-thread" ] }
|
||||
unix_socket2 = "0.5.4"
|
||||
tracing = "0.1.36"
|
||||
tracing-opentelemetry = "0.18.0"
|
||||
|
||||
@@ -50,8 +50,7 @@ impl ShimExecutor {
|
||||
let sid = id
|
||||
.ok_or(Error::InvalidArgument)
|
||||
.context("get sid for container")?;
|
||||
let (address, pid) = self.get_shim_info_from_sandbox(&sid)?;
|
||||
self.write_pid_file(&bundle_path, pid)?;
|
||||
let address = self.socket_address(&sid).context("socket address")?;
|
||||
self.write_address(&bundle_path, &address)?;
|
||||
Ok(address)
|
||||
}
|
||||
@@ -105,22 +104,6 @@ impl ShimExecutor {
|
||||
|
||||
Ok(child.id())
|
||||
}
|
||||
|
||||
fn get_shim_info_from_sandbox(&self, sandbox_id: &str) -> Result<(PathBuf, u32)> {
|
||||
// All containers of a pod share the same pod socket address.
|
||||
let address = self.socket_address(sandbox_id).context("socket address")?;
|
||||
let bundle_path = get_bundle_path().context("get bundle path")?;
|
||||
let parent_bundle_path = Path::new(&bundle_path)
|
||||
.parent()
|
||||
.unwrap_or_else(|| Path::new(""));
|
||||
let sandbox_bundle_path = parent_bundle_path
|
||||
.join(sandbox_id)
|
||||
.canonicalize()
|
||||
.context(Error::GetBundlePath)?;
|
||||
let pid = self.read_pid_file(&sandbox_bundle_path)?;
|
||||
|
||||
Ok((address, pid))
|
||||
}
|
||||
}
|
||||
|
||||
fn new_listener(address: &Path) -> Result<UnixListener> {
|
||||
@@ -139,7 +122,6 @@ mod tests {
|
||||
use std::path::Path;
|
||||
|
||||
use serial_test::serial;
|
||||
use tests_utils::gen_id;
|
||||
|
||||
use super::*;
|
||||
use crate::Args;
|
||||
@@ -173,50 +155,6 @@ mod tests {
|
||||
assert_eq!(cmd.get_current_dir().unwrap(), get_bundle_path().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_get_info_from_sandbox() {
|
||||
let dir = tempfile::tempdir().unwrap();
|
||||
let sandbox_id = gen_id(16);
|
||||
let bundle_path = &dir.path().join(&sandbox_id);
|
||||
std::fs::create_dir(bundle_path).unwrap();
|
||||
std::env::set_current_dir(bundle_path).unwrap();
|
||||
|
||||
let args = Args {
|
||||
id: sandbox_id.to_owned(),
|
||||
namespace: "default_namespace".into(),
|
||||
address: "default_address".into(),
|
||||
publish_binary: "containerd".into(),
|
||||
bundle: bundle_path.to_str().unwrap().into(),
|
||||
..Default::default()
|
||||
};
|
||||
let executor = ShimExecutor::new(args);
|
||||
|
||||
let addr = executor.socket_address(&executor.args.id).unwrap();
|
||||
executor.write_address(bundle_path, &addr).unwrap();
|
||||
executor.write_pid_file(bundle_path, 1267).unwrap();
|
||||
|
||||
let container_id = gen_id(16);
|
||||
let bundle_path2 = &dir.path().join(&container_id);
|
||||
std::fs::create_dir(bundle_path2).unwrap();
|
||||
std::env::set_current_dir(bundle_path2).unwrap();
|
||||
|
||||
let args = Args {
|
||||
id: container_id,
|
||||
namespace: "default_namespace".into(),
|
||||
address: "default_address".into(),
|
||||
publish_binary: "containerd".into(),
|
||||
bundle: bundle_path2.to_str().unwrap().into(),
|
||||
..Default::default()
|
||||
};
|
||||
let executor2 = ShimExecutor::new(args);
|
||||
|
||||
let (address, pid) = executor2.get_shim_info_from_sandbox(&sandbox_id).unwrap();
|
||||
|
||||
assert_eq!(pid, 1267);
|
||||
assert_eq!(&address, &addr);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_new_listener() {
|
||||
|
||||
@@ -150,6 +150,7 @@ FIRMWARETDVFVOLUMEPATH :=
|
||||
|
||||
FIRMWARESEVPATH := $(PREFIXDEPS)/share/ovmf/OVMF.fd
|
||||
FIRMWARESNPPATH := $(PREFIXDEPS)/share/ovmf/AMDSEV.fd
|
||||
SNPCERTSPATH := /opt/snp/cert_chain.cert
|
||||
|
||||
ROOTMEASURECONFIG ?= ""
|
||||
KERNELPARAMS += $(ROOTMEASURECONFIG)
|
||||
@@ -244,7 +245,7 @@ DEFDISABLEBLOCK := false
|
||||
DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs
|
||||
DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs
|
||||
# Please keep DEFSHAREDFS_QEMU_COCO_DEV_VIRTIOFS in sync with TDX/SEV/SNP
|
||||
DEFSHAREDFS_QEMU_COCO_DEV_VIRTIOFS := virtio-9p
|
||||
DEFSHAREDFS_QEMU_COCO_DEV_VIRTIOFS := none
|
||||
DEFSHAREDFS_STRATOVIRT_VIRTIOFS := virtio-fs
|
||||
DEFSHAREDFS_QEMU_TDX_VIRTIOFS := none
|
||||
DEFSHAREDFS_QEMU_SEV_VIRTIOFS := none
|
||||
@@ -282,6 +283,9 @@ DEFBINDMOUNTS := []
|
||||
# Create Container Timeout in seconds
|
||||
DEFCREATECONTAINERTIMEOUT ?= 60
|
||||
|
||||
# Default directory of directly attachable network config.
|
||||
DEFDANCONF := /run/kata-containers/dans
|
||||
|
||||
SED = sed
|
||||
|
||||
CLI_DIR = cmd
|
||||
@@ -679,6 +683,7 @@ USER_VARS += FIRMWARETDVFPATH
|
||||
USER_VARS += FIRMWAREVOLUMEPATH
|
||||
USER_VARS += FIRMWARETDVFVOLUMEPATH
|
||||
USER_VARS += FIRMWARESNPPATH
|
||||
USER_VARS += SNPCERTSPATH
|
||||
USER_VARS += MACHINEACCELERATORS
|
||||
USER_VARS += CPUFEATURES
|
||||
USER_VARS += TDXCPUFEATURES
|
||||
@@ -772,6 +777,7 @@ USER_VARS += DEFSTATICRESOURCEMGMT_STRATOVIRT
|
||||
USER_VARS += DEFSTATICRESOURCEMGMT_TEE
|
||||
USER_VARS += DEFBINDMOUNTS
|
||||
USER_VARS += DEFCREATECONTAINERTIMEOUT
|
||||
USER_VARS += DEFDANCONF
|
||||
USER_VARS += DEFVFIOMODE
|
||||
USER_VARS += BUILDFLAGS
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user