mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-02-27 17:22:07 +00:00
Compare commits
88 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
35b32156ad | ||
|
|
2f638b3666 | ||
|
|
98bacb0efc | ||
|
|
69e681961a | ||
|
|
178ee3d7e3 | ||
|
|
7461bcd760 | ||
|
|
123c867172 | ||
|
|
98f60c100c | ||
|
|
960f089d3c | ||
|
|
92f3b11c94 | ||
|
|
e299c6bd4b | ||
|
|
06c94933f2 | ||
|
|
8c5053ca5d | ||
|
|
92619c833e | ||
|
|
4f3db76780 | ||
|
|
918c11e46b | ||
|
|
8c4507be21 | ||
|
|
a61fba6d45 | ||
|
|
ad9cb0ba58 | ||
|
|
d6dd99e986 | ||
|
|
3cbdec5a02 | ||
|
|
0623f1fe6b | ||
|
|
5883dc1bd9 | ||
|
|
4a5877f451 | ||
|
|
f90e75e542 | ||
|
|
d3b57325ee | ||
|
|
0d7bd066d3 | ||
|
|
ac1ce2d30b | ||
|
|
f4d71af457 | ||
|
|
f36f8ffa16 | ||
|
|
fcc120d495 | ||
|
|
cfbc834602 | ||
|
|
ea74df1270 | ||
|
|
c712057ae7 | ||
|
|
bc5bbfa60f | ||
|
|
0afcc57a92 | ||
|
|
bcc2ee6e12 | ||
|
|
bd797eddec | ||
|
|
b3760bb3a6 | ||
|
|
9cf1af873b | ||
|
|
4d6ca7623a | ||
|
|
719017d688 | ||
|
|
569ecdbe76 | ||
|
|
fa8a0ad49b | ||
|
|
8fbf862fa6 | ||
|
|
9141acd94c | ||
|
|
9a0ab92f65 | ||
|
|
f3eac35b55 | ||
|
|
8a7e0efd14 | ||
|
|
754308c478 | ||
|
|
a152f6034e | ||
|
|
50bf4434dd | ||
|
|
74791ed389 | ||
|
|
778ebb6e60 | ||
|
|
b5661e9882 | ||
|
|
88c13b6823 | ||
|
|
b8ce291dd0 | ||
|
|
f5e5ca427d | ||
|
|
eaa7ab7462 | ||
|
|
8d2fd24492 | ||
|
|
ab83ab6be5 | ||
|
|
1772df5ac2 | ||
|
|
2e49586445 | ||
|
|
e2a8815ba4 | ||
|
|
63495cf43a | ||
|
|
fb44305497 | ||
|
|
cea5c29e70 | ||
|
|
20c02528e5 | ||
|
|
3eb6f5858a | ||
|
|
8b0231bec8 | ||
|
|
8dc8565ed5 | ||
|
|
740e7e2f77 | ||
|
|
ef49fa95f7 | ||
|
|
727f233e2a | ||
|
|
619d1b487f | ||
|
|
babab160bc | ||
|
|
f168555569 | ||
|
|
af22e71375 | ||
|
|
b9379521a0 | ||
|
|
5b3bbc62ba | ||
|
|
b0c5f040f0 | ||
|
|
d44e39e059 | ||
|
|
43b0e95800 | ||
|
|
81801888a2 | ||
|
|
fba39ef32d | ||
|
|
57261ec97a | ||
|
|
63309514ca | ||
|
|
e229a03cc8 |
98
.github/workflows/cc-payload-after-push.yaml
vendored
98
.github/workflows/cc-payload-after-push.yaml
vendored
@@ -1,98 +0,0 @@
|
||||
name: CI | Publish Kata Containers payload for Confidential Containers
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- CCv0
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- cc-cloud-hypervisor
|
||||
- cc-kernel
|
||||
- cc-qemu
|
||||
- cc-rootfs-image
|
||||
- cc-shim-v2
|
||||
- cc-virtiofsd
|
||||
- cc-sev-kernel
|
||||
- cc-sev-ovmf
|
||||
- cc-sev-rootfs-initrd
|
||||
- cc-tdx-kernel
|
||||
- cc-tdx-rootfs-image
|
||||
- cc-tdx-qemu
|
||||
- cc-tdx-td-shim
|
||||
- cc-tdx-tdvf
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
PUSH_TO_REGISTRY: yes
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
path: kata-static.tar.xz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
kata-payload:
|
||||
needs: create-kata-tarball
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Login to Confidential Containers quay.io
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh $(pwd)/kata-static.tar.xz "quay.io/confidential-containers/runtime-payload-ci" "kata-containers-latest"
|
||||
88
.github/workflows/cc-payload.yaml
vendored
88
.github/workflows/cc-payload.yaml
vendored
@@ -1,88 +0,0 @@
|
||||
name: Publish Kata Containers payload for Confidential Containers
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'CC\-[0-9]+.[0-9]+.[0-9]+'
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- cc-cloud-hypervisor
|
||||
- cc-kernel
|
||||
- cc-qemu
|
||||
- cc-rootfs-image
|
||||
- cc-shim-v2
|
||||
- cc-virtiofsd
|
||||
- cc-sev-kernel
|
||||
- cc-sev-ovmf
|
||||
- cc-sev-rootfs-initrd
|
||||
- cc-tdx-kernel
|
||||
- cc-tdx-rootfs-image
|
||||
- cc-tdx-qemu
|
||||
- cc-tdx-td-shim
|
||||
- cc-tdx-tdvf
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
env:
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
path: kata-static.tar.xz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
kata-payload:
|
||||
needs: create-kata-tarball
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Login to quay.io
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
|
||||
password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
|
||||
- name: build-and-push-kata-payload
|
||||
id: build-and-push-kata-payload
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh $(pwd)/kata-static.tar.xz
|
||||
4
.github/workflows/commit-message-check.yaml
vendored
4
.github/workflows/commit-message-check.yaml
vendored
@@ -47,7 +47,7 @@ jobs:
|
||||
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
pattern: '^.{0,75}(\n.*)*$|^Merge pull request (?:kata-containers)?#[\d]+ from.*'
|
||||
pattern: '^.{0,75}(\n.*)*$'
|
||||
error: 'Subject too long (max 75)'
|
||||
post_error: ${{ env.error_msg }}
|
||||
|
||||
@@ -95,6 +95,6 @@ jobs:
|
||||
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:|^Merge pull request (?:kata-containers)?#[\d]+ from.*'
|
||||
pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:'
|
||||
error: 'Failed to find subsystem in subject'
|
||||
post_error: ${{ env.error_msg }}
|
||||
|
||||
8
.github/workflows/darwin-tests.yaml
vendored
8
.github/workflows/darwin-tests.yaml
vendored
@@ -9,16 +9,12 @@ on:
|
||||
name: Darwin tests
|
||||
jobs:
|
||||
test:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.16.x, 1.17.x]
|
||||
os: [macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.19.2
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Build utils
|
||||
|
||||
129
.github/workflows/deploy-ccv0-demo.yaml
vendored
129
.github/workflows/deploy-ccv0-demo.yaml
vendored
@@ -1,129 +0,0 @@
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created, edited]
|
||||
|
||||
name: deploy-ccv0-demo
|
||||
|
||||
jobs:
|
||||
check-comment-and-membership:
|
||||
runs-on: ubuntu-latest
|
||||
if: |
|
||||
github.event.issue.pull_request
|
||||
&& github.event_name == 'issue_comment'
|
||||
&& github.event.action == 'created'
|
||||
&& startsWith(github.event.comment.body, '/deploy-ccv0-demo')
|
||||
steps:
|
||||
- name: Check membership
|
||||
uses: kata-containers/is-organization-member@1.0.1
|
||||
id: is_organization_member
|
||||
with:
|
||||
organization: kata-containers
|
||||
username: ${{ github.event.comment.user.login }}
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Fail if not member
|
||||
run: |
|
||||
result=${{ steps.is_organization_member.outputs.result }}
|
||||
if [ $result == false ]; then
|
||||
user=${{ github.event.comment.user.login }}
|
||||
echo Either ${user} is not part of the kata-containers organization
|
||||
echo or ${user} has its Organization Visibility set to Private at
|
||||
echo https://github.com/orgs/kata-containers/people?query=${user}
|
||||
echo
|
||||
echo Ensure you change your Organization Visibility to Public and
|
||||
echo trigger the test again.
|
||||
exit 1
|
||||
fi
|
||||
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
needs: check-comment-and-membership
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- qemu
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
- shim-v2
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install docker
|
||||
run: |
|
||||
curl -fsSL https://test.docker.com -o test-docker.sh
|
||||
sh test-docker.sh
|
||||
|
||||
- name: Prepare confidential container rootfs
|
||||
if: ${{ matrix.asset == 'rootfs-initrd' }}
|
||||
run: |
|
||||
pushd include_rootfs/etc
|
||||
curl -LO https://raw.githubusercontent.com/confidential-containers/documentation/main/demos/ssh-demo/aa-offline_fs_kbc-keys.json
|
||||
mkdir kata-containers
|
||||
envsubst < docs/how-to/data/confidential-agent-config.toml.in > kata-containers/agent.toml
|
||||
popd
|
||||
env:
|
||||
AA_KBC_PARAMS: offline_fs_kbc::null
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
env:
|
||||
AA_KBC: offline_fs_kbc
|
||||
INCLUDE_ROOTFS: include_rootfs
|
||||
KATA_ASSET: ${{ matrix.asset }}
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
path: kata-static.tar.xz
|
||||
|
||||
kata-deploy:
|
||||
needs: create-kata-tarball
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-static-tarball
|
||||
- name: build-and-push-kata-deploy-ci
|
||||
id: build-and-push-kata-deploy-ci
|
||||
run: |
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
pushd $GITHUB_WORKSPACE
|
||||
git checkout $tag
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/confidential-containers/runtime-payload:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
|
||||
docker push quay.io/confidential-containers/runtime-payload:$pkg_sha
|
||||
mkdir -p packaging/kata-deploy
|
||||
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
8
.github/workflows/docs-url-alive-check.yaml
vendored
8
.github/workflows/docs-url-alive-check.yaml
vendored
@@ -5,11 +5,7 @@ on:
|
||||
name: Docs URL Alive Check
|
||||
jobs:
|
||||
test:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.17.x]
|
||||
os: [ubuntu-20.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-20.04
|
||||
# don't run this action on forks
|
||||
if: github.repository_owner == 'kata-containers'
|
||||
env:
|
||||
@@ -18,7 +14,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Set env
|
||||
|
||||
1
.github/workflows/kata-deploy-push.yaml
vendored
1
.github/workflows/kata-deploy-push.yaml
vendored
@@ -25,6 +25,7 @@ jobs:
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
- virtiofsd
|
||||
- nydus
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install docker
|
||||
|
||||
37
.github/workflows/kata-deploy-test.yaml
vendored
37
.github/workflows/kata-deploy-test.yaml
vendored
@@ -1,10 +1,5 @@
|
||||
on:
|
||||
workflow_dispatch: # this is used to trigger the workflow on non-main branches
|
||||
inputs:
|
||||
pr:
|
||||
description: 'PR number from the selected branch to test'
|
||||
type: string
|
||||
required: true
|
||||
issue_comment:
|
||||
types: [created, edited]
|
||||
|
||||
@@ -18,20 +13,19 @@ jobs:
|
||||
&& github.event_name == 'issue_comment'
|
||||
&& github.event.action == 'created'
|
||||
&& startsWith(github.event.comment.body, '/test_kata_deploy')
|
||||
|| github.event_name == 'workflow_dispatch'
|
||||
steps:
|
||||
- name: Check membership on comment or dispatch
|
||||
- name: Check membership
|
||||
uses: kata-containers/is-organization-member@1.0.1
|
||||
id: is_organization_member
|
||||
with:
|
||||
organization: kata-containers
|
||||
username: ${{ github.event.comment.user.login || github.event.sender.login }}
|
||||
username: ${{ github.event.comment.user.login }}
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Fail if not member
|
||||
run: |
|
||||
result=${{ steps.is_organization_member.outputs.result }}
|
||||
if [ $result == false ]; then
|
||||
user=${{ github.event.comment.user.login || github.event.sender.login }}
|
||||
user=${{ github.event.comment.user.login }}
|
||||
echo Either ${user} is not part of the kata-containers organization
|
||||
echo or ${user} has its Organization Visibility set to Private at
|
||||
echo https://github.com/orgs/kata-containers/people?query=${user}
|
||||
@@ -50,6 +44,7 @@ jobs:
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- nydus
|
||||
- qemu
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
@@ -59,12 +54,8 @@ jobs:
|
||||
- name: get-PR-ref
|
||||
id: get-PR-ref
|
||||
run: |
|
||||
if [ ${{ github.event_name }} == 'issue_comment' ]; then
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
else # workflow_dispatch
|
||||
ref="refs/pull/${{ github.event.inputs.pr }}/merge"
|
||||
fi
|
||||
echo "reference for PR: " ${ref} "event:" ${{ github.event_name }}
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
echo "reference for PR: " ${ref}
|
||||
echo "##[set-output name=pr-ref;]${ref}"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
@@ -99,12 +90,8 @@ jobs:
|
||||
- name: get-PR-ref
|
||||
id: get-PR-ref
|
||||
run: |
|
||||
if [ ${{ github.event_name }} == 'issue_comment' ]; then
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
else # workflow_dispatch
|
||||
ref="refs/pull/${{ github.event.inputs.pr }}/merge"
|
||||
fi
|
||||
echo "reference for PR: " ${ref} "event:" ${{ github.event_name }}
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
echo "reference for PR: " ${ref}
|
||||
echo "##[set-output name=pr-ref;]${ref}"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
@@ -130,12 +117,8 @@ jobs:
|
||||
- name: get-PR-ref
|
||||
id: get-PR-ref
|
||||
run: |
|
||||
if [ ${{ github.event_name }} == 'issue_comment' ]; then
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
else # workflow_dispatch
|
||||
ref="refs/pull/${{ github.event.inputs.pr }}/merge"
|
||||
fi
|
||||
echo "reference for PR: " ${ref} "event:" ${{ github.event_name }}
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
echo "reference for PR: " ${ref}
|
||||
echo "##[set-output name=pr-ref;]${ref}"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
|
||||
1
.github/workflows/release.yaml
vendored
1
.github/workflows/release.yaml
vendored
@@ -13,6 +13,7 @@ jobs:
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- nydus
|
||||
- qemu
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
|
||||
52
.github/workflows/static-checks.yaml
vendored
52
.github/workflows/static-checks.yaml
vendored
@@ -9,11 +9,7 @@ on:
|
||||
name: Static checks
|
||||
jobs:
|
||||
check-vendored-code:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.16.x, 1.17.x]
|
||||
os: [ubuntu-20.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
@@ -26,7 +22,7 @@ jobs:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
@@ -65,11 +61,7 @@ jobs:
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make vendor
|
||||
|
||||
static-checks:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.16.x, 1.17.x]
|
||||
os: [ubuntu-20.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
@@ -82,7 +74,7 @@ jobs:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
@@ -134,12 +126,9 @@ jobs:
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && make static-checks
|
||||
|
||||
|
||||
compiler-checks:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.16.x, 1.17.x]
|
||||
os: [ubuntu-20.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
runs-on: ubuntu-20.04
|
||||
env:
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
@@ -152,7 +141,7 @@ jobs:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
@@ -218,7 +207,7 @@ jobs:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.17.x
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
@@ -284,7 +273,7 @@ jobs:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.17.x
|
||||
go-version: 1.19.2
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
@@ -335,26 +324,3 @@ jobs:
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && sudo -E PATH="$PATH" make test
|
||||
|
||||
test-dragonball:
|
||||
runs-on: self-hosted
|
||||
env:
|
||||
RUST_BACKTRACE: "1"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Set env
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
- name: Install Rust
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
- name: Run Unit Test
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
cd src/dragonball
|
||||
/root/.cargo/bin/cargo version
|
||||
rustc --version
|
||||
sudo -E env PATH=$PATH LIBC=gnu SUPPORT_VIRTUALIZATION=true make test
|
||||
|
||||
6
Makefile
6
Makefile
@@ -8,7 +8,6 @@ COMPONENTS =
|
||||
|
||||
COMPONENTS += libs
|
||||
COMPONENTS += agent
|
||||
COMPONENTS += dragonball
|
||||
COMPONENTS += runtime
|
||||
COMPONENTS += runtime-rs
|
||||
|
||||
@@ -16,10 +15,9 @@ COMPONENTS += runtime-rs
|
||||
TOOLS =
|
||||
|
||||
TOOLS += agent-ctl
|
||||
TOOLS += kata-ctl
|
||||
TOOLS += log-parser
|
||||
TOOLS += runk
|
||||
TOOLS += trace-forwarder
|
||||
TOOLS += runk
|
||||
TOOLS += log-parser
|
||||
|
||||
STANDARD_TARGETS = build check clean install test vendor
|
||||
|
||||
|
||||
@@ -119,8 +119,10 @@ The table below lists the core parts of the project:
|
||||
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
|
||||
| [runtime-rs](src/runtime-rs) | core | The Rust version runtime. |
|
||||
| [agent](src/agent) | core | Management process running inside the virtual machine / POD that sets up the container environment. |
|
||||
| [libraries](src/libs) | core | Library crates shared by multiple Kata Container components or published to [`crates.io`](https://crates.io/index.html) |
|
||||
| [`dragonball`](src/dragonball) | core | An optional built-in VMM brings out-of-the-box Kata Containers experience with optimizations on container workloads |
|
||||
| [documentation](docs) | documentation | Documentation common to all components (such as design and install documentation). |
|
||||
| [libraries](src/libs) | core | Library crates shared by multiple Kata Container components or published to [`crates.io`](https://crates.io/index.html) |
|
||||
| [tests](https://github.com/kata-containers/tests) | tests | Excludes unit tests which live with the main code. |
|
||||
|
||||
### Additional components
|
||||
@@ -133,7 +135,6 @@ The table below lists the remaining parts of the project:
|
||||
| [kernel](https://www.kernel.org) | kernel | Linux kernel used by the hypervisor to boot the guest image. Patches are stored [here](tools/packaging/kernel). |
|
||||
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images and kernel for the hypervisor. |
|
||||
| [`agent-ctl`](src/tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
|
||||
| [`kata-ctl`](src/tools/kata-ctl) | utility | Tool that provides advanced commands and debug facilities. |
|
||||
| [`trace-forwarder`](src/tools/trace-forwarder) | utility | Agent tracing helper. |
|
||||
| [`runk`](src/tools/runk) | utility | Standard OCI container runtime based on the agent. |
|
||||
| [`ci`](https://github.com/kata-containers/ci) | CI | Continuous Integration configuration files and scripts. |
|
||||
|
||||
@@ -72,8 +72,7 @@ build_and_install_gperf() {
|
||||
curl -sLO "${gperf_tarball_url}"
|
||||
tar -xf "${gperf_tarball}"
|
||||
pushd "gperf-${gperf_version}"
|
||||
# gperf is a build time dependency of libseccomp and not to be used in the target.
|
||||
# Unset $CC since that might point to a cross compiler.
|
||||
# Unset $CC for configure, we will always use native for gperf
|
||||
CC= ./configure --prefix="${gperf_install_dir}"
|
||||
make
|
||||
make install
|
||||
|
||||
@@ -33,41 +33,51 @@ You need to install the following to build Kata Containers components:
|
||||
- `make`.
|
||||
- `gcc` (required for building the shim and runtime).
|
||||
|
||||
# Build and install Kata Containers
|
||||
## Build and install the Kata Containers runtime
|
||||
# Build and install the Kata Containers runtime
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/kata-containers/kata-containers.git
|
||||
$ pushd kata-containers/src/runtime
|
||||
$ make && sudo -E "PATH=$PATH" make install
|
||||
$ sudo mkdir -p /etc/kata-containers/
|
||||
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
|
||||
$ popd
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/runtime
|
||||
$ make && sudo -E PATH=$PATH make install
|
||||
```
|
||||
|
||||
The build will create the following:
|
||||
|
||||
- runtime binary: `/usr/local/bin/kata-runtime` and `/usr/local/bin/containerd-shim-kata-v2`
|
||||
- configuration file: `/usr/share/defaults/kata-containers/configuration.toml` and `/etc/kata-containers/configuration.toml`
|
||||
- configuration file: `/usr/share/defaults/kata-containers/configuration.toml`
|
||||
|
||||
# Check hardware requirements
|
||||
|
||||
You can check if your system is capable of creating a Kata Container by running the following:
|
||||
|
||||
```
|
||||
$ sudo kata-runtime check
|
||||
```
|
||||
|
||||
If your system is *not* able to run Kata Containers, the previous command will error out and explain why.
|
||||
|
||||
## Configure to use initrd or rootfs image
|
||||
|
||||
Kata containers can run with either an initrd image or a rootfs image.
|
||||
|
||||
If you want to test with `initrd`, make sure you have uncommented `initrd = /usr/share/kata-containers/kata-containers-initrd.img`
|
||||
in your configuration file, commenting out the `image` line in
|
||||
`/etc/kata-containers/configuration.toml`. For example:
|
||||
If you want to test with `initrd`, make sure you have `initrd = /usr/share/kata-containers/kata-containers-initrd.img`
|
||||
in your configuration file, commenting out the `image` line:
|
||||
|
||||
```bash
|
||||
`/usr/share/defaults/kata-containers/configuration.toml` and comment out the `image` line with the following. For example:
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/kata-containers/
|
||||
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
|
||||
$ sudo sed -i 's/^\(image =.*\)/# \1/g' /etc/kata-containers/configuration.toml
|
||||
$ sudo sed -i 's/^# \(initrd =.*\)/\1/g' /etc/kata-containers/configuration.toml
|
||||
```
|
||||
You can create the initrd image as shown in the [create an initrd image](#create-an-initrd-image---optional) section.
|
||||
|
||||
If you want to test with a rootfs `image`, make sure you have uncommented `image = /usr/share/kata-containers/kata-containers.img`
|
||||
If you want to test with a rootfs `image`, make sure you have `image = /usr/share/kata-containers/kata-containers.img`
|
||||
in your configuration file, commenting out the `initrd` line. For example:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo mkdir -p /etc/kata-containers/
|
||||
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
|
||||
$ sudo sed -i 's/^\(initrd =.*\)/# \1/g' /etc/kata-containers/configuration.toml
|
||||
```
|
||||
The rootfs image is created as shown in the [create a rootfs image](#create-a-rootfs-image) section.
|
||||
@@ -80,7 +90,7 @@ rootfs `image`(100MB+).
|
||||
|
||||
Enable seccomp as follows:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo sed -i '/^disable_guest_seccomp/ s/true/false/' /etc/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
@@ -90,7 +100,9 @@ This will pass container seccomp profiles to the kata agent.
|
||||
|
||||
Enable full debug as follows:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo mkdir -p /etc/kata-containers/
|
||||
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
|
||||
$ sudo sed -i -e 's/^# *\(enable_debug\).*=.*$/\1 = true/g' /etc/kata-containers/configuration.toml
|
||||
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.log=debug initcall_debug"/g' /etc/kata-containers/configuration.toml
|
||||
```
|
||||
@@ -163,7 +175,7 @@ and offers possible workarounds and fixes.
|
||||
it stores. When messages are suppressed, it is noted in the logs. This can be checked
|
||||
for by looking for those notifications, such as:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ sudo journalctl --since today | fgrep Suppressed
|
||||
Jun 29 14:51:17 mymachine systemd-journald[346]: Suppressed 4150 messages from /system.slice/docker.service
|
||||
```
|
||||
@@ -188,7 +200,7 @@ RateLimitBurst=0
|
||||
|
||||
Restart `systemd-journald` for the changes to take effect:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ sudo systemctl restart systemd-journald
|
||||
```
|
||||
|
||||
@@ -202,24 +214,25 @@ $ sudo systemctl restart systemd-journald
|
||||
|
||||
The agent is built statically linked against `musl`. The default `libc` used is `musl`, but on `ppc64le` and `s390x`, `gnu` should be used. To configure this:
|
||||
|
||||
```bash
|
||||
$ export ARCH="$(uname -m)"
|
||||
```
|
||||
$ export ARCH=$(uname -m)
|
||||
$ if [ "$ARCH" = "ppc64le" -o "$ARCH" = "s390x" ]; then export LIBC=gnu; else export LIBC=musl; fi
|
||||
$ [ "${ARCH}" == "ppc64le" ] && export ARCH=powerpc64le
|
||||
$ rustup target add "${ARCH}-unknown-linux-${LIBC}"
|
||||
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
|
||||
$ rustup target add ${ARCH}-unknown-linux-${LIBC}
|
||||
```
|
||||
|
||||
To build the agent:
|
||||
|
||||
```bash
|
||||
$ make -C kata-containers/src/agent
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/agent && make
|
||||
```
|
||||
|
||||
The agent is built with seccomp capability by default.
|
||||
If you want to build the agent without the seccomp capability, you need to run `make` with `SECCOMP=no` as follows.
|
||||
|
||||
```bash
|
||||
$ make -C kata-containers/src/agent SECCOMP=no
|
||||
```
|
||||
$ make -C $GOPATH/src/github.com/kata-containers/kata-containers/src/agent SECCOMP=no
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
@@ -227,6 +240,13 @@ $ make -C kata-containers/src/agent SECCOMP=no
|
||||
> - If you enable seccomp in the main configuration file but build the agent without seccomp capability,
|
||||
> the runtime exits conservatively with an error message.
|
||||
|
||||
## Get the osbuilder
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder
|
||||
```
|
||||
|
||||
## Create a rootfs image
|
||||
### Create a local rootfs
|
||||
|
||||
@@ -234,26 +254,24 @@ As a prerequisite, you need to install Docker. Otherwise, you will not be
|
||||
able to run the `rootfs.sh` script with `USE_DOCKER=true` as expected in
|
||||
the following example.
|
||||
|
||||
```bash
|
||||
$ export distro="ubuntu" # example
|
||||
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
|
||||
$ sudo rm -rf "${ROOTFS_DIR}"
|
||||
$ pushd kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ script -fec 'sudo -E USE_DOCKER=true ./rootfs.sh "${distro}"'
|
||||
$ popd
|
||||
```
|
||||
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
|
||||
$ sudo rm -rf ${ROOTFS_DIR}
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true ./rootfs.sh ${distro}'
|
||||
```
|
||||
|
||||
You MUST choose a distribution (e.g., `ubuntu`) for `${distro}`.
|
||||
You can get a list of the distributions supported by Kata Containers by running the following.
|
||||
|
||||
```bash
|
||||
$ ./kata-containers/tools/osbuilder/rootfs-builder/rootfs.sh -l
|
||||
```
|
||||
$ ./rootfs.sh -l
|
||||
```
|
||||
|
||||
If you want to build the agent without seccomp capability, you need to run the `rootfs.sh` script with `SECCOMP=no` as follows.
|
||||
|
||||
```bash
|
||||
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${distro}"'
|
||||
```
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
@@ -269,18 +287,17 @@ $ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${
|
||||
>
|
||||
> - You should only do this step if you are testing with the latest version of the agent.
|
||||
|
||||
```bash
|
||||
$ sudo install -o root -g root -m 0550 -t "${ROOTFS_DIR}/usr/bin" "${ROOTFS_DIR}/../../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent"
|
||||
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-agent.service" "${ROOTFS_DIR}/usr/lib/systemd/system/"
|
||||
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-containers.target" "${ROOTFS_DIR}/usr/lib/systemd/system/"
|
||||
```
|
||||
$ sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/usr/bin ../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent
|
||||
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
```
|
||||
|
||||
### Build a rootfs image
|
||||
|
||||
```bash
|
||||
$ pushd kata-containers/tools/osbuilder/image-builder
|
||||
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh "${ROOTFS_DIR}"'
|
||||
$ popd
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/image-builder
|
||||
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
|
||||
```
|
||||
|
||||
> **Notes:**
|
||||
@@ -296,26 +313,21 @@ $ popd
|
||||
|
||||
### Install the rootfs image
|
||||
|
||||
```bash
|
||||
$ pushd kata-containers/tools/osbuilder/image-builder
|
||||
$ commit="$(git log --format=%h -1 HEAD)"
|
||||
$ date="$(date +%Y-%m-%d-%T.%N%z)"
|
||||
```
|
||||
$ commit=$(git log --format=%h -1 HEAD)
|
||||
$ date=$(date +%Y-%m-%d-%T.%N%z)
|
||||
$ image="kata-containers-${date}-${commit}"
|
||||
$ sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
|
||||
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
|
||||
$ popd
|
||||
```
|
||||
|
||||
## Create an initrd image - OPTIONAL
|
||||
### Create a local rootfs for initrd image
|
||||
|
||||
```bash
|
||||
$ export distro="ubuntu" # example
|
||||
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
|
||||
$ sudo rm -rf "${ROOTFS_DIR}"
|
||||
$ pushd kata-containers/tools/osbuilder/rootfs-builder/
|
||||
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./rootfs.sh "${distro}"'
|
||||
$ popd
|
||||
```
|
||||
$ export ROOTFS_DIR="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs"
|
||||
$ sudo rm -rf ${ROOTFS_DIR}
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true ./rootfs.sh ${distro}'
|
||||
```
|
||||
`AGENT_INIT` controls if the guest image uses the Kata agent as the guest `init` process. When you create an initrd image,
|
||||
always set `AGENT_INIT` to `yes`.
|
||||
@@ -323,14 +335,14 @@ always set `AGENT_INIT` to `yes`.
|
||||
You MUST choose a distribution (e.g., `ubuntu`) for `${distro}`.
|
||||
You can get a list of the distributions supported by Kata Containers by running the following.
|
||||
|
||||
```bash
|
||||
$ ./kata-containers/tools/osbuilder/rootfs-builder/rootfs.sh -l
|
||||
```
|
||||
$ ./rootfs.sh -l
|
||||
```
|
||||
|
||||
If you want to build the agent without seccomp capability, you need to run the `rootfs.sh` script with `SECCOMP=no` as follows.
|
||||
|
||||
```bash
|
||||
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${distro}"'
|
||||
```
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
@@ -339,31 +351,28 @@ $ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${
|
||||
|
||||
Optionally, add your custom agent binary to the rootfs with the following commands. The default `$LIBC` used
|
||||
is `musl`, but on `ppc64le` and `s390x`, `gnu` should be used. Also, Rust refers to `ppc64le` as `powerpc64le`:
|
||||
```bash
|
||||
$ export ARCH="$(uname -m)"
|
||||
$ [ "${ARCH}" == "ppc64le" ] || [ "${ARCH}" == "s390x" ] && export LIBC=gnu || export LIBC=musl
|
||||
$ [ "${ARCH}" == "ppc64le" ] && export ARCH=powerpc64le
|
||||
$ sudo install -o root -g root -m 0550 -T "${ROOTFS_DIR}/../../../../src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent" "${ROOTFS_DIR}/sbin/init"
|
||||
```
|
||||
$ export ARCH=$(uname -m)
|
||||
$ [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
|
||||
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
|
||||
$ sudo install -o root -g root -m 0550 -T ../../../src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent ${ROOTFS_DIR}/sbin/init
|
||||
```
|
||||
|
||||
### Build an initrd image
|
||||
|
||||
```bash
|
||||
$ pushd kata-containers/tools/osbuilder/initrd-builder
|
||||
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./initrd_builder.sh "${ROOTFS_DIR}"'
|
||||
$ popd
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/initrd-builder
|
||||
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./initrd_builder.sh ${ROOTFS_DIR}'
|
||||
```
|
||||
|
||||
### Install the initrd image
|
||||
|
||||
```bash
|
||||
$ pushd kata-containers/tools/osbuilder/initrd-builder
|
||||
$ commit="$(git log --format=%h -1 HEAD)"
|
||||
$ date="$(date +%Y-%m-%d-%T.%N%z)"
|
||||
```
|
||||
$ commit=$(git log --format=%h -1 HEAD)
|
||||
$ date=$(date +%Y-%m-%d-%T.%N%z)
|
||||
$ image="kata-containers-initrd-${date}-${commit}"
|
||||
$ sudo install -o root -g root -m 0640 -D kata-containers-initrd.img "/usr/share/kata-containers/${image}"
|
||||
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers-initrd.img)
|
||||
$ popd
|
||||
```
|
||||
|
||||
# Install guest kernel images
|
||||
@@ -382,44 +391,44 @@ Kata Containers makes use of upstream QEMU branch. The exact version
|
||||
and repository utilized can be found by looking at the [versions file](../versions.yaml).
|
||||
|
||||
Find the correct version of QEMU from the versions file:
|
||||
```bash
|
||||
$ source kata-containers/tools/packaging/scripts/lib.sh
|
||||
$ qemu_version="$(get_from_kata_deps "assets.hypervisor.qemu.version")"
|
||||
$ echo "${qemu_version}"
|
||||
```
|
||||
$ source ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/scripts/lib.sh
|
||||
$ qemu_version=$(get_from_kata_deps "assets.hypervisor.qemu.version")
|
||||
$ echo ${qemu_version}
|
||||
```
|
||||
Get source from the matching branch of QEMU:
|
||||
```bash
|
||||
$ git clone -b "${qemu_version}" https://github.com/qemu/qemu.git
|
||||
$ your_qemu_directory="$(realpath qemu)"
|
||||
```
|
||||
$ go get -d github.com/qemu/qemu
|
||||
$ cd ${GOPATH}/src/github.com/qemu/qemu
|
||||
$ git checkout ${qemu_version}
|
||||
$ your_qemu_directory=${GOPATH}/src/github.com/qemu/qemu
|
||||
```
|
||||
|
||||
There are scripts to manage the build and packaging of QEMU. For the examples below, set your
|
||||
environment as:
|
||||
```bash
|
||||
$ packaging_dir="$(realpath kata-containers/tools/packaging)"
|
||||
```
|
||||
$ go get -d github.com/kata-containers/kata-containers
|
||||
$ packaging_dir="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging"
|
||||
```
|
||||
|
||||
Kata often utilizes patches for not-yet-upstream and/or backported fixes for components,
|
||||
including QEMU. These can be found in the [packaging/QEMU directory](../tools/packaging/qemu/patches),
|
||||
and it's *recommended* that you apply them. For example, suppose that you are going to build QEMU
|
||||
version 5.2.0, do:
|
||||
```bash
|
||||
$ "$packaging_dir/scripts/apply_patches.sh" "$packaging_dir/qemu/patches/5.2.x/"
|
||||
```
|
||||
$ cd $your_qemu_directory
|
||||
$ $packaging_dir/scripts/apply_patches.sh $packaging_dir/qemu/patches/5.2.x/
|
||||
```
|
||||
|
||||
To build utilizing the same options as Kata, you should make use of the `configure-hypervisor.sh` script. For example:
|
||||
```bash
|
||||
$ pushd "$your_qemu_directory"
|
||||
$ "$packaging_dir/scripts/configure-hypervisor.sh" kata-qemu > kata.cfg
|
||||
```
|
||||
$ cd $your_qemu_directory
|
||||
$ $packaging_dir/scripts/configure-hypervisor.sh kata-qemu > kata.cfg
|
||||
$ eval ./configure "$(cat kata.cfg)"
|
||||
$ make -j $(nproc --ignore=1)
|
||||
# Optional
|
||||
$ sudo -E make install
|
||||
$ popd
|
||||
```
|
||||
|
||||
If you do not want to install the respective QEMU version, the configuration file can be modified to point to the correct binary. In `/etc/kata-containers/configuration.toml`, change `path = "/path/to/qemu/build/qemu-system-x86_64"` to point to the correct QEMU binary.
|
||||
|
||||
See the [static-build script for QEMU](../tools/packaging/static-build/qemu/build-static-qemu.sh) for a reference on how to get, setup, configure and build QEMU for Kata.
|
||||
|
||||
### Build a custom QEMU for aarch64/arm64 - REQUIRED
|
||||
@@ -430,33 +439,11 @@ See the [static-build script for QEMU](../tools/packaging/static-build/qemu/buil
|
||||
> under upstream review for supporting NVDIMM on aarch64.
|
||||
>
|
||||
You could build the custom `qemu-system-aarch64` as required with the following command:
|
||||
```bash
|
||||
$ git clone https://github.com/kata-containers/tests.git
|
||||
$ script -fec 'sudo -E tests/.ci/install_qemu.sh'
|
||||
```
|
||||
|
||||
## Build `virtiofsd`
|
||||
|
||||
When using the file system type virtio-fs (default), `virtiofsd` is required
|
||||
|
||||
```bash
|
||||
$ pushd kata-containers/tools/packaging/static-build/virtiofsd
|
||||
$ ./build-static-virtiofsd.sh
|
||||
$ popd
|
||||
$ go get -d github.com/kata-containers/tests
|
||||
$ script -fec 'sudo -E ${GOPATH}/src/github.com/kata-containers/tests/.ci/install_qemu.sh'
|
||||
```
|
||||
|
||||
Modify `/etc/kata-containers/configuration.toml` and update value `virtio_fs_daemon = "/path/to/kata-containers/tools/packaging/static-build/virtiofsd/virtiofsd/virtiofsd"` to point to the binary.
|
||||
|
||||
# Check hardware requirements
|
||||
|
||||
You can check if your system is capable of creating a Kata Container by running the following:
|
||||
|
||||
```bash
|
||||
$ sudo kata-runtime check
|
||||
```
|
||||
|
||||
If your system is *not* able to run Kata Containers, the previous command will error out and explain why.
|
||||
|
||||
# Run Kata Containers with Containerd
|
||||
Refer to the [How to use Kata Containers and Containerd](how-to/containerd-kata.md) how-to guide.
|
||||
|
||||
@@ -487,7 +474,7 @@ See [Set up a debug console](#set-up-a-debug-console).
|
||||
|
||||
## Checking Docker default runtime
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo docker info 2>/dev/null | grep -i "default runtime" | cut -d: -f2- | grep -q runc && echo "SUCCESS" || echo "ERROR: Incorrect default Docker runtime"
|
||||
```
|
||||
## Set up a debug console
|
||||
@@ -504,7 +491,7 @@ contain either `/bin/sh` or `/bin/bash`.
|
||||
|
||||
Enable debug_console_enabled in the `configuration.toml` configuration file:
|
||||
|
||||
```toml
|
||||
```
|
||||
[agent.kata]
|
||||
debug_console_enabled = true
|
||||
```
|
||||
@@ -515,7 +502,7 @@ This will pass `agent.debug_console agent.debug_console_vport=1026` to agent as
|
||||
|
||||
For Kata Containers `2.0.x` releases, the `kata-runtime exec` command depends on the`kata-monitor` running, in order to get the sandbox's `vsock` address to connect to. Thus, first start the `kata-monitor` process.
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo kata-monitor
|
||||
```
|
||||
|
||||
@@ -577,10 +564,10 @@ an additional `coreutils` package.
|
||||
|
||||
For example using CentOS:
|
||||
|
||||
```bash
|
||||
$ pushd kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ export ROOTFS_DIR="$(realpath ./rootfs)"
|
||||
$ script -fec 'sudo -E USE_DOCKER=true EXTRA_PKGS="bash coreutils" ./rootfs.sh centos'
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true EXTRA_PKGS="bash coreutils" ./rootfs.sh centos'
|
||||
```
|
||||
|
||||
#### Build the debug image
|
||||
@@ -595,10 +582,9 @@ Install the image:
|
||||
>**Note**: When using an initrd image, replace the below rootfs image name `kata-containers.img`
|
||||
>with the initrd image name `kata-containers-initrd.img`.
|
||||
|
||||
```bash
|
||||
```
|
||||
$ name="kata-containers-centos-with-debug-console.img"
|
||||
$ sudo install -o root -g root -m 0640 kata-containers.img "/usr/share/kata-containers/${name}"
|
||||
$ popd
|
||||
```
|
||||
|
||||
Next, modify the `image=` values in the `[hypervisor.qemu]` section of the
|
||||
@@ -607,7 +593,7 @@ to specify the full path to the image name specified in the previous code
|
||||
section. Alternatively, recreate the symbolic link so it points to
|
||||
the new debug image:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ (cd /usr/share/kata-containers && sudo ln -sf "$name" kata-containers.img)
|
||||
```
|
||||
|
||||
@@ -618,7 +604,7 @@ to avoid all subsequently created containers from using the debug image.
|
||||
|
||||
Create a container as normal. For example using `crictl`:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo crictl run -r kata container.yaml pod.yaml
|
||||
```
|
||||
|
||||
@@ -631,7 +617,7 @@ those for firecracker / cloud-hypervisor.
|
||||
|
||||
Add `agent.debug_console` to the guest kernel command line to allow the agent process to start a debug console.
|
||||
|
||||
```bash
|
||||
```
|
||||
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_console"/g' "${kata_configuration_file}"
|
||||
```
|
||||
|
||||
@@ -652,7 +638,7 @@ between the host and the guest. The kernel command line option `agent.debug_cons
|
||||
|
||||
Add the parameter `agent.debug_console_vport=1026` to the kernel command line
|
||||
as shown below:
|
||||
```bash
|
||||
```
|
||||
sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_console_vport=1026"/g' "${kata_configuration_file}"
|
||||
```
|
||||
|
||||
@@ -665,7 +651,7 @@ Next, connect to the debug console. The VSOCKS paths vary slightly between each
|
||||
VMM solution.
|
||||
|
||||
In case of cloud-hypervisor, connect to the `vsock` as shown:
|
||||
```bash
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/vm/${sandbox_id}/root/ && socat stdin unix-connect:clh.sock'
|
||||
CONNECT 1026
|
||||
```
|
||||
@@ -673,7 +659,7 @@ CONNECT 1026
|
||||
**Note**: You need to type `CONNECT 1026` and press `RETURN` key after entering the `socat` command.
|
||||
|
||||
For firecracker, connect to the `hvsock` as shown:
|
||||
```bash
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/firecracker/${sandbox_id}/root/ && socat stdin unix-connect:kata.hvsock'
|
||||
CONNECT 1026
|
||||
```
|
||||
@@ -682,7 +668,7 @@ CONNECT 1026
|
||||
|
||||
|
||||
For QEMU, connect to the `vsock` as shown:
|
||||
```bash
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/vm/${sandbox_id} && socat "stdin,raw,echo=0,escape=0x11" "unix-connect:console.sock"'
|
||||
```
|
||||
|
||||
@@ -695,7 +681,7 @@ If the image is created using
|
||||
[osbuilder](../tools/osbuilder), the following YAML
|
||||
file exists and contains details of the image and how it was created:
|
||||
|
||||
```bash
|
||||
```
|
||||
$ cat /var/lib/osbuilder/osbuilder.yaml
|
||||
```
|
||||
|
||||
|
||||
@@ -64,8 +64,8 @@ The kata-runtime is controlled by TOKIO_RUNTIME_WORKER_THREADS to run the OS thr
|
||||
├─ TTRPC listener thread(M * tokio task)
|
||||
├─ TTRPC client handler thread(7 * M * tokio task)
|
||||
├─ container stdin io thread(M * tokio task)
|
||||
├─ container stdout io thread(M * tokio task)
|
||||
└─ container stderr io thread(M * tokio task)
|
||||
├─ container stdin io thread(M * tokio task)
|
||||
└─ container stdin io thread(M * tokio task)
|
||||
```
|
||||
### Extensible Framework
|
||||
The Kata 3.x runtime is designed with the extension of service, runtime, and hypervisor, combined with configuration to meet the needs of different scenarios. At present, the service provides a register mechanism to support multiple services. Services could interact with runtime through messages. In addition, the runtime handler handles messages from services. To meet the needs of a binary that supports multiple runtimes and hypervisors, the startup must obtain the runtime handler type and hypervisor type through configuration.
|
||||
|
||||
@@ -110,7 +110,7 @@ Devices and features used:
|
||||
- VFIO
|
||||
- hotplug
|
||||
- seccomp filters
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
|
||||
### Summary
|
||||
|
||||
|
||||
@@ -42,9 +42,4 @@
|
||||
- [How to setup swap devices in guest kernel](how-to-setup-swap-devices-in-guest-kernel.md)
|
||||
- [How to run rootless vmm](how-to-run-rootless-vmm.md)
|
||||
- [How to run Docker with Kata Containers](how-to-run-docker-with-kata.md)
|
||||
- [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)
|
||||
- [How to run Kata Containers with AMD SEV-SNP](how-to-run-kata-containers-with-SNP-VMs.md)
|
||||
|
||||
## Confidential Containers
|
||||
- [How to use build and test the Confidential Containers `CCv0` proof of concept](how-to-build-and-test-ccv0.md)
|
||||
- [How to generate a Kata Containers payload for the Confidential Containers Operator](how-to-generate-a-kata-containers-payload-for-the-confidential-containers-operator.md)
|
||||
- [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)
|
||||
@@ -1,640 +0,0 @@
|
||||
#!/bin/bash -e
#
# Copyright (c) 2021, 2022 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# Disclaimer: This script is work in progress for supporting the CCv0 prototype
# It shouldn't be considered supported by the Kata Containers community, or anyone else

# Based on https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md,
# but with elements of the tests/.ci scripts used

# Name of this script, used in help/usage output.
readonly script_name="$(basename "${BASH_SOURCE[0]}")"

# By default in Golang >= 1.16 GO111MODULE is set to "on", but not all modules support it, so overwrite to "auto"
export GO111MODULE="auto"

# Setup kata containers environments if not set - we default to use containerd
export CRI_CONTAINERD=${CRI_CONTAINERD:-"yes"}
export CRI_RUNTIME=${CRI_RUNTIME:-"containerd"}
export CRIO=${CRIO:-"no"}
export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
export KUBERNETES=${KUBERNETES:-"no"}
export AGENT_INIT="${AGENT_INIT:-${TEST_INITRD:-no}}"
export AA_KBC="${AA_KBC:-offline_fs_kbc}"

# Allow the user to overwrite the default repo and branch names if they want to build from a fork
export katacontainers_repo="${katacontainers_repo:-github.com/kata-containers/kata-containers}"
export katacontainers_branch="${katacontainers_branch:-CCv0}"
export kata_default_branch=${katacontainers_branch}
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
export tests_branch="${tests_branch:-CCv0}"
export target_branch=${tests_branch} # kata-containers/ci/lib.sh uses target branch var to check out tests repo

# if .bash_profile exists then use it, otherwise fall back to .profile
export PROFILE="${HOME}/.profile"
if [ -r "${HOME}/.bash_profile" ]; then
    export PROFILE="${HOME}/.bash_profile"
fi
# Stop PS1: unbound variable error happening
export PS1=${PS1:-}

# Create a bunch of common, derived values up front so we don't need to create them in all the different functions
. ${PROFILE}
if [ -z ${GOPATH} ]; then
    export GOPATH=${HOME}/go
fi
export tests_repo_dir="${GOPATH}/src/${tests_repo}"
export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
export ROOTFS_DIR="${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder/rootfs"
export PULL_IMAGE="${PULL_IMAGE:-quay.io/kata-containers/confidential-containers:signed}" # Doesn't need authentication
export CONTAINER_ID="${CONTAINER_ID:-0123456789}"
# Detect the host distribution; on Fedora the rootfs build switches to podman.
source /etc/os-release || source /usr/lib/os-release
grep -Eq "\<fedora\>" /etc/os-release 2> /dev/null && export USE_PODMAN=true


# If we've already checked out the test repo then source the confidential scripts
if [ "${KUBERNETES}" == "yes" ]; then
    export BATS_TEST_DIRNAME="${tests_repo_dir}/integration/kubernetes/confidential"
    [ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/lib.sh"
else
    export BATS_TEST_DIRNAME="${tests_repo_dir}/integration/containerd/confidential"
    [ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/lib.sh"
fi

[ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/../../confidential/lib.sh"

export RUNTIME_CONFIG_PATH=/etc/kata-containers/configuration.toml
|
||||
|
||||
# Print the help text describing every supported command and option, then
# leave with the supplied exit code.
# $1 - exit code (returned when the script is sourced, exited otherwise).
usage() {
    exit_code="$1"
    cat <<EOF
Overview:
    Build and test kata containers from source
    Optionally set kata-containers and tests repo and branch as exported variables before running
    e.g. export katacontainers_repo=github.com/stevenhorsman/kata-containers && export katacontainers_branch=kata-ci-from-fork && export tests_repo=github.com/stevenhorsman/tests && export tests_branch=kata-ci-from-fork && ~/${script_name} build_and_install_all
Usage:
    ${script_name} [options] <command>
Commands:
- agent_create_container:           Run CreateContainer command against the agent with agent-ctl
- agent_pull_image:                 Run PullImage command against the agent with agent-ctl
- all:                              Build and install everything, test kata with containerd and capture the logs
- build_and_add_agent_to_rootfs:    Builds the kata-agent and adds it to the rootfs
- build_and_install_all:            Build and install everything
- build_and_install_rootfs:         Builds and installs the rootfs image
- build_kata_runtime:               Build and install the kata runtime
- build_cloud_hypervisor            Checkout, patch, build and install Cloud Hypervisor
- build_qemu:                       Checkout, patch, build and install QEMU
- configure:                        Configure Kata to use rootfs and enable debug
- connect_to_ssh_demo_pod:          Ssh into the ssh demo pod, showing that the decryption succeeded
- copy_signature_files_to_guest     Copies signature verification files to guest
- create_rootfs:                    Create a local rootfs
- crictl_create_cc_container        Use crictl to create a new busybox container in the kata cc pod
- crictl_create_cc_pod              Use crictl to create a new kata cc pod
- crictl_delete_cc                  Use crictl to delete the kata cc pod sandbox and container in it
- help:                             Display this help
- init_kubernetes:                  initialize a Kubernetes cluster on this system
- initialize:                       Install dependencies and check out kata-containers source
- install_guest_kernel:             Setup, build and install the guest kernel
- kubernetes_create_cc_pod:         Create a Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_create_ssh_demo_pod:   Create a Kata CC runtime pod based on the ssh demo
- kubernetes_delete_cc_pod:         Delete the Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_delete_ssh_demo_pod:   Delete the Kata CC runtime pod based on the ssh demo
- open_kata_shell:                  Open a shell into the kata runtime
- rebuild_and_install_kata:         Rebuild the kata runtime and agent and build and install the image
- shim_pull_image:                  Run PullImage command against the shim with ctr
- test_capture_logs:                Test using kata with containerd and capture the logs in the user's home directory
- test:                             Test using kata with containerd

Options:
    -d: Enable debug
    -h: Display this help
EOF
    # if script sourced don't exit as this will exit the main shell, just return instead
    [[ $_ != $0 ]] && return "$exit_code" || exit "$exit_code"
}
|
||||
|
||||
# Build and install every Kata component from source: the runtime, a rootfs
# image, the guest kernel and the selected hypervisor, then verify the
# runtime works and, when KUBERNETES=yes, initialize a Kubernetes cluster.
# Honours the exported KATA_HYPERVISOR ("qemu" or "cloud-hypervisor") and
# KUBERNETES variables set in the script preamble.
build_and_install_all() {
    initialize
    build_and_install_kata_runtime
    configure
    create_a_local_rootfs
    build_and_install_rootfs
    install_guest_kernel_image
    case "$KATA_HYPERVISOR" in
        "qemu")
            build_qemu
            ;;
        "cloud-hypervisor")
            build_cloud_hypervisor
            ;;
        *)
            # Fail fast on an unsupported hypervisor instead of carrying on
            # to check a runtime that was never given a VMM to use.
            echo "Invalid option: $KATA_HYPERVISOR is not supported." >&2
            exit 1
            ;;
    esac

    check_kata_runtime
    if [ "${KUBERNETES}" == "yes" ]; then
        init_kubernetes
    fi
}
|
||||
|
||||
# Refresh an existing development setup: re-checkout both repositories,
# rebuild the runtime and agent, rebuild and install the rootfs image,
# and smoke-test the resulting runtime. Assumes initialize() has already
# been run once on this machine.
rebuild_and_install_kata() {
    checkout_tests_repo
    checkout_kata_containers_repo
    build_and_install_kata_runtime
    build_and_add_agent_to_rootfs
    build_and_install_rootfs
    check_kata_runtime
}
|
||||
|
||||
# Based on the jenkins_job_build.sh script in kata-containers/tests/.ci - checks out source code and installs dependencies
initialize() {
    # We need git to checkout and bootstrap the ci scripts and some other packages used in testing
    sudo apt-get update && sudo apt-get install -y curl git qemu-utils

    # Persist the Go environment into the shell profile. Idempotent: each
    # export is appended only if that exact line is not already present.
    grep -qxF "export GOPATH=\${HOME}/go" "${PROFILE}" || echo "export GOPATH=\${HOME}/go" >> "${PROFILE}"
    grep -qxF "export GOROOT=/usr/local/go" "${PROFILE}" || echo "export GOROOT=/usr/local/go" >> "${PROFILE}"
    grep -qxF "export PATH=\${GOPATH}/bin:/usr/local/go/bin:\${PATH}" "${PROFILE}" || echo "export PATH=\${GOPATH}/bin:/usr/local/go/bin:\${PATH}" >> "${PROFILE}"

    # Load the new go and PATH parameters from the profile
    . ${PROFILE}
    mkdir -p "${GOPATH}"

    checkout_tests_repo

    pushd "${tests_repo_dir}"
    local ci_dir_name=".ci"
    # Toolchain installers run as root but must see the caller's PATH.
    sudo -E PATH=$PATH -s "${ci_dir_name}/install_go.sh" -p -f
    sudo -E PATH=$PATH -s "${ci_dir_name}/install_rust.sh"
    # Need to change ownership of rustup so later process can create temp files there
    sudo chown -R ${USER}:${USER} "${HOME}/.rustup"

    checkout_kata_containers_repo

    # Run setup, but don't install kata as we will build it ourselves in locations matching the developer guide
    export INSTALL_KATA="no"
    sudo -E PATH=$PATH -s ${ci_dir_name}/setup.sh
    # Reload the profile to pick up installed dependencies
    . ${PROFILE}
    popd
}
|
||||
|
||||
# Clone (if needed) and update the kata-containers/tests repo, then source its
# helper libraries. The checkout is root-owned deliberately (see comment below).
# Globals read: tests_repo, tests_branch, tests_repo_dir, BATS_TEST_DIRNAME.
checkout_tests_repo() {
    echo "Creating repo: ${tests_repo} and branch ${tests_branch} into ${tests_repo_dir}..."
    # Due to git https://github.blog/2022-04-12-git-security-vulnerability-announced/ the tests repo needs
    # to be owned by root as it is re-checked out in rootfs.sh
    mkdir -p $(dirname "${tests_repo_dir}")
    [ -d "${tests_repo_dir}" ] || sudo -E git clone "https://${tests_repo}.git" "${tests_repo_dir}"
    sudo -E chown -R root:root "${tests_repo_dir}"
    pushd "${tests_repo_dir}"
    sudo -E git fetch
    # Only checkout/reset when a branch was requested: with an empty
    # tests_branch the old unconditional reset targeted the invalid ref
    # "origin/" and always failed.
    if [ -n "${tests_branch}" ]; then
        sudo -E git checkout ${tests_branch}
        sudo -E git reset --hard origin/${tests_branch}
    fi
    popd

    source "${BATS_TEST_DIRNAME}/lib.sh"
    source "${BATS_TEST_DIRNAME}/../../confidential/lib.sh"
}
|
||||
|
||||
# Note: clone_katacontainers_repo using go, so that needs to be installed first
|
||||
# Clone the kata-containers repo via the tests repo's helper, then hand
# ownership back to the current user so builds can run unprivileged.
# Note: clone_katacontainers_repo using go, so that needs to be installed first
checkout_kata_containers_repo() {
    source "${tests_repo_dir}/.ci/lib.sh"
    echo "Creating repo: ${katacontainers_repo} and branch ${kata_default_branch} into ${katacontainers_repo_dir}..."
    clone_katacontainers_repo
    sudo -E chown -R ${USER}:${USER} "${katacontainers_repo_dir}"
}
|
||||
|
||||
# Build and install the Kata runtime (containerd shim v2) from src/runtime,
# selecting the default hypervisor from $KATA_HYPERVISOR.
build_and_install_kata_runtime() {
    pushd ${katacontainers_repo_dir}/src/runtime
    make clean && make DEFAULT_HYPERVISOR=${KATA_HYPERVISOR} && sudo -E PATH=$PATH make DEFAULT_HYPERVISOR=${KATA_HYPERVISOR} install
    popd
}
|
||||
|
||||
# Configure Kata for confidential containers: rootfs image, full debug,
# guest console access (development only), guest-side image pull, and
# containerd/crictl settings.
configure() {
    configure_kata_to_use_rootfs
    enable_full_debug
    enable_agent_console

    # Switch image offload to true in kata config
    switch_image_service_offload "on"

    configure_cc_containerd
    # From crictl v1.24.1 the default timeout leads to the pod creation failing, so update it
    sudo crictl config --set timeout=10
}
|
||||
|
||||
# Copy the default Kata configuration into /etc/kata-containers and comment
# out the "initrd =" line so the rootfs image is used instead of an initrd.
configure_kata_to_use_rootfs() {
    sudo mkdir -p /etc/kata-containers/
    sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
    sudo sed -i 's/^\(initrd =.*\)/# \1/g' ${RUNTIME_CONFIG_PATH}
}
|
||||
|
||||
# Build the custom kata-agent and install it into the guest rootfs directory.
build_and_add_agent_to_rootfs() {
    build_a_custom_kata_agent
    add_custom_agent_to_rootfs
}
|
||||
|
||||
# Build the kata-agent from source with statically-linked libseccomp and
# install it (plus its systemd service file) into ${ROOTFS_DIR}.
build_a_custom_kata_agent() {
    # Install libseccomp for static linking
    sudo -E PATH=$PATH GOPATH=$GOPATH ${katacontainers_repo_dir}/ci/install_libseccomp.sh /tmp/kata-libseccomp /tmp/kata-gperf
    export LIBSECCOMP_LINK_TYPE=static
    export LIBSECCOMP_LIB_PATH=/tmp/kata-libseccomp/lib

    # Make the rust toolchain available in this shell
    . "$HOME/.cargo/env"
    pushd ${katacontainers_repo_dir}/src/agent
    sudo -E PATH=$PATH make

    # ppc64le and s390x use glibc; everything else builds against musl.
    # ppc64le is renamed to match the rust target triple (powerpc64le-…).
    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le

    # Run a make install into the rootfs directory in order to create the kata-agent.service file which is required when we add to the rootfs
    sudo -E PATH=$PATH make install DESTDIR="${ROOTFS_DIR}"
    popd
}
|
||||
|
||||
# Build a fresh Ubuntu-based guest rootfs in ${ROOTFS_DIR} via the osbuilder
# rootfs-builder, then repair side effects of the build (tests repo branch,
# agent directory ownership).
create_a_local_rootfs() {
    sudo rm -rf "${ROOTFS_DIR}"
    pushd ${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder
    export distro="ubuntu"
    # If we aren't using podman (i.e. on a fedora-like), default to docker
    [[ -z "${USE_PODMAN:-}" ]] && use_docker="${use_docker:-1}"
    sudo -E OS_VERSION="${OS_VERSION:-}" GOPATH=$GOPATH EXTRA_PKGS="vim iputils-ping net-tools" DEBUG="${DEBUG:-}" USE_DOCKER="${use_docker:-}" SKOPEO=${SKOPEO:-} AA_KBC=${AA_KBC:-} UMOCI=yes SECCOMP=yes ./rootfs.sh -r ${ROOTFS_DIR} ${distro}

    # Install_rust.sh during rootfs.sh switches us to the main branch of the tests repo, so switch back now
    pushd "${tests_repo_dir}"
    sudo -E git checkout ${tests_branch}
    popd
    # During the ./rootfs.sh call the kata agent is built as root, so we need to update the permissions, so we can rebuild it
    sudo chown -R ${USER}:${USER} "${katacontainers_repo_dir}/src/agent/"

    popd
}
|
||||
|
||||
# Copy the locally-built kata-agent binary and its systemd units into the
# guest rootfs, overwriting whatever rootfs.sh installed.
add_custom_agent_to_rootfs() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder

    # Same ARCH/LIBC mapping as build_a_custom_kata_agent — must match the
    # rust target directory the agent was built into.
    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le

    sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/usr/bin ${katacontainers_repo_dir}/src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent
    sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
    sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
    popd
}
|
||||
|
||||
# Build the rootfs disk image from ${ROOTFS_DIR} and install it where the
# Kata runtime expects it.
build_and_install_rootfs() {
    build_rootfs_image
    install_rootfs_image
}
|
||||
|
||||
# Package ${ROOTFS_DIR} into a kata-containers.img using the osbuilder
# image-builder.
build_rootfs_image() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/image-builder
    # Logic from install_kata_image.sh - if we aren't using podman (ie on a fedora like), then use docker
    [[ -z "${USE_PODMAN:-}" ]] && use_docker="${use_docker:-1}"
    sudo -E USE_DOCKER="${use_docker:-}" ./image_builder.sh ${ROOTFS_DIR}
    popd
}
|
||||
|
||||
# Install the built image under /usr/share/kata-containers with a unique
# date+commit name and point the kata-containers.img symlink at it, so
# previous images remain available for rollback.
install_rootfs_image() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/image-builder
    local commit=$(git log --format=%h -1 HEAD)
    local date=$(date +%Y-%m-%d-%T.%N%z)
    local image="kata-containers-${date}-${commit}"
    sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
    (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
    echo "Built Rootfs from ${ROOTFS_DIR} to /usr/share/kata-containers/${image}"
    ls -al /usr/share/kata-containers/
    popd
}
|
||||
|
||||
# Build and install the Kata guest kernel via tools/packaging/kernel.
install_guest_kernel_image() {
    pushd ${katacontainers_repo_dir}/tools/packaging/kernel
    sudo -E PATH=$PATH ./build-kernel.sh setup
    sudo -E PATH=$PATH ./build-kernel.sh build
    sudo chmod u+wrx /usr/share/kata-containers/ # Give user permission to install kernel
    sudo -E PATH=$PATH ./build-kernel.sh install
    popd
}
|
||||
|
||||
# Install virtiofsd and QEMU using the tests repo CI scripts.
build_qemu() {
    ${tests_repo_dir}/.ci/install_virtiofsd.sh
    ${tests_repo_dir}/.ci/install_qemu.sh
}
|
||||
|
||||
# Install virtiofsd and Cloud Hypervisor using the tests repo CI scripts.
build_cloud_hypervisor() {
    ${tests_repo_dir}/.ci/install_virtiofsd.sh
    ${tests_repo_dir}/.ci/install_cloud_hypervisor.sh
}
|
||||
|
||||
# Sanity-check the installed Kata runtime and host support.
check_kata_runtime() {
    sudo kata-runtime check
}
|
||||
|
||||
# Pod spec written by init_kubernetes and consumed by the kubernetes_* helpers
k8s_pod_file="${HOME}/busybox-cc.yaml"
# Install Kubernetes tooling if missing, reset any previous single-node
# cluster state, re-initialise the cluster, and write the test pod spec.
init_kubernetes() {
    # Check that kubeadm was installed and install it otherwise
    if ! [ -x "$(command -v kubeadm)" ]; then
        pushd "${tests_repo_dir}/.ci"
        sudo -E PATH=$PATH -s install_kubernetes.sh
        if [ "${CRI_CONTAINERD}" == "yes" ]; then
            sudo -E PATH=$PATH -s "configure_containerd_for_kubernetes.sh"
        fi
        popd
    fi

    # If kubernetes init has previously run we need to clean it by removing the image and resetting k8s
    local cid=$(sudo docker ps -a -q -f name=^/kata-registry$)
    if [ -n "${cid}" ]; then
        sudo docker stop ${cid} && sudo docker rm ${cid}
    fi
    local k8s_nodes=$(kubectl get nodes -o name 2>/dev/null || true)
    if [ -n "${k8s_nodes}" ]; then
        sudo kubeadm reset -f
    fi

    # Create the single-node cluster, then give the user access to kubeconfig
    export CI="true" && sudo -E PATH=$PATH -s ${tests_repo_dir}/integration/kubernetes/init.sh
    sudo chown ${USER}:$(id -g -n ${USER}) "$HOME/.kube/config"
    cat << EOF > ${k8s_pod_file}
apiVersion: v1
kind: Pod
metadata:
  name: busybox-cc
spec:
  runtimeClassName: kata
  containers:
  - name: nginx
    image: quay.io/kata-containers/confidential-containers:signed
    imagePullPolicy: Always
EOF
}
|
||||
|
||||
# Create the confidential-containers test pod from the spec written by
# init_kubernetes (helper from the sourced confidential lib).
call_kubernetes_create_cc_pod() {
    kubernetes_create_cc_pod ${k8s_pod_file}
}
|
||||
|
||||
# Delete the confidential-containers test pod. Looks the name up rather than
# hard-coding it so a renamed pod is still cleaned up.
call_kubernetes_delete_cc_pod() {
    pod_name=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
    # Quote the name: the jsonpath returns a space-separated list if more
    # than one pod exists, and an unquoted expansion would word-split it
    # into multiple arguments.
    kubernetes_delete_cc_pod "${pod_name}"
}
|
||||
|
||||
# Provision guest decryption keys then create the SSH demo pod (helpers from
# the sourced confidential lib).
call_kubernetes_create_ssh_demo_pod() {
    setup_decryption_files_in_guest
    kubernetes_create_ssh_demo_pod
}
|
||||
|
||||
# Open an SSH connection into the demo pod (helper from the confidential lib).
call_connect_to_ssh_demo_pod() {
    connect_to_ssh_demo_pod
}
|
||||
|
||||
# Delete the SSH demo pod, looking up its name dynamically.
call_kubernetes_delete_ssh_demo_pod() {
    pod=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
    # Quote the expansion so a multi-pod (space-separated) result is not
    # word-split into several arguments.
    kubernetes_delete_ssh_demo_pod "${pod}"
}
|
||||
|
||||
# Fixed sandbox name shared by the crictl create/delete helpers below
crictl_sandbox_name=kata-cc-busybox-sandbox
# Create (or recreate) the Kata confidential-containers pod sandbox via crictl.
call_crictl_create_cc_pod() {
    # Update iptables to allow forwarding to the cni0 bridge avoiding issues caused by the docker0 bridge
    sudo iptables -P FORWARD ACCEPT

    # get_pod_config in tests_common exports `pod_config` that points to the prepared pod config yaml
    get_pod_config

    # Drop any stale sandbox with the same name before creating a fresh one
    crictl_delete_cc_pod_if_exists "${crictl_sandbox_name}"
    crictl_create_cc_pod "${pod_config}"
    sudo crictl pods
}
|
||||
|
||||
# Create a confidential container inside the crictl sandbox and list the
# resulting containers.
call_crictl_create_cc_container() {
    # Create container configuration yaml based on our test copy of busybox
    # get_pod_config in tests_common exports `pod_config` that points to the prepared pod config yaml
    get_pod_config

    local container_config="${FIXTURES_DIR}/${CONTAINER_CONFIG_FILE:-container-config.yaml}"
    local pod_name=${crictl_sandbox_name}
    crictl_create_cc_container ${pod_name} ${pod_config} ${container_config}
    sudo crictl ps -a
}
|
||||
|
||||
# Delete the crictl confidential-containers sandbox (and its containers).
crictl_delete_cc() {
    crictl_delete_cc_pod ${crictl_sandbox_name}
}
|
||||
|
||||
# Smoke-test the installed Kata runtime: pull the test image if it is not
# already present, then run `uname -a` in a throwaway Kata container via ctr.
test_kata_runtime() {
    echo "Running ctr with the kata runtime..."
    local test_image="quay.io/kata-containers/confidential-containers:signed"
    # Quote the command substitution so `[ -z ]` tests the actual (possibly
    # empty) output rather than relying on test's one-argument fallback, and
    # run the listing with sudo to match the pull/run commands — the
    # containerd socket is root-owned, so the unprivileged listing always
    # came back empty and forced a re-pull every run.
    if [ -z "$(sudo ctr images ls -q name=="${test_image}")" ]; then
        sudo ctr image pull "${test_image}"
    fi
    sudo ctr run --runtime "io.containerd.kata.v2" --rm -t "${test_image}" test-kata uname -a
}
|
||||
|
||||
# Clear the systemd journal, run the Kata smoke test, then dump the runtime
# and shim v2 logs to files in $HOME for inspection.
run_kata_and_capture_logs() {
    echo "Clearing systemd journal..."
    # Journald must be stopped while its files are removed
    sudo systemctl stop systemd-journald
    sudo rm -f /var/log/journal/*/* /run/log/journal/*/*
    sudo systemctl start systemd-journald
    test_kata_runtime
    echo "Collecting logs..."
    sudo journalctl -q -o cat -a -t kata-runtime > ${HOME}/kata-runtime.log
    sudo journalctl -q -o cat -a -t kata > ${HOME}/shimv2.log
    echo "Logs output to ${HOME}/kata-runtime.log and ${HOME}/shimv2.log"
}
|
||||
|
||||
# Export guest_cid (guest VSOCK context id) and sandbox_id (running shim's
# sandbox id) for the shell/agent-ctl/shim helpers. Assumes exactly one Kata
# sandbox is running — TODO confirm with multiple sandboxes.
get_ids() {
    # Column 6 of `ss --vsock` is "cid:port"; keep the cid part
    guest_cid=$(sudo ss -H --vsock | awk '{print $6}' | cut -d: -f1)
    # The "[c]" bracket trick stops grep matching its own process entry in
    # the ps output, and grep -E replaces the deprecated egrep alias.
    sandbox_id=$(ps -ef | grep "[c]ontainerd-shim-kata-v2" | grep -E -o "id [^,][^,].* " | awk '{print $2}')
}
|
||||
|
||||
# Open an interactive shell inside the running Kata guest (requires the
# debug console enabled by enable_agent_console).
open_kata_shell() {
    get_ids
    sudo -E "PATH=$PATH" kata-runtime exec ${sandbox_id}
}
|
||||
|
||||
# Create an OCI bundle at /tmp/bundle (runc spec + exported test-image
# rootfs) for agent-ctl, reusing it if it already exists. Exports bundle_dir.
build_bundle_dir_if_necessary() {
    bundle_dir="/tmp/bundle"
    if [ ! -d "${bundle_dir}" ]; then
        rootfs_dir="$bundle_dir/rootfs"
        image="quay.io/kata-containers/confidential-containers:signed"
        mkdir -p "$rootfs_dir" && (cd "$bundle_dir" && runc spec)
        # Export the image's filesystem (via a temporary container) into the bundle rootfs
        sudo docker export $(sudo docker create "$image") | tar -C "$rootfs_dir" -xvf -
    fi
    # There were errors in create container agent-ctl command due to /bin/ seemingly not being on the path, so hardcode it
    sudo sed -i -e 's%^\(\t*\)"sh"$%\1"/bin/sh"%g' "${bundle_dir}/config.json"
}
|
||||
|
||||
# Build the kata-agent-ctl tool and leave the working directory at its
# release output directory (callers rely on this cwd change).
build_agent_ctl() {
    cd ${GOPATH}/src/${katacontainers_repo}/src/tools/agent-ctl/
    # Earlier sudo builds may have left root-owned files in the cargo registry
    if [ -e "${HOME}/.cargo/registry" ]; then
        sudo chown -R ${USER}:${USER} "${HOME}/.cargo/registry"
    fi
    sudo -E PATH=$PATH -s make
    # Same ARCH/LIBC mapping as the agent build — must match the rust target dir
    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
    cd "./target/${ARCH}-unknown-linux-${LIBC}/release/"
}
|
||||
|
||||
# Send a single API command ($1) to the kata-agent over VSOCK using
# kata-agent-ctl, building the tool and the OCI bundle first if needed.
run_agent_ctl_command() {
    get_ids
    build_bundle_dir_if_necessary
    command=$1
    # If kata-agent-ctl pre-built in this directory, use it directly, otherwise build it first and switch to release
    if [ ! -x kata-agent-ctl ]; then
        build_agent_ctl
    fi
    ./kata-agent-ctl -l debug connect --bundle-dir "${bundle_dir}" --server-address "vsock://${guest_cid}:1024" -c "${command}"
}
|
||||
|
||||
# Ask the kata-agent directly to pull ${PULL_IMAGE} inside the guest
# (supports registry credentials via SOURCE_CREDS).
agent_pull_image() {
    run_agent_ctl_command "PullImage image=${PULL_IMAGE} cid=${CONTAINER_ID} source_creds=${SOURCE_CREDS}"
}
|
||||
|
||||
# Ask the kata-agent to create a container from a previously pulled image.
agent_create_container() {
    run_agent_ctl_command "CreateContainer cid=${CONTAINER_ID}"
}
|
||||
|
||||
# Pull ${PULL_IMAGE} in the guest via the containerd shim v2 interface
# (ctr shim pull-image), echoing the command for reference first.
shim_pull_image() {
    get_ids
    local ctr_shim_command="sudo ctr --namespace k8s.io shim --id ${sandbox_id} pull-image ${PULL_IMAGE} ${CONTAINER_ID}"
    echo "Issuing command '${ctr_shim_command}'"
    ${ctr_shim_command}
}
|
||||
|
||||
# Copy container-image signature verification files into the guest, choosing
# the skopeo or offline-fs-kbc mechanism based on ${SKOPEO}.
call_copy_signature_files_to_guest() {
    # TODO #5173 - remove this once the kernel_params aren't ignored by the agent config
    export DEBUG_CONSOLE="true"

    if [ "${SKOPEO:-}" = "yes" ]; then
        add_kernel_params "agent.container_policy_file=/etc/containers/quay_verification/quay_policy.json"
        setup_skopeo_signature_files_in_guest
    else
        # TODO #4888 - set config to specifically enable signature verification to be on in ImageClient
        setup_offline_fs_kbc_signature_files_in_guest
    fi
}
|
||||
|
||||
# Entry point: parse the options (-d enables debug tracing, -h prints usage)
# then dispatch the single positional sub-command to the matching
# build/configure/provision/test function above.
main() {
    while getopts "dh" opt; do
        case "$opt" in
            d)
                # Propagate debug to child scripts and trace this one
                export DEBUG="-d"
                set -x
                ;;
            h)
                usage 0
                ;;
            \?)
                echo "Invalid option: -$OPTARG" >&2
                usage 1
                ;;
        esac
    done

    shift $((OPTIND - 1))

    subcmd="${1:-}"

    # A sub-command is mandatory
    [ -z "${subcmd}" ] && usage 1

    case "${subcmd}" in
        all)
            build_and_install_all
            run_kata_and_capture_logs
            ;;
        build_and_install_all)
            build_and_install_all
            ;;
        rebuild_and_install_kata)
            rebuild_and_install_kata
            ;;
        initialize)
            initialize
            ;;
        build_kata_runtime)
            build_and_install_kata_runtime
            ;;
        configure)
            configure
            ;;
        create_rootfs)
            create_a_local_rootfs
            ;;
        build_and_add_agent_to_rootfs)
            build_and_add_agent_to_rootfs
            ;;
        build_and_install_rootfs)
            build_and_install_rootfs
            ;;
        install_guest_kernel)
            install_guest_kernel_image
            ;;
        build_cloud_hypervisor)
            build_cloud_hypervisor
            ;;
        build_qemu)
            build_qemu
            ;;
        init_kubernetes)
            init_kubernetes
            ;;
        crictl_create_cc_pod)
            call_crictl_create_cc_pod
            ;;
        crictl_create_cc_container)
            call_crictl_create_cc_container
            ;;
        crictl_delete_cc)
            crictl_delete_cc
            ;;
        kubernetes_create_cc_pod)
            call_kubernetes_create_cc_pod
            ;;
        kubernetes_delete_cc_pod)
            call_kubernetes_delete_cc_pod
            ;;
        kubernetes_create_ssh_demo_pod)
            call_kubernetes_create_ssh_demo_pod
            ;;
        connect_to_ssh_demo_pod)
            call_connect_to_ssh_demo_pod
            ;;
        kubernetes_delete_ssh_demo_pod)
            call_kubernetes_delete_ssh_demo_pod
            ;;
        test)
            test_kata_runtime
            ;;
        test_capture_logs)
            run_kata_and_capture_logs
            ;;
        open_kata_console)
            open_kata_console
            ;;
        open_kata_shell)
            open_kata_shell
            ;;
        agent_pull_image)
            agent_pull_image
            ;;
        shim_pull_image)
            shim_pull_image
            ;;
        agent_create_container)
            agent_create_container
            ;;
        copy_signature_files_to_guest)
            call_copy_signature_files_to_guest
            ;;
        *)
            usage 1
            ;;
    esac
}
|
||||
|
||||
main $@
|
||||
@@ -77,8 +77,8 @@ $ command -v containerd
|
||||
You can manually install CNI plugins as follows:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/containernetworking/plugins.git
|
||||
$ pushd plugins
|
||||
$ go get github.com/containernetworking/plugins
|
||||
$ pushd $GOPATH/src/github.com/containernetworking/plugins
|
||||
$ ./build_linux.sh
|
||||
$ sudo mkdir /opt/cni
|
||||
$ sudo cp -r bin /opt/cni/
|
||||
@@ -93,8 +93,8 @@ $ popd
|
||||
You can install the `cri-tools` from source code:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/kubernetes-sigs/cri-tools.git
|
||||
$ pushd cri-tools
|
||||
$ go get github.com/kubernetes-sigs/cri-tools
|
||||
$ pushd $GOPATH/src/github.com/kubernetes-sigs/cri-tools
|
||||
$ make
|
||||
$ sudo -E make install
|
||||
$ popd
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
# Copyright (c) 2021 IBM Corp.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
aa_kbc_params = "$AA_KBC_PARAMS"
|
||||
https_proxy = "$HTTPS_PROXY"
|
||||
[endpoints]
|
||||
allowed = [
|
||||
"AddARPNeighborsRequest",
|
||||
"AddSwapRequest",
|
||||
"CloseStdinRequest",
|
||||
"CopyFileRequest",
|
||||
"CreateContainerRequest",
|
||||
"CreateSandboxRequest",
|
||||
"DestroySandboxRequest",
|
||||
#"ExecProcessRequest",
|
||||
"GetMetricsRequest",
|
||||
"GetOOMEventRequest",
|
||||
"GuestDetailsRequest",
|
||||
"ListInterfacesRequest",
|
||||
"ListRoutesRequest",
|
||||
"MemHotplugByProbeRequest",
|
||||
"OnlineCPUMemRequest",
|
||||
"PauseContainerRequest",
|
||||
"PullImageRequest",
|
||||
"ReadStreamRequest",
|
||||
"RemoveContainerRequest",
|
||||
#"ReseedRandomDevRequest",
|
||||
"ResizeVolumeRequest",
|
||||
"ResumeContainerRequest",
|
||||
"SetGuestDateTimeRequest",
|
||||
"SignalProcessRequest",
|
||||
"StartContainerRequest",
|
||||
"StartTracingRequest",
|
||||
"StatsContainerRequest",
|
||||
"StopTracingRequest",
|
||||
"TtyWinResizeRequest",
|
||||
"UpdateContainerRequest",
|
||||
"UpdateInterfaceRequest",
|
||||
"UpdateRoutesRequest",
|
||||
"VolumeStatsRequest",
|
||||
"WaitProcessRequest",
|
||||
"WriteStreamRequest"
|
||||
]
|
||||
@@ -1,479 +0,0 @@
|
||||
# How to build, run and test Kata CCv0
|
||||
|
||||
## Introduction and Background
|
||||
|
||||
In order to try and make building (locally) and demoing the Kata Containers `CCv0` code base as simple as possible I've
|
||||
shared a script [`ccv0.sh`](./ccv0.sh). This script was originally my attempt to automate the steps of the
|
||||
[Developer Guide](https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md) so that I could do
|
||||
different sections of them repeatedly and reliably as I was making changes to different parts of the
|
||||
Kata code base. I then tried to weave in some of the [`tests/.ci`](https://github.com/kata-containers/tests/tree/main/.ci)
|
||||
scripts in order to have less duplicated code.
|
||||
As we progress on the confidential containers journey, I hope to add more features to demonstrate the functionality
|
||||
we have working.
|
||||
|
||||
*Disclaimer: This script has mostly just been used and tested by me ([@stevenhorsman](https://github.com/stevenhorsman)),*
|
||||
*so there might be issues with it. I'm happy to try and help solve these if possible, but this shouldn't be considered a*
|
||||
*fully supported process by the Kata Containers community.*
|
||||
|
||||
### Basic script set-up and optional environment variables
|
||||
|
||||
In order to build, configure and demo the CCv0 functionality, these are the set-up steps I take:
|
||||
- Provision a new VM
|
||||
- *I choose a Ubuntu 20.04 8GB VM for this as I had one available. There are some dependences on apt-get installed*
|
||||
*packages, so these will need re-working to be compatible with other platforms.*
|
||||
- Copy the script over to your VM *(I put it in the home directory)* and ensure it has execute permission by running
|
||||
```bash
|
||||
$ chmod u+x ccv0.sh
|
||||
```
|
||||
- Optionally set up some environment variables
|
||||
- By default the script checks out the `CCv0` branches of the `kata-containers/kata-containers` and
|
||||
`kata-containers/tests` repositories, but it is designed to be used to test of personal forks and branches as well.
|
||||
If you want to build and run these you can export the `katacontainers_repo`, `katacontainers_branch`, `tests_repo`
|
||||
and `tests_branch` variables e.g.
|
||||
```bash
|
||||
$ export katacontainers_repo=github.com/stevenhorsman/kata-containers
|
||||
$ export katacontainers_branch=stevenh/agent-pull-image-endpoint
|
||||
$ export tests_repo=github.com/stevenhorsman/tests
|
||||
$ export tests_branch=stevenh/add-ccv0-changes-to-build
|
||||
```
|
||||
before running the script.
|
||||
- By default the build and configuration are using `QEMU` as the hypervisor. In order to use `Cloud Hypervisor` instead
|
||||
set:
|
||||
```
|
||||
$ export KATA_HYPERVISOR="cloud-hypervisor"
|
||||
```
|
||||
before running the build.
|
||||
|
||||
- At this point you can provision a Kata confidential containers pod and container with either
|
||||
[`crictl`](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image),
|
||||
or [Kubernetes](#using-kubernetes-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
|
||||
and then test and use it.
|
||||
|
||||
### Using crictl for end-to-end provisioning of a Kata confidential containers pod with an unencrypted image
|
||||
|
||||
- Run the full build process with Kubernetes turned off, so its configuration doesn't interfere with `crictl` using:
|
||||
```bash
|
||||
$ export KUBERNETES="no"
|
||||
$ export KATA_HYPERVISOR="qemu"
|
||||
$ ~/ccv0.sh -d build_and_install_all
|
||||
```
|
||||
> **Note**: Much of this script has to be run as `sudo`, so you are likely to get prompted for your password.
|
||||
- *I run this script sourced just so that the required installed components are accessible on the `PATH` to the rest*
|
||||
*of the process without having to reload the session.*
|
||||
- The steps that `build_and_install_all` takes is:
|
||||
- Checkout the git repos for the `tests` and `kata-containers` repos as specified by the environment variables
|
||||
(default to `CCv0` branches if they are not supplied)
|
||||
- Use the `tests/.ci` scripts to install the build dependencies
|
||||
- Build and install the Kata runtime
|
||||
- Configure Kata to use containerd and for debug and confidential containers features to be enabled (including
|
||||
enabling console access to the Kata guest shell, which should only be done in development)
|
||||
- Create, build and install a rootfs for the Kata hypervisor to use. For 'CCv0' this is currently based on Ubuntu
|
||||
20.04 and has extra packages like `umoci` added.
|
||||
- Build the Kata guest kernel
|
||||
- Install the hypervisor (in order to select which hypervisor will be used, the `KATA_HYPERVISOR` environment
|
||||
variable can be used to select between `qemu` or `cloud-hypervisor`)
|
||||
> **Note**: Depending on how and where your VMs are hosted and how IPs are shared you might get an error from docker
|
||||
during image pulls matching `ERROR: toomanyrequests: Too Many Requests`. To get past
|
||||
this, login into Docker Hub and pull the images used with:
|
||||
> ```bash
|
||||
> $ sudo docker login
|
||||
> $ sudo docker pull ubuntu
|
||||
> ```
|
||||
> then re-run the command.
|
||||
- The first time this runs it may take a while, but subsequent runs will be quicker as more things are already
|
||||
installed and they can be further cut down by not running all the above steps
|
||||
[see "Additional script usage" below](#additional-script-usage)
|
||||
|
||||
- Create a new Kata sandbox pod using `crictl` with:
|
||||
```bash
|
||||
$ ~/ccv0.sh crictl_create_cc_pod
|
||||
```
|
||||
- This creates a pod configuration file, creates the pod from this using
|
||||
`sudo crictl runp -r kata ~/pod-config.yaml` and runs `sudo crictl pods` to show the pod
|
||||
- Create a new Kata confidential container with:
|
||||
```bash
|
||||
$ ~/ccv0.sh crictl_create_cc_container
|
||||
```
|
||||
- This creates a container (based on `busybox:1.33.1`) in the Kata cc sandbox and prints a list of containers.
|
||||
This will have been created based on an image pulled in the Kata pod sandbox/guest, not on the host machine.
|
||||
|
||||
As this point you should have a `crictl` pod and container that is using the Kata confidential containers runtime.
|
||||
You can [validate that the container image was pulled on the guest](#validate-that-the-container-image-was-pulled-on-the-guest)
|
||||
or [using the Kata pod sandbox for testing with `agent-ctl` or `ctr shim`](#using-a-kata-pod-sandbox-for-testing-with-agent-ctl-or-ctr-shim)
|
||||
|
||||
#### Clean up the `crictl` pod sandbox and container
|
||||
- When the testing is complete you can delete the container and pod by running:
|
||||
```bash
|
||||
$ ~/ccv0.sh crictl_delete_cc
|
||||
```
|
||||
### Using Kubernetes for end-to-end provisioning of a Kata confidential containers pod with an unencrypted image
|
||||
|
||||
- Run the full build process with the Kubernetes environment variable set to `"yes"`, so the Kubernetes cluster is
|
||||
configured and created using the VM
|
||||
as a single node cluster:
|
||||
```bash
|
||||
$ export KUBERNETES="yes"
|
||||
$ ~/ccv0.sh build_and_install_all
|
||||
```
|
||||
> **Note**: Depending on how and where your VMs are hosted and how IPs are shared you might get an error from docker
|
||||
during matching `ERROR: toomanyrequests: Too Many Requests`. To get past
|
||||
this, login into Docker Hub and pull the images used with:
|
||||
> ```bash
|
||||
> $ sudo docker login
|
||||
> $ sudo docker pull registry:2
|
||||
> $ sudo docker pull ubuntu:20.04
|
||||
> ```
|
||||
> then re-run the command.
|
||||
- Check that your Kubernetes cluster has been correctly set-up by running :
|
||||
```bash
|
||||
$ kubectl get nodes
|
||||
```
|
||||
and checking that you see a single node e.g.
|
||||
```text
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
stevenh-ccv0-k8s1.fyre.ibm.com Ready control-plane,master 43s v1.22.0
|
||||
```
|
||||
- Create a Kata confidential containers pod by running:
|
||||
```bash
|
||||
$ ~/ccv0.sh kubernetes_create_cc_pod
|
||||
```
|
||||
- Wait a few seconds for pod to start then check that the pod's status is `Running` with
|
||||
```bash
|
||||
$ kubectl get pods
|
||||
```
|
||||
which should show something like:
|
||||
```text
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
busybox-cc 1/1 Running 0 54s
|
||||
```
|
||||
|
||||
- As this point you should have a Kubernetes pod and container running, that is using the Kata
|
||||
confidential containers runtime.
|
||||
You can [validate that the container image was pulled on the guest](#validate-that-the-container-image-was-pulled-on-the-guest)
|
||||
or [using the Kata pod sandbox for testing with `agent-ctl` or `ctr shim`](#using-a-kata-pod-sandbox-for-testing-with-agent-ctl-or-ctr-shim)
|
||||
|
||||
#### Clean up the Kubernetes pod sandbox and container
|
||||
- When the testing is complete you can delete the container and pod by running:
|
||||
```bash
|
||||
$ ~/ccv0.sh kubernetes_delete_cc_pod
|
||||
```
|
||||
|
||||
### Validate that the container image was pulled on the guest
|
||||
|
||||
There are a couple of ways we can check that the container pull image action was offloaded to the guest, by checking
|
||||
the guest's file system for the unpacked bundle and checking the host's directories to ensure it wasn't also pulled
|
||||
there.
|
||||
- To check the guest's file system:
|
||||
- Open a shell into the Kata guest with:
|
||||
```bash
|
||||
$ ~/ccv0.sh open_kata_shell
|
||||
```
|
||||
- List the files in the directory that the container image bundle should have been unpacked to with:
|
||||
```bash
|
||||
$ ls -ltr /run/kata-containers/confidential-containers_signed/
|
||||
```
|
||||
- This should give something like
|
||||
```
|
||||
total 72
|
||||
-rw-r--r-- 1 root root 2977 Jan 20 10:03 config.json
|
||||
-rw-r--r-- 1 root root 372 Jan 20 10:03 umoci.json
|
||||
-rw-r--r-- 1 root root 63584 Jan 20 10:03 sha256_be9faa75035c20288cde7d2cdeb6cd1f5f4dbcd845d3f86f7feab61c4eff9eb5.mtree
|
||||
drwxr-xr-x 12 root root 240 Jan 20 10:03 rootfs
|
||||
```
|
||||
which shows how the image has been pulled and then unbundled on the guest.
|
||||
- Leave the Kata guest shell by running:
|
||||
```bash
|
||||
$ exit
|
||||
```
|
||||
- To verify that the image wasn't pulled on the host system we can look at the shared sandbox on the host and we
|
||||
should only see a single bundle for the pause container as the `busybox` based container image should have been
|
||||
pulled on the guest:
|
||||
- Find all the `rootfs` directories under in the pod's shared directory with:
|
||||
```bash
|
||||
$ pod_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "id [^,][^,].* " | awk '{print $2}')
|
||||
$ sudo find /run/kata-containers/shared/sandboxes/${pod_id}/shared -name rootfs
|
||||
```
|
||||
which should only show a single `rootfs` directory if the container image was pulled on the guest, not the host
|
||||
- Looking that `rootfs` directory with
|
||||
```bash
|
||||
$ sudo ls -ltr $(sudo find /run/kata-containers/shared/sandboxes/${pod_id}/shared -name rootfs)
|
||||
```
|
||||
shows something similar to
|
||||
```
|
||||
total 668
|
||||
-rwxr-xr-x 1 root root 682696 Aug 25 13:58 pause
|
||||
drwxr-xr-x 2 root root 6 Jan 20 02:01 proc
|
||||
drwxr-xr-x 2 root root 6 Jan 20 02:01 dev
|
||||
drwxr-xr-x 2 root root 6 Jan 20 02:01 sys
|
||||
drwxr-xr-x 2 root root 25 Jan 20 02:01 etc
|
||||
```
|
||||
which is clearly the pause container indicating that the `busybox` based container image is not exposed to the host.
|
||||
|
||||
### Using a Kata pod sandbox for testing with `agent-ctl` or `ctr shim`
|
||||
|
||||
Once you have a kata pod sandbox created as described above, either using
|
||||
[`crictl`](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image), or [Kubernetes](#using-kubernetes-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
|
||||
, you can use this to test specific components of the Kata confidential
|
||||
containers architecture. This can be useful for development and debugging to isolate and test features
|
||||
that aren't broadly supported end-to-end. Here are some examples:
|
||||
|
||||
- In the first terminal run the pull image on guest command against the Kata agent, via the shim (`containerd-shim-kata-v2`).
|
||||
This can be achieved using the [containerd](https://github.com/containerd/containerd) CLI tool, `ctr`, which can be used to
|
||||
interact with the shim directly. The command takes the form
|
||||
`ctr --namespace k8s.io shim --id <sandbox-id> pull-image <image> <new-container-id>` and can been run directly, or through
|
||||
the `ccv0.sh` script to automatically fill in the variables:
|
||||
- Optionally, set up some environment variables to set the image and credentials used:
|
||||
- By default the shim pull image test in `ccv0.sh` will use the `busybox:1.33.1` based test image
|
||||
`quay.io/kata-containers/confidential-containers:signed` which requires no authentication. To use a different
|
||||
image, set the `PULL_IMAGE` environment variable e.g.
|
||||
```bash
|
||||
$ export PULL_IMAGE="docker.io/library/busybox:latest"
|
||||
```
|
||||
Currently the containerd shim pull image
|
||||
code doesn't support using a container registry that requires authentication, so if this is required, see the
|
||||
below steps to run the pull image command against the agent directly.
|
||||
- Run the pull image agent endpoint with:
|
||||
```bash
|
||||
$ ~/ccv0.sh shim_pull_image
|
||||
```
|
||||
which will print the `ctr shim` command for reference
|
||||
- Alternatively you can issue the command directly to the `kata-agent` pull image endpoint, which also supports
|
||||
credentials in order to pull from an authenticated registry:
|
||||
- Optionally set up some environment variables to set the image and credentials used:
|
||||
- Set the `PULL_IMAGE` environment variable e.g. `export PULL_IMAGE="docker.io/library/busybox:latest"`
|
||||
if a specific container image is required.
|
||||
- If the container registry for the image requires authentication then this can be set with an environment
|
||||
variable `SOURCE_CREDS`. For example to use Docker Hub (`docker.io`) as an authenticated user first run
|
||||
`export SOURCE_CREDS="<dockerhub username>:<dockerhub api key>"`
|
||||
> **Note**: the credentials support on the agent request is a tactical solution for the short-term
|
||||
proof of concept to allow more images to be pulled and tested. Once we have support for getting
|
||||
keys into the Kata guest image using the attestation-agent and/or KBS I'd expect container registry
|
||||
credentials to be looked up using that mechanism.
|
||||
- Run the pull image agent endpoint with
|
||||
```bash
|
||||
$ ~/ccv0.sh agent_pull_image
|
||||
```
|
||||
and you should see output which includes `Command PullImage (1 of 1) returned (Ok(()), false)` to indicate
|
||||
that the `PullImage` request was successful e.g.
|
||||
```
|
||||
Finished release [optimized] target(s) in 0.21s
|
||||
{"msg":"announce","level":"INFO","ts":"2021-09-15T08:40:14.189360410-07:00","subsystem":"rpc","name":"kata-agent-ctl","pid":"830920","version":"0.1.0","source":"kata-agent-ctl","config":"Config { server_address: \"vsock://1970354082:1024\", bundle_dir: \"/tmp/bundle\", timeout_nano: 0, interactive: false, ignore_errors: false }"}
|
||||
{"msg":"client setup complete","level":"INFO","ts":"2021-09-15T08:40:14.193639057-07:00","pid":"830920","source":"kata-agent-ctl","name":"kata-agent-ctl","subsystem":"rpc","version":"0.1.0","server-address":"vsock://1970354082:1024"}
|
||||
{"msg":"Run command PullImage (1 of 1)","level":"INFO","ts":"2021-09-15T08:40:14.196643765-07:00","pid":"830920","source":"kata-agent-ctl","subsystem":"rpc","name":"kata-agent-ctl","version":"0.1.0"}
|
||||
{"msg":"response received","level":"INFO","ts":"2021-09-15T08:40:43.828200633-07:00","source":"kata-agent-ctl","name":"kata-agent-ctl","subsystem":"rpc","version":"0.1.0","pid":"830920","response":""}
|
||||
{"msg":"Command PullImage (1 of 1) returned (Ok(()), false)","level":"INFO","ts":"2021-09-15T08:40:43.828261708-07:00","subsystem":"rpc","pid":"830920","source":"kata-agent-ctl","version":"0.1.0","name":"kata-agent-ctl"}
|
||||
```
|
||||
> **Note**: The first time that `~/ccv0.sh agent_pull_image` is run, the `agent-ctl` tool will be built
|
||||
which may take a few minutes.
|
||||
- To validate that the image pull was successful, you can open a shell into the Kata guest with:
|
||||
```bash
|
||||
$ ~/ccv0.sh open_kata_shell
|
||||
```
|
||||
- Check the `/run/kata-containers/` directory to verify that the container image bundle has been created in a directory
|
||||
named either `01234556789` (for the container id), or the container image name, e.g.
|
||||
```bash
|
||||
$ ls -ltr /run/kata-containers/confidential-containers_signed/
|
||||
```
|
||||
which should show something like
|
||||
```
|
||||
total 72
|
||||
drwxr-xr-x 10 root root 200 Jan 1 1970 rootfs
|
||||
-rw-r--r-- 1 root root 2977 Jan 20 16:45 config.json
|
||||
-rw-r--r-- 1 root root 372 Jan 20 16:45 umoci.json
|
||||
-rw-r--r-- 1 root root 63584 Jan 20 16:45 sha256_be9faa75035c20288cde7d2cdeb6cd1f5f4dbcd845d3f86f7feab61c4eff9eb5.mtree
|
||||
```
|
||||
- Leave the Kata shell by running:
|
||||
```bash
|
||||
$ exit
|
||||
```
|
||||
|
||||
## Verifying signed images
|
||||
|
||||
For this sample demo, we use local attestation to pass through the required
|
||||
configuration to do container image signature verification. Due to this, the ability to verify images is limited
|
||||
to a pre-created selection of test images in our test
|
||||
repository [`quay.io/kata-containers/confidential-containers`](https://quay.io/repository/kata-containers/confidential-containers?tab=tags).
|
||||
For pulling images not in this test repository (called an *unprotected* registry below), we fall back to the behaviour
|
||||
of not enforcing signatures. More documentation on how to customise this to match your own containers through local,
|
||||
or remote attestation will be available in future.
|
||||
|
||||
In our test repository there are three tagged images:
|
||||
|
||||
| Test Image | Base Image used | Signature status | GPG key status |
|
||||
| --- | --- | --- | --- |
|
||||
| `quay.io/kata-containers/confidential-containers:signed` | `busybox:1.33.1` | [signature](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/signatures.tar) embedded in kata rootfs | [public key](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/public.gpg) embedded in kata rootfs |
|
||||
| `quay.io/kata-containers/confidential-containers:unsigned` | `busybox:1.33.1` | not signed | not signed |
|
||||
| `quay.io/kata-containers/confidential-containers:other_signed` | `nginx:1.21.3` | [signature](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/signatures.tar) embedded in kata rootfs | GPG key not kept |
|
||||
|
||||
Using a standard unsigned `busybox` image that can be pulled from another, *unprotected*, `quay.io` repository we can
|
||||
test a few scenarios.
|
||||
|
||||
In this sample, with local attestation, we pass in the public GPG key and signature files, and the [`offline_fs_kbc`
|
||||
configuration](https://github.com/confidential-containers/attestation-agent/blob/main/src/kbc_modules/offline_fs_kbc/README.md)
|
||||
into the guest image which specifies that any container image from `quay.io/kata-containers`
|
||||
must be signed with the embedded GPG key and the agent configuration needs updating to enable this.
|
||||
With this policy set a few tests of image verification can be done to test different scenarios by attempting
|
||||
to create containers from these images using `crictl`:
|
||||
|
||||
- If you don't already have the Kata Containers CC code built and configured for `crictl`, then follow the
|
||||
[instructions above](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
|
||||
up to the `~/ccv0.sh crictl_create_cc_pod` command.
|
||||
|
||||
- In order to enable the guest image, you will need to setup the required configuration, policy and signature files
|
||||
needed by running
|
||||
`~/ccv0.sh copy_signature_files_to_guest` and then run `~/ccv0.sh crictl_create_cc_pod` which will delete and recreate
|
||||
your pod - adding in the new files.
|
||||
|
||||
- To test the fallback behaviour works using an unsigned image from an *unprotected* registry we can pull the `busybox`
|
||||
image by running:
|
||||
```bash
|
||||
$ export CONTAINER_CONFIG_FILE=container-config_unsigned-unprotected.yaml
|
||||
$ ~/ccv0.sh crictl_create_cc_container
|
||||
```
|
||||
- This finishes showing the running container e.g.
|
||||
```text
|
||||
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
|
||||
98c70fefe997a quay.io/prometheus/busybox:latest Less than a second ago Running prometheus-busybox-signed 0 70119e0539238
|
||||
```
|
||||
- To test that an unsigned image from our *protected* test container registry is rejected we can run:
|
||||
```bash
|
||||
$ export CONTAINER_CONFIG_FILE=container-config_unsigned-protected.yaml
|
||||
$ ~/ccv0.sh crictl_create_cc_container
|
||||
```
|
||||
- This correctly results in an error message from `crictl`:
|
||||
`PullImage from image service failed" err="rpc error: code = Internal desc = Security validate failed: Validate image failed: The signatures do not satisfied! Reject reason: [Match reference failed.]" image="quay.io/kata-containers/confidential-containers:unsigned"`
|
||||
- To test that the signed image from our *protected* test container registry is accepted we can run:
|
||||
```bash
|
||||
$ export CONTAINER_CONFIG_FILE=container-config.yaml
|
||||
$ ~/ccv0.sh crictl_create_cc_container
|
||||
```
|
||||
- This finishes by showing a new `kata-cc-busybox-signed` running container e.g.
|
||||
```text
|
||||
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
|
||||
b4d85c2132ed9 quay.io/kata-containers/confidential-containers:signed Less than a second ago Running kata-cc-busybox-signed 0 70119e0539238
|
||||
...
|
||||
```
|
||||
- Finally to check the image with a valid signature, but invalid GPG key (the real trusted piece of information we really
|
||||
want to protect with the attestation agent in future) fails we can run:
|
||||
```bash
|
||||
$ export CONTAINER_CONFIG_FILE=container-config_signed-protected-other.yaml
|
||||
$ ~/ccv0.sh crictl_create_cc_container
|
||||
```
|
||||
- Again this results in an error message from `crictl`:
|
||||
`"PullImage from image service failed" err="rpc error: code = Internal desc = Security validate failed: Validate image failed: The signatures do not satisfied! Reject reason: [signature verify failed! There is no pubkey can verify the signature!]" image="quay.io/kata-containers/confidential-containers:other_signed"`
|
||||
|
||||
### Using Kubernetes to create a Kata confidential containers pod from the encrypted ssh demo sample image
|
||||
|
||||
The [ssh-demo](https://github.com/confidential-containers/documentation/tree/main/demos/ssh-demo) explains how to
|
||||
demonstrate creating a Kata confidential containers pod from an encrypted image with the runtime created by the
|
||||
[confidential-containers operator](https://github.com/confidential-containers/documentation/blob/main/demos/operator-demo).
|
||||
To be fully confidential, this should be run on a Trusted Execution Environment, but it can be tested on generic
|
||||
hardware as well.
|
||||
|
||||
If you wish to build the Kata confidential containers runtime to do this yourself, then you can using the following
|
||||
steps:
|
||||
|
||||
- Run the full build process with the Kubernetes environment variable set to `"yes"`, so the Kubernetes cluster is
|
||||
configured and created using the VM as a single node cluster and with `AA_KBC` set to `offline_fs_kbc`.
|
||||
```bash
|
||||
$ export KUBERNETES="yes"
|
||||
$ export AA_KBC=offline_fs_kbc
|
||||
$ ~/ccv0.sh build_and_install_all
|
||||
```
|
||||
- The `AA_KBC=offline_fs_kbc` mode will ensure that, when creating the rootfs of the Kata guest, the
|
||||
[attestation-agent](https://github.com/confidential-containers/attestation-agent) will be added along with the
|
||||
[sample offline KBC](https://github.com/confidential-containers/documentation/blob/main/demos/ssh-demo/aa-offline_fs_kbc-keys.json)
|
||||
and an agent configuration file
|
||||
> **Note**: Depending on where your VMs are hosted and how IPs are shared you might get an error from docker
|
||||
during matching `ERROR: toomanyrequests: Too Many Requests`. To get past
|
||||
this, login into Docker Hub and pull the images used with:
|
||||
> ```bash
|
||||
> $ sudo docker login
|
||||
> $ sudo docker pull registry:2
|
||||
> $ sudo docker pull ubuntu:20.04
|
||||
> ```
|
||||
> then re-run the command.
|
||||
- Check that your Kubernetes cluster has been correctly set-up by running :
|
||||
```bash
|
||||
$ kubectl get nodes
|
||||
```
|
||||
and checking that you see a single node e.g.
|
||||
```text
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
stevenh-ccv0-k8s1.fyre.ibm.com Ready control-plane,master 43s v1.22.0
|
||||
```
|
||||
- Create a sample Kata confidential containers ssh pod by running:
|
||||
```bash
|
||||
$ ~/ccv0.sh kubernetes_create_ssh_demo_pod
|
||||
```
|
||||
- At this point you should have a Kubernetes pod running the Kata confidential containers runtime that has pulled
|
||||
the [sample image](https://hub.docker.com/r/katadocker/ccv0-ssh) which was encrypted by the key file that we included
|
||||
in the rootfs.
|
||||
During the pod deployment the image was pulled and then decrypted using the key file, on the Kata guest image, without
|
||||
it ever being available to the host.
|
||||
|
||||
- To validate that the container is working, you can connect to the image via SSH by running:
|
||||
```bash
|
||||
$ ~/ccv0.sh connect_to_ssh_demo_pod
|
||||
```
|
||||
- During this connection the host key fingerprint is shown and should match:
|
||||
`ED25519 key fingerprint is SHA256:wK7uOpqpYQczcgV00fGCh+X97sJL3f6G1Ku4rvlwtR0.`
|
||||
- After you are finished connecting then run:
|
||||
```bash
|
||||
$ exit
|
||||
```
|
||||
|
||||
- To delete the sample SSH demo pod run:
|
||||
```bash
|
||||
$ ~/ccv0.sh kubernetes_delete_ssh_demo_pod
|
||||
```
|
||||
|
||||
## Additional script usage
|
||||
|
||||
As well as being able to use the script as above to build all of `kata-containers` from scratch it can be used to just
|
||||
re-build bits of it by running the script with different parameters. For example after the first build you will often
|
||||
not need to re-install the dependencies, the hypervisor or the Guest kernel, but just test code changes made to the
|
||||
runtime and agent. This can be done by running `~/ccv0.sh rebuild_and_install_kata`. (*Note this does a hard checkout*
|
||||
*from git, so if your changes are only made locally it is better to do the individual steps e.g.*
|
||||
`~/ccv0.sh build_kata_runtime && ~/ccv0.sh build_and_add_agent_to_rootfs && ~/ccv0.sh build_and_install_rootfs`).
|
||||
There are commands for a lot of steps in building, setting up and testing and the full list can be seen by running
|
||||
`~/ccv0.sh help`:
|
||||
```
|
||||
$ ~/ccv0.sh help
|
||||
Overview:
|
||||
Build and test kata containers from source
|
||||
Optionally set kata-containers and tests repo and branch as exported variables before running
|
||||
e.g. export katacontainers_repo=github.com/stevenhorsman/kata-containers && export katacontainers_branch=kata-ci-from-fork && export tests_repo=github.com/stevenhorsman/tests && export tests_branch=kata-ci-from-fork && ~/ccv0.sh build_and_install_all
|
||||
Usage:
|
||||
ccv0.sh [options] <command>
|
||||
Commands:
|
||||
- help: Display this help
|
||||
- all: Build and install everything, test kata with containerd and capture the logs
|
||||
- build_and_install_all: Build and install everything
|
||||
- initialize: Install dependencies and check out kata-containers source
|
||||
- rebuild_and_install_kata: Rebuild the kata runtime and agent and build and install the image
|
||||
- build_kata_runtime: Build and install the kata runtime
|
||||
- configure: Configure Kata to use rootfs and enable debug
|
||||
- create_rootfs: Create a local rootfs
|
||||
- build_and_add_agent_to_rootfs:Builds the kata-agent and adds it to the rootfs
|
||||
- build_and_install_rootfs: Builds and installs the rootfs image
|
||||
- install_guest_kernel: Setup, build and install the guest kernel
|
||||
- build_cloud_hypervisor Checkout, patch, build and install Cloud Hypervisor
|
||||
- build_qemu: Checkout, patch, build and install QEMU
|
||||
- init_kubernetes: initialize a Kubernetes cluster on this system
|
||||
- crictl_create_cc_pod Use crictl to create a new kata cc pod
|
||||
- crictl_create_cc_container Use crictl to create a new busybox container in the kata cc pod
|
||||
- crictl_delete_cc Use crictl to delete the kata cc pod sandbox and container in it
|
||||
- kubernetes_create_cc_pod: Create a Kata CC runtime busybox-based pod in Kubernetes
|
||||
- kubernetes_delete_cc_pod: Delete the Kata CC runtime busybox-based pod in Kubernetes
|
||||
- open_kata_shell: Open a shell into the kata runtime
|
||||
- agent_pull_image: Run PullImage command against the agent with agent-ctl
|
||||
- shim_pull_image: Run PullImage command against the shim with ctr
|
||||
- agent_create_container: Run CreateContainer command against the agent with agent-ctl
|
||||
- test: Test using kata with containerd
|
||||
- test_capture_logs: Test using kata with containerd and capture the logs in the user's home directory
|
||||
|
||||
Options:
|
||||
-d: Enable debug
|
||||
-h: Display this help
|
||||
```
|
||||
@@ -1,44 +0,0 @@
|
||||
# Generating a Kata Containers payload for the Confidential Containers Operator
|
||||
|
||||
[Confidential Containers
|
||||
Operator](https://github.com/confidential-containers/operator) consumes a Kata
|
||||
Containers payload, generated from the `CCv0` branch, and here one can find all
|
||||
the necessary info on how to build such a payload.
|
||||
|
||||
## Requirements
|
||||
|
||||
* `make` installed in the machine
|
||||
* Docker installed in the machine
|
||||
* `sudo` access to the machine
|
||||
|
||||
## Process
|
||||
|
||||
* Clone [Kata Containers](https://github.com/kata-containers/kata-containers)
|
||||
```sh
|
||||
git clone --branch CCv0 https://github.com/kata-containers/kata-containers
|
||||
```
|
||||
* In case you've already cloned the repo, make sure to switch to the `CCv0` branch
|
||||
```sh
|
||||
git checkout CCv0
|
||||
```
|
||||
* Ensure your tree is clean and in sync with upstream `CCv0`
|
||||
```sh
|
||||
git clean -xfd
|
||||
git reset --hard <upstream>/CCv0
|
||||
```
|
||||
* Make sure you're authenticated to `quay.io`
|
||||
```sh
|
||||
sudo docker login quay.io
|
||||
```
|
||||
* From the top repo directory, run:
|
||||
```sh
|
||||
sudo make cc-payload
|
||||
```
|
||||
* Make sure the image was uploaded to the [Confidential Containers
|
||||
runtime-payload
|
||||
registry](https://quay.io/repository/confidential-containers/runtime-payload?tab=tags)
|
||||
|
||||
## Notes
|
||||
|
||||
Make sure to run it on a machine that's not the one you're hacking on, prepare a
|
||||
cup of tea, and get back to it an hour later (at least).
|
||||
@@ -15,6 +15,18 @@ $ sudo .ci/aarch64/install_rom_aarch64.sh
|
||||
$ popd
|
||||
```
|
||||
|
||||
## Config KATA QEMU
|
||||
|
||||
After executing the above script, two files will be generated under the directory `/usr/share/kata-containers/` by default, namely `kata-flash0.img` and `kata-flash1.img`. Next we need to change the configuration file of `kata qemu`, which is in `/opt/kata/share/defaults/kata-containers/configuration-qemu.toml` by default, and specify in the configuration file to use the UEFI ROM installed above. The above is an example of a `kata deploy` installation. For package management installation, please use `kata-runtime env` to find the location of the configuration file. Please refer to the following configuration.
|
||||
|
||||
```
|
||||
[hypervisor.qemu]
|
||||
|
||||
# -pflash can add image file to VM. The arguments of it should be in format
|
||||
# of ["/path/to/flash0.img", "/path/to/flash1.img"]
|
||||
pflashes = ["/usr/share/kata-containers/kata-flash0.img", "/usr/share/kata-containers/kata-flash1.img"]
|
||||
```
|
||||
|
||||
## Run for test
|
||||
|
||||
Let's test if the memory hotplug is ready for Kata after installing the UEFI ROM. Make sure containerd is ready to run Kata before testing.
|
||||
|
||||
@@ -1,159 +0,0 @@
|
||||
# Kata Containers with AMD SEV-SNP VMs
|
||||
|
||||
## Disclaimer
|
||||
|
||||
This guide is designed for developers and is - same as the Developer Guide - not intended for production systems or end users. It is advisable to only follow this guide on non-critical development systems.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
To run Kata Containers in SNP-VMs, the following software stack is used.
|
||||
|
||||

|
||||
|
||||
The host BIOS and kernel must be capable of supporting AMD SEV-SNP and configured accordingly. For Kata Containers, the host kernel with branch [`sev-snp-iommu-avic_5.19-rc6_v3`](https://github.com/AMDESE/linux/tree/sev-snp-iommu-avic_5.19-rc6_v3) and commit [`3a88547`](https://github.com/AMDESE/linux/commit/3a885471cf89156ea555341f3b737ad2a8d9d3d0) is known to work in conjunction with SEV Firmware version 1.51.3 (0xh\_1.33.03) available on AMD's [SEV developer website](https://developer.amd.com/sev/). See [AMD's guide](https://github.com/AMDESE/AMDSEV/tree/sev-snp-devel) to configure the host accordingly. Verify that you are able to run SEV-SNP encrypted VMs first. The guest components required for Kata Containers are built as described below.
|
||||
|
||||
**Tip**: It is easiest to first have Kata Containers running on your system and then modify it to run containers in SNP-VMs. Follow the [Developer guide](../Developer-Guide.md#warning) and then follow the below steps. Nonetheless, you can just follow this guide from the start.
|
||||
|
||||
## How to build
|
||||
|
||||
Follow all of the below steps to install Kata Containers with SNP-support from scratch. These steps mostly follow the developer guide with modifications to support SNP
|
||||
|
||||
__Steps from the Developer Guide:__
|
||||
- Get all the [required components](../Developer-Guide.md#requirements-to-build-individual-components) for building the kata-runtime
|
||||
- [Build and install the kata-runtime](../Developer-Guide.md#build-and-install-the-kata-containers-runtime)
|
||||
- [Build a custom agent](../Developer-Guide.md#build-a-custom-kata-agent---optional)
|
||||
- [Create an initrd image](../Developer-Guide.md#create-an-initrd-image---optional) by first building a rootfs, then building the initrd based on the rootfs, use a custom agent and install. `ubuntu` works as the distribution of choice.
|
||||
- Get the [required components](../../tools/packaging/kernel/README.md#requirements) to build a custom kernel
|
||||
|
||||
__SNP-specific steps:__
|
||||
- Build the SNP-specific kernel as shown below (see this [guide](../../tools/packaging/kernel/README.md#build-kata-containers-kernel) for more information)
|
||||
```bash
|
||||
$ pushd kata-containers/tools/packaging/kernel/
|
||||
$ ./build-kernel.sh -a x86_64 -x snp setup
|
||||
$ ./build-kernel.sh -a x86_64 -x snp build
|
||||
$ sudo -E PATH="${PATH}" ./build-kernel.sh -x snp install
|
||||
$ popd
|
||||
```
|
||||
- Build a current OVMF capable of SEV-SNP:
|
||||
```bash
|
||||
$ pushd kata-containers/tools/packaging/static-build/ovmf
|
||||
$ ./build.sh
|
||||
$ tar -xvf edk2-x86_64.tar.gz
|
||||
$ popd
|
||||
```
|
||||
- Build a custom QEMU
|
||||
```bash
|
||||
$ source kata-containers/tools/packaging/scripts/lib.sh
|
||||
$ qemu_url="$(get_from_kata_deps "assets.hypervisor.qemu.snp.url")"
|
||||
$ qemu_branch="$(get_from_kata_deps "assets.hypervisor.qemu.snp.branch")"
|
||||
$ qemu_commit="$(get_from_kata_deps "assets.hypervisor.qemu.snp.commit")"
|
||||
$ git clone -b "${qemu_branch}" "${qemu_url}"
|
||||
$ pushd qemu
|
||||
$ git checkout "${qemu_commit}"
|
||||
$ ./configure --enable-virtfs --target-list=x86_64-softmmu --enable-debug
|
||||
$ make -j "$(nproc)"
|
||||
$ popd
|
||||
```
|
||||
|
||||
### Kata Containers Configuration for SNP
|
||||
|
||||
The configuration file located at `/etc/kata-containers/configuration.toml` must be adapted as follows to support SNP-VMs:
|
||||
- Use the SNP-specific kernel for the guest VM (change path)
|
||||
```toml
|
||||
kernel = "/usr/share/kata-containers/vmlinuz-snp.container"
|
||||
```
|
||||
- Enable the use of an initrd (uncomment)
|
||||
```toml
|
||||
initrd = "/usr/share/kata-containers/kata-containers-initrd.img"
|
||||
```
|
||||
- Disable the use of a rootfs (comment out)
|
||||
```toml
|
||||
# image = "/usr/share/kata-containers/kata-containers.img"
|
||||
```
|
||||
- Use the custom QEMU capable of SNP (change path)
|
||||
```toml
|
||||
path = "/path/to/qemu/build/qemu-system-x86_64"
|
||||
```
|
||||
- Use `virtio-9p` device since `virtio-fs` is unsupported due to bugs / shortcomings in QEMU version [`snp-v3`](https://github.com/AMDESE/qemu/tree/snp-v3) for SEV and SEV-SNP (change value)
|
||||
```toml
|
||||
shared_fs = "virtio-9p"
|
||||
```
|
||||
- Disable `virtiofsd` since it is no longer required (comment out)
|
||||
```toml
|
||||
# virtio_fs_daemon = "/usr/libexec/virtiofsd"
|
||||
```
|
||||
- Disable NVDIMM (uncomment)
|
||||
```toml
|
||||
disable_image_nvdimm = true
|
||||
```
|
||||
- Disable shared memory (uncomment)
|
||||
```toml
|
||||
file_mem_backend = ""
|
||||
```
|
||||
- Enable confidential guests (uncomment)
|
||||
```toml
|
||||
confidential_guest = true
|
||||
```
|
||||
- Enable SNP-VMs (uncomment)
|
||||
```toml
|
||||
sev_snp_guest = true
|
||||
```
|
||||
- Configure an OVMF (add path)
|
||||
```toml
|
||||
firmware = "/path/to/kata-containers/tools/packaging/static-build/ovmf/opt/kata/share/ovmf/OVMF.fd"
|
||||
```
|
||||
|
||||
## Test Kata Containers with Containerd
|
||||
|
||||
With Kata Containers configured to support SNP-VMs, we use containerd to test and deploy containers in these VMs.
|
||||
|
||||
### Install Containerd
|
||||
If not already present, follow [this guide](./containerd-kata.md#install) to install containerd and its related components including `CNI` and the `cri-tools` (skip Kata Containers since we already installed it)
|
||||
|
||||
### Containerd Configuration
|
||||
|
||||
Follow [this guide](./containerd-kata.md#configuration) to configure containerd to use Kata Containers
|
||||
|
||||
## Run Kata Containers in SNP-VMs
|
||||
|
||||
Run the below commands to start a container. See [this guide](./containerd-kata.md#run) for more information
|
||||
```bash
|
||||
$ sudo ctr image pull docker.io/library/busybox:latest
|
||||
$ sudo ctr run --cni --runtime io.containerd.run.kata.v2 -t --rm docker.io/library/busybox:latest hello sh
|
||||
```
|
||||
|
||||
### Check for active SNP:
|
||||
|
||||
Inside the running container, run the following commands to check if SNP is active. It should look something like this:
|
||||
```
|
||||
/ # dmesg | grep -i sev
|
||||
[ 0.299242] Memory Encryption Features active: AMD SEV SEV-ES SEV-SNP
|
||||
[ 0.472286] SEV: Using SNP CPUID table, 31 entries present.
|
||||
[ 0.514574] SEV: SNP guest platform device initialized.
|
||||
[ 0.885425] sev-guest sev-guest: Initialized SEV guest driver (using vmpck_id 0)
|
||||
```
|
||||
|
||||
### Obtain an SNP Attestation Report
|
||||
|
||||
To obtain an attestation report inside the container, the `/dev/sev-guest` device must first be configured. As of now, the VM does not perform this step, however it can be performed inside the container, either in the terminal or in code.
|
||||
|
||||
Example for shell:
|
||||
```
|
||||
/ # SNP_MAJOR=$(cat /sys/devices/virtual/misc/sev-guest/dev | awk -F: '{print $1}')
|
||||
/ # SNP_MINOR=$(cat /sys/devices/virtual/misc/sev-guest/dev | awk -F: '{print $2}')
|
||||
/ # mknod -m 600 /dev/sev-guest c "${SNP_MAJOR}" "${SNP_MINOR}"
|
||||
```
|
||||
|
||||
## Known Issues
|
||||
|
||||
- Support for cgroups v2 is still [work in progress](https://github.com/kata-containers/kata-containers/issues/927). If issues occur due to cgroups v2 becoming the default in newer systems, one possible solution is to downgrade cgroups to v1:
|
||||
```bash
|
||||
sudo sed -i 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 systemd.unified_cgroup_hierarchy=0"/' /etc/default/grub
|
||||
sudo update-grub
|
||||
sudo reboot
|
||||
```
|
||||
- If both SEV and SEV-SNP are supported by the host, Kata Containers uses SEV-SNP by default. You can verify what features are enabled by checking `/sys/module/kvm_amd/parameters/sev` and `sev_snp`. This means that Kata Containers can not run both SEV-SNP-VMs and SEV-VMs at the same time. If SEV is to be used by Kata Containers instead, reload the `kvm_amd` kernel module without SNP-support, this will disable SNP-support for the entire platform.
|
||||
```bash
|
||||
sudo rmmod kvm_amd && sudo modprobe kvm_amd sev_snp=0
|
||||
```
|
||||
|
||||
@@ -27,8 +27,6 @@ $ image="quay.io/prometheus/busybox:latest"
|
||||
$ cat << EOF > "${pod_yaml}"
|
||||
metadata:
|
||||
name: busybox-sandbox1
|
||||
uid: $(uuidgen)
|
||||
namespace: default
|
||||
EOF
|
||||
$ cat << EOF > "${container_yaml}"
|
||||
metadata:
|
||||
|
||||
@@ -32,7 +32,6 @@ The `nydus-sandbox.yaml` looks like below:
|
||||
metadata:
|
||||
attempt: 1
|
||||
name: nydus-sandbox
|
||||
uid: nydus-uid
|
||||
namespace: default
|
||||
log_directory: /tmp
|
||||
linux:
|
||||
|
||||
@@ -42,8 +42,6 @@ $ image="quay.io/prometheus/busybox:latest"
|
||||
$ cat << EOF > "${pod_yaml}"
|
||||
metadata:
|
||||
name: busybox-sandbox1
|
||||
uid: $(uuidgen)
|
||||
namespace: default
|
||||
EOF
|
||||
$ cat << EOF > "${container_yaml}"
|
||||
metadata:
|
||||
|
||||
File diff suppressed because one or more lines are too long
|
Before Width: | Height: | Size: 9.0 KiB |
@@ -24,7 +24,7 @@ architectures:
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case | Availability
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------|----------- |
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. | Yes |
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. | No |
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. | No |
|
||||
| [Using snap](#snap-installation) | Easy to install | yes | Good alternative to official distro packages. | No |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. | No |
|
||||
@@ -32,8 +32,7 @@ architectures:
|
||||
| [Build from source](#build-from-source-installation) | Build the software components manually | **No!** | Power users and developers only. | Yes |
|
||||
|
||||
### Kata Deploy Installation
|
||||
|
||||
Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md).
|
||||
`ToDo`
|
||||
### Official packages
|
||||
`ToDo`
|
||||
### Snap Installation
|
||||
@@ -84,7 +83,7 @@ $ git clone https://github.com/kata-containers/kata-containers.git
|
||||
$ cd kata-containers/src/runtime-rs
|
||||
$ make && sudo make install
|
||||
```
|
||||
After running the command above, the default config file `configuration.toml` will be installed under `/usr/share/defaults/kata-containers/`, the binary file `containerd-shim-kata-v2` will be installed under `/usr/local/bin/` .
|
||||
After running the command above, the default config file `configuration.toml` will be installed under `/usr/share/defaults/kata-containers/`, the binary file `containerd-shim-kata-v2` will be installed under `/user/local/bin` .
|
||||
|
||||
### Build Kata Containers Kernel
|
||||
Follow the [Kernel installation guide](/tools/packaging/kernel/README.md).
|
||||
|
||||
@@ -61,9 +61,6 @@ spec:
|
||||
name: eosgx-demo-job-1
|
||||
image: oeciteam/oe-helloworld:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- mountPath: /dev
|
||||
name: dev-mount
|
||||
securityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
|
||||
3785
src/agent/Cargo.lock
generated
3785
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -24,7 +24,6 @@ serial_test = "0.5.1"
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types" }
|
||||
sysinfo = "0.23.0"
|
||||
url = "2.2.2"
|
||||
|
||||
# Async helpers
|
||||
async-trait = "0.1.42"
|
||||
@@ -32,7 +31,7 @@ async-recursion = "0.3.2"
|
||||
futures = "0.3.17"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.21.2", features = ["full"] }
|
||||
tokio = { version = "1.14.0", features = ["full"] }
|
||||
tokio-vsock = "0.3.1"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
|
||||
@@ -67,14 +66,10 @@ serde = { version = "1.0.129", features = ["derive"] }
|
||||
toml = "0.5.8"
|
||||
clap = { version = "3.0.1", features = ["derive"] }
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/image-rs", tag = "v0.2.0" }
|
||||
# "vendored" feature for openssl is required by musl build
|
||||
openssl = { version = "0.10.38", features = ["vendored"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
test-utils = { path = "../libs/test-utils" }
|
||||
which = "4.3.0"
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
|
||||
@@ -85,14 +85,6 @@ ifeq ($(INIT),no)
|
||||
UNIT_FILES += kata-containers.target
|
||||
endif
|
||||
|
||||
# The following will be reverted, after
|
||||
# https://github.com/kata-containers/kata-containers/issues/5582
|
||||
# is resolved.
|
||||
IMAGE_RS_COMMIT = a1d7ba31201d9d7a575d05c5fed1f2cb2142a842
|
||||
ifeq ($(ARCH),s390x)
|
||||
$(shell sed -i -e "s/^\(image-rs.*\)tag\(.*\)/\1rev\2/" -e "s/^\(image-rs.*rev = \"\).*\(\".*\)/\1$(IMAGE_RS_COMMIT)\2/" Cargo.toml)
|
||||
endif
|
||||
|
||||
# Display name of command and it's version (or a message if not available).
|
||||
#
|
||||
# Arguments:
|
||||
|
||||
@@ -32,7 +32,7 @@ tokio = { version = "1.2.0", features = ["sync", "io-util", "process", "time", "
|
||||
futures = "0.3.17"
|
||||
async-trait = "0.1.31"
|
||||
inotify = "0.9.2"
|
||||
libseccomp = { version = "0.3.0", optional = true }
|
||||
libseccomp = { version = "0.2.3", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.5.0"
|
||||
|
||||
@@ -252,28 +252,19 @@ fn set_devices_resources(
|
||||
}
|
||||
|
||||
fn set_hugepages_resources(
|
||||
cg: &cgroups::Cgroup,
|
||||
_cg: &cgroups::Cgroup,
|
||||
hugepage_limits: &[LinuxHugepageLimit],
|
||||
res: &mut cgroups::Resources,
|
||||
) {
|
||||
info!(sl!(), "cgroup manager set hugepage");
|
||||
let mut limits = vec![];
|
||||
let hugetlb_controller = cg.controller_of::<HugeTlbController>();
|
||||
|
||||
for l in hugepage_limits.iter() {
|
||||
if hugetlb_controller.is_some() && hugetlb_controller.unwrap().size_supported(&l.page_size)
|
||||
{
|
||||
let hr = HugePageResource {
|
||||
size: l.page_size.clone(),
|
||||
limit: l.limit,
|
||||
};
|
||||
limits.push(hr);
|
||||
} else {
|
||||
warn!(
|
||||
sl!(),
|
||||
"{} page size support cannot be verified, dropping requested limit", l.page_size
|
||||
);
|
||||
}
|
||||
let hr = HugePageResource {
|
||||
size: l.page_size.clone(),
|
||||
limit: l.limit,
|
||||
};
|
||||
limits.push(hr);
|
||||
}
|
||||
res.hugepages.limits = limits;
|
||||
}
|
||||
@@ -467,11 +458,8 @@ fn linux_device_to_cgroup_device(d: &LinuxDevice) -> Option<DeviceResource> {
|
||||
}
|
||||
|
||||
fn linux_device_group_to_cgroup_device(d: &LinuxDeviceCgroup) -> Option<DeviceResource> {
|
||||
let dev_type = match &d.r#type {
|
||||
Some(t_s) => match DeviceType::from_char(t_s.chars().next()) {
|
||||
Some(t_c) => t_c,
|
||||
None => return None,
|
||||
},
|
||||
let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
|
||||
Some(t) => t,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
@@ -528,7 +516,7 @@ lazy_static! {
|
||||
// all mknod to all char devices
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(WILDCARD),
|
||||
minor: Some(WILDCARD),
|
||||
access: "m".to_string(),
|
||||
@@ -537,7 +525,7 @@ lazy_static! {
|
||||
// all mknod to all block devices
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("b".to_string()),
|
||||
r#type: "b".to_string(),
|
||||
major: Some(WILDCARD),
|
||||
minor: Some(WILDCARD),
|
||||
access: "m".to_string(),
|
||||
@@ -546,7 +534,7 @@ lazy_static! {
|
||||
// all read/write/mknod to char device /dev/console
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(5),
|
||||
minor: Some(1),
|
||||
access: "rwm".to_string(),
|
||||
@@ -555,7 +543,7 @@ lazy_static! {
|
||||
// all read/write/mknod to char device /dev/pts/<N>
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(136),
|
||||
minor: Some(WILDCARD),
|
||||
access: "rwm".to_string(),
|
||||
@@ -564,7 +552,7 @@ lazy_static! {
|
||||
// all read/write/mknod to char device /dev/ptmx
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(5),
|
||||
minor: Some(2),
|
||||
access: "rwm".to_string(),
|
||||
@@ -573,7 +561,7 @@ lazy_static! {
|
||||
// all read/write/mknod to char device /dev/net/tun
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(10),
|
||||
minor: Some(200),
|
||||
access: "rwm".to_string(),
|
||||
|
||||
@@ -236,12 +236,6 @@ pub fn resources_grpc_to_oci(res: &grpc::LinuxResources) -> oci::LinuxResources
|
||||
let devices = {
|
||||
let mut d = Vec::new();
|
||||
for dev in res.Devices.iter() {
|
||||
let dev_type = if dev.Type.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(dev.Type.clone())
|
||||
};
|
||||
|
||||
let major = if dev.Major == -1 {
|
||||
None
|
||||
} else {
|
||||
@@ -255,7 +249,7 @@ pub fn resources_grpc_to_oci(res: &grpc::LinuxResources) -> oci::LinuxResources
|
||||
};
|
||||
d.push(oci::LinuxDeviceCgroup {
|
||||
allow: dev.Allow,
|
||||
r#type: dev_type,
|
||||
r#type: dev.Type.clone(),
|
||||
major,
|
||||
minor,
|
||||
access: dev.Access.clone(),
|
||||
|
||||
@@ -11,7 +11,6 @@ use std::fs;
|
||||
use std::str::FromStr;
|
||||
use std::time;
|
||||
use tracing::instrument;
|
||||
use url::Url;
|
||||
|
||||
use kata_types::config::default::DEFAULT_AGENT_VSOCK_PORT;
|
||||
|
||||
@@ -26,12 +25,6 @@ const LOG_VPORT_OPTION: &str = "agent.log_vport";
|
||||
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
|
||||
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
|
||||
const CONFIG_FILE: &str = "agent.config_file";
|
||||
const CONTAINER_POLICY_FILE: &str = "agent.container_policy_file";
|
||||
const AA_KBC_PARAMS: &str = "agent.aa_kbc_params";
|
||||
const HTTPS_PROXY: &str = "agent.https_proxy";
|
||||
const NO_PROXY: &str = "agent.no_proxy";
|
||||
const ENABLE_DATA_INTEGRITY: &str = "agent.data_integrity";
|
||||
const ENABLE_SIGNATURE_VERIFICATION: &str = "agent.enable_signature_verification";
|
||||
|
||||
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
|
||||
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
|
||||
@@ -59,11 +52,6 @@ const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container p
|
||||
const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
|
||||
const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";
|
||||
|
||||
const ERR_INVALID_CONTAINER_POLICY_PATH_VALUE: &str = "invalid container_policy_file value";
|
||||
const ERR_INVALID_CONTAINER_POLICY_PATH_KEY: &str = "invalid container_policy_file key";
|
||||
const ERR_INVALID_CONTAINER_POLICY_ABSOLUTE: &str =
|
||||
"container_policy_file path must be an absolute file path";
|
||||
|
||||
#[derive(Debug, Default, Deserialize)]
|
||||
pub struct EndpointsConfig {
|
||||
pub allowed: Vec<String>,
|
||||
@@ -89,12 +77,6 @@ pub struct AgentConfig {
|
||||
pub tracing: bool,
|
||||
pub endpoints: AgentEndpoints,
|
||||
pub supports_seccomp: bool,
|
||||
pub container_policy_path: String,
|
||||
pub aa_kbc_params: String,
|
||||
pub https_proxy: String,
|
||||
pub no_proxy: String,
|
||||
pub data_integrity: bool,
|
||||
pub enable_signature_verification: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
@@ -110,12 +92,6 @@ pub struct AgentConfigBuilder {
|
||||
pub unified_cgroup_hierarchy: Option<bool>,
|
||||
pub tracing: Option<bool>,
|
||||
pub endpoints: Option<EndpointsConfig>,
|
||||
pub container_policy_path: Option<String>,
|
||||
pub aa_kbc_params: Option<String>,
|
||||
pub https_proxy: Option<String>,
|
||||
pub no_proxy: Option<String>,
|
||||
pub data_integrity: Option<bool>,
|
||||
pub enable_signature_verification: Option<bool>,
|
||||
}
|
||||
|
||||
macro_rules! config_override {
|
||||
@@ -177,12 +153,6 @@ impl Default for AgentConfig {
|
||||
tracing: false,
|
||||
endpoints: Default::default(),
|
||||
supports_seccomp: rpc::have_seccomp(),
|
||||
container_policy_path: String::from(""),
|
||||
aa_kbc_params: String::from(""),
|
||||
https_proxy: String::from(""),
|
||||
no_proxy: String::from(""),
|
||||
data_integrity: false,
|
||||
enable_signature_verification: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -211,16 +181,6 @@ impl FromStr for AgentConfig {
|
||||
config_override!(agent_config_builder, agent_config, server_addr);
|
||||
config_override!(agent_config_builder, agent_config, unified_cgroup_hierarchy);
|
||||
config_override!(agent_config_builder, agent_config, tracing);
|
||||
config_override!(agent_config_builder, agent_config, container_policy_path);
|
||||
config_override!(agent_config_builder, agent_config, aa_kbc_params);
|
||||
config_override!(agent_config_builder, agent_config, https_proxy);
|
||||
config_override!(agent_config_builder, agent_config, no_proxy);
|
||||
config_override!(agent_config_builder, agent_config, data_integrity);
|
||||
config_override!(
|
||||
agent_config_builder,
|
||||
agent_config,
|
||||
enable_signature_verification
|
||||
);
|
||||
|
||||
// Populate the allowed endpoints hash set, if we got any from the config file.
|
||||
if let Some(endpoints) = agent_config_builder.endpoints {
|
||||
@@ -249,10 +209,6 @@ impl AgentConfig {
|
||||
let mut config: AgentConfig = Default::default();
|
||||
let cmdline = fs::read_to_string(file)?;
|
||||
let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
|
||||
|
||||
let mut using_config_file = false;
|
||||
// Check if there is config file before parsing params that might
|
||||
// override values from the config file.
|
||||
for param in params.iter() {
|
||||
// If we get a configuration file path from the command line, we
|
||||
// generate our config from it.
|
||||
@@ -260,13 +216,9 @@ impl AgentConfig {
|
||||
// or if it can't be parsed properly.
|
||||
if param.starts_with(format!("{}=", CONFIG_FILE).as_str()) {
|
||||
let config_file = get_string_value(param)?;
|
||||
config = AgentConfig::from_config_file(&config_file)?;
|
||||
using_config_file = true;
|
||||
break;
|
||||
return AgentConfig::from_config_file(&config_file);
|
||||
}
|
||||
}
|
||||
|
||||
for param in params.iter() {
|
||||
// parse cmdline flags
|
||||
parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, config.debug_console);
|
||||
parse_cmdline_param!(param, DEV_MODE_FLAG, config.dev_mode);
|
||||
@@ -326,30 +278,6 @@ impl AgentConfig {
|
||||
config.unified_cgroup_hierarchy,
|
||||
get_bool_value
|
||||
);
|
||||
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
CONTAINER_POLICY_FILE,
|
||||
config.container_policy_path,
|
||||
get_container_policy_path_value
|
||||
);
|
||||
|
||||
parse_cmdline_param!(param, AA_KBC_PARAMS, config.aa_kbc_params, get_string_value);
|
||||
parse_cmdline_param!(param, HTTPS_PROXY, config.https_proxy, get_url_value);
|
||||
parse_cmdline_param!(param, NO_PROXY, config.no_proxy, get_string_value);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
ENABLE_DATA_INTEGRITY,
|
||||
config.data_integrity,
|
||||
get_bool_value
|
||||
);
|
||||
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
ENABLE_SIGNATURE_VERIFICATION,
|
||||
config.enable_signature_verification,
|
||||
get_bool_value
|
||||
);
|
||||
}
|
||||
|
||||
if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
|
||||
@@ -369,9 +297,7 @@ impl AgentConfig {
|
||||
}
|
||||
|
||||
// We did not get a configuration file: allow all endpoints.
|
||||
if !using_config_file {
|
||||
config.endpoints.all_allowed = true;
|
||||
}
|
||||
config.endpoints.all_allowed = true;
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
@@ -505,35 +431,6 @@ fn get_container_pipe_size(param: &str) -> Result<i32> {
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
fn get_container_policy_path_value(param: &str) -> Result<String> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
|
||||
ensure!(!fields[0].is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_KEY);
|
||||
ensure!(fields.len() == 2, ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
|
||||
|
||||
let key = fields[0];
|
||||
ensure!(
|
||||
key == CONTAINER_POLICY_FILE,
|
||||
ERR_INVALID_CONTAINER_POLICY_PATH_KEY
|
||||
);
|
||||
|
||||
let value = String::from(fields[1]);
|
||||
ensure!(!value.is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
|
||||
ensure!(
|
||||
value.starts_with('/'),
|
||||
ERR_INVALID_CONTAINER_POLICY_ABSOLUTE
|
||||
);
|
||||
ensure!(!value.contains(".."), ERR_INVALID_CONTAINER_POLICY_ABSOLUTE);
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
fn get_url_value(param: &str) -> Result<String> {
|
||||
let value = get_string_value(param)?;
|
||||
Ok(Url::parse(&value)?.to_string())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use test_utils::assert_result;
|
||||
@@ -552,8 +449,6 @@ mod tests {
|
||||
assert!(!config.dev_mode);
|
||||
assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
|
||||
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
|
||||
assert_eq!(config.container_policy_path, "");
|
||||
assert!(config.enable_signature_verification);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -572,12 +467,6 @@ mod tests {
|
||||
server_addr: &'a str,
|
||||
unified_cgroup_hierarchy: bool,
|
||||
tracing: bool,
|
||||
container_policy_path: &'a str,
|
||||
aa_kbc_params: &'a str,
|
||||
https_proxy: &'a str,
|
||||
no_proxy: &'a str,
|
||||
data_integrity: bool,
|
||||
enable_signature_verification: bool,
|
||||
}
|
||||
|
||||
impl Default for TestData<'_> {
|
||||
@@ -593,12 +482,6 @@ mod tests {
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
tracing: false,
|
||||
container_policy_path: "",
|
||||
aa_kbc_params: "",
|
||||
https_proxy: "",
|
||||
no_proxy: "",
|
||||
data_integrity: false,
|
||||
enable_signature_verification: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -968,86 +851,6 @@ mod tests {
|
||||
tracing: true,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.container_policy_file=/etc/containers/policy.json",
|
||||
container_policy_path: "/etc/containers/policy.json",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.aa_kbc_params=offline_fs_kbc::null",
|
||||
aa_kbc_params: "offline_fs_kbc::null",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.aa_kbc_params=eaa_kbc::127.0.0.1:50000",
|
||||
aa_kbc_params: "eaa_kbc::127.0.0.1:50000",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.https_proxy=http://proxy.url.com:81/",
|
||||
https_proxy: "http://proxy.url.com:81/",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.https_proxy=http://192.168.1.100:81/",
|
||||
https_proxy: "http://192.168.1.100:81/",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.no_proxy=*.internal.url.com",
|
||||
no_proxy: "*.internal.url.com",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.no_proxy=192.168.1.0/24,172.16.0.0/12",
|
||||
no_proxy: "192.168.1.0/24,172.16.0.0/12",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
data_integrity: false,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.data_integrity=true",
|
||||
data_integrity: true,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.data_integrity=false",
|
||||
data_integrity: false,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.data_integrity=1",
|
||||
data_integrity: true,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.data_integrity=0",
|
||||
data_integrity: false,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.enable_signature_verification=false",
|
||||
enable_signature_verification: false,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.enable_signature_verification=0",
|
||||
enable_signature_verification: false,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.enable_signature_verification=1",
|
||||
enable_signature_verification: true,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.enable_signature_verification=foo",
|
||||
enable_signature_verification: false,
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
@@ -1095,20 +898,6 @@ mod tests {
|
||||
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
|
||||
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
|
||||
assert_eq!(d.tracing, config.tracing, "{}", msg);
|
||||
assert_eq!(
|
||||
d.container_policy_path, config.container_policy_path,
|
||||
"{}",
|
||||
msg
|
||||
);
|
||||
assert_eq!(d.aa_kbc_params, config.aa_kbc_params, "{}", msg);
|
||||
assert_eq!(d.https_proxy, config.https_proxy, "{}", msg);
|
||||
assert_eq!(d.no_proxy, config.no_proxy, "{}", msg);
|
||||
assert_eq!(d.data_integrity, config.data_integrity, "{}", msg);
|
||||
assert_eq!(
|
||||
d.enable_signature_verification, config.enable_signature_verification,
|
||||
"{}",
|
||||
msg
|
||||
);
|
||||
|
||||
for v in vars_to_unset {
|
||||
env::remove_var(v);
|
||||
@@ -1580,72 +1369,6 @@ Caused by:
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_container_policy_path_value() {
|
||||
#[derive(Debug)]
|
||||
struct TestData<'a> {
|
||||
param: &'a str,
|
||||
result: Result<String>,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
TestData {
|
||||
param: "",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
|
||||
},
|
||||
TestData {
|
||||
param: "foo=bar",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.policy_path=/another/absolute/path.json",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=/etc/container/policy.json",
|
||||
result: Ok("/etc/container/policy.json".into()),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=/another/absolute/path.json",
|
||||
result: Ok("/another/absolute/path.json".into()),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=./relative/path.json",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=./relative/path.json",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=../../relative/path.json",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
|
||||
},
|
||||
TestData {
|
||||
param: "agent.container_policy_file=junk_string",
|
||||
result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
|
||||
},
|
||||
];
|
||||
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
let result = get_container_policy_path_value(d.param);
|
||||
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
assert_result!(d.result, result, msg);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_builder_from_string() {
|
||||
let config = AgentConfig::from_str(
|
||||
@@ -1676,50 +1399,4 @@ Caused by:
|
||||
// Verify that the default values are valid
|
||||
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_from_cmdline_and_config_file() {
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
|
||||
let agent_config = r#"
|
||||
dev_mode = false
|
||||
server_addr = 'vsock://8:2048'
|
||||
|
||||
[endpoints]
|
||||
allowed = ["CreateContainer", "StartContainer"]
|
||||
"#;
|
||||
|
||||
let config_path = dir.path().join("agent-config.toml");
|
||||
let config_filename = config_path.to_str().expect("failed to get config filename");
|
||||
|
||||
fs::write(config_filename, agent_config).expect("failed to write agen config");
|
||||
|
||||
let cmdline = format!("agent.devmode agent.config_file={}", config_filename);
|
||||
|
||||
let cmdline_path = dir.path().join("cmdline");
|
||||
let cmdline_filename = cmdline_path
|
||||
.to_str()
|
||||
.expect("failed to get cmdline filename");
|
||||
|
||||
fs::write(cmdline_filename, cmdline).expect("failed to write agen config");
|
||||
|
||||
let config = AgentConfig::from_cmdline(cmdline_filename, vec![])
|
||||
.expect("failed to parse command line");
|
||||
|
||||
// Should be overwritten by cmdline
|
||||
assert!(config.dev_mode);
|
||||
|
||||
// Should be from agent config
|
||||
assert_eq!(config.server_addr, "vsock://8:2048");
|
||||
|
||||
// Should be from agent config
|
||||
assert_eq!(
|
||||
config.endpoints.allowed,
|
||||
vec!["CreateContainer".to_string(), "StartContainer".to_string()]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect()
|
||||
);
|
||||
assert!(!config.endpoints.all_allowed);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -571,15 +571,13 @@ fn update_spec_devices(spec: &mut Spec, mut updates: HashMap<&str, DevUpdate>) -
|
||||
|
||||
if let Some(resources) = linux.resources.as_mut() {
|
||||
for r in &mut resources.devices {
|
||||
if let (Some(host_type), Some(host_major), Some(host_minor)) =
|
||||
(r.r#type.as_ref(), r.major, r.minor)
|
||||
{
|
||||
if let Some(update) = res_updates.get(&(host_type.as_str(), host_major, host_minor))
|
||||
if let (Some(host_major), Some(host_minor)) = (r.major, r.minor) {
|
||||
if let Some(update) = res_updates.get(&(r.r#type.as_str(), host_major, host_minor))
|
||||
{
|
||||
info!(
|
||||
sl!(),
|
||||
"update_spec_devices() updating resource";
|
||||
"type" => &host_type,
|
||||
"type" => &r.r#type,
|
||||
"host_major" => host_major,
|
||||
"host_minor" => host_minor,
|
||||
"guest_major" => update.guest_major,
|
||||
@@ -856,7 +854,7 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
|
||||
allow: false,
|
||||
major: Some(major),
|
||||
minor: Some(minor),
|
||||
r#type: Some(String::from("b")),
|
||||
r#type: String::from("b"),
|
||||
access: String::from("rw"),
|
||||
});
|
||||
|
||||
@@ -1019,13 +1017,13 @@ mod tests {
|
||||
resources: Some(LinuxResources {
|
||||
devices: vec![
|
||||
oci::LinuxDeviceCgroup {
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(host_major_a),
|
||||
minor: Some(host_minor_a),
|
||||
..oci::LinuxDeviceCgroup::default()
|
||||
},
|
||||
oci::LinuxDeviceCgroup {
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(host_major_b),
|
||||
minor: Some(host_minor_b),
|
||||
..oci::LinuxDeviceCgroup::default()
|
||||
@@ -1118,13 +1116,13 @@ mod tests {
|
||||
resources: Some(LinuxResources {
|
||||
devices: vec![
|
||||
LinuxDeviceCgroup {
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(host_major),
|
||||
minor: Some(host_minor),
|
||||
..LinuxDeviceCgroup::default()
|
||||
},
|
||||
LinuxDeviceCgroup {
|
||||
r#type: Some("b".to_string()),
|
||||
r#type: "b".to_string(),
|
||||
major: Some(host_major),
|
||||
minor: Some(host_minor),
|
||||
..LinuxDeviceCgroup::default()
|
||||
|
||||
@@ -1,412 +0,0 @@
|
||||
// Copyright (c) 2021 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::env;
|
||||
use std::fmt::Write as _;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::process::{Command, ExitStatus};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, ensure, Result};
|
||||
use async_trait::async_trait;
|
||||
use protocols::image;
|
||||
use tokio::sync::Mutex;
|
||||
use ttrpc::{self, error::get_rpc_status as ttrpc_error};
|
||||
|
||||
use crate::rpc::{verify_cid, CONTAINER_BASE};
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::AGENT_CONFIG;
|
||||
|
||||
use image_rs::image::ImageClient;
|
||||
use std::io::Write;
|
||||
|
||||
const SKOPEO_PATH: &str = "/usr/bin/skopeo";
|
||||
const UMOCI_PATH: &str = "/usr/local/bin/umoci";
|
||||
const IMAGE_OCI: &str = "image_oci";
|
||||
const AA_PATH: &str = "/usr/local/bin/attestation-agent";
|
||||
const AA_KEYPROVIDER_PORT: &str = "127.0.0.1:50000";
|
||||
const AA_GETRESOURCE_PORT: &str = "127.0.0.1:50001";
|
||||
const OCICRYPT_CONFIG_PATH: &str = "/tmp/ocicrypt_config.json";
|
||||
// kata rootfs is readonly, use tmpfs before CC storage is implemented.
|
||||
const KATA_CC_IMAGE_WORK_DIR: &str = "/run/image/";
|
||||
const KATA_CC_PAUSE_BUNDLE: &str = "/pause_bundle";
|
||||
const CONFIG_JSON: &str = "config.json";
|
||||
|
||||
// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
() => {
|
||||
slog_scope::logger()
|
||||
};
|
||||
}
|
||||
|
||||
pub struct ImageService {
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
attestation_agent_started: AtomicBool,
|
||||
image_client: Arc<Mutex<ImageClient>>,
|
||||
}
|
||||
|
||||
impl ImageService {
|
||||
pub fn new(sandbox: Arc<Mutex<Sandbox>>) -> Self {
|
||||
env::set_var("CC_IMAGE_WORK_DIR", KATA_CC_IMAGE_WORK_DIR);
|
||||
Self {
|
||||
sandbox,
|
||||
attestation_agent_started: AtomicBool::new(false),
|
||||
image_client: Arc::new(Mutex::new(ImageClient::default())),
|
||||
}
|
||||
}
|
||||
|
||||
fn pull_image_from_registry(
|
||||
image: &str,
|
||||
cid: &str,
|
||||
source_creds: Option<&str>,
|
||||
policy_path: Option<&str>,
|
||||
aa_kbc_params: &str,
|
||||
) -> Result<()> {
|
||||
let source_image = format!("{}{}", "docker://", image);
|
||||
|
||||
let tmp_cid_path = Path::new("/tmp/").join(cid);
|
||||
let oci_path = tmp_cid_path.join(IMAGE_OCI);
|
||||
let target_path_oci = format!("oci://{}:latest", oci_path.to_string_lossy());
|
||||
|
||||
fs::create_dir_all(&oci_path)?;
|
||||
|
||||
let mut pull_command = Command::new(SKOPEO_PATH);
|
||||
pull_command
|
||||
.arg("copy")
|
||||
.arg(source_image)
|
||||
.arg(&target_path_oci)
|
||||
.arg("--remove-signatures"); //umoci requires signatures to be removed
|
||||
|
||||
// If source credentials were passed (so not using an anonymous registry), pass them through
|
||||
if let Some(source_creds) = source_creds {
|
||||
pull_command.arg("--src-creds").arg(source_creds);
|
||||
}
|
||||
|
||||
// If a policy_path provided, use it, otherwise fall back to allow all image registries
|
||||
if let Some(policy_path) = policy_path {
|
||||
pull_command.arg("--policy").arg(policy_path);
|
||||
} else {
|
||||
info!(
|
||||
sl!(),
|
||||
"No policy path was supplied, so revert to allow all images to be pulled."
|
||||
);
|
||||
pull_command.arg("--insecure-policy");
|
||||
}
|
||||
|
||||
debug!(sl!(), "skopeo command: {:?}", &pull_command);
|
||||
if !aa_kbc_params.is_empty() {
|
||||
// Skopeo will copy an unencrypted image even if the decryption key argument is provided.
|
||||
// Thus, this does not guarantee that the image was encrypted.
|
||||
pull_command
|
||||
.arg("--decryption-key")
|
||||
.arg(format!("provider:attestation-agent:{}", aa_kbc_params))
|
||||
.env("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
|
||||
}
|
||||
|
||||
let status: ExitStatus = pull_command.status()?;
|
||||
|
||||
if !status.success() {
|
||||
let mut error_message = format!("failed to pull image: {:?}", status);
|
||||
|
||||
if let Err(e) = fs::remove_dir_all(&tmp_cid_path) {
|
||||
let _ = write!(
|
||||
error_message,
|
||||
" and clean up of temporary container directory {:?} failed with error {:?}",
|
||||
tmp_cid_path, e
|
||||
);
|
||||
};
|
||||
return Err(anyhow!(error_message));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn unpack_image(cid: &str) -> Result<()> {
|
||||
let tmp_cid_path = Path::new("/tmp/").join(cid);
|
||||
let source_path_oci = tmp_cid_path.join(IMAGE_OCI);
|
||||
|
||||
let target_path_bundle = Path::new(CONTAINER_BASE).join(cid);
|
||||
|
||||
info!(sl!(), "unpack image {:?} to {:?}", cid, target_path_bundle);
|
||||
|
||||
// Unpack image
|
||||
let status: ExitStatus = Command::new(UMOCI_PATH)
|
||||
.arg("unpack")
|
||||
.arg("--image")
|
||||
.arg(&source_path_oci)
|
||||
.arg(&target_path_bundle)
|
||||
.status()?;
|
||||
|
||||
ensure!(status.success(), "failed to unpack image: {:?}", status);
|
||||
|
||||
// To save space delete the oci image after unpack
|
||||
fs::remove_dir_all(&tmp_cid_path)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// pause image is packaged in rootfs for CC
|
||||
fn unpack_pause_image(cid: &str) -> Result<()> {
|
||||
let cc_pause_bundle = Path::new(KATA_CC_PAUSE_BUNDLE);
|
||||
if !cc_pause_bundle.exists() {
|
||||
return Err(anyhow!("Pause image not present in rootfs"));
|
||||
}
|
||||
|
||||
info!(sl!(), "use guest pause image cid {:?}", cid);
|
||||
let pause_bundle = Path::new(CONTAINER_BASE).join(&cid);
|
||||
let pause_rootfs = pause_bundle.join("rootfs");
|
||||
let pause_config = pause_bundle.join(CONFIG_JSON);
|
||||
let pause_binary = pause_rootfs.join("pause");
|
||||
fs::create_dir_all(&pause_rootfs)?;
|
||||
if !pause_config.exists() {
|
||||
fs::copy(
|
||||
cc_pause_bundle.join(CONFIG_JSON),
|
||||
pause_bundle.join(CONFIG_JSON),
|
||||
)?;
|
||||
}
|
||||
if !pause_binary.exists() {
|
||||
fs::copy(cc_pause_bundle.join("rootfs").join("pause"), pause_binary)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// If we fail to start the AA, Skopeo/ocicrypt won't be able to unwrap keys
|
||||
// and container decryption will fail.
|
||||
fn init_attestation_agent() -> Result<()> {
|
||||
let config_path = OCICRYPT_CONFIG_PATH;
|
||||
|
||||
// The image will need to be encrypted using a keyprovider
|
||||
// that has the same name (at least according to the config).
|
||||
let ocicrypt_config = serde_json::json!({
|
||||
"key-providers": {
|
||||
"attestation-agent":{
|
||||
"grpc":AA_KEYPROVIDER_PORT
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let mut config_file = fs::File::create(config_path)?;
|
||||
config_file.write_all(ocicrypt_config.to_string().as_bytes())?;
|
||||
|
||||
// The Attestation Agent will run for the duration of the guest.
|
||||
Command::new(AA_PATH)
|
||||
.arg("--keyprovider_sock")
|
||||
.arg(AA_KEYPROVIDER_PORT)
|
||||
.arg("--getresource_sock")
|
||||
.arg(AA_GETRESOURCE_PORT)
|
||||
.spawn()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Determines the container id (cid) to use for a given request.
|
||||
///
|
||||
/// If the request specifies a non-empty id, use it; otherwise derive it from the image path.
|
||||
/// In either case, verify that the chosen id is valid.
|
||||
fn cid_from_request(req: &image::PullImageRequest) -> Result<String> {
|
||||
let req_cid = req.get_container_id();
|
||||
let cid = if !req_cid.is_empty() {
|
||||
req_cid.to_string()
|
||||
} else if let Some(last) = req.get_image().rsplit('/').next() {
|
||||
// ':' have special meaning for umoci during upack
|
||||
last.replace(':', "_")
|
||||
} else {
|
||||
return Err(anyhow!("Invalid image name. {}", req.get_image()));
|
||||
};
|
||||
verify_cid(&cid)?;
|
||||
Ok(cid)
|
||||
}
|
||||
|
||||
async fn pull_image(&self, req: &image::PullImageRequest) -> Result<String> {
|
||||
env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
|
||||
|
||||
let https_proxy = &AGENT_CONFIG.read().await.https_proxy;
|
||||
if !https_proxy.is_empty() {
|
||||
env::set_var("HTTPS_PROXY", https_proxy);
|
||||
}
|
||||
|
||||
let no_proxy = &AGENT_CONFIG.read().await.no_proxy;
|
||||
if !no_proxy.is_empty() {
|
||||
env::set_var("NO_PROXY", no_proxy);
|
||||
}
|
||||
|
||||
let cid = Self::cid_from_request(req)?;
|
||||
let image = req.get_image();
|
||||
// Can switch to use cid directly when we remove umoci
|
||||
let v: Vec<&str> = image.rsplit('/').collect();
|
||||
if !v[0].is_empty() && v[0].starts_with("pause:") {
|
||||
Self::unpack_pause_image(&cid)?;
|
||||
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
sandbox.images.insert(String::from(image), cid);
|
||||
return Ok(image.to_owned());
|
||||
}
|
||||
|
||||
let aa_kbc_params = &AGENT_CONFIG.read().await.aa_kbc_params;
|
||||
if !aa_kbc_params.is_empty() {
|
||||
match self.attestation_agent_started.compare_exchange_weak(
|
||||
false,
|
||||
true,
|
||||
Ordering::SeqCst,
|
||||
Ordering::SeqCst,
|
||||
) {
|
||||
Ok(_) => Self::init_attestation_agent()?,
|
||||
Err(_) => info!(sl!(), "Attestation Agent already running"),
|
||||
}
|
||||
}
|
||||
|
||||
let source_creds = (!req.get_source_creds().is_empty()).then(|| req.get_source_creds());
|
||||
|
||||
if Path::new(SKOPEO_PATH).exists() {
|
||||
// Read the policy path from the agent config
|
||||
let config_policy_path = &AGENT_CONFIG.read().await.container_policy_path;
|
||||
let policy_path = (!config_policy_path.is_empty()).then(|| config_policy_path.as_str());
|
||||
Self::pull_image_from_registry(image, &cid, source_creds, policy_path, aa_kbc_params)?;
|
||||
Self::unpack_image(&cid)?;
|
||||
} else {
|
||||
// Read enable signature verification from the agent config and set it in the image_client
|
||||
let enable_signature_verification =
|
||||
&AGENT_CONFIG.read().await.enable_signature_verification;
|
||||
info!(
|
||||
sl!(),
|
||||
"enable_signature_verification set to: {}", enable_signature_verification
|
||||
);
|
||||
self.image_client.lock().await.config.security_validate =
|
||||
*enable_signature_verification;
|
||||
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(&cid);
|
||||
fs::create_dir_all(&bundle_path)?;
|
||||
|
||||
let decrypt_config = format!("provider:attestation-agent:{}", aa_kbc_params);
|
||||
|
||||
info!(sl!(), "pull image {:?}, bundle path {:?}", cid, bundle_path);
|
||||
// Image layers will store at KATA_CC_IMAGE_WORK_DIR, generated bundles
|
||||
// with rootfs and config.json will store under CONTAINER_BASE/cid.
|
||||
self.image_client
|
||||
.lock()
|
||||
.await
|
||||
.pull_image(image, &bundle_path, &source_creds, &Some(&decrypt_config))
|
||||
.await?;
|
||||
}
|
||||
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
sandbox.images.insert(String::from(image), cid);
|
||||
Ok(image.to_owned())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl protocols::image_ttrpc_async::Image for ImageService {
|
||||
async fn pull_image(
|
||||
&self,
|
||||
_ctx: &ttrpc::r#async::TtrpcContext,
|
||||
req: image::PullImageRequest,
|
||||
) -> ttrpc::Result<image::PullImageResponse> {
|
||||
match self.pull_image(&req).await {
|
||||
Ok(r) => {
|
||||
let mut resp = image::PullImageResponse::new();
|
||||
resp.image_ref = r;
|
||||
return Ok(resp);
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::ImageService;
|
||||
use protocols::image;
|
||||
|
||||
#[test]
|
||||
fn test_cid_from_request() {
|
||||
struct Case {
|
||||
cid: &'static str,
|
||||
image: &'static str,
|
||||
result: Option<&'static str>,
|
||||
}
|
||||
|
||||
let cases = [
|
||||
Case {
|
||||
cid: "",
|
||||
image: "",
|
||||
result: None,
|
||||
},
|
||||
Case {
|
||||
cid: "..",
|
||||
image: "",
|
||||
result: None,
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "..",
|
||||
result: None,
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "abc/..",
|
||||
result: None,
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "abc/",
|
||||
result: None,
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "../abc",
|
||||
result: Some("abc"),
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "../9abc",
|
||||
result: Some("9abc"),
|
||||
},
|
||||
Case {
|
||||
cid: "some-string.1_2",
|
||||
image: "",
|
||||
result: Some("some-string.1_2"),
|
||||
},
|
||||
Case {
|
||||
cid: "0some-string.1_2",
|
||||
image: "",
|
||||
result: Some("0some-string.1_2"),
|
||||
},
|
||||
Case {
|
||||
cid: "a:b",
|
||||
image: "",
|
||||
result: None,
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "prefix/a:b",
|
||||
result: Some("a_b"),
|
||||
},
|
||||
Case {
|
||||
cid: "",
|
||||
image: "/a/b/c/d:e",
|
||||
result: Some("d_e"),
|
||||
},
|
||||
];
|
||||
|
||||
for case in &cases {
|
||||
let mut req = image::PullImageRequest::new();
|
||||
req.set_image(case.image.to_string());
|
||||
req.set_container_id(case.cid.to_string());
|
||||
let ret = ImageService::cid_from_request(&req);
|
||||
match (case.result, ret) {
|
||||
(Some(expected), Ok(actual)) => assert_eq!(expected, actual),
|
||||
(None, Err(_)) => (),
|
||||
(None, Ok(r)) => panic!("Expected an error, got {}", r),
|
||||
(Some(expected), Err(e)) => {
|
||||
panic!("Expected {} but got an error ({})", expected, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -71,7 +71,6 @@ use tokio::{
|
||||
task::JoinHandle,
|
||||
};
|
||||
|
||||
mod image_rpc;
|
||||
mod rpc;
|
||||
mod tracer;
|
||||
|
||||
|
||||
@@ -529,9 +529,7 @@ impl Handle {
|
||||
.map_err(|e| anyhow!("Failed to parse IP {}: {:?}", ip_address, e))?;
|
||||
|
||||
// Import rtnetlink objects that make sense only for this function
|
||||
use packet::constants::{
|
||||
NDA_UNSPEC, NLM_F_ACK, NLM_F_CREATE, NLM_F_REPLACE, NLM_F_REQUEST,
|
||||
};
|
||||
use packet::constants::{NDA_UNSPEC, NLM_F_ACK, NLM_F_CREATE, NLM_F_EXCL, NLM_F_REQUEST};
|
||||
use packet::neighbour::{NeighbourHeader, NeighbourMessage};
|
||||
use packet::nlas::neighbour::Nla;
|
||||
use packet::{NetlinkMessage, NetlinkPayload, RtnlMessage};
|
||||
@@ -574,7 +572,7 @@ impl Handle {
|
||||
|
||||
// Send request and ACK
|
||||
let mut req = NetlinkMessage::from(RtnlMessage::NewNeighbour(message));
|
||||
req.header.flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_REPLACE;
|
||||
req.header.flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
|
||||
|
||||
let mut response = self.handle.request(req)?;
|
||||
while let Some(message) = response.next().await {
|
||||
|
||||
@@ -34,10 +34,7 @@ use protocols::health::{
|
||||
HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
|
||||
};
|
||||
use protocols::types::Interface;
|
||||
use protocols::{
|
||||
agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc,
|
||||
image_ttrpc_async as image_ttrpc,
|
||||
};
|
||||
use protocols::{agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc};
|
||||
use rustjail::cgroups::notifier;
|
||||
use rustjail::container::{BaseContainer, Container, LinuxContainer};
|
||||
use rustjail::process::Process;
|
||||
@@ -53,7 +50,6 @@ use rustjail::process::ProcessOperations;
|
||||
use crate::device::{
|
||||
add_devices, get_virtio_blk_pci_device_name, update_device_cgroup, update_env_pci,
|
||||
};
|
||||
use crate::image_rpc;
|
||||
use crate::linux_abi::*;
|
||||
use crate::metrics::get_metrics;
|
||||
use crate::mount::{add_storages, baremount, STORAGE_HANDLER_LIST};
|
||||
@@ -85,12 +81,8 @@ use std::io::{BufRead, BufReader, Write};
|
||||
use std::os::unix::fs::FileExt;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub const CONTAINER_BASE: &str = "/run/kata-containers";
|
||||
const CONTAINER_BASE: &str = "/run/kata-containers";
|
||||
const MODPROBE_PATH: &str = "/sbin/modprobe";
|
||||
const ANNO_K8S_IMAGE_NAME: &str = "io.kubernetes.cri.image-name";
|
||||
const CONFIG_JSON: &str = "config.json";
|
||||
const INIT_TRUSTED_STORAGE: &str = "/usr/bin/kata-init-trusted-storage";
|
||||
const TRUSTED_STORAGE_DEVICE: &str = "/dev/trusted_store";
|
||||
|
||||
const IPTABLES_SAVE: &str = "/sbin/iptables-save";
|
||||
const IPTABLES_RESTORE: &str = "/sbin/iptables-restore";
|
||||
@@ -142,43 +134,6 @@ pub struct AgentService {
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
}
|
||||
|
||||
// A container ID must match this regex:
|
||||
//
|
||||
// ^[a-zA-Z0-9][a-zA-Z0-9_.-]+$
|
||||
//
|
||||
pub fn verify_cid(id: &str) -> Result<()> {
|
||||
let mut chars = id.chars();
|
||||
|
||||
let valid = match chars.next() {
|
||||
Some(first)
|
||||
if first.is_alphanumeric()
|
||||
&& id.len() > 1
|
||||
&& chars.all(|c| c.is_alphanumeric() || ['.', '-', '_'].contains(&c)) =>
|
||||
{
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
|
||||
match valid {
|
||||
true => Ok(()),
|
||||
false => Err(anyhow!("invalid container ID: {:?}", id)),
|
||||
}
|
||||
}
|
||||
|
||||
// Partially merge an OCI process specification into another one.
|
||||
fn merge_oci_process(target: &mut oci::Process, source: &oci::Process) {
|
||||
if target.args.is_empty() && !source.args.is_empty() {
|
||||
target.args.append(&mut source.args.clone());
|
||||
}
|
||||
|
||||
if target.cwd.is_empty() && !source.cwd.is_empty() {
|
||||
target.cwd = String::from(&source.cwd);
|
||||
}
|
||||
|
||||
target.env.append(&mut source.env.clone());
|
||||
}
|
||||
|
||||
impl AgentService {
|
||||
#[instrument]
|
||||
async fn do_create_container(
|
||||
@@ -209,9 +164,6 @@ impl AgentService {
|
||||
"receive createcontainer, storages: {:?}", &req.storages
|
||||
);
|
||||
|
||||
// Merge the image bundle OCI spec into the container creation request OCI spec.
|
||||
self.merge_bundle_oci(&mut oci).await?;
|
||||
|
||||
// Some devices need some extra processing (the ones invoked with
|
||||
// --device for instance), and that's what this call is doing. It
|
||||
// updates the devices listed in the OCI spec, so that they actually
|
||||
@@ -219,30 +171,6 @@ impl AgentService {
|
||||
// cannot predict everything from the caller.
|
||||
add_devices(&req.devices.to_vec(), &mut oci, &self.sandbox).await?;
|
||||
|
||||
let linux = oci
|
||||
.linux
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
|
||||
|
||||
for specdev in &mut linux.devices {
|
||||
let dev_major_minor = format!("{}:{}", specdev.major, specdev.minor);
|
||||
|
||||
if specdev.path == TRUSTED_STORAGE_DEVICE {
|
||||
let data_integrity = AGENT_CONFIG.read().await.data_integrity;
|
||||
info!(
|
||||
sl!(),
|
||||
"trusted_store device major:min {}, enable data integrity {}",
|
||||
dev_major_minor,
|
||||
data_integrity.to_string()
|
||||
);
|
||||
|
||||
Command::new(INIT_TRUSTED_STORAGE)
|
||||
.args(&[&dev_major_minor, &data_integrity.to_string()])
|
||||
.output()
|
||||
.expect("Failed to initialize confidential storage");
|
||||
}
|
||||
}
|
||||
|
||||
// Both rootfs and volumes (invoked with --volume for instance) will
|
||||
// be processed the same way. The idea is to always mount any provided
|
||||
// storage to the specified MountPoint, so that it will match what's
|
||||
@@ -701,54 +629,6 @@ impl AgentService {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// When being passed an image name through a container annotation, merge its
|
||||
// corresponding bundle OCI specification into the passed container creation one.
|
||||
async fn merge_bundle_oci(&self, container_oci: &mut oci::Spec) -> Result<()> {
|
||||
if let Some(image_name) = container_oci
|
||||
.annotations
|
||||
.get(&ANNO_K8S_IMAGE_NAME.to_string())
|
||||
{
|
||||
if let Some(container_id) = self.sandbox.clone().lock().await.images.get(image_name) {
|
||||
let image_oci_config_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(CONFIG_JSON);
|
||||
debug!(
|
||||
sl!(),
|
||||
"Image bundle config path: {:?}", image_oci_config_path
|
||||
);
|
||||
|
||||
let image_oci =
|
||||
oci::Spec::load(image_oci_config_path.to_str().ok_or_else(|| {
|
||||
anyhow!(
|
||||
"Invalid container image OCI config path {:?}",
|
||||
image_oci_config_path
|
||||
)
|
||||
})?)
|
||||
.context("load image bundle")?;
|
||||
|
||||
if let Some(container_root) = container_oci.root.as_mut() {
|
||||
if let Some(image_root) = image_oci.root.as_ref() {
|
||||
let root_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(image_root.path.clone());
|
||||
container_root.path =
|
||||
String::from(root_path.to_str().ok_or_else(|| {
|
||||
anyhow!("Invalid container image root path {:?}", root_path)
|
||||
})?);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(container_process) = container_oci.process.as_mut() {
|
||||
if let Some(image_process) = image_oci.process.as_ref() {
|
||||
merge_oci_process(container_process, image_process);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -1777,27 +1657,22 @@ async fn read_stream(reader: Arc<Mutex<ReadHalf<PipeStream>>>, l: usize) -> Resu
|
||||
}
|
||||
|
||||
pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> Result<TtrpcServer> {
|
||||
let agent_service = Box::new(AgentService { sandbox: s.clone() })
|
||||
as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
|
||||
let agent_service =
|
||||
Box::new(AgentService { sandbox: s }) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
|
||||
|
||||
let agent_worker = Arc::new(agent_service);
|
||||
|
||||
let health_service = Box::new(HealthService {}) as Box<dyn health_ttrpc::Health + Send + Sync>;
|
||||
let health_worker = Arc::new(health_service);
|
||||
|
||||
let image_service =
|
||||
Box::new(image_rpc::ImageService::new(s)) as Box<dyn image_ttrpc::Image + Send + Sync>;
|
||||
|
||||
let aservice = agent_ttrpc::create_agent_service(agent_worker);
|
||||
|
||||
let hservice = health_ttrpc::create_health(health_worker);
|
||||
|
||||
let iservice = image_ttrpc::create_image(Arc::new(image_service));
|
||||
|
||||
let server = TtrpcServer::new()
|
||||
.bind(server_address)?
|
||||
.register_service(aservice)
|
||||
.register_service(hservice)
|
||||
.register_service(iservice);
|
||||
.register_service(hservice);
|
||||
|
||||
info!(sl!(), "ttRPC server started"; "address" => server_address);
|
||||
|
||||
@@ -2003,38 +1878,6 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
|
||||
|
||||
std::fs::set_permissions(&dir, std::fs::Permissions::from_mode(req.dir_mode))?;
|
||||
|
||||
let sflag = stat::SFlag::from_bits_truncate(req.file_mode);
|
||||
|
||||
if sflag.contains(stat::SFlag::S_IFDIR) {
|
||||
fs::create_dir(path.clone()).or_else(|e| {
|
||||
if e.kind() != std::io::ErrorKind::AlreadyExists {
|
||||
return Err(e);
|
||||
}
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
std::fs::set_permissions(path.clone(), std::fs::Permissions::from_mode(req.file_mode))?;
|
||||
|
||||
unistd::chown(
|
||||
&path,
|
||||
Some(Uid::from_raw(req.uid as u32)),
|
||||
Some(Gid::from_raw(req.gid as u32)),
|
||||
)?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if sflag.contains(stat::SFlag::S_IFLNK) {
|
||||
let src = PathBuf::from(String::from_utf8(req.data.clone()).unwrap());
|
||||
|
||||
unistd::symlinkat(&src, None, &path)?;
|
||||
let path_str = CString::new(path.to_str().unwrap())?;
|
||||
let ret = unsafe { libc::lchown(path_str.as_ptr(), req.uid as u32, req.gid as u32) };
|
||||
Errno::result(ret).map(drop)?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut tmpfile = path.clone();
|
||||
tmpfile.set_extension("tmp");
|
||||
|
||||
@@ -2100,26 +1943,18 @@ pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
|
||||
let spec_root_path = Path::new(&spec_root.path);
|
||||
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(cid);
|
||||
let config_path = bundle_path.join(CONFIG_JSON);
|
||||
let config_path = bundle_path.join("config.json");
|
||||
let rootfs_path = bundle_path.join("rootfs");
|
||||
|
||||
let rootfs_exists = Path::new(&rootfs_path).exists();
|
||||
info!(
|
||||
sl!(),
|
||||
"The rootfs_path is {:?} and exists: {}", rootfs_path, rootfs_exists
|
||||
);
|
||||
|
||||
if !rootfs_exists {
|
||||
fs::create_dir_all(&rootfs_path)?;
|
||||
baremount(
|
||||
spec_root_path,
|
||||
&rootfs_path,
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"",
|
||||
&sl!(),
|
||||
)?;
|
||||
}
|
||||
fs::create_dir_all(&rootfs_path)?;
|
||||
baremount(
|
||||
spec_root_path,
|
||||
&rootfs_path,
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"",
|
||||
&sl!(),
|
||||
)?;
|
||||
|
||||
let rootfs_path_name = rootfs_path
|
||||
.to_str()
|
||||
@@ -2197,6 +2032,11 @@ mod tests {
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use test_utils::{assert_result, skip_if_not_root};
|
||||
use ttrpc::{r#async::TtrpcContext, MessageHeader};
|
||||
use which::which;
|
||||
|
||||
fn check_command(cmd: &str) -> bool {
|
||||
which(cmd).is_ok()
|
||||
}
|
||||
|
||||
fn mk_ttrpc_context() -> TtrpcContext {
|
||||
TtrpcContext {
|
||||
@@ -2916,6 +2756,18 @@ OtherField:other
|
||||
async fn test_ip_tables() {
|
||||
skip_if_not_root!();
|
||||
|
||||
if !check_command(IPTABLES_SAVE)
|
||||
|| !check_command(IPTABLES_RESTORE)
|
||||
|| !check_command(IP6TABLES_SAVE)
|
||||
|| !check_command(IP6TABLES_RESTORE)
|
||||
{
|
||||
warn!(
|
||||
sl!(),
|
||||
"one or more commands for ip tables test are missing, skip it"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let sandbox = Sandbox::new(&logger).unwrap();
|
||||
let agent_service = Box::new(AgentService {
|
||||
|
||||
@@ -60,7 +60,6 @@ pub struct Sandbox {
|
||||
pub event_tx: Option<Sender<String>>,
|
||||
pub bind_watcher: BindWatcher,
|
||||
pub pcimap: HashMap<pci::Address, pci::Address>,
|
||||
pub images: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl Sandbox {
|
||||
@@ -94,7 +93,6 @@ impl Sandbox {
|
||||
event_tx: Some(tx),
|
||||
bind_watcher: BindWatcher::new(),
|
||||
pcimap: HashMap::new(),
|
||||
images: HashMap::new(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -329,7 +327,7 @@ impl Sandbox {
|
||||
// Reject non-file, symlinks and non-executable files
|
||||
if !entry.file_type()?.is_file()
|
||||
|| entry.file_type()?.is_symlink()
|
||||
|| entry.metadata()?.permissions().mode() & 0o111 == 0
|
||||
|| entry.metadata()?.permissions().mode() & 0o777 & 0o111 == 0
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
|
||||
loop {
|
||||
// Avoid reaping the undesirable child's signal, e.g., execute_hook's
|
||||
// The lock should be released immediately.
|
||||
let _ = rustjail::container::WAIT_PID_LOCKER.lock().await;
|
||||
rustjail::container::WAIT_PID_LOCKER.lock().await;
|
||||
let result = wait::waitpid(
|
||||
Some(Pid::from_raw(-1)),
|
||||
Some(WaitPidFlag::WNOHANG | WaitPidFlag::__WALL),
|
||||
|
||||
@@ -11,7 +11,7 @@ use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
|
||||
use anyhow::{anyhow, ensure, Context, Result};
|
||||
use anyhow::{ensure, Context, Result};
|
||||
use async_recursion::async_recursion;
|
||||
use nix::mount::{umount, MsFlags};
|
||||
use nix::unistd::{Gid, Uid};
|
||||
@@ -34,13 +34,9 @@ const MAX_SIZE_PER_WATCHABLE_MOUNT: u64 = 1024 * 1024;
|
||||
/// How often to check for modified files.
|
||||
const WATCH_INTERVAL_SECS: u64 = 2;
|
||||
|
||||
/// Destination path for tmpfs, which used by the golang runtime
|
||||
/// Destination path for tmpfs
|
||||
const WATCH_MOUNT_POINT_PATH: &str = "/run/kata-containers/shared/containers/watchable/";
|
||||
|
||||
/// Destination path for tmpfs for runtime-rs passthrough file sharing
|
||||
const WATCH_MOUNT_POINT_PATH_PASSTHROUGH: &str =
|
||||
"/run/kata-containers/shared/containers/passthrough/watchable/";
|
||||
|
||||
/// Represents a single watched storage entry which may have multiple files to watch.
|
||||
#[derive(Default, Debug, Clone)]
|
||||
struct Storage {
|
||||
@@ -455,7 +451,7 @@ impl BindWatcher {
|
||||
) -> Result<()> {
|
||||
if self.watch_thread.is_none() {
|
||||
// Virtio-fs shared path is RO by default, so we back the target-mounts by tmpfs.
|
||||
self.mount(logger).await.context("mount watch directory")?;
|
||||
self.mount(logger).await?;
|
||||
|
||||
// Spawn background thread to monitor changes
|
||||
self.watch_thread = Some(Self::spawn_watcher(
|
||||
@@ -504,28 +500,16 @@ impl BindWatcher {
|
||||
}
|
||||
|
||||
async fn mount(&self, logger: &Logger) -> Result<()> {
|
||||
// the watchable directory is created on the host side.
|
||||
// here we can only check if it exist.
|
||||
// first we will check the default WATCH_MOUNT_POINT_PATH,
|
||||
// and then check WATCH_MOUNT_POINT_PATH_PASSTHROUGH
|
||||
// in turn which are introduced by runtime-rs file sharing.
|
||||
let watchable_dir = if Path::new(WATCH_MOUNT_POINT_PATH).is_dir() {
|
||||
WATCH_MOUNT_POINT_PATH
|
||||
} else if Path::new(WATCH_MOUNT_POINT_PATH_PASSTHROUGH).is_dir() {
|
||||
WATCH_MOUNT_POINT_PATH_PASSTHROUGH
|
||||
} else {
|
||||
return Err(anyhow!("watchable mount source not found"));
|
||||
};
|
||||
fs::create_dir_all(WATCH_MOUNT_POINT_PATH).await?;
|
||||
|
||||
baremount(
|
||||
Path::new("tmpfs"),
|
||||
Path::new(watchable_dir),
|
||||
Path::new(WATCH_MOUNT_POINT_PATH),
|
||||
"tmpfs",
|
||||
MsFlags::empty(),
|
||||
"",
|
||||
logger,
|
||||
)
|
||||
.context("baremount watchable mount path")?;
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -536,12 +520,7 @@ impl BindWatcher {
|
||||
handle.abort();
|
||||
}
|
||||
|
||||
// try umount watchable mount path in turn
|
||||
if Path::new(WATCH_MOUNT_POINT_PATH).is_dir() {
|
||||
let _ = umount(WATCH_MOUNT_POINT_PATH);
|
||||
} else if Path::new(WATCH_MOUNT_POINT_PATH_PASSTHROUGH).is_dir() {
|
||||
let _ = umount(WATCH_MOUNT_POINT_PATH_PASSTHROUGH);
|
||||
}
|
||||
let _ = umount(WATCH_MOUNT_POINT_PATH);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -550,7 +529,6 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::mount::is_mounted;
|
||||
use nix::unistd::{Gid, Uid};
|
||||
use scopeguard::defer;
|
||||
use std::fs;
|
||||
use std::thread;
|
||||
use test_utils::skip_if_not_root;
|
||||
@@ -1297,19 +1275,13 @@ mod tests {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut watcher = BindWatcher::default();
|
||||
|
||||
for mount_point in [WATCH_MOUNT_POINT_PATH, WATCH_MOUNT_POINT_PATH_PASSTHROUGH] {
|
||||
fs::create_dir_all(mount_point).unwrap();
|
||||
// ensure the watchable directory is deleted.
|
||||
defer!(fs::remove_dir_all(mount_point).unwrap());
|
||||
watcher.mount(&logger).await.unwrap();
|
||||
assert!(is_mounted(WATCH_MOUNT_POINT_PATH).unwrap());
|
||||
|
||||
watcher.mount(&logger).await.unwrap();
|
||||
assert!(is_mounted(mount_point).unwrap());
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
thread::sleep(Duration::from_millis(20));
|
||||
|
||||
watcher.cleanup();
|
||||
assert!(!is_mounted(mount_point).unwrap());
|
||||
}
|
||||
watcher.cleanup();
|
||||
assert!(!is_mounted(WATCH_MOUNT_POINT_PATH).unwrap());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -1317,10 +1289,6 @@ mod tests {
|
||||
async fn spawn_thread() {
|
||||
skip_if_not_root!();
|
||||
|
||||
fs::create_dir_all(WATCH_MOUNT_POINT_PATH).unwrap();
|
||||
// ensure the watchable directory is deleted.
|
||||
defer!(fs::remove_dir_all(WATCH_MOUNT_POINT_PATH).unwrap());
|
||||
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
fs::write(source_dir.path().join("1.txt"), "one").unwrap();
|
||||
|
||||
@@ -1351,10 +1319,6 @@ mod tests {
|
||||
async fn verify_container_cleanup_watching() {
|
||||
skip_if_not_root!();
|
||||
|
||||
fs::create_dir_all(WATCH_MOUNT_POINT_PATH).unwrap();
|
||||
// ensure the watchable directory is deleted.
|
||||
defer!(fs::remove_dir_all(WATCH_MOUNT_POINT_PATH).unwrap());
|
||||
|
||||
let source_dir = tempfile::tempdir().unwrap();
|
||||
fs::write(source_dir.path().join("1.txt"), "one").unwrap();
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ kvm-bindings = "0.5.0"
|
||||
kvm-ioctls = "0.11.0"
|
||||
lazy_static = "1.2"
|
||||
libc = "0.2.39"
|
||||
linux-loader = "0.6.0"
|
||||
linux-loader = "0.4.0"
|
||||
log = "0.4.14"
|
||||
nix = "0.24.2"
|
||||
seccompiler = "0.2.0"
|
||||
@@ -36,18 +36,17 @@ serde_json = "1.0.9"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
thiserror = "1"
|
||||
vmm-sys-util = "0.10.0"
|
||||
virtio-queue = { version = "0.4.0", optional = true }
|
||||
vm-memory = { version = "0.9.0", features = ["backend-mmap"] }
|
||||
vmm-sys-util = "0.9.0"
|
||||
virtio-queue = { version = "0.1.0", optional = true }
|
||||
vm-memory = { version = "0.7.0", features = ["backend-mmap"] }
|
||||
|
||||
[dev-dependencies]
|
||||
slog-term = "2.9.0"
|
||||
slog-async = "2.7.0"
|
||||
test-utils = { path = "../libs/test-utils" }
|
||||
|
||||
[features]
|
||||
acpi = []
|
||||
atomic-guest-memory = [ "vm-memory/backend-atomic" ]
|
||||
atomic-guest-memory = []
|
||||
hotplug = ["virtio-vsock"]
|
||||
virtio-vsock = ["dbs-virtio-devices/virtio-vsock", "virtio-queue"]
|
||||
virtio-blk = ["dbs-virtio-devices/virtio-blk", "virtio-queue"]
|
||||
@@ -56,12 +55,11 @@ virtio-net = ["dbs-virtio-devices/virtio-net", "virtio-queue"]
|
||||
virtio-fs = ["dbs-virtio-devices/virtio-fs", "virtio-queue", "atomic-guest-memory"]
|
||||
|
||||
[patch.'crates-io']
|
||||
dbs-device = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-interrupt = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-legacy-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-upcall = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-utils = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-virtio-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-boot = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-arch = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-address-space = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-device = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-interrupt = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-legacy-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-upcall = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-utils = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-virtio-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-boot = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-arch = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
|
||||
@@ -2,19 +2,12 @@
|
||||
# Copyright (c) 2019-2022 Ant Group. All rights reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
include ../../utils.mk
|
||||
|
||||
ifeq ($(ARCH), s390x)
|
||||
default build check test clippy:
|
||||
@echo "s390x not support currently"
|
||||
exit 0
|
||||
else
|
||||
|
||||
default: build
|
||||
|
||||
build:
|
||||
@echo "INFO: cargo build..."
|
||||
cargo build --all-features --target $(TRIPLE)
|
||||
# FIXME: This line will be removed when we solve the vm-memory dependency problem in Dragonball Sandbox
|
||||
cargo update -p vm-memory:0.8.0 --precise 0.7.0
|
||||
cargo build --all-features
|
||||
|
||||
check: clippy format
|
||||
|
||||
@@ -24,9 +17,6 @@ clippy:
|
||||
-- \
|
||||
-D warnings
|
||||
|
||||
vendor:
|
||||
@echo "INFO: vendor do nothing.."
|
||||
|
||||
format:
|
||||
@echo "INFO: cargo fmt..."
|
||||
cargo fmt -- --check
|
||||
@@ -35,13 +25,5 @@ clean:
|
||||
cargo clean
|
||||
|
||||
test:
|
||||
ifdef SUPPORT_VIRTUALIZATION
|
||||
cargo test --all-features --target $(TRIPLE) -- --nocapture
|
||||
else
|
||||
@echo "INFO: skip testing dragonball, it need virtualization support."
|
||||
exit 0
|
||||
endif
|
||||
|
||||
endif # ifeq ($(ARCH), s390x)
|
||||
|
||||
.DEFAULT_GOAL := default
|
||||
@echo "INFO: testing dragonball for development build"
|
||||
cargo test --all-features -- --nocapture
|
||||
|
||||
@@ -33,10 +33,10 @@ use log::{debug, error, info, warn};
|
||||
use nix::sys::mman;
|
||||
use nix::unistd::dup;
|
||||
#[cfg(feature = "atomic-guest-memory")]
|
||||
use vm_memory::GuestMemoryAtomic;
|
||||
use vm_memory::atomic::GuestMemoryAtomic;
|
||||
use vm_memory::{
|
||||
address::Address, FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap,
|
||||
GuestMemoryRegion, GuestRegionMmap, GuestUsize, MemoryRegionAddress, MmapRegion,
|
||||
Address, FileOffset, GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestMemoryRegion,
|
||||
GuestRegionMmap, GuestUsize, MemoryRegionAddress, MmapRegion,
|
||||
};
|
||||
|
||||
use crate::resource_manager::ResourceManager;
|
||||
@@ -270,7 +270,7 @@ impl AddressSpaceMgr {
|
||||
let size = info
|
||||
.size
|
||||
.checked_shl(20)
|
||||
.ok_or(AddressManagerError::InvalidOperation)?;
|
||||
.ok_or_else(|| AddressManagerError::InvalidOperation)?;
|
||||
|
||||
// Guest memory does not intersect with the MMIO hole.
|
||||
// TODO: make it work for ARM (issue #4307)
|
||||
@@ -281,13 +281,13 @@ impl AddressSpaceMgr {
|
||||
regions.push(region);
|
||||
start_addr = start_addr
|
||||
.checked_add(size)
|
||||
.ok_or(AddressManagerError::InvalidOperation)?;
|
||||
.ok_or_else(|| AddressManagerError::InvalidOperation)?;
|
||||
} else {
|
||||
// Add guest memory below the MMIO hole, avoid splitting the memory region
|
||||
// if the available address region is small than MINIMAL_SPLIT_SPACE MiB.
|
||||
let mut below_size = dbs_boot::layout::MMIO_LOW_START
|
||||
.checked_sub(start_addr)
|
||||
.ok_or(AddressManagerError::InvalidOperation)?;
|
||||
.ok_or_else(|| AddressManagerError::InvalidOperation)?;
|
||||
if below_size < (MINIMAL_SPLIT_SPACE) {
|
||||
below_size = 0;
|
||||
} else {
|
||||
@@ -299,12 +299,12 @@ impl AddressSpaceMgr {
|
||||
let above_start = dbs_boot::layout::MMIO_LOW_END + 1;
|
||||
let above_size = size
|
||||
.checked_sub(below_size)
|
||||
.ok_or(AddressManagerError::InvalidOperation)?;
|
||||
.ok_or_else(|| AddressManagerError::InvalidOperation)?;
|
||||
let region = self.create_region(above_start, above_size, info, &mut param)?;
|
||||
regions.push(region);
|
||||
start_addr = above_start
|
||||
.checked_add(above_size)
|
||||
.ok_or(AddressManagerError::InvalidOperation)?;
|
||||
.ok_or_else(|| AddressManagerError::InvalidOperation)?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -502,7 +502,7 @@ impl AddressSpaceMgr {
|
||||
fn configure_numa(&self, mmap_reg: &MmapRegion, node_id: u32) -> Result<()> {
|
||||
let nodemask = 1_u64
|
||||
.checked_shl(node_id)
|
||||
.ok_or(AddressManagerError::InvalidOperation)?;
|
||||
.ok_or_else(|| AddressManagerError::InvalidOperation)?;
|
||||
let res = unsafe {
|
||||
libc::syscall(
|
||||
libc::SYS_mbind,
|
||||
|
||||
@@ -18,7 +18,7 @@ pub const DEFAULT_KERNEL_CMDLINE: &str = "reboot=k panic=1 pci=off nomodules 825
|
||||
i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd";
|
||||
|
||||
/// Strongly typed data structure used to configure the boot source of the microvm.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize, Default)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, Default)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct BootSourceConfig {
|
||||
/// Path of the kernel image.
|
||||
|
||||
@@ -10,7 +10,7 @@ use serde_derive::{Deserialize, Serialize};
|
||||
/// When Dragonball starts, the instance state is Uninitialized. Once start_microvm method is
|
||||
/// called, the state goes from Uninitialized to Starting. The state is changed to Running until
|
||||
/// the start_microvm method ends. Halting and Halted are currently unsupported.
|
||||
#[derive(Copy, Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Copy, Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub enum InstanceState {
|
||||
/// Microvm is not initialized.
|
||||
Uninitialized,
|
||||
@@ -29,7 +29,7 @@ pub enum InstanceState {
|
||||
}
|
||||
|
||||
/// The state of async actions
|
||||
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
|
||||
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
|
||||
pub enum AsyncState {
|
||||
/// Uninitialized
|
||||
Uninitialized,
|
||||
|
||||
@@ -10,7 +10,7 @@ pub const MAX_SUPPORTED_VCPUS: u8 = 254;
|
||||
pub const MEMORY_HOTPLUG_ALIGHMENT: u8 = 64;
|
||||
|
||||
/// Errors associated with configuring the microVM.
|
||||
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
|
||||
#[derive(Debug, PartialEq, thiserror::Error)]
|
||||
pub enum VmConfigError {
|
||||
/// Cannot update the configuration of the microvm post boot.
|
||||
#[error("update operation is not allowed after boot")]
|
||||
|
||||
@@ -83,13 +83,13 @@ pub enum VmmActionError {
|
||||
|
||||
#[cfg(feature = "virtio-fs")]
|
||||
/// The action `InsertFsDevice` failed either because of bad user input or an internal error.
|
||||
#[error("virtio-fs device error: {0}")]
|
||||
#[error("virtio-fs device: {0}")]
|
||||
FsDevice(#[source] FsDeviceError),
|
||||
}
|
||||
|
||||
/// This enum represents the public interface of the VMM. Each action contains various
|
||||
/// bits of information (ids, paths, etc.).
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum VmmAction {
|
||||
/// Configure the boot source of the microVM using `BootSourceConfig`.
|
||||
/// This action can only be called before the microVM has booted.
|
||||
@@ -298,6 +298,7 @@ impl VmmService {
|
||||
let mut cmdline = linux_loader::cmdline::Cmdline::new(dbs_boot::layout::CMDLINE_MAX_SIZE);
|
||||
let boot_args = boot_source_config
|
||||
.boot_args
|
||||
.clone()
|
||||
.unwrap_or_else(|| String::from(DEFAULT_KERNEL_CMDLINE));
|
||||
cmdline
|
||||
.insert_str(boot_args)
|
||||
@@ -633,783 +634,3 @@ fn handle_cpu_topology(
|
||||
|
||||
Ok(cpu_topology)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use dbs_utils::epoll_manager::EpollManager;
|
||||
use test_utils::skip_if_not_root;
|
||||
use vmm_sys_util::tempfile::TempFile;
|
||||
|
||||
use super::*;
|
||||
use crate::vmm::tests::create_vmm_instance;
|
||||
|
||||
struct TestData<'a> {
|
||||
req: Option<VmmAction>,
|
||||
vm_state: InstanceState,
|
||||
f: &'a dyn Fn(VmmRequestResult),
|
||||
}
|
||||
|
||||
impl<'a> TestData<'a> {
|
||||
fn new(req: VmmAction, vm_state: InstanceState, f: &'a dyn Fn(VmmRequestResult)) -> Self {
|
||||
Self {
|
||||
req: Some(req),
|
||||
vm_state,
|
||||
f,
|
||||
}
|
||||
}
|
||||
|
||||
fn check_request(&mut self) {
|
||||
let (to_vmm, from_api) = channel();
|
||||
let (to_api, from_vmm) = channel();
|
||||
|
||||
let vmm = Arc::new(Mutex::new(create_vmm_instance()));
|
||||
let mut vservice = VmmService::new(from_api, to_api);
|
||||
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
|
||||
let mut v = vmm.lock().unwrap();
|
||||
|
||||
let vm = v.get_vm_mut().unwrap();
|
||||
vm.set_instance_state(self.vm_state);
|
||||
|
||||
to_vmm.send(Box::new(self.req.take().unwrap())).unwrap();
|
||||
assert!(vservice.run_vmm_action(&mut v, &mut event_mgr).is_ok());
|
||||
|
||||
let response = from_vmm.try_recv();
|
||||
assert!(response.is_ok());
|
||||
(self.f)(*response.unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vmm_action_receive_unknown() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let (_to_vmm, from_api) = channel();
|
||||
let (to_api, _from_vmm) = channel();
|
||||
let vmm = Arc::new(Mutex::new(create_vmm_instance()));
|
||||
let mut vservice = VmmService::new(from_api, to_api);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
|
||||
let mut v = vmm.lock().unwrap();
|
||||
|
||||
assert!(vservice.run_vmm_action(&mut v, &mut event_mgr).is_ok());
|
||||
}
|
||||
|
||||
#[should_panic]
|
||||
#[test]
|
||||
fn test_vmm_action_disconnected() {
|
||||
let (to_vmm, from_api) = channel();
|
||||
let (to_api, _from_vmm) = channel();
|
||||
let vmm = Arc::new(Mutex::new(create_vmm_instance()));
|
||||
let mut vservice = VmmService::new(from_api, to_api);
|
||||
let epoll_mgr = EpollManager::default();
|
||||
let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
|
||||
let mut v = vmm.lock().unwrap();
|
||||
|
||||
drop(to_vmm);
|
||||
vservice.run_vmm_action(&mut v, &mut event_mgr).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vmm_action_config_boot_source() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let kernel_file = TempFile::new().unwrap();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::ConfigureBootSource(BootSourceConfig::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
if let Err(VmmActionError::BootSource(
|
||||
BootSourceConfigError::UpdateNotAllowedPostBoot,
|
||||
)) = result
|
||||
{
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to configure boot source for VM: \
|
||||
the update operation is not allowed after boot",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
} else {
|
||||
panic!();
|
||||
}
|
||||
},
|
||||
),
|
||||
// invalid kernel file path
|
||||
TestData::new(
|
||||
VmmAction::ConfigureBootSource(BootSourceConfig::default()),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
if let Err(VmmActionError::BootSource(
|
||||
BootSourceConfigError::InvalidKernelPath(_),
|
||||
)) = result
|
||||
{
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to configure boot source for VM: \
|
||||
the kernel file cannot be opened due to invalid kernel path or invalid permissions: \
|
||||
No such file or directory (os error 2)");
|
||||
assert_eq!(err_string, expected_err);
|
||||
} else {
|
||||
panic!();
|
||||
}
|
||||
},
|
||||
),
|
||||
//success
|
||||
TestData::new(
|
||||
VmmAction::ConfigureBootSource(BootSourceConfig {
|
||||
kernel_path: kernel_file.as_path().to_str().unwrap().to_string(),
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vmm_action_set_vm_configuration() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::UpdateNotAllowedPostBoot
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
update operation is not allowed after boot",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid cpu count (0)
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
vcpu_count: 0,
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::InvalidVcpuCount(0)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
the vCPU number '0' can only be 1 or an even number when hyperthreading is enabled");
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid max cpu count (too small)
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
vcpu_count: 4,
|
||||
max_vcpu_count: 2,
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::InvalidMaxVcpuCount(2)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
the max vCPU number '2' shouldn't less than vCPU count and can only be 1 or an even number when hyperthreading is enabled");
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid cpu topology (larger than 254)
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
vcpu_count: 254,
|
||||
cpu_topology: CpuTopology {
|
||||
threads_per_core: 2,
|
||||
cores_per_die: 128,
|
||||
dies_per_socket: 1,
|
||||
sockets: 1,
|
||||
},
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::VcpuCountExceedsMaximum
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
the vCPU number shouldn't large than 254",
|
||||
);
|
||||
|
||||
assert_eq!(err_string, expected_err)
|
||||
},
|
||||
),
|
||||
// cpu topology and max_vcpu_count are not matched - success
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
vcpu_count: 16,
|
||||
max_vcpu_count: 32,
|
||||
cpu_topology: CpuTopology {
|
||||
threads_per_core: 1,
|
||||
cores_per_die: 128,
|
||||
dies_per_socket: 1,
|
||||
sockets: 1,
|
||||
},
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
result.unwrap();
|
||||
},
|
||||
),
|
||||
// invalid threads_per_core
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
vcpu_count: 4,
|
||||
max_vcpu_count: 4,
|
||||
cpu_topology: CpuTopology {
|
||||
threads_per_core: 4,
|
||||
cores_per_die: 1,
|
||||
dies_per_socket: 1,
|
||||
sockets: 1,
|
||||
},
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::InvalidThreadsPerCore(4)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
the threads_per_core number '4' can only be 1 or 2",
|
||||
);
|
||||
|
||||
assert_eq!(err_string, expected_err)
|
||||
},
|
||||
),
|
||||
// invalid mem size
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
mem_size_mib: 3,
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::InvalidMemorySize(3)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
the memory size 0x3MiB is invalid",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid mem path
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo {
|
||||
mem_type: String::from("hugetlbfs"),
|
||||
mem_file_path: String::from(""),
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::MachineConfig(
|
||||
VmConfigError::InvalidMemFilePath(_)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to set configuration for the VM: \
|
||||
the memory file path is invalid",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// success
|
||||
TestData::new(
|
||||
VmmAction::SetVmConfiguration(VmConfigInfo::default()),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vmm_action_start_microvm() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state (running)
|
||||
TestData::new(VmmAction::StartMicroVm, InstanceState::Running, &|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::StartMicroVm(
|
||||
StartMicroVmError::MicroVMAlreadyRunning
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to boot the VM: \
|
||||
the virtual machine is already running",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
}),
|
||||
// no kernel configuration
|
||||
TestData::new(
|
||||
VmmAction::StartMicroVm,
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::StartMicroVm(
|
||||
StartMicroVmError::MissingKernelConfig
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to boot the VM: \
|
||||
cannot start the virtual machine without kernel configuration",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vmm_action_shutdown_microvm() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// success
|
||||
TestData::new(
|
||||
VmmAction::ShutdownMicroVm,
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-blk")]
|
||||
#[test]
|
||||
fn test_vmm_action_insert_block_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let dummy_file = TempFile::new().unwrap();
|
||||
let dummy_path = dummy_file.as_path().to_owned();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::InsertBlockDevice(BlockDeviceConfigInfo::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::Block(
|
||||
BlockDeviceError::UpdateNotAllowedPostBoot
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-blk device error: \
|
||||
block device does not support runtime update",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// success
|
||||
TestData::new(
|
||||
VmmAction::InsertBlockDevice(BlockDeviceConfigInfo {
|
||||
path_on_host: dummy_path,
|
||||
device_type: crate::device_manager::blk_dev_mgr::BlockDeviceType::RawBlock,
|
||||
is_root_device: true,
|
||||
part_uuid: None,
|
||||
is_read_only: false,
|
||||
is_direct: false,
|
||||
no_drop: false,
|
||||
drive_id: String::from("1"),
|
||||
rate_limiter: None,
|
||||
num_queues: BlockDeviceConfigInfo::default_num_queues(),
|
||||
queue_size: 256,
|
||||
use_shared_irq: None,
|
||||
use_generic_irq: None,
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-blk")]
|
||||
#[test]
|
||||
fn test_vmm_action_update_block_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid id
|
||||
TestData::new(
|
||||
VmmAction::UpdateBlockDevice(BlockDeviceConfigUpdateInfo {
|
||||
drive_id: String::from("1"),
|
||||
rate_limiter: None,
|
||||
}),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::Block(BlockDeviceError::InvalidDeviceId(_)))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-blk device error: \
|
||||
invalid block device id '1'",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-blk")]
|
||||
#[test]
|
||||
fn test_vmm_action_remove_block_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::RemoveBlockDevice(String::from("1")),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::Block(
|
||||
BlockDeviceError::UpdateNotAllowedPostBoot
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-blk device error: \
|
||||
block device does not support runtime update",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid id
|
||||
TestData::new(
|
||||
VmmAction::RemoveBlockDevice(String::from("1")),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::Block(BlockDeviceError::InvalidDeviceId(_)))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-blk device error: \
|
||||
invalid block device id '1'",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-fs")]
|
||||
#[test]
|
||||
fn test_vmm_action_insert_fs_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::InsertFsDevice(FsDeviceConfigInfo::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::FsDevice(
|
||||
FsDeviceError::UpdateNotAllowedPostBoot
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-fs device error: \
|
||||
update operation is not allowed after boot",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// success
|
||||
TestData::new(
|
||||
VmmAction::InsertFsDevice(FsDeviceConfigInfo::default()),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-fs")]
|
||||
#[test]
|
||||
fn test_vmm_action_manipulate_fs_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::ManipulateFsBackendFs(FsMountConfigInfo::default()),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::FsDevice(FsDeviceError::MicroVMNotRunning))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-fs device error: \
|
||||
vm is not running when attaching a backend fs",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid backend
|
||||
TestData::new(
|
||||
VmmAction::ManipulateFsBackendFs(FsMountConfigInfo::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::FsDevice(
|
||||
FsDeviceError::AttachBackendFailed(_)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
println!("{}", err_string);
|
||||
let expected_err = String::from(
|
||||
"virtio-fs device error: \
|
||||
Fs device attach a backend fs failed",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
];
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-net")]
|
||||
#[test]
|
||||
fn test_vmm_action_insert_network_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// hotplug unready
|
||||
TestData::new(
|
||||
VmmAction::InsertNetworkDevice(VirtioNetDeviceConfigInfo::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::StartMicroVm(
|
||||
StartMicroVmError::UpcallMissVsock
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to boot the VM: \
|
||||
the upcall client needs a virtio-vsock device for communication",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// success
|
||||
TestData::new(
|
||||
VmmAction::InsertNetworkDevice(VirtioNetDeviceConfigInfo::default()),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-net")]
|
||||
#[test]
|
||||
fn test_vmm_action_update_network_interface() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid id
|
||||
TestData::new(
|
||||
VmmAction::UpdateNetworkInterface(VirtioNetDeviceConfigUpdateInfo {
|
||||
iface_id: String::from("1"),
|
||||
rx_rate_limiter: None,
|
||||
tx_rate_limiter: None,
|
||||
}),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::VirtioNet(
|
||||
VirtioNetDeviceError::InvalidIfaceId(_)
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"virtio-net device error: \
|
||||
invalid virtio-net iface id '1'",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "virtio-vsock")]
|
||||
#[test]
|
||||
fn test_vmm_action_insert_vsock_device() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tests = &mut [
|
||||
// invalid state
|
||||
TestData::new(
|
||||
VmmAction::InsertVsockDevice(VsockDeviceConfigInfo::default()),
|
||||
InstanceState::Running,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::Vsock(
|
||||
VsockDeviceError::UpdateNotAllowedPostBoot
|
||||
))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to add virtio-vsock device: \
|
||||
update operation is not allowed after boot",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// invalid guest_cid
|
||||
TestData::new(
|
||||
VmmAction::InsertVsockDevice(VsockDeviceConfigInfo::default()),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(VmmActionError::Vsock(VsockDeviceError::GuestCIDInvalid(0)))
|
||||
));
|
||||
let err_string = format!("{}", result.unwrap_err());
|
||||
let expected_err = String::from(
|
||||
"failed to add virtio-vsock device: \
|
||||
the guest CID 0 is invalid",
|
||||
);
|
||||
assert_eq!(err_string, expected_err);
|
||||
},
|
||||
),
|
||||
// success
|
||||
TestData::new(
|
||||
VmmAction::InsertVsockDevice(VsockDeviceConfigInfo {
|
||||
guest_cid: 3,
|
||||
..Default::default()
|
||||
}),
|
||||
InstanceState::Uninitialized,
|
||||
&|result| {
|
||||
assert!(result.is_ok());
|
||||
},
|
||||
),
|
||||
];
|
||||
|
||||
for t in tests.iter_mut() {
|
||||
t.check_request();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ pub trait ConfigItem {
|
||||
}
|
||||
|
||||
/// Struct to manage a group of configuration items.
|
||||
#[derive(Debug, Default, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Debug, Default, Deserialize, PartialEq, Serialize)]
|
||||
pub struct ConfigInfos<T>
|
||||
where
|
||||
T: ConfigItem + Clone,
|
||||
@@ -316,7 +316,7 @@ where
|
||||
}
|
||||
|
||||
/// Configuration information for RateLimiter token bucket.
|
||||
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
|
||||
pub struct TokenBucketConfigInfo {
|
||||
/// The size for the token bucket. A TokenBucket of `size` total capacity will take `refill_time`
|
||||
/// milliseconds to go from zero tokens to total capacity.
|
||||
@@ -349,7 +349,7 @@ impl From<&TokenBucketConfigInfo> for TokenBucket {
|
||||
}
|
||||
|
||||
/// Configuration information for RateLimiter objects.
|
||||
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
|
||||
pub struct RateLimiterConfigInfo {
|
||||
/// Data used to initialize the RateLimiter::bandwidth bucket.
|
||||
pub bandwidth: TokenBucketConfigInfo,
|
||||
|
||||
@@ -106,7 +106,7 @@ pub enum BlockDeviceError {
|
||||
}
|
||||
|
||||
/// Type of low level storage device/protocol for virtio-blk devices.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum BlockDeviceType {
|
||||
/// Unknown low level device type.
|
||||
Unknown,
|
||||
@@ -131,7 +131,7 @@ impl BlockDeviceType {
|
||||
}
|
||||
|
||||
/// Configuration information for a block device.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct BlockDeviceConfigUpdateInfo {
|
||||
/// Unique identifier of the drive.
|
||||
pub drive_id: String,
|
||||
@@ -151,7 +151,7 @@ impl BlockDeviceConfigUpdateInfo {
|
||||
}
|
||||
|
||||
/// Configuration information for a block device.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct BlockDeviceConfigInfo {
|
||||
/// Unique identifier of the drive.
|
||||
pub drive_id: String,
|
||||
@@ -285,6 +285,7 @@ impl std::fmt::Debug for BlockDeviceInfo {
|
||||
pub type BlockDeviceInfo = DeviceConfigInfo<BlockDeviceConfigInfo>;
|
||||
|
||||
/// Wrapper for the collection that holds all the Block Devices Configs
|
||||
//#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
#[derive(Clone)]
|
||||
pub struct BlockDeviceMgr {
|
||||
/// A list of `BlockDeviceInfo` objects.
|
||||
@@ -576,13 +577,7 @@ impl BlockDeviceMgr {
|
||||
) -> std::result::Result<(), DeviceMgrError> {
|
||||
// Respect user configuration if kernel_cmdline contains "root=",
|
||||
// special attention for the case when kernel command line starting with "root=xxx"
|
||||
let old_kernel_cmdline = format!(
|
||||
" {:?}",
|
||||
kernel_config
|
||||
.kernel_cmdline()
|
||||
.as_cstring()
|
||||
.map_err(DeviceMgrError::Cmdline)?
|
||||
);
|
||||
let old_kernel_cmdline = format!(" {}", kernel_config.kernel_cmdline().as_str());
|
||||
if !old_kernel_cmdline.contains(" root=") && self.has_root_block {
|
||||
let cmdline = kernel_config.kernel_cmdline_mut();
|
||||
if let Some(ref uuid) = self.part_uuid {
|
||||
@@ -624,7 +619,7 @@ impl BlockDeviceMgr {
|
||||
// we need to satisfy the condition by which a VMM can only have on root device
|
||||
if block_device_config.is_root_device {
|
||||
if self.has_root_block {
|
||||
Err(BlockDeviceError::RootBlockDeviceAlreadyAdded)
|
||||
return Err(BlockDeviceError::RootBlockDeviceAlreadyAdded);
|
||||
} else {
|
||||
self.has_root_block = true;
|
||||
self.read_only_root = block_device_config.is_read_only;
|
||||
|
||||
@@ -74,20 +74,11 @@ impl ConsoleManager {
|
||||
|
||||
/// Create a console backend device by using stdio streams.
|
||||
pub fn create_stdio_console(&mut self, device: Arc<Mutex<SerialDevice>>) -> Result<()> {
|
||||
device
|
||||
.lock()
|
||||
.unwrap()
|
||||
.set_output_stream(Some(Box::new(std::io::stdout())));
|
||||
let stdin_handle = std::io::stdin();
|
||||
stdin_handle
|
||||
.lock()
|
||||
.set_raw_mode()
|
||||
.map_err(|e| DeviceMgrError::ConsoleManager(ConsoleManagerError::StdinHandle(e)))?;
|
||||
stdin_handle
|
||||
.lock()
|
||||
.set_non_block(true)
|
||||
.map_err(ConsoleManagerError::StdinHandle)
|
||||
.map_err(DeviceMgrError::ConsoleManager)?;
|
||||
|
||||
let handler = ConsoleEpollHandler::new(device, Some(stdin_handle), None, &self.logger);
|
||||
self.subscriber_id = Some(self.epoll_mgr.add_subscriber(Box::new(handler)));
|
||||
|
||||
@@ -89,7 +89,7 @@ pub enum FsDeviceError {
|
||||
}
|
||||
|
||||
/// Configuration information for a vhost-user-fs device.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct FsDeviceConfigInfo {
|
||||
/// vhost-user socket path.
|
||||
pub sock_path: String,
|
||||
@@ -201,7 +201,7 @@ impl FsDeviceConfigInfo {
|
||||
}
|
||||
|
||||
/// Configuration information for virtio-fs.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct FsDeviceConfigUpdateInfo {
|
||||
/// virtiofs mount tag name used inside the guest.
|
||||
/// used as the device name during mount.
|
||||
@@ -242,7 +242,7 @@ impl ConfigItem for FsDeviceConfigInfo {
|
||||
}
|
||||
|
||||
/// Configuration information of manipulating backend fs for a virtiofs device.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize, Default)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct FsMountConfigInfo {
|
||||
/// Mount operations, mount, update, umount
|
||||
pub ops: String,
|
||||
|
||||
@@ -147,17 +147,13 @@ pub type Result<T> = ::std::result::Result<T, DeviceMgrError>;
|
||||
/// Type of the dragonball virtio devices.
|
||||
#[cfg(feature = "dbs-virtio-devices")]
|
||||
pub type DbsVirtioDevice = Box<
|
||||
dyn VirtioDevice<
|
||||
GuestAddressSpaceImpl,
|
||||
virtio_queue::QueueStateSync,
|
||||
vm_memory::GuestRegionMmap,
|
||||
>,
|
||||
dyn VirtioDevice<GuestAddressSpaceImpl, virtio_queue::QueueState, vm_memory::GuestRegionMmap>,
|
||||
>;
|
||||
|
||||
/// Type of the dragonball virtio mmio devices.
|
||||
#[cfg(feature = "dbs-virtio-devices")]
|
||||
pub type DbsMmioV2Device =
|
||||
MmioV2Device<GuestAddressSpaceImpl, virtio_queue::QueueStateSync, vm_memory::GuestRegionMmap>;
|
||||
MmioV2Device<GuestAddressSpaceImpl, virtio_queue::QueueState, vm_memory::GuestRegionMmap>;
|
||||
|
||||
/// Struct to support transactional operations for device management.
|
||||
pub struct DeviceManagerTx {
|
||||
@@ -595,19 +591,14 @@ impl DeviceManager {
|
||||
.map_err(|_| StartMicroVmError::EventFd)?;
|
||||
|
||||
info!(self.logger, "init console path: {:?}", com1_sock_path);
|
||||
|
||||
if let Some(legacy_manager) = self.legacy_manager.as_ref() {
|
||||
if let Some(path) = com1_sock_path {
|
||||
// Currently, the `com1_sock_path` "stdio" is only reserved for creating the stdio console
|
||||
if path != "stdio" {
|
||||
let com1 = legacy_manager.get_com1_serial();
|
||||
self.con_manager
|
||||
.create_socket_console(com1, path)
|
||||
.map_err(StartMicroVmError::DeviceManager)?;
|
||||
return Ok(());
|
||||
}
|
||||
if let Some(path) = com1_sock_path {
|
||||
if let Some(legacy_manager) = self.legacy_manager.as_ref() {
|
||||
let com1 = legacy_manager.get_com1_serial();
|
||||
self.con_manager
|
||||
.create_socket_console(com1, path)
|
||||
.map_err(StartMicroVmError::DeviceManager)?;
|
||||
}
|
||||
|
||||
} else if let Some(legacy_manager) = self.legacy_manager.as_ref() {
|
||||
let com1 = legacy_manager.get_com1_serial();
|
||||
self.con_manager
|
||||
.create_stdio_console(com1)
|
||||
@@ -795,14 +786,13 @@ impl DeviceManager {
|
||||
fn allocate_mmio_device_resource(
|
||||
&self,
|
||||
) -> std::result::Result<DeviceResources, StartMicroVmError> {
|
||||
let requests = vec![
|
||||
ResourceConstraint::MmioAddress {
|
||||
range: None,
|
||||
align: MMIO_DEFAULT_CFG_SIZE,
|
||||
size: MMIO_DEFAULT_CFG_SIZE,
|
||||
},
|
||||
ResourceConstraint::LegacyIrq { irq: None },
|
||||
];
|
||||
let mut requests = Vec::new();
|
||||
requests.push(ResourceConstraint::MmioAddress {
|
||||
range: None,
|
||||
align: MMIO_DEFAULT_CFG_SIZE,
|
||||
size: MMIO_DEFAULT_CFG_SIZE,
|
||||
});
|
||||
requests.push(ResourceConstraint::LegacyIrq { irq: None });
|
||||
|
||||
self.res_manager
|
||||
.allocate_device_resources(&requests, false)
|
||||
@@ -1002,7 +992,7 @@ impl DeviceManager {
|
||||
{
|
||||
self.vsock_manager
|
||||
.get_default_connector()
|
||||
.map(Some)
|
||||
.map(|d| Some(d))
|
||||
.unwrap_or(None)
|
||||
}
|
||||
#[cfg(not(feature = "virtio-vsock"))]
|
||||
|
||||
@@ -93,7 +93,7 @@ pub enum VirtioNetDeviceError {
|
||||
}
|
||||
|
||||
/// Configuration information for virtio net devices.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct VirtioNetDeviceConfigUpdateInfo {
|
||||
/// ID of the guest network interface.
|
||||
pub iface_id: String,
|
||||
@@ -123,7 +123,7 @@ impl VirtioNetDeviceConfigUpdateInfo {
|
||||
}
|
||||
|
||||
/// Configuration information for virtio net devices.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize, Default)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, Default)]
|
||||
pub struct VirtioNetDeviceConfigInfo {
|
||||
/// ID of the guest network interface.
|
||||
pub iface_id: String,
|
||||
@@ -264,7 +264,7 @@ impl VirtioNetDeviceMgr {
|
||||
config.use_generic_irq.unwrap_or(USE_GENERIC_IRQ),
|
||||
)
|
||||
.map_err(VirtioNetDeviceError::DeviceManager)?;
|
||||
ctx.insert_hotplug_mmio_device(&dev, None)
|
||||
ctx.insert_hotplug_mmio_device(&dev.clone(), None)
|
||||
.map_err(VirtioNetDeviceError::DeviceManager)?;
|
||||
// live-upgrade need save/restore device from info.device.
|
||||
mgr.info_list[device_index].set_device(dev);
|
||||
@@ -320,7 +320,7 @@ impl VirtioNetDeviceMgr {
|
||||
}
|
||||
}
|
||||
|
||||
/// Attach all configured net device to the virtual machine instance.
|
||||
/// Attach all configured vsock device to the virtual machine instance.
|
||||
pub fn attach_devices(
|
||||
&mut self,
|
||||
ctx: &mut DeviceOpContext,
|
||||
|
||||
@@ -70,7 +70,7 @@ pub enum VsockDeviceError {
|
||||
}
|
||||
|
||||
/// Configuration information for a vsock device.
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct VsockDeviceConfigInfo {
|
||||
/// ID of the vsock device.
|
||||
pub id: String,
|
||||
|
||||
@@ -127,10 +127,6 @@ pub enum StartMicroVmError {
|
||||
#[error("failure while configuring guest kernel commandline: {0}")]
|
||||
LoadCommandline(#[source] linux_loader::loader::Error),
|
||||
|
||||
/// Cannot process command line string.
|
||||
#[error("failure while processing guest kernel commandline: {0}.")]
|
||||
ProcessCommandlne(#[source] linux_loader::cmdline::Error),
|
||||
|
||||
/// The device manager was not configured.
|
||||
#[error("the device manager failed to manage devices: {0}")]
|
||||
DeviceManager(#[source] device_manager::DeviceMgrError),
|
||||
|
||||
@@ -101,6 +101,7 @@ impl EventManager {
|
||||
/// Poll pending events and invoke registered event handler.
|
||||
///
|
||||
/// # Arguments:
|
||||
/// * max_events: maximum number of pending events to handle
|
||||
/// * timeout: maximum time in milliseconds to wait
|
||||
pub fn handle_events(&self, timeout: i32) -> std::result::Result<usize, EpollError> {
|
||||
self.epoll_mgr
|
||||
|
||||
@@ -210,19 +210,14 @@ mod x86_64 {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use kvm_ioctls::Kvm;
|
||||
use std::fs::File;
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd};
|
||||
|
||||
use kvm_ioctls::Kvm;
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_create_kvm_context() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let c = KvmContext::new(None).unwrap();
|
||||
|
||||
assert!(c.max_memslots >= 32);
|
||||
@@ -239,8 +234,6 @@ mod tests {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
#[test]
|
||||
fn test_get_supported_cpu_id() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let c = KvmContext::new(None).unwrap();
|
||||
|
||||
let _ = c
|
||||
@@ -251,8 +244,6 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_create_vm() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let c = KvmContext::new(None).unwrap();
|
||||
|
||||
let _ = c.create_vm().unwrap();
|
||||
|
||||
@@ -36,7 +36,7 @@ const PIO_MAX: u16 = 0xFFFF;
|
||||
const MMIO_SPACE_RESERVED: u64 = 0x400_0000;
|
||||
|
||||
/// Errors associated with resource management operations
|
||||
#[derive(Debug, PartialEq, Eq, thiserror::Error)]
|
||||
#[derive(Debug, PartialEq, thiserror::Error)]
|
||||
pub enum ResourceError {
|
||||
/// Unknown/unsupported resource type.
|
||||
#[error("unsupported resource type")]
|
||||
@@ -569,7 +569,9 @@ impl ResourceManager {
|
||||
Resource::KvmMemSlot(slot) => self.free_kvm_mem_slot(*slot),
|
||||
Resource::MacAddresss(_) => Ok(()),
|
||||
};
|
||||
result?;
|
||||
if result.is_err() {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -586,9 +588,9 @@ mod tests {
|
||||
// Allocate/free shared IRQs multiple times.
|
||||
assert_eq!(mgr.allocate_legacy_irq(true, None).unwrap(), SHARED_IRQ);
|
||||
assert_eq!(mgr.allocate_legacy_irq(true, None).unwrap(), SHARED_IRQ);
|
||||
mgr.free_legacy_irq(SHARED_IRQ).unwrap();
|
||||
mgr.free_legacy_irq(SHARED_IRQ).unwrap();
|
||||
mgr.free_legacy_irq(SHARED_IRQ).unwrap();
|
||||
mgr.free_legacy_irq(SHARED_IRQ);
|
||||
mgr.free_legacy_irq(SHARED_IRQ);
|
||||
mgr.free_legacy_irq(SHARED_IRQ);
|
||||
|
||||
// Allocate specified IRQs.
|
||||
assert_eq!(
|
||||
@@ -596,7 +598,7 @@ mod tests {
|
||||
.unwrap(),
|
||||
LEGACY_IRQ_BASE + 10
|
||||
);
|
||||
mgr.free_legacy_irq(LEGACY_IRQ_BASE + 10).unwrap();
|
||||
mgr.free_legacy_irq(LEGACY_IRQ_BASE + 10);
|
||||
assert_eq!(
|
||||
mgr.allocate_legacy_irq(false, Some(LEGACY_IRQ_BASE + 10))
|
||||
.unwrap(),
|
||||
@@ -633,19 +635,19 @@ mod tests {
|
||||
let mgr = ResourceManager::new(None);
|
||||
|
||||
let msi = mgr.allocate_msi_irq(3).unwrap();
|
||||
mgr.free_msi_irq(msi, 3).unwrap();
|
||||
mgr.free_msi_irq(msi, 3);
|
||||
let msi = mgr.allocate_msi_irq(3).unwrap();
|
||||
mgr.free_msi_irq(msi, 3).unwrap();
|
||||
mgr.free_msi_irq(msi, 3);
|
||||
|
||||
let irq = mgr.allocate_msi_irq_aligned(8).unwrap();
|
||||
assert_eq!(irq & 0x7, 0);
|
||||
mgr.free_msi_irq(msi, 8).unwrap();
|
||||
mgr.free_msi_irq(msi, 8);
|
||||
let irq = mgr.allocate_msi_irq_aligned(8).unwrap();
|
||||
assert_eq!(irq & 0x7, 0);
|
||||
|
||||
let irq = mgr.allocate_msi_irq_aligned(512).unwrap();
|
||||
assert_eq!(irq, 512);
|
||||
mgr.free_msi_irq(irq, 512).unwrap();
|
||||
mgr.free_msi_irq(irq, 512);
|
||||
let irq = mgr.allocate_msi_irq_aligned(512).unwrap();
|
||||
assert_eq!(irq, 512);
|
||||
|
||||
@@ -688,9 +690,9 @@ mod tests {
|
||||
},
|
||||
];
|
||||
let resources = mgr.allocate_device_resources(&requests, false).unwrap();
|
||||
mgr.free_device_resources(&resources).unwrap();
|
||||
mgr.free_device_resources(&resources);
|
||||
let resources = mgr.allocate_device_resources(&requests, false).unwrap();
|
||||
mgr.free_device_resources(&resources).unwrap();
|
||||
mgr.free_device_resources(&resources);
|
||||
requests.push(ResourceConstraint::PioAddress {
|
||||
range: Some((0xc000, 0xc000)),
|
||||
align: 0x1000,
|
||||
@@ -700,7 +702,7 @@ mod tests {
|
||||
let resources = mgr
|
||||
.allocate_device_resources(&requests[0..requests.len() - 1], false)
|
||||
.unwrap();
|
||||
mgr.free_device_resources(&resources).unwrap();
|
||||
mgr.free_device_resources(&resources);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -719,7 +721,7 @@ mod tests {
|
||||
let mgr = ResourceManager::new(None);
|
||||
assert_eq!(mgr.allocate_kvm_mem_slot(1, None).unwrap(), 0);
|
||||
assert_eq!(mgr.allocate_kvm_mem_slot(1, Some(200)).unwrap(), 200);
|
||||
mgr.free_kvm_mem_slot(200).unwrap();
|
||||
mgr.free_kvm_mem_slot(200);
|
||||
assert_eq!(mgr.allocate_kvm_mem_slot(1, Some(200)).unwrap(), 200);
|
||||
assert_eq!(
|
||||
mgr.allocate_kvm_mem_slot(1, Some(KVM_USER_MEM_SLOTS))
|
||||
|
||||
@@ -39,7 +39,6 @@ impl Vcpu {
|
||||
/// vcpu thread to vmm thread.
|
||||
/// * `create_ts` - A timestamp used by the vcpu to calculate its lifetime.
|
||||
/// * `support_immediate_exit` - whether kvm uses supports immediate_exit flag.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new_aarch64(
|
||||
id: u8,
|
||||
vcpu_fd: Arc<VcpuFd>,
|
||||
|
||||
@@ -533,11 +533,16 @@ impl Vcpu {
|
||||
fn check_io_port_info(&self, addr: u16, data: &[u8]) -> Result<bool> {
|
||||
let mut checked = false;
|
||||
|
||||
// debug info signal
|
||||
if addr == MAGIC_IOPORT_DEBUG_INFO && data.len() == 4 {
|
||||
let data = unsafe { std::ptr::read(data.as_ptr() as *const u32) };
|
||||
log::warn!("KDBG: guest kernel debug info: 0x{:x}", data);
|
||||
checked = true;
|
||||
match addr {
|
||||
// debug info signal
|
||||
MAGIC_IOPORT_DEBUG_INFO => {
|
||||
if data.len() == 4 {
|
||||
let data = unsafe { std::ptr::read(data.as_ptr() as *const u32) };
|
||||
log::warn!("KDBG: guest kernel debug info: 0x{:x}", data);
|
||||
checked = true;
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
|
||||
Ok(checked)
|
||||
@@ -766,7 +771,6 @@ pub mod tests {
|
||||
use dbs_device::device_manager::IoManager;
|
||||
use kvm_ioctls::Kvm;
|
||||
use lazy_static::lazy_static;
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
use super::*;
|
||||
use crate::kvm_context::KvmContext;
|
||||
@@ -851,7 +855,7 @@ pub mod tests {
|
||||
|
||||
let kvm = Kvm::new().unwrap();
|
||||
let vm = Arc::new(kvm.create_vm().unwrap());
|
||||
let _kvm_context = KvmContext::new(Some(kvm.as_raw_fd())).unwrap();
|
||||
let kvm_context = KvmContext::new(Some(kvm.as_raw_fd())).unwrap();
|
||||
let vcpu_fd = Arc::new(vm.create_vcpu(0).unwrap());
|
||||
let io_manager = IoManagerCached::new(Arc::new(ArcSwap::new(Arc::new(IoManager::new()))));
|
||||
let reset_event_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
|
||||
@@ -876,8 +880,6 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_vcpu_run_emulation() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let (mut vcpu, _) = create_vcpu();
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
@@ -962,8 +964,6 @@ pub mod tests {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
#[test]
|
||||
fn test_vcpu_check_io_port_info() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let (vcpu, _receiver) = create_vcpu();
|
||||
|
||||
// debug info signal
|
||||
|
||||
@@ -774,7 +774,7 @@ impl VcpuManager {
|
||||
self.reset_event_fd.as_ref().unwrap().try_clone().unwrap(),
|
||||
self.vcpu_state_event.try_clone().unwrap(),
|
||||
self.vcpu_state_sender.clone(),
|
||||
request_ts,
|
||||
request_ts.clone(),
|
||||
self.support_immediate_exit,
|
||||
)
|
||||
.map_err(VcpuManagerError::Vcpu)
|
||||
|
||||
@@ -35,7 +35,6 @@ use crate::event_manager::EventManager;
|
||||
/// * `device_info` - A hashmap containing the attached devices for building FDT device nodes.
|
||||
/// * `gic_device` - The GIC device.
|
||||
/// * `initrd` - Information about an optional initrd.
|
||||
#[allow(clippy::borrowed_box)]
|
||||
fn configure_system<T: DeviceInfoForFDT + Clone + Debug, M: GuestMemory>(
|
||||
guest_mem: &M,
|
||||
cmdline: &str,
|
||||
@@ -59,9 +58,8 @@ fn configure_system<T: DeviceInfoForFDT + Clone + Debug, M: GuestMemory>(
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
impl Vm {
|
||||
/// Gets a reference to the irqchip of the VM
|
||||
#[allow(clippy::borrowed_box)]
|
||||
pub fn get_irqchip(&self) -> &Box<dyn GICDevice> {
|
||||
self.irqchip_handle.as_ref().unwrap()
|
||||
&self.irqchip_handle.as_ref().unwrap()
|
||||
}
|
||||
|
||||
/// Creates the irq chip in-kernel device model.
|
||||
@@ -138,7 +136,7 @@ impl Vm {
|
||||
|
||||
configure_system(
|
||||
guest_memory,
|
||||
cmdline.as_cstring().unwrap().to_str().unwrap(),
|
||||
cmdline.as_str(),
|
||||
vcpu_mpidr,
|
||||
self.device_manager.get_mmio_device_info(),
|
||||
self.get_irqchip(),
|
||||
|
||||
@@ -66,7 +66,7 @@ mod tests {
|
||||
cmdline.insert_str("ro").unwrap();
|
||||
let mut info = KernelConfigInfo::new(kernel.into_file(), Some(initrd.into_file()), cmdline);
|
||||
|
||||
assert_eq!(info.cmdline.as_cstring().unwrap().as_bytes(), b"ro");
|
||||
assert_eq!(info.cmdline.as_str(), "ro");
|
||||
assert!(info.initrd_file_mut().is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ pub enum VmError {
|
||||
}
|
||||
|
||||
/// Configuration information for user defined NUMA nodes.
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
pub struct NumaRegionInfo {
|
||||
/// memory size for this region (unit: MiB)
|
||||
pub size: u64,
|
||||
@@ -80,7 +80,7 @@ pub struct NumaRegionInfo {
|
||||
}
|
||||
|
||||
/// Information for cpu topology to guide guest init
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub struct CpuTopology {
|
||||
/// threads per core to indicate hyperthreading is enabled or not
|
||||
pub threads_per_core: u8,
|
||||
@@ -104,7 +104,7 @@ impl Default for CpuTopology {
|
||||
}
|
||||
|
||||
/// Configuration information for virtual machine instance.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct VmConfigInfo {
|
||||
/// Number of vcpu to start.
|
||||
pub vcpu_count: u8,
|
||||
@@ -814,17 +814,3 @@ impl Vm {
|
||||
Err(StartMicroVmError::MicroVMAlreadyRunning)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
|
||||
impl Vm {
|
||||
pub fn set_instance_state(&mut self, mstate: InstanceState) {
|
||||
self.shared_info
|
||||
.write()
|
||||
.expect("Failed to start microVM because shared info couldn't be written due to poisoned lock")
|
||||
.state = mstate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -217,17 +217,11 @@ impl Vm {
|
||||
linux_loader::loader::load_cmdline(vm_memory, cmdline_addr, cmdline)
|
||||
.map_err(StartMicroVmError::LoadCommandline)?;
|
||||
|
||||
let cmdline_size = cmdline
|
||||
.as_cstring()
|
||||
.map_err(StartMicroVmError::ProcessCommandlne)?
|
||||
.as_bytes_with_nul()
|
||||
.len();
|
||||
|
||||
configure_system(
|
||||
vm_memory,
|
||||
self.address_space.address_space(),
|
||||
cmdline_addr,
|
||||
cmdline_size,
|
||||
cmdline.as_str().len() + 1,
|
||||
&initrd,
|
||||
self.vm_config.vcpu_count,
|
||||
self.vm_config.max_vcpu_count,
|
||||
|
||||
@@ -189,8 +189,6 @@ impl Vmm {
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub fn create_vmm_instance() -> Vmm {
|
||||
@@ -212,8 +210,6 @@ pub(crate) mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_create_vmm_instance() {
|
||||
skip_if_not_root!();
|
||||
|
||||
create_vmm_instance();
|
||||
}
|
||||
}
|
||||
|
||||
2
src/libs/Cargo.lock
generated
2
src/libs/Cargo.lock
generated
@@ -413,7 +413,6 @@ dependencies = [
|
||||
"byte-unit",
|
||||
"glob",
|
||||
"lazy_static",
|
||||
"nix 0.24.2",
|
||||
"num_cpus",
|
||||
"oci",
|
||||
"regex",
|
||||
@@ -422,7 +421,6 @@ dependencies = [
|
||||
"slog",
|
||||
"slog-scope",
|
||||
"tempfile",
|
||||
"test-utils",
|
||||
"thiserror",
|
||||
"toml",
|
||||
]
|
||||
|
||||
@@ -8,7 +8,7 @@ use std::ffi::OsString;
|
||||
use std::fs::{self, File};
|
||||
use std::io::{Error, Result};
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
use crate::{eother, sl};
|
||||
@@ -29,6 +29,11 @@ const FUSE_SUPER_MAGIC: FsType = 0x65735546;
|
||||
// from linux.git/include/uapi/linux/magic.h
|
||||
const OVERLAYFS_SUPER_MAGIC: FsType = 0x794c7630;
|
||||
|
||||
/// Get bundle path (current working directory).
|
||||
pub fn get_bundle_path() -> Result<PathBuf> {
|
||||
std::env::current_dir()
|
||||
}
|
||||
|
||||
/// Get the basename of the canonicalized path
|
||||
pub fn get_base_name<P: AsRef<Path>>(src: P) -> Result<OsString> {
|
||||
let s = src.as_ref().canonicalize()?;
|
||||
|
||||
@@ -49,7 +49,7 @@ pub fn is_host_empty_dir(path: &str) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
// update_ephemeral_storage_type sets the mount type to 'ephemeral'
|
||||
// set_ephemeral_storage_type sets the mount type to 'ephemeral'
|
||||
// if the mount source path is provisioned by k8s for ephemeral storage.
|
||||
// For the given pod ephemeral volume is created only once
|
||||
// backed by tmpfs inside the VM. For successive containers
|
||||
@@ -63,8 +63,6 @@ pub fn update_ephemeral_storage_type(oci_spec: &mut Spec) {
|
||||
if is_ephemeral_volume(&m.source) {
|
||||
m.r#type = String::from(mount::KATA_EPHEMERAL_VOLUME_TYPE);
|
||||
} else if is_host_empty_dir(&m.source) {
|
||||
// FIXME support disable_guest_empty_dir
|
||||
// https://github.com/kata-containers/kata-containers/blob/02a51e75a7e0c6fce5e8abe3b991eeac87e09645/src/runtime/pkg/katautils/create.go#L105
|
||||
m.r#type = String::from(mount::KATA_HOST_DIR_VOLUME_TYPE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ pub enum ShimIdInfo {
|
||||
}
|
||||
|
||||
/// get container type
|
||||
pub fn get_container_type(spec: &oci::Spec) -> Result<ContainerType, Error> {
|
||||
pub fn get_contaier_type(spec: &oci::Spec) -> Result<ContainerType, Error> {
|
||||
for k in CRI_CONTAINER_TYPE_KEY_LIST.iter() {
|
||||
if let Some(type_value) = spec.annotations.get(*k) {
|
||||
match type_value.as_str() {
|
||||
@@ -67,7 +67,7 @@ pub fn get_container_type(spec: &oci::Spec) -> Result<ContainerType, Error> {
|
||||
/// get shim id info
|
||||
pub fn get_shim_id_info() -> Result<ShimIdInfo, Error> {
|
||||
let spec = load_oci_spec()?;
|
||||
match get_container_type(&spec)? {
|
||||
match get_contaier_type(&spec)? {
|
||||
ContainerType::PodSandbox => Ok(ShimIdInfo::Sandbox),
|
||||
ContainerType::PodContainer => {
|
||||
for k in CRI_SANDBOX_ID_KEY_LIST {
|
||||
|
||||
@@ -27,8 +27,6 @@ oci = { path = "../oci" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
test-utils = { path = "../test-utils" }
|
||||
nix = "0.24.2"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
@@ -18,11 +18,9 @@ lazy_static! {
|
||||
"/usr/share/defaults/kata-containers/configuration.toml",
|
||||
];
|
||||
}
|
||||
|
||||
pub const DEFAULT_AGENT_NAME: &str = "kata-agent";
|
||||
pub const DEFAULT_AGENT_VSOCK_PORT: u32 = 1024;
|
||||
pub const DEFAULT_AGENT_LOG_PORT: u32 = 1025;
|
||||
pub const DEFAULT_AGENT_DBG_CONSOLE_PORT: u32 = 1026;
|
||||
pub const DEFAULT_AGENT_TYPE_NAME: &str = AGENT_NAME_KATA;
|
||||
|
||||
pub const DEFAULT_RUNTIME_NAME: &str = RUNTIME_NAME_VIRTCONTAINER;
|
||||
|
||||
@@ -32,7 +32,7 @@ use regex::RegexSet;
|
||||
|
||||
use super::{default, ConfigOps, ConfigPlugin, TomlConfig};
|
||||
use crate::annotations::KATA_ANNO_CFG_HYPERVISOR_PREFIX;
|
||||
use crate::{eother, resolve_path, validate_path};
|
||||
use crate::{eother, resolve_path, sl, validate_path};
|
||||
|
||||
mod dragonball;
|
||||
pub use self::dragonball::{DragonballConfig, HYPERVISOR_NAME_DRAGONBALL};
|
||||
@@ -50,8 +50,6 @@ const VIRTIO_FS: &str = "virtio-fs";
|
||||
const VIRTIO_FS_INLINE: &str = "inline-virtio-fs";
|
||||
const MAX_BRIDGE_SIZE: u32 = 5;
|
||||
|
||||
const KERNEL_PARAM_DELIMITER: &str = " ";
|
||||
|
||||
lazy_static! {
|
||||
static ref HYPERVISOR_PLUGINS: Mutex<HashMap<String, Arc<dyn ConfigPlugin>>> =
|
||||
Mutex::new(HashMap::new());
|
||||
@@ -239,16 +237,6 @@ impl BootInfo {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add kernel parameters to bootinfo. It is always added before the original
|
||||
/// to let the original one takes priority
|
||||
pub fn add_kernel_params(&mut self, params: Vec<String>) {
|
||||
let mut p = params;
|
||||
if !self.kernel_params.is_empty() {
|
||||
p.push(self.kernel_params.clone()); // [new_params0, new_params1, ..., original_params]
|
||||
}
|
||||
self.kernel_params = p.join(KERNEL_PARAM_DELIMITER);
|
||||
}
|
||||
|
||||
/// Validate guest kernel image annotaion
|
||||
pub fn validate_boot_path(&self, path: &str) -> Result<()> {
|
||||
validate_path!(path, "path {} is invalid{}")?;
|
||||
@@ -301,39 +289,35 @@ impl CpuInfo {
|
||||
pub fn adjust_config(&mut self) -> Result<()> {
|
||||
let features: Vec<&str> = self.cpu_features.split(',').map(|v| v.trim()).collect();
|
||||
self.cpu_features = features.join(",");
|
||||
|
||||
let cpus = num_cpus::get() as u32;
|
||||
|
||||
// adjust default_maxvcpus
|
||||
if self.default_maxvcpus == 0 || self.default_maxvcpus > cpus {
|
||||
self.default_maxvcpus = cpus;
|
||||
}
|
||||
|
||||
// adjust default_vcpus
|
||||
if self.default_vcpus < 0 || self.default_vcpus as u32 > cpus {
|
||||
self.default_vcpus = cpus as i32;
|
||||
} else if self.default_vcpus == 0 {
|
||||
self.default_vcpus = default::DEFAULT_GUEST_VCPUS as i32;
|
||||
}
|
||||
|
||||
if self.default_vcpus > self.default_maxvcpus as i32 {
|
||||
self.default_vcpus = self.default_maxvcpus as i32;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate the configuration information.
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
if self.default_vcpus > self.default_maxvcpus as i32 {
|
||||
return Err(eother!(
|
||||
"The default_vcpus({}) is greater than default_maxvcpus({})",
|
||||
self.default_vcpus,
|
||||
self.default_maxvcpus
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get default number of guest vCPUs.
|
||||
pub fn get_default_vcpus(&self) -> u32 {
|
||||
let cpus = num_cpus::get() as u32;
|
||||
if self.default_vcpus < 0 || self.default_vcpus as u32 > cpus {
|
||||
cpus
|
||||
} else if self.default_vcpus == 0 {
|
||||
default::DEFAULT_GUEST_VCPUS
|
||||
} else {
|
||||
self.default_vcpus as u32
|
||||
}
|
||||
}
|
||||
|
||||
/// Get default maximal number of guest vCPUs.
|
||||
pub fn get_default_max_vcpus(&self) -> u32 {
|
||||
let cpus = num_cpus::get() as u32;
|
||||
if self.default_maxvcpus == 0 || self.default_maxvcpus > cpus {
|
||||
cpus
|
||||
} else {
|
||||
self.default_maxvcpus
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration information for debug
|
||||
@@ -846,6 +830,10 @@ impl SharedFsInfo {
|
||||
if self.virtio_fs_cache.is_empty() {
|
||||
self.virtio_fs_cache = default::DEFAULT_VIRTIO_FS_CACHE_MODE.to_string();
|
||||
}
|
||||
if self.virtio_fs_cache == *"none" {
|
||||
warn!(sl!(), "virtio-fs cache mode `none` is deprecated since Kata Containers 2.5.0 and will be removed in the future release, please use `never` instead. For more details please refer to https://github.com/kata-containers/kata-containers/issues/4234.");
|
||||
self.virtio_fs_cache = default::DEFAULT_VIRTIO_FS_CACHE_MODE.to_string();
|
||||
}
|
||||
if self.virtio_fs_is_dax && self.virtio_fs_cache_size == 0 {
|
||||
self.virtio_fs_cache_size = default::DEFAULT_VIRTIO_FS_DAX_SIZE_MB;
|
||||
}
|
||||
@@ -1083,107 +1071,4 @@ mod tests {
|
||||
assert!(get_hypervisor_plugin("dragonball").is_some());
|
||||
assert!(get_hypervisor_plugin("dragonball2").is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_kernel_params() {
|
||||
let mut boot_info = BootInfo {
|
||||
..Default::default()
|
||||
};
|
||||
let params = vec![
|
||||
String::from("foo"),
|
||||
String::from("bar"),
|
||||
String::from("baz=faz"),
|
||||
];
|
||||
boot_info.add_kernel_params(params);
|
||||
|
||||
assert_eq!(boot_info.kernel_params, String::from("foo bar baz=faz"));
|
||||
|
||||
let new_params = vec![
|
||||
String::from("boo=far"),
|
||||
String::from("a"),
|
||||
String::from("b=c"),
|
||||
];
|
||||
boot_info.add_kernel_params(new_params);
|
||||
|
||||
assert_eq!(
|
||||
boot_info.kernel_params,
|
||||
String::from("boo=far a b=c foo bar baz=faz")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cpu_info_adjust_config() {
|
||||
// get CPU cores of the test node
|
||||
let node_cpus = num_cpus::get() as u32;
|
||||
let default_vcpus = default::DEFAULT_GUEST_VCPUS as i32;
|
||||
|
||||
struct TestData<'a> {
|
||||
desc: &'a str,
|
||||
input: &'a mut CpuInfo,
|
||||
output: CpuInfo,
|
||||
}
|
||||
|
||||
let tests = &mut [
|
||||
TestData {
|
||||
desc: "all with default values",
|
||||
input: &mut CpuInfo {
|
||||
cpu_features: "".to_string(),
|
||||
default_vcpus: 0,
|
||||
default_maxvcpus: 0,
|
||||
},
|
||||
output: CpuInfo {
|
||||
cpu_features: "".to_string(),
|
||||
default_vcpus: default_vcpus as i32,
|
||||
default_maxvcpus: node_cpus,
|
||||
},
|
||||
},
|
||||
TestData {
|
||||
desc: "all with big values",
|
||||
input: &mut CpuInfo {
|
||||
cpu_features: "a,b,c".to_string(),
|
||||
default_vcpus: 9999999,
|
||||
default_maxvcpus: 9999999,
|
||||
},
|
||||
output: CpuInfo {
|
||||
cpu_features: "a,b,c".to_string(),
|
||||
default_vcpus: node_cpus as i32,
|
||||
default_maxvcpus: node_cpus,
|
||||
},
|
||||
},
|
||||
TestData {
|
||||
desc: "default_vcpus lager than default_maxvcpus",
|
||||
input: &mut CpuInfo {
|
||||
cpu_features: "a, b ,c".to_string(),
|
||||
default_vcpus: -1,
|
||||
default_maxvcpus: 1,
|
||||
},
|
||||
output: CpuInfo {
|
||||
cpu_features: "a,b,c".to_string(),
|
||||
default_vcpus: 1,
|
||||
default_maxvcpus: 1,
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
for (_, tc) in tests.iter_mut().enumerate() {
|
||||
// we can ensure that unwrap will not panic
|
||||
tc.input.adjust_config().unwrap();
|
||||
|
||||
assert_eq!(
|
||||
tc.input.cpu_features, tc.output.cpu_features,
|
||||
"test[{}] cpu_features",
|
||||
tc.desc
|
||||
);
|
||||
assert_eq!(
|
||||
tc.input.default_vcpus, tc.output.default_vcpus,
|
||||
"test[{}] default_vcpus",
|
||||
tc.desc
|
||||
);
|
||||
assert_eq!(
|
||||
tc.input.default_maxvcpus, tc.output.default_maxvcpus,
|
||||
"test[{}] default_maxvcpus",
|
||||
tc.desc
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,6 @@ mod drop_in;
|
||||
pub mod hypervisor;
|
||||
|
||||
pub use self::agent::Agent;
|
||||
use self::default::DEFAULT_AGENT_DBG_CONSOLE_PORT;
|
||||
pub use self::hypervisor::{
|
||||
BootInfo, DragonballConfig, Hypervisor, QemuConfig, HYPERVISOR_NAME_DRAGONBALL,
|
||||
HYPERVISOR_NAME_QEMU,
|
||||
@@ -34,24 +33,6 @@ pub use self::runtime::{Runtime, RuntimeVendor, RUNTIME_NAME_VIRTCONTAINER};
|
||||
|
||||
pub use self::agent::AGENT_NAME_KATA;
|
||||
|
||||
// TODO: let agent use the constants here for consistency
|
||||
/// Debug console enabled flag for agent
|
||||
pub const DEBUG_CONSOLE_FLAG: &str = "agent.debug_console";
|
||||
/// Tracing enabled flag for agent
|
||||
pub const TRACE_MODE_OPTION: &str = "agent.trace";
|
||||
/// Tracing enabled
|
||||
pub const TRACE_MODE_ENABLE: &str = "true";
|
||||
/// Log level setting key for agent, if debugged mode on, set to debug
|
||||
pub const LOG_LEVEL_OPTION: &str = "agent.log";
|
||||
/// logging level: debug
|
||||
pub const LOG_LEVEL_DEBUG: &str = "debug";
|
||||
/// Option of which port will the debug console connect to
|
||||
pub const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
|
||||
/// Option of which port the agent's log will connect to
|
||||
pub const LOG_VPORT_OPTION: &str = "agent.log_vport";
|
||||
/// Option of setting the container's pipe size
|
||||
pub const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
|
||||
|
||||
/// Trait to manipulate global Kata configuration information.
|
||||
pub trait ConfigPlugin: Send + Sync {
|
||||
/// Get the plugin name.
|
||||
@@ -170,32 +151,7 @@ impl TomlConfig {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get agent-specfic kernel parameters for further Hypervisor config revision
|
||||
pub fn get_agent_kernel_params(&self) -> Result<HashMap<String, String>> {
|
||||
let mut kv = HashMap::new();
|
||||
if let Some(cfg) = self.agent.get(&self.runtime.agent_name) {
|
||||
if cfg.debug {
|
||||
kv.insert(LOG_LEVEL_OPTION.to_string(), LOG_LEVEL_DEBUG.to_string());
|
||||
}
|
||||
if cfg.enable_tracing {
|
||||
kv.insert(TRACE_MODE_OPTION.to_string(), TRACE_MODE_ENABLE.to_string());
|
||||
}
|
||||
if cfg.container_pipe_size > 0 {
|
||||
let container_pipe_size = cfg.container_pipe_size.to_string();
|
||||
kv.insert(CONTAINER_PIPE_SIZE_OPTION.to_string(), container_pipe_size);
|
||||
}
|
||||
if cfg.debug_console_enabled {
|
||||
kv.insert(DEBUG_CONSOLE_FLAG.to_string(), "".to_string());
|
||||
kv.insert(
|
||||
DEBUG_CONSOLE_VPORT_OPTION.to_string(),
|
||||
DEFAULT_AGENT_DBG_CONSOLE_PORT.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(kv)
|
||||
}
|
||||
|
||||
/// Probe configuration file according to the default configuration file list.
|
||||
/// Probe configuration file according to the default configuration file list.
|
||||
fn get_default_config_file() -> Result<PathBuf> {
|
||||
for f in default::DEFAULT_RUNTIME_CONFIGURATIONS.iter() {
|
||||
if let Ok(path) = fs::canonicalize(f) {
|
||||
@@ -347,28 +303,4 @@ mod tests {
|
||||
let patterns = ["/usr/share".to_string(), "/bin/*".to_string()];
|
||||
validate_path_pattern(&patterns, "/bin/ls").unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_agent_kernel_params() {
|
||||
let mut config = TomlConfig {
|
||||
..Default::default()
|
||||
};
|
||||
let agent_config = Agent {
|
||||
debug: true,
|
||||
enable_tracing: true,
|
||||
container_pipe_size: 20,
|
||||
debug_console_enabled: true,
|
||||
..Default::default()
|
||||
};
|
||||
let agent_name = "test_agent";
|
||||
config.runtime.agent_name = agent_name.to_string();
|
||||
config.agent.insert(agent_name.to_owned(), agent_config);
|
||||
|
||||
let kv = config.get_agent_kernel_params().unwrap();
|
||||
assert_eq!(kv.get("agent.log").unwrap(), "debug");
|
||||
assert_eq!(kv.get("agent.trace").unwrap(), "true");
|
||||
assert_eq!(kv.get("agent.container_pipe_size").unwrap(), "20");
|
||||
kv.get("agent.debug_console").unwrap();
|
||||
assert_eq!(kv.get("agent.debug_console_vport").unwrap(), "1026"); // 1026 is the default port
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,39 +10,20 @@ use crate::annotations;
|
||||
use crate::container::ContainerType;
|
||||
use std::str::FromStr;
|
||||
|
||||
// K8S_EMPTY_DIR is the K8s specific path for `empty-dir` volumes
|
||||
// K8S_EMPTY_DIR is the k8s specific path for `empty-dir` volumes
|
||||
const K8S_EMPTY_DIR: &str = "kubernetes.io~empty-dir";
|
||||
// K8S_CONFIGMAP is the K8s specific path for `configmap` volumes
|
||||
const K8S_CONFIGMAP: &str = "kubernetes.io~configmap";
|
||||
// K8S_SECRET is the K8s specific path for `secret` volumes
|
||||
const K8S_SECRET: &str = "kubernetes.io~secret";
|
||||
|
||||
/// Check whether the path is a K8s empty directory.
|
||||
pub fn is_empty_dir<P: AsRef<Path>>(path: P) -> bool {
|
||||
is_special_dir(path, K8S_EMPTY_DIR)
|
||||
}
|
||||
|
||||
/// Check whether the path is a K8s configmap.
|
||||
pub fn is_configmap<P: AsRef<Path>>(path: P) -> bool {
|
||||
is_special_dir(path, K8S_CONFIGMAP)
|
||||
}
|
||||
|
||||
/// Check whether the path is a K8s secret.
|
||||
pub fn is_secret<P: AsRef<Path>>(path: P) -> bool {
|
||||
is_special_dir(path, K8S_SECRET)
|
||||
}
|
||||
|
||||
/// Check whether the path is a K8s empty directory, configmap, or secret.
|
||||
/// Check whether the path is a K8S empty directory.
|
||||
///
|
||||
/// For example, given a K8s EmptyDir, Kubernetes mounts
|
||||
/// For a K8S EmptyDir, Kubernetes mounts
|
||||
/// "/var/lib/kubelet/pods/<id>/volumes/kubernetes.io~empty-dir/<volumeMount name>"
|
||||
/// to "/<mount-point>".
|
||||
pub fn is_special_dir<P: AsRef<Path>>(path: P, dir_type: &str) -> bool {
|
||||
pub fn is_empty_dir<P: AsRef<Path>>(path: P) -> bool {
|
||||
let path = path.as_ref();
|
||||
|
||||
if let Some(parent) = path.parent() {
|
||||
if let Some(pname) = parent.file_name() {
|
||||
if pname == dir_type && parent.parent().is_some() {
|
||||
if pname == K8S_EMPTY_DIR && parent.parent().is_some() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -96,119 +77,10 @@ pub fn container_type_with_id(spec: &oci::Spec) -> (ContainerType, Option<String
|
||||
(container_type, sid)
|
||||
}
|
||||
|
||||
// count_files will return the number of files within a given path.
|
||||
// If the total number of
|
||||
// files observed is greater than limit, break and return -1
|
||||
fn count_files<P: AsRef<Path>>(path: P, limit: i32) -> std::io::Result<i32> {
|
||||
// First, Check to see if the path exists
|
||||
let src = std::fs::canonicalize(path)?;
|
||||
|
||||
// Special case if this is just a file, not a directory:
|
||||
if !src.is_dir() {
|
||||
return Ok(1);
|
||||
}
|
||||
|
||||
let mut num_files = 0;
|
||||
|
||||
for entry in std::fs::read_dir(src)? {
|
||||
let file = entry?;
|
||||
let p = file.path();
|
||||
if p.is_dir() {
|
||||
num_files += count_files(&p, limit)?;
|
||||
} else {
|
||||
num_files += 1;
|
||||
}
|
||||
|
||||
if num_files > limit {
|
||||
return Ok(-1);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(num_files)
|
||||
}
|
||||
|
||||
/// Check if a volume should be processed as a watchable volume,
|
||||
/// which adds inotify-like function for virtio-fs.
|
||||
pub fn is_watchable_mount<P: AsRef<Path>>(path: P) -> bool {
|
||||
if !is_secret(&path) && !is_configmap(&path) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// we have a cap on number of FDs which can be present in mount
|
||||
// to determine if watchable. A similar Check exists within the agent,
|
||||
// which may or may not help handle case where extra files are added to
|
||||
// a mount after the fact
|
||||
let count = count_files(&path, 8).unwrap_or(0);
|
||||
count > 0
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{annotations, container};
|
||||
use std::fs;
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
#[test]
|
||||
fn test_is_watchable_mount() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let result = is_watchable_mount("");
|
||||
assert!(!result);
|
||||
|
||||
// path does not exist, failure expected:
|
||||
let result = is_watchable_mount("/var/lib/kubelet/pods/5f0861a0-a987-4a3a-bb0f-1058ddb9678f/volumes/kubernetes.io~empty-dir/foobar");
|
||||
assert!(!result);
|
||||
|
||||
let test_tmp_dir = tempfile::tempdir().expect("failed to create tempdir");
|
||||
|
||||
// Verify secret is successful (single file mount):
|
||||
// /tmppath/kubernetes.io~secret/super-secret-thing
|
||||
let secret_path = test_tmp_dir.path().join(K8S_SECRET);
|
||||
let result = fs::create_dir_all(&secret_path);
|
||||
assert!(result.is_ok());
|
||||
let secret_file = &secret_path.join("super-secret-thing");
|
||||
let result = fs::File::create(secret_file);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let result = is_watchable_mount(secret_file);
|
||||
assert!(result);
|
||||
|
||||
// Verify that if we have too many files, it will no longer be watchable:
|
||||
// /tmp/kubernetes.io~configmap/amazing-dir-of-configs/
|
||||
// | - c0
|
||||
// | - c1
|
||||
// ...
|
||||
// | - c7
|
||||
// should be okay.
|
||||
//
|
||||
// 9 files should cause the mount to be deemed "not watchable"
|
||||
let configmap_path = test_tmp_dir
|
||||
.path()
|
||||
.join(K8S_CONFIGMAP)
|
||||
.join("amazing-dir-of-configs");
|
||||
let result = fs::create_dir_all(&configmap_path);
|
||||
assert!(result.is_ok());
|
||||
|
||||
// not a watchable mount if no files available.
|
||||
let result = is_watchable_mount(&configmap_path);
|
||||
assert!(!result);
|
||||
|
||||
for i in 0..8 {
|
||||
let configmap_file = &configmap_path.join(format!("c{}", i));
|
||||
let result = fs::File::create(configmap_file);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let result = is_watchable_mount(&configmap_path);
|
||||
assert!(result);
|
||||
}
|
||||
let configmap_file = &configmap_path.join("too_much_files");
|
||||
let result = fs::File::create(configmap_file);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let result = is_watchable_mount(&configmap_path);
|
||||
assert!(!result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_empty_dir() {
|
||||
@@ -231,36 +103,6 @@ mod tests {
|
||||
assert!(is_empty_dir(empty_dir));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_configmap() {
|
||||
let path = "/volumes/kubernetes.io~configmap/cm";
|
||||
assert!(is_configmap(path));
|
||||
|
||||
let path = "/volumes/kubernetes.io~configmap//cm";
|
||||
assert!(is_configmap(path));
|
||||
|
||||
let path = "/volumes/kubernetes.io~configmap-test/cm";
|
||||
assert!(!is_configmap(path));
|
||||
|
||||
let path = "/volumes/kubernetes.io~configmap";
|
||||
assert!(!is_configmap(path));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_secret() {
|
||||
let path = "/volumes/kubernetes.io~secret/test-serect";
|
||||
assert!(is_secret(path));
|
||||
|
||||
let path = "/volumes/kubernetes.io~secret//test-serect";
|
||||
assert!(is_secret(path));
|
||||
|
||||
let path = "/volumes/kubernetes.io~secret-test/test-serect";
|
||||
assert!(!is_secret(path));
|
||||
|
||||
let path = "/volumes/kubernetes.io~secret";
|
||||
assert!(!is_secret(path));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_container_type() {
|
||||
let sid = "sid".to_string();
|
||||
|
||||
@@ -13,7 +13,7 @@ pub const KATA_VOLUME_TYPE_PREFIX: &str = "kata:";
|
||||
pub const KATA_GUEST_MOUNT_PREFIX: &str = "kata:guest-mount:";
|
||||
|
||||
/// KATA_EPHEMERAL_DEV_TYPE creates a tmpfs backed volume for sharing files between containers.
|
||||
pub const KATA_EPHEMERAL_VOLUME_TYPE: &str = "ephemeral";
|
||||
pub const KATA_EPHEMERAL_VOLUME_TYPE: &str = "kata:ephemeral";
|
||||
|
||||
/// KATA_HOST_DIR_TYPE use for host empty dir
|
||||
pub const KATA_HOST_DIR_VOLUME_TYPE: &str = "kata:hostdir";
|
||||
|
||||
@@ -494,8 +494,8 @@ pub struct LinuxDevice {
|
||||
pub struct LinuxDeviceCgroup {
|
||||
#[serde(default)]
|
||||
pub allow: bool,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub r#type: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
pub r#type: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub major: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
@@ -1431,21 +1431,21 @@ mod tests {
|
||||
devices: vec![
|
||||
crate::LinuxDeviceCgroup {
|
||||
allow: false,
|
||||
r#type: None,
|
||||
r#type: "".to_string(),
|
||||
major: None,
|
||||
minor: None,
|
||||
access: "rwm".to_string(),
|
||||
},
|
||||
crate::LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("c".to_string()),
|
||||
r#type: "c".to_string(),
|
||||
major: Some(10),
|
||||
minor: Some(229),
|
||||
access: "rw".to_string(),
|
||||
},
|
||||
crate::LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: Some("b".to_string()),
|
||||
r#type: "b".to_string(),
|
||||
major: Some(8),
|
||||
minor: Some(0),
|
||||
access: "r".to_string(),
|
||||
|
||||
3
src/libs/protocols/.gitignore
vendored
3
src/libs/protocols/.gitignore
vendored
@@ -7,8 +7,5 @@ src/empty.rs
|
||||
src/health.rs
|
||||
src/health_ttrpc.rs
|
||||
src/health_ttrpc_async.rs
|
||||
src/image.rs
|
||||
src/image_ttrpc.rs
|
||||
src/image_ttrpc_async.rs
|
||||
src/oci.rs
|
||||
src/types.rs
|
||||
|
||||
@@ -157,30 +157,13 @@ fn real_main() -> Result<(), std::io::Error> {
|
||||
// generate async
|
||||
#[cfg(feature = "async")]
|
||||
{
|
||||
codegen(
|
||||
"src",
|
||||
&[
|
||||
"protos/agent.proto",
|
||||
"protos/health.proto",
|
||||
"protos/image.proto",
|
||||
],
|
||||
true,
|
||||
)?;
|
||||
codegen("src", &["protos/agent.proto", "protos/health.proto"], true)?;
|
||||
|
||||
fs::rename("src/agent_ttrpc.rs", "src/agent_ttrpc_async.rs")?;
|
||||
fs::rename("src/health_ttrpc.rs", "src/health_ttrpc_async.rs")?;
|
||||
fs::rename("src/image_ttrpc.rs", "src/image_ttrpc_async.rs")?;
|
||||
}
|
||||
|
||||
codegen(
|
||||
"src",
|
||||
&[
|
||||
"protos/agent.proto",
|
||||
"protos/health.proto",
|
||||
"protos/image.proto",
|
||||
],
|
||||
false,
|
||||
)?;
|
||||
codegen("src", &["protos/agent.proto", "protos/health.proto"], false)?;
|
||||
|
||||
// There is a message named 'Box' in oci.proto
|
||||
// so there is a struct named 'Box', we should replace Box<Self> to ::std::boxed::Box<Self>
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
//
|
||||
// Copyright (c) 2021 Alibaba Inc.
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc";
|
||||
|
||||
package grpc;
|
||||
|
||||
// Image defines the public APIs for managing images.
|
||||
service Image {
|
||||
// PullImage pulls an image with authentication config.
|
||||
rpc PullImage(PullImageRequest) returns (PullImageResponse) {}
|
||||
}
|
||||
|
||||
message PullImageRequest {
|
||||
// Image name (e.g. docker.io/library/busybox:latest).
|
||||
string image = 1;
|
||||
// Unique image identifier, used to avoid duplication when unpacking the image layers.
|
||||
string container_id = 2;
|
||||
// Use USERNAME[:PASSWORD] for accessing the registry
|
||||
string source_creds = 3;
|
||||
}
|
||||
|
||||
message PullImageResponse {
|
||||
// Reference to the image in use. For most runtimes, this should be an
|
||||
// image ID or digest.
|
||||
string image_ref = 1;
|
||||
}
|
||||
@@ -15,10 +15,6 @@ pub mod health;
|
||||
pub mod health_ttrpc;
|
||||
#[cfg(feature = "async")]
|
||||
pub mod health_ttrpc_async;
|
||||
pub mod image;
|
||||
pub mod image_ttrpc;
|
||||
#[cfg(feature = "async")]
|
||||
pub mod image_ttrpc_async;
|
||||
pub mod oci;
|
||||
pub mod trans;
|
||||
pub mod types;
|
||||
|
||||
@@ -105,7 +105,7 @@ impl From<oci::LinuxDeviceCgroup> for crate::oci::LinuxDeviceCgroup {
|
||||
fn from(from: oci::LinuxDeviceCgroup) -> Self {
|
||||
crate::oci::LinuxDeviceCgroup {
|
||||
Allow: from.allow,
|
||||
Type: from.r#type.map_or("".to_string(), |t| t as String),
|
||||
Type: from.r#type,
|
||||
Major: from.major.map_or(0, |t| t as i64),
|
||||
Minor: from.minor.map_or(0, |t| t as i64),
|
||||
Access: from.access,
|
||||
@@ -478,7 +478,7 @@ impl From<crate::oci::LinuxDeviceCgroup> for oci::LinuxDeviceCgroup {
|
||||
|
||||
oci::LinuxDeviceCgroup {
|
||||
allow: from.get_Allow(),
|
||||
r#type: Some(from.take_Type()),
|
||||
r#type: from.take_Type(),
|
||||
major,
|
||||
minor,
|
||||
access: from.take_Access(),
|
||||
|
||||
471
src/runtime-rs/Cargo.lock
generated
471
src/runtime-rs/Cargo.lock
generated
@@ -84,6 +84,12 @@ version = "1.0.57"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dabe5a181f83789739c194cbe5a897dde195078fac08568d09221fd6137a7ba8"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "1.5.0"
|
||||
@@ -268,6 +274,23 @@ dependencies = [
|
||||
"digest 0.10.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "blobfs"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=316380792092f73c99f832c4cb44ef4319d6f76b#316380792092f73c99f832c4cb44ef4319d6f76b"
|
||||
dependencies = [
|
||||
"fuse-backend-rs",
|
||||
"libc",
|
||||
"log",
|
||||
"nydus-error",
|
||||
"rafs",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
"storage",
|
||||
"vm-memory",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "block-buffer"
|
||||
version = "0.9.0"
|
||||
@@ -457,19 +480,19 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation-sys"
|
||||
version = "0.8.3"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
|
||||
|
||||
[[package]]
|
||||
name = "cpufeatures"
|
||||
version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
|
||||
checksum = "065a5d7ffdcbc8fa145d6f0746f3555025b9097a9e9cda59f7467abae670c78d"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation-sys"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
|
||||
|
||||
[[package]]
|
||||
name = "cpuid-bool"
|
||||
version = "0.1.2"
|
||||
@@ -562,20 +585,22 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "dashmap"
|
||||
version = "4.0.2"
|
||||
version = "5.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c"
|
||||
checksum = "4c8858831f7781322e539ea39e72449c46b059638250c14344fec8d0aa6e539c"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"num_cpus",
|
||||
"parking_lot 0.12.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dbs-address-space"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9acd47f8b1ad8a6a62450d2d83ced5452dbf9549e2b98709d945554b22a45ed7"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"arc-swap 1.5.0",
|
||||
"libc",
|
||||
"nix 0.23.1",
|
||||
"thiserror",
|
||||
@@ -595,7 +620,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-arch"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"kvm-bindings",
|
||||
"kvm-ioctls",
|
||||
@@ -608,7 +633,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-boot"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"dbs-arch",
|
||||
"kvm-bindings",
|
||||
@@ -623,7 +648,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-device"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"thiserror",
|
||||
]
|
||||
@@ -631,7 +656,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-interrupt"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"dbs-device",
|
||||
"kvm-bindings",
|
||||
@@ -643,7 +668,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-legacy-devices"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"dbs-device",
|
||||
"dbs-utils",
|
||||
@@ -655,9 +680,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "dbs-uhttp"
|
||||
version = "0.3.1"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6fd0544fe7ba81fa8deb8800843836d279a81b051e2e8ab046fe1b0cb096c1cc"
|
||||
checksum = "b773f7f1b9088438e9746890c7c0836b133b07935812867a33e06e81c92c0cdc"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"mio",
|
||||
@@ -666,7 +691,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-utils"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"event-manager",
|
||||
@@ -681,8 +706,9 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "dbs-virtio-devices"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323#c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
dependencies = [
|
||||
"blobfs",
|
||||
"byteorder",
|
||||
"caps",
|
||||
"dbs-device",
|
||||
@@ -696,8 +722,7 @@ dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.23.1",
|
||||
"nydus-blobfs",
|
||||
"nydus-rafs",
|
||||
"rafs",
|
||||
"rlimit",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -740,11 +765,21 @@ dependencies = [
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "diskarbitration-sys"
|
||||
version = "0.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f82432ae94d42f160b6e17389d6e1c1eee29827b99ad32d35a0a96bb98bedb5"
|
||||
dependencies = [
|
||||
"core-foundation-sys 0.2.3",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dragonball"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"arc-swap 1.5.0",
|
||||
"bytes 1.1.0",
|
||||
"dbs-address-space",
|
||||
"dbs-allocator",
|
||||
@@ -887,24 +922,20 @@ checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
|
||||
|
||||
[[package]]
|
||||
name = "fuse-backend-rs"
|
||||
version = "0.9.6"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "994a3bfb694ee52bf8f3bca80d784b723f150810998219337e429cc5dbe92717"
|
||||
checksum = "3a96ec48cd39ee2504eaa4a31b88262b7d13151a4da0b53af8fd212c7c9ffa5d"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"arc-swap 1.5.0",
|
||||
"bitflags",
|
||||
"caps",
|
||||
"core-foundation-sys",
|
||||
"io-uring",
|
||||
"core-foundation-sys 0.2.3",
|
||||
"diskarbitration-sys",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"mio",
|
||||
"nix 0.24.2",
|
||||
"scoped-tls",
|
||||
"slab",
|
||||
"socket2",
|
||||
"tokio-uring",
|
||||
"nix 0.23.1",
|
||||
"virtio-queue",
|
||||
"vm-memory",
|
||||
"vmm-sys-util",
|
||||
@@ -1093,16 +1124,16 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "governor"
|
||||
version = "0.4.1"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7df0ee4b237afb71e99f7e2fbd840ffec2d6c4bb569f69b2af18aa1f63077d38"
|
||||
checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87"
|
||||
dependencies = [
|
||||
"dashmap",
|
||||
"futures 0.3.21",
|
||||
"futures-timer",
|
||||
"no-std-compat",
|
||||
"nonzero_ext",
|
||||
"parking_lot 0.11.2",
|
||||
"parking_lot 0.12.1",
|
||||
"quanta",
|
||||
"rand 0.8.5",
|
||||
"smallvec",
|
||||
@@ -1138,82 +1169,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hex"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
|
||||
dependencies = [
|
||||
"bytes 1.1.0",
|
||||
"fnv",
|
||||
"itoa",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http-body"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
|
||||
dependencies = [
|
||||
"bytes 1.1.0",
|
||||
"http",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "httparse"
|
||||
version = "1.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904"
|
||||
|
||||
[[package]]
|
||||
name = "httpdate"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
|
||||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "0.14.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac"
|
||||
dependencies = [
|
||||
"bytes 1.1.0",
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"http",
|
||||
"http-body",
|
||||
"httparse",
|
||||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
"want",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hyperlocal"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fafdf7b2b2de7c9784f76e02c0935e65a8117ec3b768644379983ab333ac98c"
|
||||
dependencies = [
|
||||
"futures-util",
|
||||
"hex",
|
||||
"hyper",
|
||||
"pin-project",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hypervisor"
|
||||
version = "0.1.0"
|
||||
@@ -1246,7 +1207,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501"
|
||||
dependencies = [
|
||||
"android_system_properties",
|
||||
"core-foundation-sys",
|
||||
"core-foundation-sys 0.8.3",
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
"winapi",
|
||||
@@ -1430,9 +1391,9 @@ checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
|
||||
|
||||
[[package]]
|
||||
name = "linux-loader"
|
||||
version = "0.6.0"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62a2f912deca034ec34b0a43a390059ea98daac40e440ebe8bea88f3315fe168"
|
||||
checksum = "8a5e77493808403a6bd56a301a64ea6b9342e36ea845044bf0dfdf56fe52fa08"
|
||||
dependencies = [
|
||||
"vm-memory",
|
||||
]
|
||||
@@ -1693,47 +1654,10 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nydus-api"
|
||||
version = "0.1.1"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=e429be3e8623d47db0f97186f761aeda2983c6f4#e429be3e8623d47db0f97186f761aeda2983c6f4"
|
||||
dependencies = [
|
||||
"dbs-uhttp",
|
||||
"http",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"mio",
|
||||
"nydus-error",
|
||||
"nydus-utils",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"url",
|
||||
"vmm-sys-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nydus-blobfs"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=e429be3e8623d47db0f97186f761aeda2983c6f4#e429be3e8623d47db0f97186f761aeda2983c6f4"
|
||||
dependencies = [
|
||||
"fuse-backend-rs",
|
||||
"libc",
|
||||
"log",
|
||||
"nydus-error",
|
||||
"nydus-rafs",
|
||||
"nydus-storage",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
"vm-memory",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nydus-error"
|
||||
version = "0.2.1"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=e429be3e8623d47db0f97186f761aeda2983c6f4#e429be3e8623d47db0f97186f761aeda2983c6f4"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=316380792092f73c99f832c4cb44ef4319d6f76b#316380792092f73c99f832c4cb44ef4319d6f76b"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"httpdate",
|
||||
@@ -1743,80 +1667,22 @@ dependencies = [
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nydus-rafs"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=e429be3e8623d47db0f97186f761aeda2983c6f4#e429be3e8623d47db0f97186f761aeda2983c6f4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
"bitflags",
|
||||
"blake3",
|
||||
"flate2",
|
||||
"fuse-backend-rs",
|
||||
"futures 0.3.21",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"lz4-sys",
|
||||
"nix 0.24.2",
|
||||
"nydus-api",
|
||||
"nydus-error",
|
||||
"nydus-storage",
|
||||
"nydus-utils",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
"sha2 0.10.5",
|
||||
"spmc",
|
||||
"vm-memory",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nydus-storage"
|
||||
version = "0.5.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=e429be3e8623d47db0f97186f761aeda2983c6f4#e429be3e8623d47db0f97186f761aeda2983c6f4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
"bitflags",
|
||||
"dbs-uhttp",
|
||||
"fuse-backend-rs",
|
||||
"futures 0.3.21",
|
||||
"governor",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.24.2",
|
||||
"nydus-api",
|
||||
"nydus-error",
|
||||
"nydus-utils",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
"sha2 0.10.5",
|
||||
"tokio",
|
||||
"vm-memory",
|
||||
"vmm-sys-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nydus-utils"
|
||||
version = "0.3.1"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=e429be3e8623d47db0f97186f761aeda2983c6f4#e429be3e8623d47db0f97186f761aeda2983c6f4"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=316380792092f73c99f832c4cb44ef4319d6f76b#316380792092f73c99f832c4cb44ef4319d6f76b"
|
||||
dependencies = [
|
||||
"blake3",
|
||||
"flate2",
|
||||
"fuse-backend-rs",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"lz4-sys",
|
||||
"nix 0.24.2",
|
||||
"nydus-error",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2 0.10.5",
|
||||
"tokio",
|
||||
"sha2",
|
||||
"zstd",
|
||||
]
|
||||
|
||||
@@ -1941,26 +1807,6 @@ dependencies = [
|
||||
"indexmap",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project"
|
||||
version = "1.0.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
|
||||
dependencies = [
|
||||
"pin-project-internal",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-internal"
|
||||
version = "1.0.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.9"
|
||||
@@ -2118,6 +1964,34 @@ dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rafs"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=316380792092f73c99f832c4cb44ef4319d6f76b#316380792092f73c99f832c4cb44ef4319d6f76b"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap 0.4.8",
|
||||
"bitflags",
|
||||
"blake3",
|
||||
"flate2",
|
||||
"fuse-backend-rs",
|
||||
"futures 0.3.21",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"lz4-sys",
|
||||
"nix 0.23.1",
|
||||
"nydus-error",
|
||||
"nydus-utils",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
"sha2",
|
||||
"spmc",
|
||||
"storage",
|
||||
"vm-memory",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.3.23"
|
||||
@@ -2308,7 +2182,6 @@ dependencies = [
|
||||
"serde",
|
||||
"slog",
|
||||
"slog-scope",
|
||||
"test-utils",
|
||||
"tokio",
|
||||
"uuid",
|
||||
]
|
||||
@@ -2343,9 +2216,6 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"common",
|
||||
"hyper",
|
||||
"hyperlocal",
|
||||
"hypervisor",
|
||||
"kata-types",
|
||||
"lazy_static",
|
||||
"linux_container",
|
||||
@@ -2398,12 +2268,6 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scoped-tls"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
@@ -2524,17 +2388,6 @@ dependencies = [
|
||||
"opaque-debug",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.10.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf9db03534dff993187064c4e0c05a5708d2a9728ace9a8959b77bedf415dac5"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"cpufeatures",
|
||||
"digest 0.10.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "shim"
|
||||
version = "0.1.0"
|
||||
@@ -2554,7 +2407,7 @@ dependencies = [
|
||||
"rand 0.8.5",
|
||||
"serial_test",
|
||||
"service",
|
||||
"sha2 0.9.3",
|
||||
"sha2",
|
||||
"slog",
|
||||
"slog-async",
|
||||
"slog-scope",
|
||||
@@ -2617,7 +2470,7 @@ version = "4.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2f95a4b4c3274cd2869549da82b57ccc930859bdbf5bcea0424bc5f140b3c786"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"arc-swap 1.5.0",
|
||||
"lazy_static",
|
||||
"slog",
|
||||
]
|
||||
@@ -2655,6 +2508,34 @@ version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02a8428da277a8e3a15271d79943e80ccc2ef254e78813a166a08d65e4c3ece5"
|
||||
|
||||
[[package]]
|
||||
name = "storage"
|
||||
version = "0.5.0"
|
||||
source = "git+https://github.com/dragonflyoss/image-service.git?rev=316380792092f73c99f832c4cb44ef4319d6f76b#316380792092f73c99f832c4cb44ef4319d6f76b"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap 0.4.8",
|
||||
"bitflags",
|
||||
"dbs-uhttp",
|
||||
"fuse-backend-rs",
|
||||
"futures 0.3.21",
|
||||
"governor",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.23.1",
|
||||
"nydus-error",
|
||||
"nydus-utils",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
"sha2",
|
||||
"spmc",
|
||||
"tokio",
|
||||
"vm-memory",
|
||||
"vmm-sys-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "strsim"
|
||||
version = "0.10.0"
|
||||
@@ -2730,13 +2611,6 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "test-utils"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"nix 0.24.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tests_utils"
|
||||
version = "0.1.0"
|
||||
@@ -2858,20 +2732,6 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-uring"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d3ad494f39874984d990ade7f6319dafbcd3301ff0b1841f8a55a1ebb3e742c8"
|
||||
dependencies = [
|
||||
"io-uring",
|
||||
"libc",
|
||||
"scoped-tls",
|
||||
"slab",
|
||||
"socket2",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-vsock"
|
||||
version = "0.3.1"
|
||||
@@ -2903,38 +2763,6 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-service"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.35"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160"
|
||||
dependencies = [
|
||||
"cfg-if 1.0.0",
|
||||
"pin-project-lite",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.28"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7"
|
||||
dependencies = [
|
||||
"once_cell",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "try-lock"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
|
||||
|
||||
[[package]]
|
||||
name = "ttrpc"
|
||||
version = "0.6.1"
|
||||
@@ -3101,9 +2929,9 @@ checksum = "3ff512178285488516ed85f15b5d0113a7cdb89e9e8a760b269ae4f02b84bd6b"
|
||||
|
||||
[[package]]
|
||||
name = "virtio-queue"
|
||||
version = "0.4.0"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "519c0a333c871650269cba303bc108075d52a0c0d64f9b91fae61829b53725af"
|
||||
checksum = "f90da9e627f6aaf667cc7b6548a28be332d3e1f058f4ceeb46ab6bcee5c4b74d"
|
||||
dependencies = [
|
||||
"log",
|
||||
"vm-memory",
|
||||
@@ -3118,11 +2946,11 @@ checksum = "f43fb5a6bd1a7d423ad72802801036719b7546cf847a103f8fe4575f5b0d45a6"
|
||||
|
||||
[[package]]
|
||||
name = "vm-memory"
|
||||
version = "0.9.0"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "583f213899e8a5eea23d9c507252d4bed5bc88f0ecbe0783262f80034630744b"
|
||||
checksum = "339d4349c126fdcd87e034631d7274370cf19eb0e87b33166bcd956589fc72c5"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"arc-swap 1.5.0",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
@@ -3135,9 +2963,9 @@ checksum = "a4b5231d334edbc03b22704caa1a022e4c07491d6df736593f26094df8b04a51"
|
||||
|
||||
[[package]]
|
||||
name = "vmm-sys-util"
|
||||
version = "0.10.0"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08604d7be03eb26e33b3cee3ed4aef2bf550b305d1cca60e84da5d28d3790b62"
|
||||
checksum = "733537bded03aaa93543f785ae997727b30d1d9f4a03b7861d23290474242e11"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"libc",
|
||||
@@ -3159,16 +2987,6 @@ version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca"
|
||||
|
||||
[[package]]
|
||||
name = "want"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0"
|
||||
dependencies = [
|
||||
"log",
|
||||
"try-lock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.9.0+wasi-snapshot-preview1"
|
||||
@@ -3387,3 +3205,8 @@ dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[patch.unused]]
|
||||
name = "dbs-upcall"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/openanolis/dragonball-sandbox.git?rev=7a8e832b53d66994d6a16f0513d69f540583dcd0#7a8e832b53d66994d6a16f0513d69f540583dcd0"
|
||||
|
||||
@@ -4,11 +4,11 @@ members = [
|
||||
]
|
||||
|
||||
[patch.'crates-io']
|
||||
dbs-address-space = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-device = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-utils = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-interrupt = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-legacy-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-virtio-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-boot = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-arch = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
|
||||
dbs-device = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-utils = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-interrupt = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-legacy-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-virtio-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-boot = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-arch = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
dbs-upcall = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "7a8e832b53d66994d6a16f0513d69f540583dcd0" }
|
||||
|
||||
@@ -31,7 +31,7 @@ test:
|
||||
else
|
||||
##TARGET default: build code
|
||||
default: runtime show-header
|
||||
##TARGET test: run cargo tests
|
||||
#TARGET test: run cargo tests
|
||||
test:
|
||||
@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
endif
|
||||
@@ -50,6 +50,7 @@ EXEC_PREFIX := $(PREFIX)/local
|
||||
BINDIR := $(EXEC_PREFIX)/bin
|
||||
else
|
||||
EXEC_PREFIX := $(PREFIX)
|
||||
##VAR BINDIR=<path> is a directory for installing executable programs
|
||||
# when creating the kata-deploy image, the default installation path for go runtime is $(EXEC_PREFIX)/bin, so we put it here for multiple runtime
|
||||
BINDIR := $(EXEC_PREFIX)/runtime-rs/bin/
|
||||
endif
|
||||
@@ -72,7 +73,7 @@ HYPERVISOR_CLH = cloud-hypervisor
|
||||
|
||||
DEFAULT_HYPERVISOR ?= $(HYPERVISOR_DB)
|
||||
|
||||
##VAR HYPERVISOR=<hypervisor_name> List of hypervisors this build system can generate configuration for.
|
||||
# List of hypervisors this build system can generate configuration for.
|
||||
HYPERVISORS := $(HYPERVISOR_DB) $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVISOR_CLH)
|
||||
|
||||
DBVALIDHYPERVISORPATHS := []
|
||||
@@ -83,28 +84,28 @@ PKGLIBEXECDIR := $(LIBEXECDIR)/$(PROJECT_DIR)
|
||||
FIRMWAREPATH :=
|
||||
FIRMWAREVOLUMEPATH :=
|
||||
|
||||
##VAR DEFVCPUS=<number> Default number of vCPUs
|
||||
# Default number of vCPUs
|
||||
DEFVCPUS := 1
|
||||
##VAR DEFMAXVCPUS=<number> Default maximum number of vCPUs
|
||||
# Default maximum number of vCPUs
|
||||
DEFMAXVCPUS := 0
|
||||
##VAR DEFMEMSZ=<number> Default memory size in MiB
|
||||
# Default memory size in MiB
|
||||
DEFMEMSZ := 2048
|
||||
##VAR DEFMEMSLOTS=<number> Default memory slots
|
||||
# Default memory slots
|
||||
# Cases to consider :
|
||||
# - nvdimm rootfs image
|
||||
# - preallocated memory
|
||||
# - vm template memory
|
||||
# - hugepage memory
|
||||
DEFMEMSLOTS := 10
|
||||
##VAR DEFBRIDGES=<number> Default number of bridges
|
||||
DEFBRIDGES := 0
|
||||
#Default number of bridges
|
||||
DEFBRIDGES := 1
|
||||
DEFENABLEANNOTATIONS := []
|
||||
DEFDISABLEGUESTSECCOMP := true
|
||||
DEFDISABLEGUESTEMPTYDIR := false
|
||||
##VAR DEFAULTEXPFEATURES=[features] Default experimental features enabled
|
||||
#Default experimental features enabled
|
||||
DEFAULTEXPFEATURES := []
|
||||
DEFDISABLESELINUX := false
|
||||
##VAR DEFENTROPYSOURCE=[entropy_source] Default entropy source
|
||||
#Default entropy source
|
||||
DEFENTROPYSOURCE := /dev/urandom
|
||||
DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"]
|
||||
DEFDISABLEBLOCK := false
|
||||
@@ -115,8 +116,8 @@ ifeq ($(ARCH),x86_64)
|
||||
DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/virtiofsd
|
||||
endif
|
||||
DEFVALIDVIRTIOFSDAEMONPATHS := [\"$(DEFVIRTIOFSDAEMON)\"]
|
||||
##VAR DEFVIRTIOFSCACHESIZE=<cache_size> Default DAX mapping cache size in MiB
|
||||
# if value is 0, DAX is not enabled
|
||||
# Default DAX mapping cache size in MiB
|
||||
#if value is 0, DAX is not enabled
|
||||
DEFVIRTIOFSCACHESIZE ?= 0
|
||||
DEFVIRTIOFSCACHE ?= auto
|
||||
# Format example:
|
||||
@@ -133,7 +134,7 @@ DEFFILEMEMBACKEND := ""
|
||||
DEFVALIDFILEMEMBACKENDS := [\"$(DEFFILEMEMBACKEND)\"]
|
||||
DEFMSIZE9P := 8192
|
||||
DEFVFIOMODE := guest-kernel
|
||||
##VAR DEFSANDBOXCGROUPONLY=<bool> Default cgroup model
|
||||
# Default cgroup model
|
||||
DEFSANDBOXCGROUPONLY ?= false
|
||||
DEFSTATICRESOURCEMGMT_DB ?= false
|
||||
DEFBINDMOUNTS := []
|
||||
@@ -159,9 +160,9 @@ KNOWN_HYPERVISORS =
|
||||
|
||||
CONFDIR := $(DEFAULTSDIR)/$(PROJECT_DIR)
|
||||
SYSCONFDIR := $(SYSCONFDIR)/$(PROJECT_DIR)
|
||||
##VAR CONFIG_PATH=<path> Main configuration file location for stateless systems
|
||||
# Main configuration file location for stateless systems
|
||||
CONFIG_PATH := $(abspath $(CONFDIR)/$(CONFIG_FILE))
|
||||
##VAR SYSCONFIG=<path> Secondary configuration file location. Note that this takes precedence
|
||||
# Secondary configuration file location. Note that this takes precedence
|
||||
# over CONFIG_PATH.
|
||||
SYSCONFIG := $(abspath $(SYSCONFDIR)/$(CONFIG_FILE))
|
||||
SHAREDIR := $(SHAREDIR)
|
||||
@@ -176,7 +177,7 @@ ifneq (,$(DBCMD))
|
||||
SYSCONFIG_DB = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_DB))
|
||||
SYSCONFIG_PATHS += $(SYSCONFIG_DB)
|
||||
CONFIGS += $(CONFIG_DB)
|
||||
# dragonball-specific options (all should be suffixed by "_DB")
|
||||
# dragonball-specific options (all should be suffixed by "_dragonball")
|
||||
DEFMAXVCPUS_DB := 1
|
||||
DEFBLOCKSTORAGEDRIVER_DB := virtio-blk
|
||||
DEFNETWORKMODEL_DB := tcfilter
|
||||
@@ -357,15 +358,15 @@ endef
|
||||
|
||||
.DEFAULT_GOAL := default
|
||||
|
||||
GENERATED_FILES += $(CONFIGS)
|
||||
|
||||
runtime: $(TARGET)
|
||||
|
||||
$(TARGET): $(GENERATED_FILES) $(TARGET_PATH)
|
||||
$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
|
||||
$(TARGET_PATH): $(SOURCES) | show-summary
|
||||
@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE) $(EXTRA_RUSTFEATURES)
|
||||
|
||||
GENERATED_FILES += $(CONFIGS)
|
||||
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed \
|
||||
$(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') \
|
||||
@@ -453,7 +454,7 @@ endif
|
||||
@printf "\tassets path (PKGDATADIR) : %s\n" $(abspath $(PKGDATADIR))
|
||||
@printf "\tshim path (PKGLIBEXECDIR) : %s\n" $(abspath $(PKGLIBEXECDIR))
|
||||
@printf "\n"
|
||||
##TARGET help: Show help comments that start with `##VAR` and `##TARGET` in runtime-rs makefile
|
||||
## help: Show help comments that start with `##VAR` and `##TARGET`
|
||||
help: Makefile show-summary
|
||||
@echo "========================== Help ============================="
|
||||
@echo "Variables:"
|
||||
@@ -490,7 +491,7 @@ install-runtime: runtime
|
||||
|
||||
install-configs: $(CONFIGS)
|
||||
$(foreach f,$(CONFIGS),$(call INSTALL_FILE,$f,$(dir $(CONFIG_PATH)))) \
|
||||
ln -sf $(DEFAULT_HYPERVISOR_CONFIG) $(DESTDIR)/$(CONFIG_PATH)
|
||||
sudo ln -sf $(DEFAULT_HYPERVISOR_CONFIG) $(DESTDIR)/$(CONFIG_PATH)
|
||||
|
||||
.PHONY: \
|
||||
help \
|
||||
|
||||
@@ -97,10 +97,6 @@ Currently, only built-in `Dragonball` has been implemented.
|
||||
|
||||
Persist defines traits and functions to help different components save state to disk and load state from disk.
|
||||
|
||||
### helper libraries
|
||||
|
||||
Some helper libraries are maintained in [the library directory](../libs) so that they can be shared with other rust components.
|
||||
|
||||
## Build and install
|
||||
|
||||
```bash
|
||||
|
||||
@@ -40,10 +40,6 @@ impl AgentManager for KataAgent {
|
||||
self.stop_log_forwarder().await;
|
||||
}
|
||||
|
||||
async fn agent_sock(&self) -> Result<String> {
|
||||
self.agent_sock().await
|
||||
}
|
||||
|
||||
async fn agent_config(&self) -> AgentConfig {
|
||||
self.agent_config().await
|
||||
}
|
||||
|
||||
@@ -7,15 +7,12 @@
|
||||
mod agent;
|
||||
mod trans;
|
||||
|
||||
use std::{
|
||||
os::unix::io::{IntoRawFd, RawFd},
|
||||
sync::Arc,
|
||||
};
|
||||
use std::os::unix::io::{IntoRawFd, RawFd};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use kata_types::config::Agent as AgentConfig;
|
||||
use protocols::{agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc};
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::sync::Mutex;
|
||||
use ttrpc::asynchronous::Client;
|
||||
|
||||
use crate::{log_forwarder::LogForwarder, sock};
|
||||
@@ -44,25 +41,27 @@ pub(crate) struct KataAgentInner {
|
||||
log_forwarder: LogForwarder,
|
||||
}
|
||||
|
||||
unsafe impl Send for KataAgent {}
|
||||
unsafe impl Sync for KataAgent {}
|
||||
pub struct KataAgent {
|
||||
pub(crate) inner: Arc<RwLock<KataAgentInner>>,
|
||||
pub(crate) inner: Mutex<KataAgentInner>,
|
||||
}
|
||||
|
||||
impl KataAgent {
|
||||
pub fn new(config: AgentConfig) -> Self {
|
||||
KataAgent {
|
||||
inner: Arc::new(RwLock::new(KataAgentInner {
|
||||
inner: Mutex::new(KataAgentInner {
|
||||
client: None,
|
||||
client_fd: -1,
|
||||
socket_address: "".to_string(),
|
||||
config,
|
||||
log_forwarder: LogForwarder::new(),
|
||||
})),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_health_client(&self) -> Option<(health_ttrpc::HealthClient, i64, RawFd)> {
|
||||
let inner = self.inner.read().await;
|
||||
let inner = self.inner.lock().await;
|
||||
inner.client.as_ref().map(|c| {
|
||||
(
|
||||
health_ttrpc::HealthClient::new(c.clone()),
|
||||
@@ -73,7 +72,7 @@ impl KataAgent {
|
||||
}
|
||||
|
||||
pub async fn get_agent_client(&self) -> Option<(agent_ttrpc::AgentServiceClient, i64, RawFd)> {
|
||||
let inner = self.inner.read().await;
|
||||
let inner = self.inner.lock().await;
|
||||
inner.client.as_ref().map(|c| {
|
||||
(
|
||||
agent_ttrpc::AgentServiceClient::new(c.clone()),
|
||||
@@ -84,13 +83,13 @@ impl KataAgent {
|
||||
}
|
||||
|
||||
pub(crate) async fn set_socket_address(&self, address: &str) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
let mut inner = self.inner.lock().await;
|
||||
inner.socket_address = address.to_string();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn connect_agent_server(&self) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
let config = sock::ConnectConfig::new(
|
||||
inner.config.dial_timeout_ms as u64,
|
||||
@@ -108,7 +107,7 @@ impl KataAgent {
|
||||
}
|
||||
|
||||
pub(crate) async fn start_log_forwarder(&self) -> Result<()> {
|
||||
let mut inner = self.inner.write().await;
|
||||
let mut inner = self.inner.lock().await;
|
||||
let config = sock::ConnectConfig::new(
|
||||
inner.config.dial_timeout_ms as u64,
|
||||
inner.config.reconnect_timeout_ms as u64,
|
||||
@@ -124,21 +123,12 @@ impl KataAgent {
|
||||
}
|
||||
|
||||
pub(crate) async fn stop_log_forwarder(&self) {
|
||||
let mut inner = self.inner.write().await;
|
||||
let mut inner = self.inner.lock().await;
|
||||
inner.log_forwarder.stop();
|
||||
}
|
||||
|
||||
pub(crate) async fn agent_sock(&self) -> Result<String> {
|
||||
let inner = self.inner.read().await;
|
||||
Ok(format!(
|
||||
"{}:{}",
|
||||
inner.socket_address.clone(),
|
||||
inner.config.server_port
|
||||
))
|
||||
}
|
||||
|
||||
pub(crate) async fn agent_config(&self) -> AgentConfig {
|
||||
let inner = self.inner.read().await;
|
||||
let inner = self.inner.lock().await;
|
||||
inner.config.clone()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,7 +38,6 @@ pub trait AgentManager: Send + Sync {
|
||||
async fn start(&self, address: &str) -> Result<()>;
|
||||
async fn stop(&self);
|
||||
|
||||
async fn agent_sock(&self) -> Result<String>;
|
||||
async fn agent_config(&self) -> AgentConfig;
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1.8.0", features = ["sync"] }
|
||||
vmm-sys-util = "0.10.0"
|
||||
vmm-sys-util = "0.9.0"
|
||||
|
||||
kata-sys-util = { path = "../../../libs/kata-sys-util" }
|
||||
kata-types = { path = "../../../libs/kata-types" }
|
||||
|
||||
@@ -1,94 +0,0 @@
|
||||
# Multi-vmm support for runtime-rs
|
||||
Some key points for supporting multi-vmm in rust runtime.
|
||||
## 1. Hypervisor Config
|
||||
|
||||
The diagram below gives an overview for the hypervisor config
|
||||
|
||||

|
||||
|
||||
VMM's config info will be loaded when initialize the runtime instance, there are some important functions need to be focused on.
|
||||
### `VirtContainer::init()`
|
||||
|
||||
This function initialize the runtime handler. It will register the plugins into the HYPERVISOR_PLUGINS. Different plugins are needed for different hypervisors.
|
||||
```rust
|
||||
#[async_trait]
|
||||
impl RuntimeHandler for VirtContainer {
|
||||
fn init() -> Result<()> {
|
||||
// register
|
||||
let dragonball_config = Arc::new(DragonballConfig::new());
|
||||
register_hypervisor_plugin("dragonball", dragonball_config);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
[This is the plugin method for QEMU. Other VMM plugin methods haven't support currently.](../../../libs/kata-types/src/config/hypervisor/qemu.rs)
|
||||
QEMU plugin defines the methods to adjust and validate the hypervisor config file, those methods could be modified if it is needed.
|
||||
|
||||
After that, when loading the TOML config, the plugins will be called to adjust and validate the config file.
|
||||
```rust
|
||||
async fn try_init(&mut self, spec: &oci::Spec) -> Result<()> {、
|
||||
...
|
||||
let config = load_config(spec).context("load config")?;
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### new_instance
|
||||
|
||||
This function will create a runtime_instance which include the operations for container and sandbox. At the same time, a hypervisor instance will be created. QEMU instance will be created here as well, and set the hypervisor config file
|
||||
```rust
|
||||
async fn new_hypervisor(toml_config: &TomlConfig) -> Result<Arc<dyn Hypervisor>> {
|
||||
let hypervisor_name = &toml_config.runtime.hypervisor_name;
|
||||
let hypervisor_config = toml_config
|
||||
.hypervisor
|
||||
.get(hypervisor_name)
|
||||
.ok_or_else(|| anyhow!("failed to get hypervisor for {}", &hypervisor_name))
|
||||
.context("get hypervisor")?;
|
||||
|
||||
// TODO: support other hypervisor
|
||||
match hypervisor_name.as_str() {
|
||||
HYPERVISOR_DRAGONBALL => {
|
||||
let mut hypervisor = Dragonball::new();
|
||||
hypervisor
|
||||
.set_hypervisor_config(hypervisor_config.clone())
|
||||
.await;
|
||||
Ok(Arc::new(hypervisor))
|
||||
}
|
||||
_ => Err(anyhow!("Unsupported hypervisor {}", &hypervisor_name)),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 2. Hypervisor Trait
|
||||
|
||||
[To support multi-vmm, the hypervisor trait need to be implemented.](./src/lib.rs)
|
||||
```rust
|
||||
pub trait Hypervisor: Send + Sync {
|
||||
// vm manager
|
||||
async fn prepare_vm(&self, id: &str, netns: Option<String>) -> Result<()>;
|
||||
async fn start_vm(&self, timeout: i32) -> Result<()>;
|
||||
async fn stop_vm(&self) -> Result<()>;
|
||||
async fn pause_vm(&self) -> Result<()>;
|
||||
async fn save_vm(&self) -> Result<()>;
|
||||
async fn resume_vm(&self) -> Result<()>;
|
||||
|
||||
// device manager
|
||||
async fn add_device(&self, device: device::Device) -> Result<()>;
|
||||
async fn remove_device(&self, device: device::Device) -> Result<()>;
|
||||
|
||||
// utils
|
||||
async fn get_agent_socket(&self) -> Result<String>;
|
||||
async fn disconnect(&self);
|
||||
async fn hypervisor_config(&self) -> HypervisorConfig;
|
||||
async fn get_thread_ids(&self) -> Result<VcpuThreadIds>;
|
||||
async fn get_pids(&self) -> Result<Vec<u32>>;
|
||||
async fn cleanup(&self) -> Result<()>;
|
||||
async fn check(&self) -> Result<()>;
|
||||
async fn get_jailer_root(&self) -> Result<String>;
|
||||
async fn save_state(&self) -> Result<HypervisorState>;
|
||||
}
|
||||
```
|
||||
In current design, VM will be started in the following steps.
|
||||
|
||||

|
||||
@@ -21,7 +21,6 @@ fn override_driver(bdf: &str, driver: &str) -> Result<()> {
|
||||
const SYS_PCI_DEVICES_PATH: &str = "/sys/bus/pci/devices";
|
||||
const PCI_DRIVER_PROBE: &str = "/sys/bus/pci/drivers_probe";
|
||||
const VFIO_NEW_ID_PATH: &str = "/sys/bus/pci/drivers/vfio-pci/new_id";
|
||||
const VFIO_UNBIND_PATH: &str = "/sys/bus/pci/drivers/vfio-pci/unbind";
|
||||
|
||||
pub const VFIO_PCI: &str = "vfio-pci";
|
||||
|
||||
@@ -133,10 +132,11 @@ pub fn bind_device_to_host(bdf: &str, host_driver: &str, _vendor_device_id: &str
|
||||
|
||||
override_driver(bdf, host_driver).context("override driver")?;
|
||||
|
||||
let unbind_path = "/sys/bus/pci/drivers/vfio-pci/unbind";
|
||||
|
||||
// echo bdf > /sys/bus/pci/drivers/vfio-pci/unbind"
|
||||
std::fs::write(VFIO_UNBIND_PATH, bdf)
|
||||
.with_context(|| format!("echo {}> {}", bdf, VFIO_UNBIND_PATH))?;
|
||||
info!(sl!(), "echo {} > {}", bdf, VFIO_UNBIND_PATH);
|
||||
std::fs::write(unbind_path, bdf).with_context(|| format!("echo {}> {}", bdf, unbind_path))?;
|
||||
info!(sl!(), "echo {} > {}", bdf, unbind_path);
|
||||
|
||||
// echo bdf > /sys/bus/pci/drivers_probe
|
||||
std::fs::write(PCI_DRIVER_PROBE, bdf)
|
||||
|
||||
@@ -91,7 +91,6 @@ impl DragonballInner {
|
||||
kernel_params.append(&mut KernelParams::from_string(
|
||||
&self.config.boot_info.kernel_params,
|
||||
));
|
||||
info!(sl!(), "prepared kernel_params={:?}", kernel_params);
|
||||
|
||||
// set boot source
|
||||
let kernel_path = self.config.boot_info.kernel.clone();
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
|
||||
use crate::{VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM};
|
||||
use kata_types::config::LOG_VPORT_OPTION;
|
||||
|
||||
// Port where the agent will send the logs. Logs are sent through the vsock in cases
|
||||
// where the hypervisor has no console.sock, i.e dragonball
|
||||
@@ -29,18 +28,6 @@ impl Param {
|
||||
value: value.to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_string(&self) -> Result<String> {
|
||||
if self.key.is_empty() && self.value.is_empty() {
|
||||
Err(anyhow!("Empty key and value"))
|
||||
} else if self.key.is_empty() {
|
||||
Err(anyhow!("Empty key"))
|
||||
} else if self.value.is_empty() {
|
||||
Ok(self.key.to_string())
|
||||
} else {
|
||||
Ok(format!("{}{}{}", self.key, KERNEL_KV_DELIMITER, self.value))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -61,7 +48,7 @@ impl KernelParams {
|
||||
];
|
||||
|
||||
if debug {
|
||||
params.push(Param::new(LOG_VPORT_OPTION, VSOCK_LOGS_PORT));
|
||||
params.push(Param::new("agent.log_vport", VSOCK_LOGS_PORT));
|
||||
}
|
||||
|
||||
Self { params }
|
||||
@@ -142,7 +129,18 @@ impl KernelParams {
|
||||
let mut parameters: Vec<String> = Vec::new();
|
||||
|
||||
for param in &self.params {
|
||||
parameters.push(param.to_string()?);
|
||||
if param.key.is_empty() && param.value.is_empty() {
|
||||
return Err(anyhow!("Empty key and value"));
|
||||
} else if param.key.is_empty() {
|
||||
return Err(anyhow!("Empty key"));
|
||||
} else if param.value.is_empty() {
|
||||
parameters.push(param.key.to_string());
|
||||
} else {
|
||||
parameters.push(format!(
|
||||
"{}{}{}",
|
||||
param.key, KERNEL_KV_DELIMITER, param.value
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(parameters.join(KERNEL_PARAM_DELIMITER))
|
||||
@@ -155,20 +153,6 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_params() {
|
||||
let param1 = Param::new("", "");
|
||||
let param2 = Param::new("", "foo");
|
||||
let param3 = Param::new("foo", "");
|
||||
|
||||
assert!(param1.to_string().is_err());
|
||||
assert!(param2.to_string().is_err());
|
||||
assert_eq!(param3.to_string().unwrap(), String::from("foo"));
|
||||
|
||||
let param4 = Param::new("foo", "bar");
|
||||
assert_eq!(param4.to_string().unwrap(), String::from("foo=bar"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_kernel_params() -> Result<()> {
|
||||
let expect_params_string = "k1=v1 k2=v2 k3=v3".to_string();
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user