mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-02-22 14:54:23 +00:00
Compare commits
230 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1e6cea24c8 | ||
|
|
1d9915147d | ||
|
|
ae1be28ddd | ||
|
|
72833cb00b | ||
|
|
fda095a4c9 | ||
|
|
c7347cb76d | ||
|
|
639bc84329 | ||
|
|
7ae5fa463e | ||
|
|
1381cab6f0 | ||
|
|
7fc7328bbc | ||
|
|
f5edbfd696 | ||
|
|
d96e8375c4 | ||
|
|
e5a284474d | ||
|
|
5656fc6139 | ||
|
|
3a3849efff | ||
|
|
64ceb0832a | ||
|
|
d5878437a4 | ||
|
|
469c651fc0 | ||
|
|
908aacfa78 | ||
|
|
4bda16565b | ||
|
|
66ccc25724 | ||
|
|
d4dd87a974 | ||
|
|
b13db29aaa | ||
|
|
2499d013bd | ||
|
|
d28410ed75 | ||
|
|
95aa21f018 | ||
|
|
6cc5b79507 | ||
|
|
0a39f59a9b | ||
|
|
a0766986e7 | ||
|
|
56fb2a9482 | ||
|
|
2d266df846 | ||
|
|
d28a512d29 | ||
|
|
a13d5a3f04 | ||
|
|
8614e5efc4 | ||
|
|
4b5b788918 | ||
|
|
958cd8dd9f | ||
|
|
e1b780492f | ||
|
|
acd2a933da | ||
|
|
62e239ceaa | ||
|
|
fd7bcd88d0 | ||
|
|
fbc8454d3d | ||
|
|
897e2e2b6e | ||
|
|
ec0af6fbda | ||
|
|
7aca7a6671 | ||
|
|
d03738a757 | ||
|
|
cf54a1b0e1 | ||
|
|
ec7b2aa441 | ||
|
|
5431841a80 | ||
|
|
b231a795d7 | ||
|
|
befb2a7c33 | ||
|
|
5b809ca440 | ||
|
|
a3fd3d90bc | ||
|
|
36bf080c1e | ||
|
|
8332f427d2 | ||
|
|
2d6b1e6b13 | ||
|
|
65e908a584 | ||
|
|
be40646d04 | ||
|
|
438f81b108 | ||
|
|
9492c45d06 | ||
|
|
78b96a6e2e | ||
|
|
97785b1f3f | ||
|
|
33555037c0 | ||
|
|
27cff15015 | ||
|
|
70d9afbd1f | ||
|
|
db04c7ec93 | ||
|
|
e4cbc6abce | ||
|
|
75ac09baba | ||
|
|
c9f5966f56 | ||
|
|
ec290853e9 | ||
|
|
fb341f8ebb | ||
|
|
23cb5bb6c2 | ||
|
|
eb1cf792de | ||
|
|
62a975603e | ||
|
|
fdf3088be0 | ||
|
|
48c5b1fb55 | ||
|
|
1bdb34e880 | ||
|
|
27ce3eef12 | ||
|
|
fce49d4206 | ||
|
|
2ceb7a35fc | ||
|
|
76df852f33 | ||
|
|
d3e0ecc394 | ||
|
|
6de8e59109 | ||
|
|
47ce5dad9d | ||
|
|
47534c1c3e | ||
|
|
45bd451fa0 | ||
|
|
9a7dff9c40 | ||
|
|
968318180d | ||
|
|
b04bdf54a5 | ||
|
|
7831caf1e7 | ||
|
|
17b1e94f1a | ||
|
|
e9393827e8 | ||
|
|
1ac0e67245 | ||
|
|
bd3c93713f | ||
|
|
9f865f5bad | ||
|
|
61a252094e | ||
|
|
add785f677 | ||
|
|
dde34bb7b8 | ||
|
|
71fffb8736 | ||
|
|
d75a0ccbd1 | ||
|
|
bddaea6df1 | ||
|
|
bed128164a | ||
|
|
53bcb0b108 | ||
|
|
c332a91ef8 | ||
|
|
fe98d49a29 | ||
|
|
730c56af2a | ||
|
|
a9358b59b7 | ||
|
|
1d9efeb92b | ||
|
|
225c7fc026 | ||
|
|
f1d3450d1f | ||
|
|
1e90fc38de | ||
|
|
f389b05f20 | ||
|
|
8bea57326a | ||
|
|
7257ee0397 | ||
|
|
ca87aca1a6 | ||
|
|
6008fd56a1 | ||
|
|
a640bb86ec | ||
|
|
a131eec5c1 | ||
|
|
0bd36a63d9 | ||
|
|
7709198c3b | ||
|
|
b4de302cb2 | ||
|
|
099b241702 | ||
|
|
4c006c707a | ||
|
|
429b2654f4 | ||
|
|
3fc170788d | ||
|
|
eeacd8fd74 | ||
|
|
9060904c4f | ||
|
|
8e073a6715 | ||
|
|
95c63f4982 | ||
|
|
7dc8060051 | ||
|
|
546e3ae9ea | ||
|
|
122ad95da6 | ||
|
|
d9eb1b0e06 | ||
|
|
5203158195 | ||
|
|
a806d74ce3 | ||
|
|
b6b0addd5e | ||
|
|
41f23f1d2a | ||
|
|
1cf1a332a5 | ||
|
|
0215d958da | ||
|
|
530fedd188 | ||
|
|
1943a1c96d | ||
|
|
47140357c4 | ||
|
|
90e2b7d1bc | ||
|
|
c1ca49a66c | ||
|
|
af235fc576 | ||
|
|
bb7ca954c7 | ||
|
|
e87231edc7 | ||
|
|
f9bbe4e439 | ||
|
|
df5eafd2a1 | ||
|
|
5e00a24145 | ||
|
|
dde627cef4 | ||
|
|
47db9b3773 | ||
|
|
200cbfd0b0 | ||
|
|
4a6ba534f1 | ||
|
|
419b5ed715 | ||
|
|
233d15452b | ||
|
|
e657f58cf9 | ||
|
|
9f2799ba4f | ||
|
|
d2528ef84f | ||
|
|
9162103f85 | ||
|
|
aab9d36e47 | ||
|
|
e1596f7abf | ||
|
|
cd7001612a | ||
|
|
10974b7bec | ||
|
|
98e0dc1676 | ||
|
|
f153229865 | ||
|
|
311c3638c6 | ||
|
|
84b0ca1b18 | ||
|
|
b6e0effc06 | ||
|
|
2b5dbfacb8 | ||
|
|
c54f78bc6b | ||
|
|
4a66acc6f5 | ||
|
|
585f82f730 | ||
|
|
02a18c1359 | ||
|
|
ca96a6ac76 | ||
|
|
353ceb948e | ||
|
|
42531cf6c4 | ||
|
|
b4c710576e | ||
|
|
54e7e1fdc3 | ||
|
|
17f3eb0579 | ||
|
|
ee635293c6 | ||
|
|
f5c509d58e | ||
|
|
4bc978416c | ||
|
|
66d292bdb4 | ||
|
|
b47cc6fffe | ||
|
|
0626d7182a | ||
|
|
4307f0c998 | ||
|
|
c653719270 | ||
|
|
d031e479ab | ||
|
|
66d881a5da | ||
|
|
3acce82c91 | ||
|
|
d625f20d18 | ||
|
|
a23d6a1241 | ||
|
|
9a92a4bacf | ||
|
|
734ef71cf7 | ||
|
|
18137b1583 | ||
|
|
d5f907dcf1 | ||
|
|
d2cb14cdbc | ||
|
|
944eb2cf3f | ||
|
|
ebd8ec227b | ||
|
|
afd286f6d6 | ||
|
|
3f8abb4da7 | ||
|
|
91c6d524f8 | ||
|
|
6baa60d77d | ||
|
|
ab27e11d31 | ||
|
|
90b6d5725b | ||
|
|
373a388844 | ||
|
|
a8678a7794 | ||
|
|
e71bc1f068 | ||
|
|
17d053f4bb | ||
|
|
c148b70da7 | ||
|
|
4e9d1363b3 | ||
|
|
4621f53e4a | ||
|
|
61c282c725 | ||
|
|
9fd430e46b | ||
|
|
ef1639b6bf | ||
|
|
7e76ef587a | ||
|
|
185b94b7fa | ||
|
|
487171d992 | ||
|
|
8f550de88a | ||
|
|
42f5ef9ff1 | ||
|
|
8b097244e7 | ||
|
|
f525631522 | ||
|
|
d7059e9024 | ||
|
|
0d70dc31c1 | ||
|
|
c018a1cc61 | ||
|
|
c70195d629 | ||
|
|
f0bd83b073 | ||
|
|
c4da296326 | ||
|
|
5b8471ffce | ||
|
|
a49d0fb343 |
5
.github/actionlint.yaml
vendored
5
.github/actionlint.yaml
vendored
@@ -7,7 +7,7 @@
|
||||
self-hosted-runner:
|
||||
# Labels of self-hosted runner that linter should ignore
|
||||
labels:
|
||||
- arm64-builder
|
||||
- ubuntu-22.04-arm
|
||||
- garm-ubuntu-2004
|
||||
- garm-ubuntu-2004-smaller
|
||||
- garm-ubuntu-2204
|
||||
@@ -21,5 +21,4 @@ self-hosted-runner:
|
||||
- sev-snp
|
||||
- s390x
|
||||
- s390x-large
|
||||
- tdx-no-attestation
|
||||
- tdx-attestation
|
||||
- tdx
|
||||
|
||||
46
.github/workflows/basic-ci-amd64.yaml
vendored
46
.github/workflows/basic-ci-amd64.yaml
vendored
@@ -56,6 +56,51 @@ jobs:
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
run-containerd-sandboxapi:
|
||||
strategy:
|
||||
# We can set this to true whenever we're 100% sure that
|
||||
# the all the tests are not flaky, otherwise we'll fail
|
||||
# all the tests due to a single flaky instance.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
containerd_version: ['latest']
|
||||
vmm: ['dragonball', 'cloud-hypervisor', 'qemu-runtime-rs']
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
#the latest containerd from 2.0 need to set the CGROUP_DRIVER for e2e testing
|
||||
CGROUP_DRIVER: ""
|
||||
SANDBOXER: "shim"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run containerd-sandboxapi tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
run-containerd-stability:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -67,6 +112,7 @@ jobs:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
SANDBOXER: "podsandbox"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
|
||||
@@ -34,6 +34,7 @@ jobs:
|
||||
asset:
|
||||
- agent
|
||||
- agent-ctl
|
||||
- busybox
|
||||
- cloud-hypervisor
|
||||
- cloud-hypervisor-glibc
|
||||
- coco-guest-components
|
||||
@@ -136,6 +137,15 @@ jobs:
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
- name: store-extratarballs-artifact ${{ matrix.asset }}
|
||||
if: ${{ startsWith(matrix.asset, 'kernel-nvidia-gpu') }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset }}-headers${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.xz
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset
|
||||
@@ -147,6 +157,8 @@ jobs:
|
||||
- rootfs-image-mariner
|
||||
- rootfs-initrd
|
||||
- rootfs-initrd-confidential
|
||||
- rootfs-nvidia-gpu-initrd
|
||||
- rootfs-nvidia-gpu-confidential-initrd
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
@@ -207,9 +219,24 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- agent
|
||||
- busybox
|
||||
- coco-guest-components
|
||||
- kernel-nvidia-gpu-headers
|
||||
- kernel-nvidia-gpu-confidential-headers
|
||||
- pause-image
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
with:
|
||||
name: kata-artifacts-amd64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts-for-release:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- agent
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
if: ${{ inputs.stage == 'release' }}
|
||||
@@ -218,7 +245,7 @@ jobs:
|
||||
|
||||
build-asset-shim-v2:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts, remove-rootfs-binary-artifacts-for-release]
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
|
||||
@@ -23,19 +23,28 @@ on:
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: arm64-builder
|
||||
runs-on: ubuntu-22.04-arm
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- agent
|
||||
- busybox
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- kernel-dragonball-experimental
|
||||
- kernel-nvidia-gpu
|
||||
- nydus
|
||||
- qemu
|
||||
- stratovirt
|
||||
- virtiofsd
|
||||
env:
|
||||
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
@@ -72,6 +81,34 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
|
||||
- name: Parse OCI image name and digest
|
||||
id: parse-oci-segments
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
run: |
|
||||
oci_image="$(<"build/${{ matrix.asset }}-oci-image")"
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: oras-project/setup-oras@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
version: "1.2.0"
|
||||
|
||||
# for pushing attestations to the registry
|
||||
- uses: docker/login-action@v3
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions/attest-build-provenance@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
subject-name: ${{ steps.parse-oci-segments.outputs.oci-name }}
|
||||
subject-digest: ${{ steps.parse-oci-segments.outputs.oci-digest }}
|
||||
push-to-registry: true
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
@@ -80,14 +117,24 @@ jobs:
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
- name: store-extratarballs-artifact ${{ matrix.asset }}
|
||||
if: ${{ startsWith(matrix.asset, 'kernel-nvidia-gpu') }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset }}-headers${{ inputs.tarball-suffix }}
|
||||
path: kata-build/kata-static-${{ matrix.asset }}-headers.tar.xz
|
||||
retention-days: 15
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-rootfs:
|
||||
runs-on: arm64-builder
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- rootfs-image
|
||||
- rootfs-initrd
|
||||
- rootfs-nvidia-gpu-initrd
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
@@ -142,7 +189,21 @@ jobs:
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts:
|
||||
runs-on: ubuntu-22.04
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
- busybox
|
||||
- kernel-nvidia-gpu-headers
|
||||
steps:
|
||||
- uses: geekyeggo/delete-artifact@v5
|
||||
with:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
|
||||
remove-rootfs-binary-artifacts-for-release:
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: build-asset-rootfs
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -155,8 +216,8 @@ jobs:
|
||||
name: kata-artifacts-arm64-${{ matrix.asset}}${{ inputs.tarball-suffix }}
|
||||
|
||||
build-asset-shim-v2:
|
||||
runs-on: arm64-builder
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts]
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: [build-asset, build-asset-rootfs, remove-rootfs-binary-artifacts, remove-rootfs-binary-artifacts-for-release]
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
@@ -210,13 +271,9 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: arm64-builder
|
||||
runs-on: ubuntu-22.04-arm
|
||||
needs: [build-asset, build-asset-rootfs, build-asset-shim-v2]
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
2
.github/workflows/darwin-tests.yaml
vendored
2
.github/workflows/darwin-tests.yaml
vendored
@@ -18,7 +18,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.2
|
||||
go-version: 1.22.11
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Build utils
|
||||
|
||||
2
.github/workflows/docs-url-alive-check.yaml
vendored
2
.github/workflows/docs-url-alive-check.yaml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.2
|
||||
go-version: 1.22.11
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}/kata-containers
|
||||
- name: Set env
|
||||
|
||||
2
.github/workflows/gatekeeper.yaml
vendored
2
.github/workflows/gatekeeper.yaml
vendored
@@ -29,7 +29,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COMMIT_HASH: ${{ github.event.pull_request.head.sha }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
GH_PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
#!/usr/bin/env bash -x
|
||||
mapfile -t lines < <(python3 tools/testing/gatekeeper/skips.py -t)
|
||||
|
||||
@@ -24,12 +24,8 @@ on:
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: arm64-builder
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R "$USER":"$USER" "$GITHUB_WORKSPACE"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
2
.github/workflows/release-arm64.yaml
vendored
2
.github/workflows/release-arm64.yaml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
|
||||
kata-deploy:
|
||||
needs: build-kata-static-tarball-arm64
|
||||
runs-on: arm64-builder
|
||||
runs-on: ubuntu-22.04-arm
|
||||
steps:
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
|
||||
9
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
9
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
@@ -103,8 +103,13 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
retry_on: error
|
||||
retry_wait_seconds: 10
|
||||
command: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
@@ -49,7 +49,7 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
KUBERNETES_EXTRA_PARAMS: ${{ matrix.container_runtime != 'crio' && '' || '--cri-socket remote:unix:///var/run/crio/crio.sock --kubelet-extra-args --cgroup-driver="systemd"' }}
|
||||
@@ -105,4 +105,5 @@ jobs:
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
|
||||
1
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
1
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
@@ -64,7 +64,6 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HOST_OS: "ubuntu"
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
|
||||
@@ -87,8 +87,13 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
retry_on: error
|
||||
retry_wait_seconds: 10
|
||||
command: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
97
.github/workflows/run-kata-coco-tests.yaml
vendored
97
.github/workflows/run-kata-coco-tests.yaml
vendored
@@ -36,25 +36,17 @@ jobs:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
k8s-test-host-type:
|
||||
- baremetal-attestation
|
||||
- baremetal-no-attestation
|
||||
include:
|
||||
- k8s-test-host-type: baremetal-attestation
|
||||
machine: tdx-attestation
|
||||
- k8s-test-host-type: baremetal-no-attestation
|
||||
machine: tdx-no-attestation
|
||||
runs-on: ${{ matrix.machine }}
|
||||
runs-on: tdx
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "true"
|
||||
KBS: "true"
|
||||
K8S_TEST_HOST_TYPE: ${{ matrix.k8s-test-host-type }}
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
KBS_INGRESS: "nodeport"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
@@ -83,17 +75,14 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
if: ${{ matrix.machine != 'tdx-no-attestation' }}
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
if: ${{ matrix.machine != 'tdx-no-attestation' }}
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
|
||||
- name: Install `kbs-client`
|
||||
if: ${{ matrix.machine != 'tdx-no-attestation' }}
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
@@ -114,79 +103,14 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: ${{ always() && matrix.machine != 'tdx-no-attestation' }}
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
|
||||
- name: Delete CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
|
||||
|
||||
run-k8s-tests-on-sev:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu-sev
|
||||
snapshotter:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: sev
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBECONFIG: /home/kata/.kube/config
|
||||
KUBERNETES: "vanilla"
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: "baremetal"
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Deploy Snapshotter
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev
|
||||
|
||||
- name: Deploy CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-csi-driver
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 50
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete CSI driver
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-csi-driver
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev
|
||||
|
||||
- name: Delete Snapshotter
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snapshotter
|
||||
|
||||
# AMD has deprecated SEV support on Kata and henceforth SNP will be the only feature supported for Kata Containers.
|
||||
run-k8s-tests-sev-snp:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -202,7 +126,7 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBECONFIG: /home/kata/.kube/config
|
||||
KUBERNETES: "vanilla"
|
||||
@@ -333,8 +257,13 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
retry_on: error
|
||||
retry_wait_seconds: 10
|
||||
command: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
@@ -71,8 +71,13 @@ jobs:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
timeout-minutes: 10
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh create-cluster
|
||||
uses: nick-fields/retry@v3
|
||||
with:
|
||||
timeout_minutes: 15
|
||||
max_attempts: 20
|
||||
retry_on: error
|
||||
retry_wait_seconds: 10
|
||||
command: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh install-bats
|
||||
@@ -85,7 +90,7 @@ jobs:
|
||||
|
||||
- name: Run tests
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh run-tests
|
||||
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/functional/kata-deploy/gha-run.sh delete-cluster
|
||||
|
||||
@@ -43,7 +43,7 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
USING_NFD: "false"
|
||||
|
||||
29
.github/workflows/shellcheck.yaml
vendored
Normal file
29
.github/workflows/shellcheck.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
# https://github.com/marketplace/actions/shellcheck
|
||||
name: Check shell scripts
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
shellcheck:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run ShellCheck
|
||||
uses: ludeeus/action-shellcheck@master
|
||||
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
instance:
|
||||
- "arm-no-k8s"
|
||||
- "ubuntu-22.04-arm"
|
||||
- "s390x"
|
||||
- "ppc64le"
|
||||
uses: ./.github/workflows/build-checks.yaml
|
||||
|
||||
@@ -79,8 +79,8 @@ list_issues_for_pr()
|
||||
# "<git-commit> <git-commit-msg>"
|
||||
#
|
||||
local issues=$(echo "$commits" |\
|
||||
egrep -v "^( | )" |\
|
||||
egrep -i "fixes:* *(#*[0-9][0-9]*)" |\
|
||||
grep -v -E "^( | )" |\
|
||||
grep -i -E "fixes:* *(#*[0-9][0-9]*)" |\
|
||||
tr ' ' '\n' |\
|
||||
grep "[0-9][0-9]*" |\
|
||||
sed 's/[.,\#]//g' |\
|
||||
|
||||
@@ -29,7 +29,7 @@ function verify_yq_exists() {
|
||||
# Install via binary download, as we may not have golang installed at this point
|
||||
function install_yq() {
|
||||
local yq_pkg="github.com/mikefarah/yq"
|
||||
local yq_version=v4.40.7
|
||||
local yq_version=v4.44.5
|
||||
local precmd=""
|
||||
local yq_path=""
|
||||
INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
|
||||
@@ -82,6 +82,9 @@ function install_yq() {
|
||||
goarch=arm64
|
||||
fi
|
||||
;;
|
||||
"riscv64")
|
||||
goarch=riscv64
|
||||
;;
|
||||
"ppc64le")
|
||||
goarch=ppc64le
|
||||
;;
|
||||
|
||||
@@ -198,7 +198,7 @@ it stores. When messages are suppressed, it is noted in the logs. This can be ch
|
||||
for by looking for those notifications, such as:
|
||||
|
||||
```bash
|
||||
$ sudo journalctl --since today | fgrep Suppressed
|
||||
$ sudo journalctl --since today | grep -F Suppressed
|
||||
Jun 29 14:51:17 mymachine systemd-journald[346]: Suppressed 4150 messages from /system.slice/docker.service
|
||||
```
|
||||
|
||||
@@ -268,7 +268,7 @@ to install `libseccomp` for the agent.
|
||||
|
||||
```bash
|
||||
$ mkdir -p ${seccomp_install_path} ${gperf_install_path}
|
||||
$ pushd kata-containers/ci
|
||||
$ pushd kata-containers/ci
|
||||
$ script -fec 'sudo -E ./install_libseccomp.sh ${seccomp_install_path} ${gperf_install_path}"'
|
||||
$ export LIBSECCOMP_LIB_PATH="${seccomp_install_path}/lib"
|
||||
$ popd
|
||||
|
||||
@@ -10,7 +10,19 @@ To run Kata Containers in SNP-VMs, the following software stack is used.
|
||||
|
||||

|
||||
|
||||
The host BIOS and kernel must be capable of supporting AMD SEV-SNP and configured accordingly. For Kata Containers, the host kernel with branch [`sev-snp-iommu-avic_5.19-rc6_v3`](https://github.com/AMDESE/linux/tree/sev-snp-iommu-avic_5.19-rc6_v3) and commit [`3a88547`](https://github.com/AMDESE/linux/commit/3a885471cf89156ea555341f3b737ad2a8d9d3d0) is known to work in conjunction with SEV Firmware version 1.51.3 (0xh\_1.33.03) available on AMD's [SEV developer website](https://developer.amd.com/sev/). See [AMD's guide](https://github.com/AMDESE/AMDSEV/tree/sev-snp-devel) to configure the host accordingly. Verify that you are able to run SEV-SNP encrypted VMs first. The guest components required for Kata Containers are built as described below.
|
||||
The host BIOS and kernel must be capable of supporting AMD SEV-SNP and the host must be configured accordingly.
|
||||
|
||||
The latest SEV Firmware version is available on AMD's [SEV Developer Webpage](https://www.amd.com/en/developer/sev.html). It can also be updated via a platform OEM BIOS update.
|
||||
|
||||
The host kernel must be equal to or later than upstream version [6.11](https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.11.tar.xz).
|
||||
|
||||
[`sev-utils`](https://github.com/amd/sev-utils/blob/coco-202501150000/docs/snp.md) is an easy way to install the required host kernel with the `setup-host` command. However, it will also build compatible guest kernel, OVMF, and QEMU components which are not necessary as these components are packaged with kata. The `sev-utils` script utility can be used with these additional components to test the memory encrypted launch and attestation of a base QEMU SNP guest.
|
||||
|
||||
For a simplified way to build just the upstream compatible host kernel, use the Confidential Containers fork of [AMDESE AMDSEV](https://github.com/confidential-containers/amdese-amdsev/tree/amd-snp-202501150000). Individual components can be built by running the following command:
|
||||
|
||||
```
|
||||
./build.sh kernel host --install
|
||||
```
|
||||
|
||||
**Tip**: It is easiest to first have Kata Containers running on your system and then modify it to run containers in SNP-VMs. Follow the [Developer guide](../Developer-Guide.md#warning) and then follow the below steps. Nonetheless, you can just follow this guide from the start.
|
||||
|
||||
|
||||
@@ -98,7 +98,7 @@ a number larger than `0` if you have either of the `vmx` or `svm` nested virtual
|
||||
available:
|
||||
|
||||
```sh
|
||||
$ minikube ssh "egrep -c 'vmx|svm' /proc/cpuinfo"
|
||||
$ minikube ssh "grep -c -E 'vmx|svm' /proc/cpuinfo"
|
||||
```
|
||||
|
||||
## Installing Kata Containers
|
||||
@@ -122,8 +122,8 @@ and will be executing a `sleep infinity` once it has successfully completed its
|
||||
You can accomplish this by running the following:
|
||||
|
||||
```sh
|
||||
$ podname=$(kubectl -n kube-system get pods -o=name | fgrep kata-deploy | sed 's?pod/??')
|
||||
$ kubectl -n kube-system exec ${podname} -- ps -ef | fgrep infinity
|
||||
$ podname=$(kubectl -n kube-system get pods -o=name | grep -F kata-deploy | sed 's?pod/??')
|
||||
$ kubectl -n kube-system exec ${podname} -- ps -ef | grep -F infinity
|
||||
```
|
||||
|
||||
> *NOTE:* This check only works for single node clusters, which is the default for Minikube.
|
||||
@@ -197,7 +197,7 @@ $ minikube ssh -- uname -a
|
||||
And then compare that against the kernel that is running inside the container:
|
||||
|
||||
```sh
|
||||
$ podname=$(kubectl get pods -o=name | fgrep php-apache-kata-qemu | sed 's?pod/??')
|
||||
$ podname=$(kubectl get pods -o=name | grep -F php-apache-kata-qemu | sed 's?pod/??')
|
||||
$ kubectl exec ${podname} -- uname -a
|
||||
```
|
||||
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
|
||||
# Introduction
|
||||
|
||||
Intel® QuickAssist Technology (QAT) provides hardware acceleration
|
||||
for security (cryptography) and compression. These instructions cover the
|
||||
steps for the latest [Ubuntu LTS release](https://ubuntu.com/download/desktop)
|
||||
which already include the QAT host driver. These instructions can be adapted to
|
||||
any Linux distribution. These instructions guide the user on how to download
|
||||
the kernel sources, compile kernel driver modules against those sources, and
|
||||
load them onto the host as well as preparing a specially built Kata Containers
|
||||
Intel® QuickAssist Technology (QAT) provides hardware acceleration
|
||||
for security (cryptography) and compression. These instructions cover the
|
||||
steps for the latest [Ubuntu LTS release](https://ubuntu.com/download/desktop)
|
||||
which already include the QAT host driver. These instructions can be adapted to
|
||||
any Linux distribution. These instructions guide the user on how to download
|
||||
the kernel sources, compile kernel driver modules against those sources, and
|
||||
load them onto the host as well as preparing a specially built Kata Containers
|
||||
kernel and custom Kata Containers rootfs.
|
||||
|
||||
* Download kernel sources
|
||||
@@ -16,7 +16,7 @@ kernel and custom Kata Containers rootfs.
|
||||
* Compile kernel driver modules against those sources
|
||||
* Download rootfs
|
||||
* Add driver modules to rootfs
|
||||
* Build rootfs image
|
||||
* Build rootfs image
|
||||
|
||||
## Helpful Links before starting
|
||||
|
||||
@@ -35,8 +35,8 @@ reboot, and some steps to complete when the host kernel changes.
|
||||
|
||||
## Script variables
|
||||
|
||||
The following list of variables must be set before running through the
|
||||
scripts. These variables refer to locations to store modules and configuration
|
||||
The following list of variables must be set before running through the
|
||||
scripts. These variables refer to locations to store modules and configuration
|
||||
files on the host and links to the drivers to use. Modify these as
|
||||
needed to point to updated drivers or different install locations.
|
||||
|
||||
@@ -58,9 +58,9 @@ $ export KATA_ROOTFS_LOCATION=~/kata
|
||||
|
||||
## Prepare the Ubuntu Host
|
||||
|
||||
The host could be a bare metal instance or a virtual machine. If using a
|
||||
virtual machine, make sure that KVM nesting is enabled. The following
|
||||
instructions reference an Intel® C62X chipset. Some of the instructions must be
|
||||
The host could be a bare metal instance or a virtual machine. If using a
|
||||
virtual machine, make sure that KVM nesting is enabled. The following
|
||||
instructions reference an Intel® C62X chipset. Some of the instructions must be
|
||||
modified if using a different Intel® QAT device. The Intel® QAT chipset can be
|
||||
identified by executing the following.
|
||||
|
||||
@@ -74,7 +74,7 @@ $ for i in 0434 0435 37c8 1f18 1f19; do lspci -d 8086:$i; done
|
||||
|
||||
These packages are necessary to compile the Kata kernel, Intel® QAT driver, and to
|
||||
prepare the rootfs for Kata. [Docker](https://docs.docker.com/engine/install/ubuntu/)
|
||||
also needs to be installed to be able to build the rootfs. To test that
|
||||
also needs to be installed to be able to build the rootfs. To test that
|
||||
everything works a Kubernetes pod is started requesting Intel® QAT resources. For the
|
||||
pass through of the virtual functions the kernel boot parameter needs to have
|
||||
`intel_iommu=on`.
|
||||
@@ -89,7 +89,7 @@ $ sudo reboot
|
||||
|
||||
### Download Intel® QAT drivers
|
||||
|
||||
This will download the [Intel® QAT drivers](https://www.intel.com/content/www/us/en/developer/topic-technology/open/quick-assist-technology/overview.html).
|
||||
This will download the [Intel® QAT drivers](https://www.intel.com/content/www/us/en/developer/topic-technology/open/quick-assist-technology/overview.html).
|
||||
Make sure to check the website for the latest version.
|
||||
|
||||
```bash
|
||||
@@ -100,13 +100,13 @@ $ curl -L $QAT_DRIVER_URL | tar zx
|
||||
|
||||
### Copy Intel® QAT configuration files and enable virtual functions
|
||||
|
||||
Modify the instructions below as necessary if using a different Intel® QAT hardware
|
||||
platform. You can learn more about customizing configuration files at the
|
||||
Modify the instructions below as necessary if using a different Intel® QAT hardware
|
||||
platform. You can learn more about customizing configuration files at the
|
||||
[Intel® QAT Engine repository](https://github.com/intel/QAT_Engine/#copy-the-correct-intel-quickassist-technology-driver-config-files)
|
||||
This section starts from a base config file and changes the `SSL` section to
|
||||
This section starts from a base config file and changes the `SSL` section to
|
||||
`SHIM` to support the OpenSSL engine. There are more tweaks that you can make
|
||||
depending on the use case and how many Intel® QAT engines should be run. You
|
||||
can find more information about how to customize in the
|
||||
can find more information about how to customize in the
|
||||
[Intel® QuickAssist Technology Software for Linux* - Programmer's Guide.](https://www.intel.com/content/www/us/en/content-details/709196/intel-quickassist-technology-api-programmer-s-guide.html)
|
||||
|
||||
> **Note: This section assumes that a Intel® QAT `c6xx` platform is used.**
|
||||
@@ -119,16 +119,16 @@ $ sed -i 's/\[SSL\]/\[SHIM\]/g' $QAT_CONF_LOCATION/c6xxvf_dev0.conf
|
||||
|
||||
### Expose and Bind Intel® QAT virtual functions to VFIO-PCI (Every reboot)
|
||||
|
||||
To enable virtual functions, the host OS should have IOMMU groups enabled. In
|
||||
the UEFI Firmware Intel® Virtualization Technology for Directed I/O
|
||||
(Intel® VT-d) must be enabled. Also, the kernel boot parameter should be
|
||||
To enable virtual functions, the host OS should have IOMMU groups enabled. In
|
||||
the UEFI Firmware Intel® Virtualization Technology for Directed I/O
|
||||
(Intel® VT-d) must be enabled. Also, the kernel boot parameter should be
|
||||
`intel_iommu=on` or `intel_iommu=igfx_off`. This should have been set from
|
||||
the instructions above. Check the output of `/proc/cmdline` to confirm. The
|
||||
the instructions above. Check the output of `/proc/cmdline` to confirm. The
|
||||
following commands assume you installed an Intel® QAT card, IOMMU is on, and
|
||||
VT-d is enabled. The vendor and device ID add to the `VFIO-PCI` driver so that
|
||||
each exposed virtual function can be bound to the `VFIO-PCI` driver. Once
|
||||
complete, each virtual function passes into a Kata Containers container using
|
||||
the PCIe device passthrough feature. For Kubernetes, the
|
||||
the PCIe device passthrough feature. For Kubernetes, the
|
||||
[Intel device plugin](https://github.com/intel/intel-device-plugins-for-kubernetes)
|
||||
for Kubernetes handles the binding of the driver, but the VF’s still must be
|
||||
enabled.
|
||||
@@ -155,10 +155,10 @@ $ for f in /sys/bus/pci/devices/0000:$QAT_PCI_BUS_PF_1/virtfn*
|
||||
|
||||
### Check Intel® QAT virtual functions are enabled
|
||||
|
||||
If the following command returns empty, then the virtual functions are not
|
||||
properly enabled. This command checks the enumerated device IDs for just the
|
||||
virtual functions. Using the Intel® QAT as an example, the physical device ID
|
||||
is `37c8` and virtual function device ID is `37c9`. The following command checks
|
||||
If the following command returns empty, then the virtual functions are not
|
||||
properly enabled. This command checks the enumerated device IDs for just the
|
||||
virtual functions. Using the Intel® QAT as an example, the physical device ID
|
||||
is `37c8` and virtual function device ID is `37c9`. The following command checks
|
||||
if VF's are enabled for any of the currently known Intel® QAT device ID's. The
|
||||
following `ls` command should show the 16 VF's bound to `VFIO-PCI`.
|
||||
|
||||
@@ -182,7 +182,7 @@ follows the instructions from the
|
||||
[packaging kernel repository](../../tools/packaging/kernel)
|
||||
and uses the latest Kata kernel
|
||||
[config](../../tools/packaging/kernel/configs).
|
||||
There are some patches that must be installed as well, which the
|
||||
There are some patches that must be installed as well, which the
|
||||
`build-kernel.sh` script should automatically apply. If you are using a
|
||||
different kernel version, then you might need to manually apply them. Since
|
||||
the Kata Containers kernel has a minimal set of kernel flags set, you must
|
||||
@@ -228,17 +228,17 @@ $ cp ${GOPATH}/${LINUX_VER}/vmlinux ${KATA_KERNEL_LOCATION}/${KATA_KERNEL_NAME}
|
||||
|
||||
### Prepare Kata root filesystem
|
||||
|
||||
These instructions build upon the OS builder instructions located in the
|
||||
These instructions build upon the OS builder instructions located in the
|
||||
[Developer Guide](../Developer-Guide.md). At this point it is recommended that
|
||||
[Docker](https://docs.docker.com/engine/install/ubuntu/) is installed first, and
|
||||
then [Kata-deploy](../../tools/packaging/kata-deploy)
|
||||
is use to install Kata. This will make sure that the correct `agent` version
|
||||
is use to install Kata. This will make sure that the correct `agent` version
|
||||
is installed into the rootfs in the steps below.
|
||||
|
||||
The following instructions use Ubuntu as the root filesystem with systemd as
|
||||
the init and will add in the `kmod` binary, which is not a standard binary in
|
||||
a Kata rootfs image. The `kmod` binary is necessary to load the Intel® QAT
|
||||
kernel modules when the virtual machine rootfs boots.
|
||||
The following instructions use Ubuntu as the root filesystem with systemd as
|
||||
the init and will add in the `kmod` binary, which is not a standard binary in
|
||||
a Kata rootfs image. The `kmod` binary is necessary to load the Intel® QAT
|
||||
kernel modules when the virtual machine rootfs boots.
|
||||
|
||||
```bash
|
||||
$ export OSBUILDER=$GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder
|
||||
@@ -247,7 +247,7 @@ $ export EXTRA_PKGS='kmod'
|
||||
```
|
||||
|
||||
Make sure that the `kata-agent` version matches the installed `kata-runtime`
|
||||
version. Also make sure the `kata-runtime` install location is in your `PATH`
|
||||
version. Also make sure the `kata-runtime` install location is in your `PATH`
|
||||
variable. The following `AGENT_VERSION` can be set manually to match
|
||||
the `kata-runtime` version if the following commands don't work.
|
||||
|
||||
@@ -262,10 +262,10 @@ $ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SECCOMP=no ./rootfs.sh ubu
|
||||
|
||||
### Compile Intel® QAT drivers for Kata Containers kernel and add to Kata Containers rootfs
|
||||
|
||||
After the Kata Containers kernel builds with the proper configuration flags,
|
||||
After the Kata Containers kernel builds with the proper configuration flags,
|
||||
you must build the Intel® QAT drivers against that Kata Containers kernel
|
||||
version in a similar way they were previously built for the host OS. You must
|
||||
set the `KERNEL_SOURCE_ROOT` variable to the Kata Containers kernel source
|
||||
version in a similar way they were previously built for the host OS. You must
|
||||
set the `KERNEL_SOURCE_ROOT` variable to the Kata Containers kernel source
|
||||
directory and build the Intel® QAT drivers again. The `make` command will
|
||||
install the Intel® QAT modules into the Kata rootfs.
|
||||
|
||||
@@ -284,16 +284,16 @@ $ sudo -E make INSTALL_MOD_PATH=$ROOTFS_DIR qat-driver-install -j $(nproc)
|
||||
```
|
||||
|
||||
The `usdm_drv` module also needs to be copied into the rootfs modules path and
|
||||
`depmod` should be run.
|
||||
`depmod` should be run.
|
||||
|
||||
```bash
|
||||
$ sudo cp $QAT_SRC/build/usdm_drv.ko $ROOTFS_DIR/lib/modules/${KERNEL_ROOTFS_DIR}/updates/drivers
|
||||
$ sudo cp $QAT_SRC/build/usdm_drv.ko $ROOTFS_DIR/lib/modules/${KERNEL_ROOTFS_DIR}/updates/drivers
|
||||
$ sudo depmod -a -b ${ROOTFS_DIR} ${KERNEL_ROOTFS_DIR}
|
||||
$ cd ${OSBUILDER}/image-builder
|
||||
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
|
||||
```
|
||||
|
||||
> **Note: Ignore any errors on modules.builtin and modules.order when running
|
||||
> **Note: Ignore any errors on modules.builtin and modules.order when running
|
||||
> `depmod`.**
|
||||
|
||||
### Copy Kata rootfs
|
||||
@@ -305,17 +305,17 @@ $ cp ${OSBUILDER}/image-builder/kata-containers.img $KATA_ROOTFS_LOCATION
|
||||
|
||||
## Verify Intel® QAT works in a container
|
||||
|
||||
The following instructions use an OpenSSL Dockerfile that builds the
|
||||
Intel® QAT engine to allow OpenSSL to offload crypto functions. It is a
|
||||
The following instructions use an OpenSSL Dockerfile that builds the
|
||||
Intel® QAT engine to allow OpenSSL to offload crypto functions. It is a
|
||||
convenient way to test that VFIO device passthrough for the Intel® QAT VF’s are
|
||||
working properly with the Kata Containers VM.
|
||||
|
||||
### Build OpenSSL Intel® QAT engine container
|
||||
|
||||
Use the OpenSSL Intel® QAT [Dockerfile](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/main/demo/openssl-qat-engine)
|
||||
to build a container image with an optimized OpenSSL engine for
|
||||
Use the OpenSSL Intel® QAT [Dockerfile](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/main/demo/openssl-qat-engine)
|
||||
to build a container image with an optimized OpenSSL engine for
|
||||
Intel® QAT. Using `docker build` with the Kata Containers runtime can sometimes
|
||||
have issues. Therefore, make sure that `runc` is the default Docker container
|
||||
have issues. Therefore, make sure that `runc` is the default Docker container
|
||||
runtime.
|
||||
|
||||
```bash
|
||||
@@ -324,12 +324,12 @@ $ curl -O $QAT_DOCKERFILE
|
||||
$ sudo docker build -t openssl-qat-engine .
|
||||
```
|
||||
|
||||
> **Note: The Intel® QAT driver version in this container might not match the
|
||||
> **Note: The Intel® QAT driver version in this container might not match the
|
||||
> Intel® QAT driver compiled and loaded on the host when compiling.**
|
||||
|
||||
### Test Intel® QAT with the ctr tool
|
||||
|
||||
The `ctr` tool can be used to interact with the containerd daemon. It may be
|
||||
The `ctr` tool can be used to interact with the containerd daemon. It may be
|
||||
more convenient to use this tool to verify the kernel and image instead of
|
||||
setting up a Kubernetes cluster. The correct Kata runtimes need to be added
|
||||
to the containerd `config.toml`. Below is a sample snippet that can be added
|
||||
@@ -350,7 +350,7 @@ to allow QEMU and Cloud Hypervisor (CLH) to work with `ctr`.
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-clh.toml"
|
||||
```
|
||||
|
||||
In addition, containerd expects the binary to be in `/usr/local/bin` so add
|
||||
In addition, containerd expects the binary to be in `/usr/local/bin` so add
|
||||
this small script so that it redirects to be able to use either QEMU or
|
||||
Cloud Hypervisor with Kata.
|
||||
|
||||
@@ -363,30 +363,30 @@ $ echo 'KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-cl
|
||||
$ sudo chmod +x /usr/local/bin/containerd-shim-kata-clh-v2
|
||||
```
|
||||
|
||||
After the OpenSSL image is built and imported into containerd, a Intel® QAT
|
||||
virtual function exposed in the step above can be added to the `ctr` command.
|
||||
Make sure to change the `/dev/vfio` number to one that actually exists on the
|
||||
host system. When using the `ctr` tool, the `configuration.toml` for Kata needs
|
||||
to point to the custom Kata kernel and rootfs built above and the Intel® QAT
|
||||
modules in the Kata rootfs need to load at boot. The following steps assume that
|
||||
`kata-deploy` was used to install Kata and QEMU is being tested. If using a
|
||||
different hypervisor, different install method for Kata, or a different
|
||||
Intel® QAT chipset then the command will need to be modified.
|
||||
After the OpenSSL image is built and imported into containerd, a Intel® QAT
|
||||
virtual function exposed in the step above can be added to the `ctr` command.
|
||||
Make sure to change the `/dev/vfio` number to one that actually exists on the
|
||||
host system. When using the `ctr` tool, the `configuration.toml` for Kata needs
|
||||
to point to the custom Kata kernel and rootfs built above and the Intel® QAT
|
||||
modules in the Kata rootfs need to load at boot. The following steps assume that
|
||||
`kata-deploy` was used to install Kata and QEMU is being tested. If using a
|
||||
different hypervisor, different install method for Kata, or a different
|
||||
Intel® QAT chipset then the command will need to be modified.
|
||||
|
||||
> **Note: The following was tested with
|
||||
> **Note: The following was tested with
|
||||
[containerd v1.4.6](https://github.com/containerd/containerd/releases/tag/v1.4.6).**
|
||||
|
||||
```bash
|
||||
$ config_file="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
$ sudo sed -i "/kernel =/c kernel = "\"${KATA_ROOTFS_LOCATION}/${KATA_KERNEL_NAME}\""" $config_file
|
||||
$ sudo sed -i "/image =/c image = "\"${KATA_KERNEL_LOCATION}/kata-containers.img\""" $config_file
|
||||
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 modules-load=usdm_drv,qat_c62xvf"/g' $config_file
|
||||
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 modules-load=usdm_drv,qat_c62xvf"/g' $config_file
|
||||
$ sudo docker save -o openssl-qat-engine.tar openssl-qat-engine:latest
|
||||
$ sudo ctr images import openssl-qat-engine.tar
|
||||
$ sudo ctr run --runtime io.containerd.run.kata-qemu.v2 --privileged -t --rm --device=/dev/vfio/180 --mount type=bind,src=/dev,dst=/dev,options=rbind:rw --mount type=bind,src=${QAT_CONF_LOCATION}/c6xxvf_dev0.conf,dst=/etc/c6xxvf_dev0.conf,options=rbind:rw docker.io/library/openssl-qat-engine:latest bash
|
||||
```
|
||||
|
||||
Below are some commands to run in the container image to verify Intel® QAT is
|
||||
Below are some commands to run in the container image to verify Intel® QAT is
|
||||
working
|
||||
|
||||
```sh
|
||||
@@ -412,24 +412,24 @@ root@67561dc2757a/ # openssl engine -c -t qat-hw
|
||||
|
||||
### Test Intel® QAT in Kubernetes
|
||||
|
||||
Start a Kubernetes cluster with containerd as the CRI. The host should
|
||||
already be setup with 16 virtual functions of the Intel® QAT card bound to
|
||||
`VFIO-PCI`. Verify this by looking in `/dev/vfio` for a listing of devices.
|
||||
You might need to disable Docker before initializing Kubernetes. Be aware
|
||||
Start a Kubernetes cluster with containerd as the CRI. The host should
|
||||
already be setup with 16 virtual functions of the Intel® QAT card bound to
|
||||
`VFIO-PCI`. Verify this by looking in `/dev/vfio` for a listing of devices.
|
||||
You might need to disable Docker before initializing Kubernetes. Be aware
|
||||
that the OpenSSL container image built above will need to be exported from
|
||||
Docker and imported into containerd.
|
||||
|
||||
If Kata is installed through [`kata-deploy`](../../tools/packaging/kata-deploy/README.md)
|
||||
there will be multiple `configuration.toml` files associated with different
|
||||
hypervisors. Rather than add in the custom Kata kernel, Kata rootfs, and
|
||||
there will be multiple `configuration.toml` files associated with different
|
||||
hypervisors. Rather than add in the custom Kata kernel, Kata rootfs, and
|
||||
kernel modules to each `configuration.toml` as the default, instead use
|
||||
[annotations](../how-to/how-to-load-kernel-modules-with-kata.md)
|
||||
in the Kubernetes YAML file to tell Kata which kernel and rootfs to use. The
|
||||
in the Kubernetes YAML file to tell Kata which kernel and rootfs to use. The
|
||||
easy way to do this is to use `kata-deploy` which will install the Kata binaries
|
||||
to `/opt` and properly configure the `/etc/containerd/config.toml` with annotation
|
||||
to `/opt` and properly configure the `/etc/containerd/config.toml` with annotation
|
||||
support. However, the `configuration.toml` needs to enable support for
|
||||
annotations as well. The following configures both QEMU and Cloud Hypervisor
|
||||
`configuration.toml` files that are currently available with Kata Container
|
||||
`configuration.toml` files that are currently available with Kata Container
|
||||
versions 2.0 and higher.
|
||||
|
||||
```bash
|
||||
@@ -446,15 +446,15 @@ $ sudo ctr -n=k8s.io images import openssl-qat-engine.tar
|
||||
|
||||
The [Intel® QAT Plugin](https://github.com/intel/intel-device-plugins-for-kubernetes/blob/main/cmd/qat_plugin/README.md)
|
||||
needs to be started so that the virtual functions can be discovered and
|
||||
used by Kubernetes.
|
||||
used by Kubernetes.
|
||||
|
||||
The following YAML file can be used to start a Kata container with Intel® QAT
|
||||
support. If Kata is installed with `kata-deploy`, then the containerd
|
||||
`configuration.toml` should have all of the Kata runtime classes already
|
||||
populated and annotations supported. To use an Intel® QAT virtual function, the
|
||||
Intel® QAT plugin needs to be started after the VF's are bound to `VFIO-PCI` as
|
||||
described [above](#expose-and-bind-intel-qat-virtual-functions-to-vfio-pci-every-reboot).
|
||||
Edit the following to point to the correct Kata kernel and rootfs location
|
||||
support. If Kata is installed with `kata-deploy`, then the containerd
|
||||
`configuration.toml` should have all of the Kata runtime classes already
|
||||
populated and annotations supported. To use an Intel® QAT virtual function, the
|
||||
Intel® QAT plugin needs to be started after the VF's are bound to `VFIO-PCI` as
|
||||
described [above](#expose-and-bind-intel-qat-virtual-functions-to-vfio-pci-every-reboot).
|
||||
Edit the following to point to the correct Kata kernel and rootfs location
|
||||
built with Intel® QAT support.
|
||||
|
||||
```bash
|
||||
@@ -497,7 +497,7 @@ spec:
|
||||
EOF
|
||||
```
|
||||
|
||||
Use `kubectl` to start the pod. Verify that Intel® QAT card acceleration is
|
||||
Use `kubectl` to start the pod. Verify that Intel® QAT card acceleration is
|
||||
working with the Intel® QAT engine.
|
||||
```bash
|
||||
$ kubectl apply -f kata-openssl-qat.yaml
|
||||
@@ -531,14 +531,14 @@ $ ls /dev/vfio
|
||||
* Check that the modules load when inside the Kata Container.
|
||||
|
||||
```sh
|
||||
bash-5.0# egrep "qat|usdm_drv" /proc/modules
|
||||
bash-5.0# grep -E "qat|usdm_drv" /proc/modules
|
||||
qat_c62xvf 16384 - - Live 0x0000000000000000 (O)
|
||||
usdm_drv 86016 - - Live 0x0000000000000000 (O)
|
||||
intel_qat 184320 - - Live 0x0000000000000000 (O)
|
||||
```
|
||||
|
||||
* Verify that at least the first `c6xxvf_dev0.conf` file mounts inside the
|
||||
container image in `/etc`. You will need one configuration file for each VF
|
||||
* Verify that at least the first `c6xxvf_dev0.conf` file mounts inside the
|
||||
container image in `/etc`. You will need one configuration file for each VF
|
||||
passed into the container.
|
||||
|
||||
```sh
|
||||
@@ -548,10 +548,10 @@ c6xxvf_dev1.conf c6xxvf_dev12.conf c6xxvf_dev15.conf c6xxvf_dev4.conf c6xxv
|
||||
c6xxvf_dev10.conf c6xxvf_dev13.conf c6xxvf_dev2.conf c6xxvf_dev5.conf c6xxvf_dev8.conf hosts
|
||||
```
|
||||
|
||||
* Check `dmesg` inside the container to see if there are any issues with the
|
||||
* Check `dmesg` inside the container to see if there are any issues with the
|
||||
Intel® QAT driver.
|
||||
|
||||
* If there are issues building the OpenSSL Intel® QAT container image, then
|
||||
* If there are issues building the OpenSSL Intel® QAT container image, then
|
||||
check to make sure that runc is the default runtime for building container.
|
||||
|
||||
```sh
|
||||
@@ -564,11 +564,11 @@ Environment="DOCKER_DEFAULT_RUNTIME=--default-runtime runc"
|
||||
|
||||
### Verify Intel® QAT card counters are incremented
|
||||
|
||||
To check the built in firmware counters, the Intel® QAT driver has to be compiled
|
||||
and installed to the host and can't rely on the built in host driver. The
|
||||
counters will increase when the accelerator is actively being used. To verify
|
||||
Intel® QAT is actively accelerating the containerized application, use the
|
||||
following instructions to check if any of the counters increment. Make
|
||||
To check the built in firmware counters, the Intel® QAT driver has to be compiled
|
||||
and installed to the host and can't rely on the built in host driver. The
|
||||
counters will increase when the accelerator is actively being used. To verify
|
||||
Intel® QAT is actively accelerating the containerized application, use the
|
||||
following instructions to check if any of the counters increment. Make
|
||||
sure to change the PCI Device ID to match what's in the system.
|
||||
|
||||
```bash
|
||||
|
||||
@@ -42,7 +42,7 @@ The following is an example of how to use `lspci` to check if your NIC supports
|
||||
SR-IOV.
|
||||
|
||||
```
|
||||
$ lspci | fgrep -i ethernet
|
||||
$ lspci | grep -i -F ethernet
|
||||
01:00.0 Ethernet controller: Intel Corporation Ethernet Controller 10-Gigabit X540-AT2 (rev 03)
|
||||
|
||||
...
|
||||
|
||||
562
src/agent/Cargo.lock
generated
562
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -12,7 +12,7 @@ oci-spec = { version = "0.6.8", features = ["runtime"] }
|
||||
rustjail = { path = "rustjail" }
|
||||
protocols = { path = "../libs/protocols", features = ["async", "with-serde"] }
|
||||
lazy_static = "1.3.0"
|
||||
ttrpc = { version = "0.8", features = ["async"], default-features = false }
|
||||
ttrpc = { version = "0.8.4", features = ["async"], default-features = false }
|
||||
protobuf = "3.2.0"
|
||||
libc = "0.2.58"
|
||||
nix = "0.24.2"
|
||||
@@ -78,7 +78,7 @@ strum = "0.26.2"
|
||||
strum_macros = "0.26.2"
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "v0.10.0", default-features = false, optional = true }
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "514c561d933cb11a0f1628621a0b930157af76cd", default-features = false, optional = true }
|
||||
|
||||
# Agent Policy
|
||||
regorus = { version = "0.2.6", default-features = false, features = [
|
||||
@@ -88,6 +88,7 @@ regorus = { version = "0.2.6", default-features = false, features = [
|
||||
], optional = true }
|
||||
cdi = { git = "https://github.com/cncf-tags/container-device-interface-rs", rev = "fba5677a8e7cc962fc6e495fcec98d7d765e332a" }
|
||||
json-patch = "2.0.0"
|
||||
kata-agent-policy = { path = "policy" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.1.0"
|
||||
@@ -97,7 +98,7 @@ rstest = "0.18.0"
|
||||
async-std = { version = "1.12.0", features = ["attributes"] }
|
||||
|
||||
[workspace]
|
||||
members = ["rustjail"]
|
||||
members = ["rustjail", "policy"]
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
||||
33
src/agent/policy/Cargo.toml
Normal file
33
src/agent/policy/Cargo.toml
Normal file
@@ -0,0 +1,33 @@
|
||||
# Manifest for the kata-agent policy crate: Rego policy evaluation support
# (via regorus) factored out of the kata-agent.
[package]
name = "kata-agent-policy"
version = "0.1.0"
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
edition = "2018"
license = "Apache-2.0"

[dependencies]
# Async runtime
tokio = { version = "1.39.0", features = ["full"] }
tokio-vsock = "0.3.4"

# Error handling
anyhow = "1"

# Configuration
serde = { version = "1.0.129", features = ["derive"] }
serde_json = "1.0.39"

# Agent Policy
regorus = { version = "0.2.8", default-features = false, features = [
    "arc",
    "regex",
    "std",
] }
# JSON-patch application for policy state updates
json-patch = "2.0.0"


# Note: this crate sets the slog 'max_*' features which allows the log level
# to be modified at runtime.
logging = { path = "../../libs/logging" }
slog = "2.5.2"
slog-scope = "4.1.2"
slog-term = "2.9.0"
|
||||
6
src/agent/policy/src/lib.rs
Normal file
6
src/agent/policy/src/lib.rs
Normal file
@@ -0,0 +1,6 @@
|
||||
// Copyright (c) 2024 Edgeless Systems GmbH
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
pub mod policy;
|
||||
243
src/agent/policy/src/policy.rs
Normal file
243
src/agent/policy/src/policy.rs
Normal file
@@ -0,0 +1,243 @@
|
||||
// Copyright (c) 2023 Microsoft Corporation
|
||||
// Copyright (c) 2024 Edgeless Systems GmbH
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
//! Policy evaluation for the kata-agent.
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use slog::{debug, error, info, warn};
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
// Default path of the policy activity log; used by `AgentPolicy::initialize`
// when the caller does not supply a log file path.
static POLICY_LOG_FILE: &str = "/tmp/policy.txt";
// Policy document loaded when AgentConfig does not specify a policy file.
static POLICY_DEFAULT_FILE: &str = "/etc/kata-opa/default-policy.rego";

/// Convenience macro to obtain the scope logger.
macro_rules! sl {
    () => {
        slog_scope::logger()
    };
}
|
||||
|
||||
/// Singleton policy object.
#[derive(Debug, Default)]
pub struct AgentPolicy {
    /// When true policy errors are ignored, for debug purposes.
    allow_failures: bool,

    /// "/tmp/policy.txt" log file for policy activity. Only populated by
    /// `initialize` when debug-level logging is enabled.
    log_file: Option<tokio::fs::File>,

    /// Regorus engine holding the loaded policy and its "pstate" data slice.
    engine: regorus::Engine,
}
|
||||
|
||||
/// Deserialized result of a policy metadata query.
///
/// NOTE(review): not referenced by the code visible here; presumably produced
/// by policy rules that decide a request and may return JSON-patch operations
/// for the policy state (see `apply_patch_to_state`) — confirm against callers.
#[derive(serde::Deserialize, Debug)]
struct MetadataResponse {
    /// Whether the evaluated request is allowed.
    allowed: bool,
    /// Optional JSON-patch operations to apply to the policy state.
    ops: Option<json_patch::Patch>,
}
|
||||
|
||||
impl AgentPolicy {
|
||||
/// Create AgentPolicy object.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
allow_failures: false,
|
||||
engine: Self::new_engine(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a regorus engine tuned for agent policy evaluation: builtin errors
/// are non-strict and `print()` output is gathered for later logging.
fn new_engine() -> regorus::Engine {
    let mut engine = regorus::Engine::new();
    engine.set_strict_builtin_errors(false);
    engine.set_gather_prints(true);

    // Reserve a "pstate" slice of the engine data to serve as policy state.
    let seed = regorus::Value::from_json_str(
        r#"{
            "pstate": {}
        }"#,
    )
    .unwrap();
    engine.add_data(seed).unwrap();

    engine
}
|
||||
|
||||
/// Initialize regorus.
|
||||
pub async fn initialize(
|
||||
&mut self,
|
||||
log_level: usize,
|
||||
default_policy_file: String,
|
||||
log_file: Option<String>,
|
||||
) -> Result<()> {
|
||||
// log file path
|
||||
let log_file_path = match log_file {
|
||||
Some(path) => path,
|
||||
None => POLICY_LOG_FILE.to_string(),
|
||||
};
|
||||
let log_file_path = log_file_path.as_str();
|
||||
|
||||
if log_level >= slog::Level::Debug.as_usize() {
|
||||
self.log_file = Some(
|
||||
tokio::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.create(true)
|
||||
.open(&log_file_path)
|
||||
.await?,
|
||||
);
|
||||
debug!(sl!(), "policy: log file: {}", log_file_path);
|
||||
}
|
||||
|
||||
// Check if policy file has been set via AgentConfig
|
||||
// If empty, use default file.
|
||||
let mut default_policy_file = default_policy_file;
|
||||
if default_policy_file.is_empty() {
|
||||
default_policy_file = POLICY_DEFAULT_FILE.to_string();
|
||||
}
|
||||
info!(sl!(), "default policy: {default_policy_file}");
|
||||
|
||||
self.engine.add_policy_from_file(default_policy_file)?;
|
||||
self.update_allow_failures_flag().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn apply_patch_to_state(&mut self, patch: json_patch::Patch) -> Result<()> {
|
||||
// Convert the current engine data to a JSON value
|
||||
let mut state = serde_json::to_value(self.engine.get_data())?;
|
||||
|
||||
// Apply the patch to the state
|
||||
json_patch::patch(&mut state, &patch)?;
|
||||
|
||||
// Clear the existing data in the engine
|
||||
self.engine.clear_data();
|
||||
|
||||
// Add the patched state back to the engine
|
||||
self.engine
|
||||
.add_data(regorus::Value::from_json_str(&state.to_string())?)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ask regorus if an API call should be allowed or not.
|
||||
pub async fn allow_request(&mut self, ep: &str, ep_input: &str) -> Result<(bool, String)> {
|
||||
debug!(sl!(), "policy check: {ep}");
|
||||
self.log_eval_input(ep, ep_input).await;
|
||||
|
||||
let query = format!("data.agent_policy.{ep}");
|
||||
self.engine.set_input_json(ep_input)?;
|
||||
|
||||
let results = self.engine.eval_query(query, false)?;
|
||||
|
||||
let prints = match self.engine.take_prints() {
|
||||
Ok(p) => p.join(" "),
|
||||
Err(e) => format!("Failed to get policy log: {e}"),
|
||||
};
|
||||
|
||||
if results.result.len() != 1 {
|
||||
// Results are empty when AllowRequestsFailingPolicy is used to allow a Request that hasn't been defined in the policy
|
||||
if self.allow_failures {
|
||||
return Ok((true, prints));
|
||||
}
|
||||
bail!(
|
||||
"policy check: unexpected eval_query result len {:?}",
|
||||
results
|
||||
);
|
||||
}
|
||||
|
||||
if results.result[0].expressions.len() != 1 {
|
||||
bail!(
|
||||
"policy check: unexpected eval_query result expressions {:?}",
|
||||
results
|
||||
);
|
||||
}
|
||||
|
||||
let mut allow = match &results.result[0].expressions[0].value {
|
||||
regorus::Value::Bool(b) => *b,
|
||||
|
||||
// Match against a specific variant that could be interpreted as MetadataResponse
|
||||
regorus::Value::Object(obj) => {
|
||||
let json_str = serde_json::to_string(obj)?;
|
||||
|
||||
self.log_eval_input(ep, &json_str).await;
|
||||
|
||||
let metadata_response: MetadataResponse = serde_json::from_str(&json_str)?;
|
||||
|
||||
if metadata_response.allowed {
|
||||
if let Some(ops) = metadata_response.ops {
|
||||
self.apply_patch_to_state(ops).await?;
|
||||
}
|
||||
}
|
||||
metadata_response.allowed
|
||||
}
|
||||
|
||||
_ => {
|
||||
error!(sl!(), "allow_request: unexpected eval_query result type");
|
||||
bail!(
|
||||
"policy check: unexpected eval_query result type {:?}",
|
||||
results
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if !allow && self.allow_failures {
|
||||
warn!(sl!(), "policy: ignoring error for {ep}");
|
||||
allow = true;
|
||||
}
|
||||
|
||||
Ok((allow, prints))
|
||||
}
|
||||
|
||||
/// Replace the Policy in regorus.
|
||||
pub async fn set_policy(&mut self, policy: &str) -> Result<()> {
|
||||
self.engine = Self::new_engine();
|
||||
self.engine
|
||||
.add_policy("agent_policy".to_string(), policy.to_string())?;
|
||||
self.update_allow_failures_flag().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn log_eval_input(&mut self, ep: &str, input: &str) {
|
||||
if let Some(log_file) = &mut self.log_file {
|
||||
match ep {
|
||||
"StatsContainerRequest" | "ReadStreamRequest" | "SetPolicyRequest" => {
|
||||
// - StatsContainerRequest and ReadStreamRequest are called
|
||||
// relatively often, so we're not logging them, to avoid
|
||||
// growing this log file too much.
|
||||
// - Confidential Containers Policy documents are relatively
|
||||
// large, so we're not logging them here, for SetPolicyRequest.
|
||||
// The Policy text can be obtained directly from the pod YAML.
|
||||
}
|
||||
_ => {
|
||||
let log_entry = format!("[\"ep\":\"{ep}\",{input}],\n\n");
|
||||
|
||||
if let Err(e) = log_file.write_all(log_entry.as_bytes()).await {
|
||||
warn!(sl!(), "policy: log_eval_input: write_all failed: {}", e);
|
||||
} else if let Err(e) = log_file.flush().await {
|
||||
warn!(sl!(), "policy: log_eval_input: flush failed: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_allow_failures_flag(&mut self) -> Result<()> {
|
||||
self.allow_failures = match self.allow_request("AllowRequestsFailingPolicy", "{}").await {
|
||||
Ok((allowed, _prints)) => {
|
||||
if allowed {
|
||||
warn!(
|
||||
sl!(),
|
||||
"policy: AllowRequestsFailingPolicy is enabled - will ignore errors"
|
||||
);
|
||||
}
|
||||
allowed
|
||||
}
|
||||
Err(_) => false,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1170,6 +1170,23 @@ impl Manager {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn subcgroup(&self) -> &str {
|
||||
// Check if we're in a Docker-in-Docker setup by verifying:
|
||||
// 1. We're using cgroups v2 (which restricts direct process control)
|
||||
// 2. An "init" subdirectory exists (used by DinD for process delegation)
|
||||
let is_dind = cgroups::hierarchies::is_cgroup2_unified_mode()
|
||||
&& cgroups::hierarchies::auto()
|
||||
.root()
|
||||
.join(&self.cpath)
|
||||
.join("init")
|
||||
.exists();
|
||||
if is_dind {
|
||||
"/init/"
|
||||
} else {
|
||||
"/"
|
||||
}
|
||||
}
|
||||
|
||||
fn get_paths_and_mounts(
|
||||
cpath: &str,
|
||||
) -> Result<(HashMap<String, String>, HashMap<String, String>)> {
|
||||
|
||||
@@ -19,7 +19,7 @@ pub trait SystemdInterface {
|
||||
fn kill_unit(&self) -> Result<()>;
|
||||
fn freeze_unit(&self) -> Result<()>;
|
||||
fn thaw_unit(&self) -> Result<()>;
|
||||
fn add_process(&self, pid: i32) -> Result<()>;
|
||||
fn add_process(&self, pid: i32, subcgroup: &str) -> Result<()>;
|
||||
fn get_version(&self) -> Result<String>;
|
||||
fn unit_exists(&self) -> Result<bool>;
|
||||
}
|
||||
@@ -151,11 +151,10 @@ impl SystemdInterface for DBusClient {
|
||||
}
|
||||
}
|
||||
|
||||
fn add_process(&self, pid: i32) -> Result<()> {
|
||||
fn add_process(&self, pid: i32, subcgroup: &str) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.attach_processes_to_unit(&self.unit_name, "/", &[pid as u32])
|
||||
.attach_processes_to_unit(&self.unit_name, subcgroup, &[pid as u32])
|
||||
.context(format!(
|
||||
"failed to add process into unit {}",
|
||||
self.unit_name
|
||||
|
||||
@@ -41,7 +41,8 @@ pub struct Manager {
|
||||
impl CgroupManager for Manager {
|
||||
fn apply(&self, pid: pid_t) -> Result<()> {
|
||||
if self.dbus_client.unit_exists()? {
|
||||
self.dbus_client.add_process(pid)?;
|
||||
let subcgroup = self.fs_manager.subcgroup();
|
||||
self.dbus_client.add_process(pid, subcgroup)?;
|
||||
} else {
|
||||
self.dbus_client.start_unit(
|
||||
(pid as u32).try_into().unwrap(),
|
||||
|
||||
@@ -836,6 +836,7 @@ fn mount_from(
|
||||
if !src.is_dir() {
|
||||
let _ = OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.write(true)
|
||||
.open(&dest)
|
||||
.map_err(|e| {
|
||||
|
||||
@@ -147,7 +147,7 @@ pub async fn unseal_file(path: &str) -> Result<()> {
|
||||
continue;
|
||||
}
|
||||
|
||||
let target_path = fs::canonicalize(&entry.path())?;
|
||||
let target_path = fs::canonicalize(entry.path())?;
|
||||
info!(sl(), "sealed source entry target path: {:?}", target_path);
|
||||
|
||||
// Skip if the target path is not a file (e.g., it's a symlink pointing to the secret file).
|
||||
@@ -177,8 +177,8 @@ pub async fn unseal_file(path: &str) -> Result<()> {
|
||||
fs::write(&unsealed_filename, unsealed_value)?;
|
||||
|
||||
// Remove the original sealed symlink and create a symlink to the unsealed file
|
||||
fs::remove_file(&entry.path())?;
|
||||
symlink(unsealed_filename_symlink, &entry.path())?;
|
||||
fs::remove_file(entry.path())?;
|
||||
symlink(unsealed_filename_symlink, entry.path())?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
use crate::rpc;
|
||||
|
||||
use anyhow::{anyhow, bail, ensure, Context, Result};
|
||||
use serde::Deserialize;
|
||||
use std::env;
|
||||
@@ -23,9 +23,11 @@ const SERVER_ADDR_OPTION: &str = "agent.server_addr";
|
||||
const PASSFD_LISTENER_PORT: &str = "agent.passfd_listener_port";
|
||||
const HOTPLUG_TIMOUT_OPTION: &str = "agent.hotplug_timeout";
|
||||
const CDH_API_TIMOUT_OPTION: &str = "agent.cdh_api_timeout";
|
||||
const CDI_TIMEOUT_OPTION: &str = "agent.cdi_timeout";
|
||||
const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
|
||||
const LOG_VPORT_OPTION: &str = "agent.log_vport";
|
||||
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
|
||||
const CGROUP_NO_V1: &str = "cgroup_no_v1";
|
||||
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "systemd.unified_cgroup_hierarchy";
|
||||
const CONFIG_FILE: &str = "agent.config_file";
|
||||
const GUEST_COMPONENTS_REST_API_OPTION: &str = "agent.guest_components_rest_api";
|
||||
@@ -69,6 +71,7 @@ const MEM_AGENT_COMPACT_FORCE_TIMES: &str = "agent.mem_agent_compact_force_times
|
||||
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
|
||||
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
|
||||
const DEFAULT_CDH_API_TIMEOUT: time::Duration = time::Duration::from_secs(50);
|
||||
const DEFAULT_CDI_TIMEOUT: time::Duration = time::Duration::from_secs(100);
|
||||
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
|
||||
const VSOCK_ADDR: &str = "vsock://-1";
|
||||
|
||||
@@ -131,14 +134,15 @@ pub struct AgentConfig {
|
||||
pub log_level: slog::Level,
|
||||
pub hotplug_timeout: time::Duration,
|
||||
pub cdh_api_timeout: time::Duration,
|
||||
pub cdi_timeout: time::Duration,
|
||||
pub debug_console_vport: i32,
|
||||
pub log_vport: i32,
|
||||
pub container_pipe_size: i32,
|
||||
pub server_addr: String,
|
||||
pub passfd_listener_port: i32,
|
||||
pub cgroup_no_v1: String,
|
||||
pub unified_cgroup_hierarchy: bool,
|
||||
pub tracing: bool,
|
||||
pub supports_seccomp: bool,
|
||||
pub https_proxy: String,
|
||||
pub no_proxy: String,
|
||||
pub guest_components_rest_api: GuestComponentsFeatures,
|
||||
@@ -168,6 +172,7 @@ pub struct AgentConfigBuilder {
|
||||
pub log_level: Option<String>,
|
||||
pub hotplug_timeout: Option<time::Duration>,
|
||||
pub cdh_api_timeout: Option<time::Duration>,
|
||||
pub cdi_timeout: Option<time::Duration>,
|
||||
pub debug_console_vport: Option<i32>,
|
||||
pub log_vport: Option<i32>,
|
||||
pub container_pipe_size: Option<i32>,
|
||||
@@ -250,7 +255,7 @@ macro_rules! parse_cmdline_param {
|
||||
($param:ident, $key:ident, $field:expr, $func:ident, $guard:expr) => {
|
||||
if $param.starts_with(format!("{}=", $key).as_str()) {
|
||||
let val = $func($param)?;
|
||||
if $guard(val) {
|
||||
if $guard(&val) {
|
||||
$field = val;
|
||||
}
|
||||
continue;
|
||||
@@ -266,14 +271,15 @@ impl Default for AgentConfig {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
cdh_api_timeout: DEFAULT_CDH_API_TIMEOUT,
|
||||
cdi_timeout: DEFAULT_CDI_TIMEOUT,
|
||||
debug_console_vport: 0,
|
||||
log_vport: 0,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: format!("{}:{}", VSOCK_ADDR, DEFAULT_AGENT_VSOCK_PORT),
|
||||
passfd_listener_port: 0,
|
||||
cgroup_no_v1: String::from(""),
|
||||
unified_cgroup_hierarchy: false,
|
||||
tracing: false,
|
||||
supports_seccomp: rpc::have_seccomp(),
|
||||
https_proxy: String::from(""),
|
||||
no_proxy: String::from(""),
|
||||
guest_components_rest_api: GuestComponentsFeatures::default(),
|
||||
@@ -311,6 +317,7 @@ impl FromStr for AgentConfig {
|
||||
);
|
||||
config_override!(agent_config_builder, agent_config, hotplug_timeout);
|
||||
config_override!(agent_config_builder, agent_config, cdh_api_timeout);
|
||||
config_override!(agent_config_builder, agent_config, cdi_timeout);
|
||||
config_override!(agent_config_builder, agent_config, debug_console_vport);
|
||||
config_override!(agent_config_builder, agent_config, log_vport);
|
||||
config_override!(agent_config_builder, agent_config, container_pipe_size);
|
||||
@@ -474,7 +481,7 @@ impl AgentConfig {
|
||||
HOTPLUG_TIMOUT_OPTION,
|
||||
config.hotplug_timeout,
|
||||
get_timeout,
|
||||
|hotplug_timeout: time::Duration| hotplug_timeout.as_secs() > 0
|
||||
|hotplug_timeout: &time::Duration| hotplug_timeout.as_secs() > 0
|
||||
);
|
||||
|
||||
// ensure the timeout is a positive value
|
||||
@@ -483,7 +490,16 @@ impl AgentConfig {
|
||||
CDH_API_TIMOUT_OPTION,
|
||||
config.cdh_api_timeout,
|
||||
get_timeout,
|
||||
|cdh_api_timeout: time::Duration| cdh_api_timeout.as_secs() > 0
|
||||
|cdh_api_timeout: &time::Duration| cdh_api_timeout.as_secs() > 0
|
||||
);
|
||||
|
||||
// ensure the timeout is a positive value
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
CDI_TIMEOUT_OPTION,
|
||||
config.cdi_timeout,
|
||||
get_timeout,
|
||||
|cdi_timeout: &time::Duration| cdi_timeout.as_secs() > 0
|
||||
);
|
||||
|
||||
// vsock port should be positive values
|
||||
@@ -492,21 +508,21 @@ impl AgentConfig {
|
||||
DEBUG_CONSOLE_VPORT_OPTION,
|
||||
config.debug_console_vport,
|
||||
get_number_value,
|
||||
|port| port > 0
|
||||
|port: &i32| *port > 0
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
LOG_VPORT_OPTION,
|
||||
config.log_vport,
|
||||
get_number_value,
|
||||
|port| port > 0
|
||||
|port: &i32| *port > 0
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
PASSFD_LISTENER_PORT,
|
||||
config.passfd_listener_port,
|
||||
get_number_value,
|
||||
|port| port > 0
|
||||
|port: &i32| *port > 0
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
@@ -514,6 +530,13 @@ impl AgentConfig {
|
||||
config.container_pipe_size,
|
||||
get_container_pipe_size
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
CGROUP_NO_V1,
|
||||
config.cgroup_no_v1,
|
||||
get_string_value,
|
||||
|no_v1| no_v1 == "all"
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
UNIFIED_CGROUP_HIERARCHY_OPTION,
|
||||
@@ -712,7 +735,7 @@ where
|
||||
|
||||
fields[1]
|
||||
.parse::<T>()
|
||||
.map_err(|e| anyhow!("parse from {} failed: {:?}", &fields[1], e))
|
||||
.map_err(|e| anyhow!("parse from {} failed: {:?}", fields[1], e))
|
||||
}
|
||||
|
||||
// Map logrus (https://godoc.org/github.com/sirupsen/logrus)
|
||||
@@ -755,7 +778,10 @@ fn get_timeout(param: &str) -> Result<time::Duration> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
ensure!(fields.len() == 2, ERR_INVALID_TIMEOUT);
|
||||
ensure!(
|
||||
matches!(fields[0], HOTPLUG_TIMOUT_OPTION | CDH_API_TIMOUT_OPTION),
|
||||
matches!(
|
||||
fields[0],
|
||||
HOTPLUG_TIMOUT_OPTION | CDH_API_TIMOUT_OPTION | CDI_TIMEOUT_OPTION
|
||||
),
|
||||
ERR_INVALID_TIMEOUT_KEY
|
||||
);
|
||||
|
||||
@@ -898,6 +924,7 @@ mod tests {
|
||||
hotplug_timeout: time::Duration,
|
||||
container_pipe_size: i32,
|
||||
server_addr: &'a str,
|
||||
cgroup_no_v1: &'a str,
|
||||
unified_cgroup_hierarchy: bool,
|
||||
tracing: bool,
|
||||
https_proxy: &'a str,
|
||||
@@ -927,6 +954,7 @@ mod tests {
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
cgroup_no_v1: "",
|
||||
unified_cgroup_hierarchy: false,
|
||||
tracing: false,
|
||||
https_proxy: "",
|
||||
@@ -1073,6 +1101,22 @@ mod tests {
|
||||
dev_mode: true,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "cgroup_no_v1=1",
|
||||
cgroup_no_v1: "",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "cgroup_no_v1=all",
|
||||
cgroup_no_v1: "all",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "cgroup_no_v1=0 systemd.unified_cgroup_hierarchy=1",
|
||||
cgroup_no_v1: "",
|
||||
unified_cgroup_hierarchy: true,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100 systemd.unified_cgroup_hierarchy=a",
|
||||
debug_console: true,
|
||||
@@ -1508,6 +1552,7 @@ mod tests {
|
||||
|
||||
assert_eq!(d.debug_console, config.debug_console, "{}", msg);
|
||||
assert_eq!(d.dev_mode, config.dev_mode, "{}", msg);
|
||||
assert_eq!(d.cgroup_no_v1, config.cgroup_no_v1, "{}", msg);
|
||||
assert_eq!(
|
||||
d.unified_cgroup_hierarchy, config.unified_cgroup_hierarchy,
|
||||
"{}",
|
||||
@@ -1677,6 +1722,7 @@ Caused by:
|
||||
)))]
|
||||
#[case("agent.chd_api_timeout=1", Err(anyhow!(ERR_INVALID_TIMEOUT_KEY)))]
|
||||
#[case("agent.cdh_api_timeout=600", Ok(time::Duration::from_secs(600)))]
|
||||
#[case("agent.cdi_timeout=320", Ok(time::Duration::from_secs(320)))]
|
||||
fn test_timeout(#[case] param: &str, #[case] expected: Result<time::Duration>) {
|
||||
let result = get_timeout(param);
|
||||
let msg = format!("expected: {:?}, result: {:?}", expected, result);
|
||||
|
||||
@@ -248,7 +248,7 @@ pub async fn handle_cdi_devices(
|
||||
logger: &Logger,
|
||||
spec: &mut Spec,
|
||||
spec_dir: &str,
|
||||
cdi_timeout: u64,
|
||||
cdi_timeout: time::Duration,
|
||||
) -> Result<()> {
|
||||
if let Some(container_type) = spec
|
||||
.annotations()
|
||||
@@ -271,7 +271,7 @@ pub async fn handle_cdi_devices(
|
||||
let options: Vec<CdiOption> = vec![with_auto_refresh(false), with_spec_dirs(&[spec_dir])];
|
||||
let cache: Arc<std::sync::Mutex<cdi::cache::Cache>> = new_cache(options);
|
||||
|
||||
for _ in 0..=cdi_timeout {
|
||||
for i in 0..=cdi_timeout.as_secs() {
|
||||
let inject_result = {
|
||||
// Lock cache within this scope, std::sync::Mutex has no Send
|
||||
// and await will not work with time::sleep
|
||||
@@ -294,15 +294,20 @@ pub async fn handle_cdi_devices(
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => {
|
||||
info!(logger, "error injecting devices: {:?}", e);
|
||||
println!("error injecting devices: {:?}", e);
|
||||
info!(
|
||||
logger,
|
||||
"waiting for CDI spec(s) to be generated ({} of {} max tries) {:?}",
|
||||
i,
|
||||
cdi_timeout.as_secs(),
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
time::sleep(Duration::from_millis(1000)).await;
|
||||
time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
Err(anyhow!(
|
||||
"failed to inject devices after CDI timeout of {} seconds",
|
||||
cdi_timeout
|
||||
cdi_timeout.as_secs()
|
||||
))
|
||||
}
|
||||
|
||||
@@ -1243,8 +1248,15 @@ mod tests {
|
||||
|
||||
fs::write(&cdi_file, cdi_content).expect("Failed to write CDI file");
|
||||
|
||||
let res =
|
||||
handle_cdi_devices(&logger, &mut spec, temp_dir.path().to_str().unwrap(), 0).await;
|
||||
let cdi_timeout = Duration::from_secs(0);
|
||||
|
||||
let res = handle_cdi_devices(
|
||||
&logger,
|
||||
&mut spec,
|
||||
temp_dir.path().to_str().unwrap(),
|
||||
cdi_timeout,
|
||||
)
|
||||
.await;
|
||||
println!("modfied spec {:?}", spec);
|
||||
assert!(res.is_ok(), "{}", res.err().unwrap());
|
||||
|
||||
|
||||
@@ -12,7 +12,9 @@ use crate::pci;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::{DRIVER_VFIO_AP_TYPE, DRIVER_VFIO_PCI_GK_TYPE, DRIVER_VFIO_PCI_TYPE};
|
||||
use kata_types::device::{
|
||||
DRIVER_VFIO_AP_COLD_TYPE, DRIVER_VFIO_AP_TYPE, DRIVER_VFIO_PCI_GK_TYPE, DRIVER_VFIO_PCI_TYPE,
|
||||
};
|
||||
use protocols::agent::Device;
|
||||
use slog::Logger;
|
||||
use std::ffi::OsStr;
|
||||
@@ -94,7 +96,7 @@ impl DeviceHandler for VfioPciDeviceHandler {
|
||||
impl DeviceHandler for VfioApDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_VFIO_AP_TYPE]
|
||||
&[DRIVER_VFIO_AP_TYPE, DRIVER_VFIO_AP_COLD_TYPE]
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
@@ -103,7 +105,16 @@ impl DeviceHandler for VfioApDeviceHandler {
|
||||
// Force AP bus rescan
|
||||
fs::write(AP_SCANS_PATH, "1")?;
|
||||
for apqn in device.options.iter() {
|
||||
wait_for_ap_device(ctx.sandbox, ap::Address::from_str(apqn)?).await?;
|
||||
let ap_address = ap::Address::from_str(apqn).context("Failed to parse AP address")?;
|
||||
match device.type_.as_str() {
|
||||
DRIVER_VFIO_AP_TYPE => {
|
||||
wait_for_ap_device(ctx.sandbox, ap_address).await?;
|
||||
}
|
||||
DRIVER_VFIO_AP_COLD_TYPE => {
|
||||
check_ap_device(ctx.sandbox, ap_address).await?;
|
||||
}
|
||||
_ => return Err(anyhow!("Unsupported AP device type: {}", device.type_)),
|
||||
}
|
||||
}
|
||||
let dev_update = Some(DevUpdate::new(Z9_CRYPT_DEV_PATH, Z9_CRYPT_DEV_PATH)?);
|
||||
Ok(SpecUpdate {
|
||||
@@ -201,6 +212,37 @@ async fn wait_for_ap_device(sandbox: &Arc<Mutex<Sandbox>>, address: ap::Address)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
#[instrument]
async fn check_ap_device(sandbox: &Arc<Mutex<Sandbox>>, address: ap::Address) -> Result<()> {
    // Sysfs attribute exposing the device's online state; the card id is
    // rendered as two hex digits, matching the kernel's directory naming.
    let online_attr = format!(
        "/sys/{}/card{:02x}/{}/online",
        AP_ROOT_BUS_PATH, address.adapter_id, address
    );

    // A cold-plugged device must already be present in sysfs.
    if !Path::new(&online_attr).is_file() {
        return Err(anyhow!(
            "AP device online file not found or not accessible: {}",
            online_attr
        ));
    }

    let status = fs::read_to_string(&online_attr).map_err(|e| {
        anyhow!(
            "Failed to read online status for AP device {}: {}",
            address,
            e
        )
    })?;

    // "1" means online; any other content is treated as offline.
    if status.trim() != "1" {
        return Err(anyhow!("AP device {} exists but is not online", address));
    }

    Ok(())
}
|
||||
|
||||
pub async fn wait_for_pci_device(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
pcipath: &pci::Path,
|
||||
|
||||
@@ -9,10 +9,11 @@ use safe_path::scoped_join;
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use image_rs::builder::ClientBuilder;
|
||||
use image_rs::image::ImageClient;
|
||||
use kata_sys_util::validate::verify_id;
|
||||
use oci_spec::runtime as oci;
|
||||
@@ -57,15 +58,16 @@ pub struct ImageService {
|
||||
}
|
||||
|
||||
impl ImageService {
|
||||
pub fn new() -> Self {
|
||||
let mut image_client = ImageClient::new(PathBuf::from(KATA_IMAGE_WORK_DIR));
|
||||
pub async fn new() -> Result<Self> {
|
||||
let mut image_client_builder =
|
||||
ClientBuilder::default().work_dir(KATA_IMAGE_WORK_DIR.into());
|
||||
#[cfg(feature = "guest-pull")]
|
||||
{
|
||||
if !AGENT_CONFIG.image_registry_auth.is_empty() {
|
||||
let registry_auth = &AGENT_CONFIG.image_registry_auth;
|
||||
debug!(sl(), "Set registry auth file {:?}", registry_auth);
|
||||
image_client.config.file_paths.auth_file = registry_auth.clone();
|
||||
image_client.config.auth = true;
|
||||
image_client_builder = image_client_builder
|
||||
.authenticated_registry_credentials_uri(registry_auth.into());
|
||||
}
|
||||
|
||||
let enable_signature_verification = &AGENT_CONFIG.enable_signature_verification;
|
||||
@@ -73,15 +75,15 @@ impl ImageService {
|
||||
sl(),
|
||||
"Enable image signature verification: {:?}", enable_signature_verification
|
||||
);
|
||||
image_client.config.security_validate = *enable_signature_verification;
|
||||
|
||||
if !AGENT_CONFIG.image_policy_file.is_empty() {
|
||||
if !AGENT_CONFIG.image_policy_file.is_empty() && *enable_signature_verification {
|
||||
let image_policy_file = &AGENT_CONFIG.image_policy_file;
|
||||
debug!(sl(), "Use imagepolicy file {:?}", image_policy_file);
|
||||
image_client.config.file_paths.policy_path = image_policy_file.clone();
|
||||
debug!(sl(), "Use image policy file {:?}", image_policy_file);
|
||||
image_client_builder =
|
||||
image_client_builder.image_security_policy_uri(image_policy_file.into());
|
||||
}
|
||||
}
|
||||
Self { image_client }
|
||||
let image_client = image_client_builder.build().await?;
|
||||
Ok(Self { image_client })
|
||||
}
|
||||
|
||||
/// get guest pause image process specification
|
||||
@@ -276,9 +278,10 @@ pub async fn set_proxy_env_vars() {
|
||||
}
|
||||
|
||||
/// Init the image service
|
||||
pub async fn init_image_service() {
|
||||
let image_service = ImageService::new();
|
||||
pub async fn init_image_service() -> Result<()> {
|
||||
let image_service = ImageService::new().await?;
|
||||
*IMAGE_SERVICE.lock().await = Some(image_service);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn pull_image(
|
||||
|
||||
@@ -12,7 +12,7 @@ use std::fs;
|
||||
|
||||
pub const SYSFS_DIR: &str = "/sys";
|
||||
#[cfg(any(
|
||||
target_arch = "powerpc64",
|
||||
all(target_arch = "powerpc64", target_endian = "little"),
|
||||
target_arch = "s390x",
|
||||
target_arch = "x86_64",
|
||||
target_arch = "x86"
|
||||
|
||||
@@ -134,7 +134,7 @@ lazy_static! {
|
||||
|
||||
#[cfg(feature = "agent-policy")]
|
||||
lazy_static! {
|
||||
static ref AGENT_POLICY: Mutex<policy::AgentPolicy> = Mutex::new(AgentPolicy::new());
|
||||
static ref AGENT_POLICY: Mutex<AgentPolicy> = Mutex::new(AgentPolicy::new());
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
@@ -228,8 +228,9 @@ async fn real_main(init_mode: bool) -> std::result::Result<(), Box<dyn std::erro
|
||||
})?;
|
||||
|
||||
lazy_static::initialize(&AGENT_CONFIG);
|
||||
let cgroup_v2 = AGENT_CONFIG.unified_cgroup_hierarchy || AGENT_CONFIG.cgroup_no_v1 == "all";
|
||||
|
||||
init_agent_as_init(&logger, AGENT_CONFIG.unified_cgroup_hierarchy)?;
|
||||
init_agent_as_init(&logger, cgroup_v2)?;
|
||||
drop(logger_async_guard);
|
||||
} else {
|
||||
lazy_static::initialize(&AGENT_CONFIG);
|
||||
@@ -632,7 +633,15 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result
|
||||
|
||||
#[cfg(feature = "agent-policy")]
|
||||
async fn initialize_policy() -> Result<()> {
|
||||
AGENT_POLICY.lock().await.initialize().await
|
||||
AGENT_POLICY
|
||||
.lock()
|
||||
.await
|
||||
.initialize(
|
||||
AGENT_CONFIG.log_level.as_usize(),
|
||||
AGENT_CONFIG.policy_file.clone(),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
// The Rust standard library had suppressed the default SIGPIPE behavior,
|
||||
@@ -650,7 +659,7 @@ use crate::config::AgentConfig;
|
||||
use std::os::unix::io::{FromRawFd, RawFd};
|
||||
|
||||
#[cfg(feature = "agent-policy")]
|
||||
use crate::policy::AgentPolicy;
|
||||
use kata_agent_policy::policy::AgentPolicy;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -299,7 +299,13 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
|
||||
|
||||
// Enable memory hierarchical account.
|
||||
// For more information see https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
|
||||
online_device("/sys/fs/cgroup/memory/memory.use_hierarchy")
|
||||
// cgroupsV2 will automatically enable memory.use_hierarchy.
|
||||
// additinoally this directory layout is not present in cgroupsV2.
|
||||
if !unified_cgroup_hierarchy {
|
||||
return online_device("/sys/fs/cgroup/memory/memory.use_hierarchy");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
@@ -531,6 +537,7 @@ mod tests {
|
||||
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.write(true)
|
||||
.open(test_file_filename)
|
||||
.expect("failed to create test file");
|
||||
|
||||
@@ -1079,8 +1079,8 @@ mod tests {
|
||||
.expect("failed to show neigh")
|
||||
.stdout;
|
||||
|
||||
let stdout = std::str::from_utf8(&stdout).expect("failed to conveert stdout");
|
||||
assert_eq!(stdout, format!("{} lladdr {} PERMANENT\n", to_ip, mac));
|
||||
let stdout = std::str::from_utf8(&stdout).expect("failed to convert stdout");
|
||||
assert_eq!(stdout.trim(), format!("{} lladdr {} PERMANENT", to_ip, mac));
|
||||
|
||||
clean_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);
|
||||
}
|
||||
|
||||
@@ -3,22 +3,11 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use protobuf::MessageDyn;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
use crate::rpc::ttrpc_error;
|
||||
use crate::{AGENT_CONFIG, AGENT_POLICY};
|
||||
|
||||
static POLICY_LOG_FILE: &str = "/tmp/policy.txt";
|
||||
static POLICY_DEFAULT_FILE: &str = "/etc/kata-opa/default-policy.rego";
|
||||
|
||||
/// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
() => {
|
||||
slog_scope::logger()
|
||||
};
|
||||
}
|
||||
use crate::AGENT_POLICY;
|
||||
use kata_agent_policy::policy::AgentPolicy;
|
||||
|
||||
async fn allow_request(policy: &mut AgentPolicy, ep: &str, request: &str) -> ttrpc::Result<()> {
|
||||
match policy.allow_request(ep, request).await {
|
||||
@@ -54,213 +43,3 @@ pub async fn do_set_policy(req: &protocols::agent::SetPolicyRequest) -> ttrpc::R
|
||||
.await
|
||||
.map_err(|e| ttrpc_error(ttrpc::Code::INVALID_ARGUMENT, e))
|
||||
}
|
||||
|
||||
/// Singleton policy object.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct AgentPolicy {
|
||||
/// When true policy errors are ignored, for debug purposes.
|
||||
allow_failures: bool,
|
||||
|
||||
/// "/tmp/policy.txt" log file for policy activity.
|
||||
log_file: Option<tokio::fs::File>,
|
||||
|
||||
/// Regorus engine
|
||||
engine: regorus::Engine,
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize, Debug)]
|
||||
struct MetadataResponse {
|
||||
allowed: bool,
|
||||
ops: Option<json_patch::Patch>,
|
||||
}
|
||||
|
||||
impl AgentPolicy {
|
||||
/// Create AgentPolicy object.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
allow_failures: false,
|
||||
engine: Self::new_engine(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn new_engine() -> regorus::Engine {
|
||||
let mut engine = regorus::Engine::new();
|
||||
engine.set_strict_builtin_errors(false);
|
||||
engine.set_gather_prints(true);
|
||||
// assign a slice of the engine data "pstate" to be used as policy state
|
||||
engine
|
||||
.add_data(
|
||||
regorus::Value::from_json_str(
|
||||
r#"{
|
||||
"pstate": {}
|
||||
}"#,
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
engine
|
||||
}
|
||||
|
||||
/// Initialize regorus.
|
||||
pub async fn initialize(&mut self) -> Result<()> {
|
||||
if AGENT_CONFIG.log_level.as_usize() >= slog::Level::Debug.as_usize() {
|
||||
self.log_file = Some(
|
||||
tokio::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.create(true)
|
||||
.open(POLICY_LOG_FILE)
|
||||
.await?,
|
||||
);
|
||||
debug!(sl!(), "policy: log file: {}", POLICY_LOG_FILE);
|
||||
}
|
||||
|
||||
// Check if policy file has been set via AgentConfig
|
||||
// If empty, use default file.
|
||||
let mut default_policy_file = AGENT_CONFIG.policy_file.clone();
|
||||
if default_policy_file.is_empty() {
|
||||
default_policy_file = POLICY_DEFAULT_FILE.to_string();
|
||||
}
|
||||
info!(sl!(), "default policy: {default_policy_file}");
|
||||
|
||||
self.engine.add_policy_from_file(default_policy_file)?;
|
||||
self.update_allow_failures_flag().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn apply_patch_to_state(&mut self, patch: json_patch::Patch) -> Result<()> {
|
||||
// Convert the current engine data to a JSON value
|
||||
let mut state = serde_json::to_value(self.engine.get_data())?;
|
||||
|
||||
// Apply the patch to the state
|
||||
json_patch::patch(&mut state, &patch)?;
|
||||
|
||||
// Clear the existing data in the engine
|
||||
self.engine.clear_data();
|
||||
|
||||
// Add the patched state back to the engine
|
||||
self.engine
|
||||
.add_data(regorus::Value::from_json_str(&state.to_string())?)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ask regorus if an API call should be allowed or not.
|
||||
async fn allow_request(&mut self, ep: &str, ep_input: &str) -> Result<(bool, String)> {
|
||||
debug!(sl!(), "policy check: {ep}");
|
||||
self.log_eval_input(ep, ep_input).await;
|
||||
|
||||
let query = format!("data.agent_policy.{ep}");
|
||||
self.engine.set_input_json(ep_input)?;
|
||||
|
||||
let results = self.engine.eval_query(query, false)?;
|
||||
|
||||
let prints = match self.engine.take_prints() {
|
||||
Ok(p) => p.join(" "),
|
||||
Err(e) => format!("Failed to get policy log: {e}"),
|
||||
};
|
||||
|
||||
if results.result.len() != 1 {
|
||||
// Results are empty when AllowRequestsFailingPolicy is used to allow a Request that hasn't been defined in the policy
|
||||
if self.allow_failures {
|
||||
return Ok((true, prints));
|
||||
}
|
||||
bail!(
|
||||
"policy check: unexpected eval_query result len {:?}",
|
||||
results
|
||||
);
|
||||
}
|
||||
|
||||
if results.result[0].expressions.len() != 1 {
|
||||
bail!(
|
||||
"policy check: unexpected eval_query result expressions {:?}",
|
||||
results
|
||||
);
|
||||
}
|
||||
|
||||
let mut allow = match &results.result[0].expressions[0].value {
|
||||
regorus::Value::Bool(b) => *b,
|
||||
|
||||
// Match against a specific variant that could be interpreted as MetadataResponse
|
||||
regorus::Value::Object(obj) => {
|
||||
let json_str = serde_json::to_string(obj)?;
|
||||
|
||||
self.log_eval_input(ep, &json_str).await;
|
||||
|
||||
let metadata_response: MetadataResponse = serde_json::from_str(&json_str)?;
|
||||
|
||||
if metadata_response.allowed {
|
||||
if let Some(ops) = metadata_response.ops {
|
||||
self.apply_patch_to_state(ops).await?;
|
||||
}
|
||||
}
|
||||
metadata_response.allowed
|
||||
}
|
||||
|
||||
_ => {
|
||||
error!(sl!(), "allow_request: unexpected eval_query result type");
|
||||
bail!(
|
||||
"policy check: unexpected eval_query result type {:?}",
|
||||
results
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if !allow && self.allow_failures {
|
||||
warn!(sl!(), "policy: ignoring error for {ep}");
|
||||
allow = true;
|
||||
}
|
||||
|
||||
Ok((allow, prints))
|
||||
}
|
||||
|
||||
/// Replace the Policy in regorus.
|
||||
pub async fn set_policy(&mut self, policy: &str) -> Result<()> {
|
||||
self.engine = Self::new_engine();
|
||||
self.engine
|
||||
.add_policy("agent_policy".to_string(), policy.to_string())?;
|
||||
self.update_allow_failures_flag().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn log_eval_input(&mut self, ep: &str, input: &str) {
|
||||
if let Some(log_file) = &mut self.log_file {
|
||||
match ep {
|
||||
"StatsContainerRequest" | "ReadStreamRequest" | "SetPolicyRequest" => {
|
||||
// - StatsContainerRequest and ReadStreamRequest are called
|
||||
// relatively often, so we're not logging them, to avoid
|
||||
// growing this log file too much.
|
||||
// - Confidential Containers Policy documents are relatively
|
||||
// large, so we're not logging them here, for SetPolicyRequest.
|
||||
// The Policy text can be obtained directly from the pod YAML.
|
||||
}
|
||||
_ => {
|
||||
let log_entry = format!("[\"ep\":\"{ep}\",{input}],\n\n");
|
||||
|
||||
if let Err(e) = log_file.write_all(log_entry.as_bytes()).await {
|
||||
warn!(sl!(), "policy: log_eval_input: write_all failed: {}", e);
|
||||
} else if let Err(e) = log_file.flush().await {
|
||||
warn!(sl!(), "policy: log_eval_input: flush failed: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_allow_failures_flag(&mut self) -> Result<()> {
|
||||
self.allow_failures = match self.allow_request("AllowRequestsFailingPolicy", "{}").await {
|
||||
Ok((allowed, _prints)) => {
|
||||
if allowed {
|
||||
warn!(
|
||||
sl!(),
|
||||
"policy: AllowRequestsFailingPolicy is enabled - will ignore errors"
|
||||
);
|
||||
}
|
||||
allowed
|
||||
}
|
||||
Err(_) => false,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,9 +12,9 @@ use std::os::unix::io::{AsRawFd, FromRawFd};
|
||||
use tracing::instrument;
|
||||
|
||||
pub const RNGDEV: &str = "/dev/random";
|
||||
#[cfg(target_arch = "powerpc64")]
|
||||
#[cfg(all(target_arch = "powerpc64", target_endian = "little"))]
|
||||
pub const RNDADDTOENTCNT: libc::c_uint = 0x80045201;
|
||||
#[cfg(target_arch = "powerpc64")]
|
||||
#[cfg(all(target_arch = "powerpc64", target_endian = "little"))]
|
||||
pub const RNDRESEEDCRNG: libc::c_int = 0x20005207;
|
||||
#[cfg(not(target_arch = "powerpc64"))]
|
||||
pub const RNDADDTOENTCNT: libc::c_int = 0x40045201;
|
||||
|
||||
@@ -130,8 +130,6 @@ const ERR_NO_SANDBOX_PIDNS: &str = "Sandbox does not have sandbox_pidns";
|
||||
// not available.
|
||||
const IPTABLES_RESTORE_WAIT_SEC: u64 = 5;
|
||||
|
||||
const CDI_TIMEOUT_LIMIT: u64 = 100;
|
||||
|
||||
// Convenience function to obtain the scope logger.
|
||||
fn sl() -> slog::Logger {
|
||||
slog_scope::logger()
|
||||
@@ -234,7 +232,7 @@ impl AgentService {
|
||||
// or other entities for a specifc device.
|
||||
// In Kata we only consider the directory "/var/run/cdi", "/etc" may be
|
||||
// readonly
|
||||
handle_cdi_devices(&sl(), &mut oci, "/var/run/cdi", CDI_TIMEOUT_LIMIT).await?;
|
||||
handle_cdi_devices(&sl(), &mut oci, "/var/run/cdi", AGENT_CONFIG.cdi_timeout).await?;
|
||||
|
||||
cdh_handler(&mut oci).await?;
|
||||
|
||||
@@ -1294,6 +1292,9 @@ impl agent_ttrpc::AgentService for AgentService {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "guest-pull")]
|
||||
image::init_image_service().await.map_ttrpc_err(same)?;
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
|
||||
@@ -1748,9 +1749,6 @@ pub async fn start(
|
||||
let health_service = Box::new(HealthService {}) as Box<dyn health_ttrpc::Health + Send + Sync>;
|
||||
let hservice = health_ttrpc::create_health(Arc::new(health_service));
|
||||
|
||||
#[cfg(feature = "guest-pull")]
|
||||
image::init_image_service().await;
|
||||
|
||||
let server = TtrpcServer::new()
|
||||
.bind(server_address)?
|
||||
.register_service(aservice)
|
||||
|
||||
@@ -1065,7 +1065,7 @@ mod tests {
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
let test_pids = [std::i32::MIN, -1, 0, 1, std::i32::MAX];
|
||||
let test_pids = [i32::MIN, -1, 0, 1, i32::MAX];
|
||||
|
||||
for test_pid in test_pids {
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
|
||||
@@ -18,14 +18,14 @@ const BUF_SIZE: usize = 8192;
|
||||
|
||||
// Interruptable I/O copy using readers and writers
|
||||
// (an interruptable version of "io::copy()").
|
||||
pub async fn interruptable_io_copier<R: Sized, W: Sized>(
|
||||
pub async fn interruptable_io_copier<R, W>(
|
||||
mut reader: R,
|
||||
mut writer: W,
|
||||
mut shutdown: Receiver<bool>,
|
||||
) -> io::Result<u64>
|
||||
where
|
||||
R: tokio::io::AsyncRead + Unpin,
|
||||
W: tokio::io::AsyncWrite + Unpin,
|
||||
R: tokio::io::AsyncRead + Unpin + Sized,
|
||||
W: tokio::io::AsyncWrite + Unpin + Sized,
|
||||
{
|
||||
let mut total_bytes: u64 = 0;
|
||||
|
||||
@@ -181,13 +181,13 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl ToString for BufWriter {
|
||||
fn to_string(&self) -> String {
|
||||
impl std::fmt::Display for BufWriter {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let data_ref = self.data.clone();
|
||||
let output = data_ref.lock().unwrap();
|
||||
let s = (*output).clone();
|
||||
|
||||
String::from_utf8(s).unwrap()
|
||||
write!(f, "{}", String::from_utf8(s).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
34
src/dragonball/Cargo.lock
generated
34
src/dragonball/Cargo.lock
generated
@@ -428,9 +428,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "deranged"
|
||||
version = "0.3.8"
|
||||
version = "0.3.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946"
|
||||
checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
|
||||
dependencies = [
|
||||
"powerfmt",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derivative"
|
||||
@@ -1146,6 +1149,12 @@ dependencies = [
|
||||
"memoffset",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.16"
|
||||
@@ -1379,6 +1388,12 @@ version = "0.3.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
|
||||
|
||||
[[package]]
|
||||
name = "powerfmt"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.66"
|
||||
@@ -1897,14 +1912,16 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.3.28"
|
||||
version = "0.3.37"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48"
|
||||
checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
|
||||
dependencies = [
|
||||
"deranged",
|
||||
"itoa",
|
||||
"libc",
|
||||
"num-conv",
|
||||
"num_threads",
|
||||
"powerfmt",
|
||||
"serde",
|
||||
"time-core",
|
||||
"time-macros",
|
||||
@@ -1912,16 +1929,17 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time-core"
|
||||
version = "0.1.1"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb"
|
||||
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
|
||||
|
||||
[[package]]
|
||||
name = "time-macros"
|
||||
version = "0.2.14"
|
||||
version = "0.2.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572"
|
||||
checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
|
||||
dependencies = [
|
||||
"num-conv",
|
||||
"time-core",
|
||||
]
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ vm-memory = { version = "0.10.0", features = ["backend-mmap"] }
|
||||
crossbeam-channel = "0.5.6"
|
||||
fuse-backend-rs = "0.10.5"
|
||||
vfio-bindings = { version = "0.3.0", optional = true }
|
||||
vfio-ioctls = { version = "0.1.0", optional = true }
|
||||
vfio-ioctls = { version = "0.1.0", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
slog-async = "2.7.0"
|
||||
@@ -81,3 +81,8 @@ vhost-user-fs = ["dbs-virtio-devices/vhost-user-fs"]
|
||||
vhost-user-net = ["dbs-virtio-devices/vhost-user-net"]
|
||||
vhost-user-blk = ["dbs-virtio-devices/vhost-user-blk"]
|
||||
host-device = ["dep:vfio-bindings", "dep:vfio-ioctls", "dep:dbs-pci"]
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = [
|
||||
'cfg(feature, values("test-mock"))',
|
||||
] }
|
||||
|
||||
@@ -6,7 +6,7 @@ include ../../utils.mk
|
||||
|
||||
PROJECT_DIRS := $(shell find . -name Cargo.toml -printf '%h\n' | sort -u)
|
||||
|
||||
ifeq ($(ARCH), $(filter $(ARCH), s390x ppc64le))
|
||||
ifeq ($(ARCH), $(filter $(ARCH), s390x powerpc64le))
|
||||
default build check test clippy vendor:
|
||||
@echo "$(ARCH) is not support currently"
|
||||
exit 0
|
||||
@@ -34,7 +34,7 @@ vendor:
|
||||
|
||||
format:
|
||||
@echo "INFO: rust fmt..."
|
||||
# This is kinda dirty step here simply because cargo fmt --all will apply fmt to all dependencies of dragonball which will include /src/libs/protocols with some file generated during compilation time and could not be formatted when you use cargo fmt --all before building the whole project. In order to avoid this problem, we do fmt check in this following way.
|
||||
# This is kinda dirty step here simply because cargo fmt --all will apply fmt to all dependencies of dragonball which will include /src/libs/protocols with some file generated during compilation time and could not be formatted when you use cargo fmt --all before building the whole project. In order to avoid this problem, we do fmt check in this following way.
|
||||
rustfmt --edition 2018 ./src/dbs_address_space/src/lib.rs ./src/dbs_allocator/src/lib.rs ./src/dbs_arch/src/lib.rs ./src/dbs_boot/src/lib.rs ./src/dbs_device/src/lib.rs ./src/dbs_interrupt/src/lib.rs ./src/dbs_legacy_devices/src/lib.rs ./src/dbs_pci/src/lib.rs ./src/dbs_upcall/src/lib.rs ./src/dbs_utils/src/lib.rs ./src/dbs_virtio_devices/src/lib.rs ./src/lib.rs --check
|
||||
|
||||
clean:
|
||||
|
||||
@@ -165,7 +165,7 @@ impl<T> DeviceConfigInfo<T>
|
||||
where
|
||||
T: ConfigItem + Clone,
|
||||
{
|
||||
/// Create a new instance of ['DeviceInfoGroup'].
|
||||
/// Create a new instance of ['DeviceConfigInfo'].
|
||||
pub fn new(config: T) -> Self {
|
||||
DeviceConfigInfo {
|
||||
config,
|
||||
@@ -173,7 +173,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new instance of ['DeviceInfoGroup'] with optional device.
|
||||
/// Create a new instance of ['DeviceConfigInfo'] with optional device.
|
||||
pub fn new_with_device(config: T, device: Option<Arc<dyn DeviceIo>>) -> Self {
|
||||
DeviceConfigInfo { config, device }
|
||||
}
|
||||
|
||||
@@ -166,15 +166,6 @@ impl Range {
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for Range {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
match self.min.cmp(&other.min) {
|
||||
Ordering::Equal => Some(self.max.cmp(&other.max)),
|
||||
res => Some(res),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for Range {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
match self.min.cmp(&other.min) {
|
||||
@@ -184,6 +175,12 @@ impl Ord for Range {
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for Range {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
/// State of interval tree node.
|
||||
///
|
||||
/// Valid state transitions:
|
||||
@@ -424,7 +421,7 @@ impl<T> Node<T> {
|
||||
let l = height(&self.0.left);
|
||||
let r = height(&self.0.right);
|
||||
match (l as i32) - (r as i32) {
|
||||
1 | 0 | -1 => self,
|
||||
-1..=1 => self,
|
||||
2 => self.rotate_left_successor(),
|
||||
-2 => self.rotate_right_successor(),
|
||||
_ => unreachable!(),
|
||||
|
||||
@@ -152,7 +152,7 @@ impl Ord for IoRange {
|
||||
|
||||
impl PartialOrd for IoRange {
|
||||
fn partial_cmp(&self, other: &IoRange) -> Option<Ordering> {
|
||||
self.base.partial_cmp(&other.base)
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "legacy_irq", target_arch = "x86_64"))]
|
||||
#[cfg(all(feature = "legacy-irq", target_arch = "x86_64"))]
|
||||
#[test]
|
||||
fn test_new_msi_routing_multi() {
|
||||
let mut msi_fds = Vec::with_capacity(16);
|
||||
|
||||
@@ -85,7 +85,6 @@ mod tests {
|
||||
#[test]
|
||||
fn test_i8042_valid_ops() {
|
||||
let reset_evt = EventFdTrigger::new(EventFd::new(libc::EFD_NONBLOCK).unwrap());
|
||||
let metrics = Arc::new(I8042DeviceMetrics::default());
|
||||
let mut i8042 = I8042Device::new(reset_evt.try_clone().unwrap());
|
||||
|
||||
let mut v = [0x00u8; 1];
|
||||
@@ -106,7 +105,6 @@ mod tests {
|
||||
#[test]
|
||||
fn test_i8042_invalid_ops() {
|
||||
let reset_evt = EventFdTrigger::new(EventFd::new(libc::EFD_NONBLOCK).unwrap());
|
||||
let metrics = Arc::new(I8042DeviceMetrics::default());
|
||||
let mut i8042 = I8042Device::new(reset_evt.try_clone().unwrap());
|
||||
|
||||
let mut v = [0x00u8; 2];
|
||||
@@ -125,9 +123,9 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore = "Issue #10821 - IO Safety violation: owned file descriptor already closed"]
|
||||
fn test_i8042_reset_err() {
|
||||
let reset_evt = EventFdTrigger::new(unsafe { EventFd::from_raw_fd(i32::MAX) });
|
||||
let metrics = Arc::new(I8042DeviceMetrics::default());
|
||||
let mut i8042 = I8042Device::new(reset_evt);
|
||||
i8042.pio_write(
|
||||
PioAddress(0),
|
||||
|
||||
@@ -9,14 +9,18 @@ homepage = "https://github.com/openanolis/dragonball-sandbox"
|
||||
repository = "https://github.com/openanolis/dragonball-sandbox/tree/main/crates/dbs-pci"
|
||||
keywords = ["dragonball", "secure-sandbox", "devices", "pci"]
|
||||
readme = "README.md"
|
||||
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.14"
|
||||
thiserror = "1"
|
||||
dbs-allocator = { path = "../dbs_allocator" }
|
||||
dbs-boot = { path = "../dbs_boot" }
|
||||
dbs-device = { path = "../dbs_device" }
|
||||
dbs-interrupt = { path = "../dbs_interrupt", features = ["kvm-irq", "kvm-legacy-irq", "kvm-msi-irq"] }
|
||||
dbs-interrupt = { path = "../dbs_interrupt", features = [
|
||||
"kvm-irq",
|
||||
"kvm-legacy-irq",
|
||||
"kvm-msi-irq",
|
||||
] }
|
||||
downcast-rs = "1.2.0"
|
||||
byteorder = "1.4.3"
|
||||
vm-memory = "0.10.0"
|
||||
@@ -28,4 +32,9 @@ libc = "0.2.39"
|
||||
|
||||
[dev-dependencies]
|
||||
dbs-arch = { path = "../dbs_arch" }
|
||||
kvm-ioctls = "0.12.0"
|
||||
kvm-ioctls = "0.12.0"
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = [
|
||||
'cfg(feature, values("test-mock"))',
|
||||
] }
|
||||
|
||||
@@ -431,6 +431,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore = "Issue #10821 - IO Safety violation: owned file descriptor already closed"]
|
||||
fn test_set_options() {
|
||||
// This line will fail to provide an initialized FD if the test is not run as root.
|
||||
let tap = Tap::new().unwrap();
|
||||
|
||||
@@ -110,7 +110,7 @@ fn validate_and_configure_tap(tap: &Tap, vq_pairs: usize) -> VirtioResult<()> {
|
||||
TapError::MissingFlags(
|
||||
missing_flags
|
||||
.into_iter()
|
||||
.map(|flag| *flag)
|
||||
.copied()
|
||||
.collect::<Vec<&str>>()
|
||||
.join(", "),
|
||||
),
|
||||
@@ -372,7 +372,7 @@ where
|
||||
let intr_evts = config.get_queue_interrupt_eventfds();
|
||||
assert_eq!(config.queues.len(), intr_evts.len());
|
||||
|
||||
let vq_pair = vec![
|
||||
let vq_pair = [
|
||||
&config.queues[2 * pair_index],
|
||||
&config.queues[2 * pair_index + 1],
|
||||
];
|
||||
|
||||
@@ -165,7 +165,7 @@ where
|
||||
if queue_idx < self.intr_evts.len() {
|
||||
if let Err(e) = self.intr_evts[queue_idx].read() {
|
||||
error!("{}: failed to read queue eventfd, {:?}", self.id, e);
|
||||
} else if let Err(e) = self.config.queues[queue_idx as usize].notify() {
|
||||
} else if let Err(e) = self.config.queues[queue_idx].notify() {
|
||||
error!("{}: failed to notify guest, {:?}", self.id, e);
|
||||
}
|
||||
} else {
|
||||
@@ -249,7 +249,7 @@ impl VhostUserBlockDevice {
|
||||
master.set_protocol_features(protocol_featuers)?;
|
||||
|
||||
let config_len = mem::size_of::<VirtioBlockConfig>();
|
||||
let config_space: Vec<u8> = vec![0u8; config_len as usize];
|
||||
let config_space: Vec<u8> = vec![0u8; config_len];
|
||||
|
||||
let (_, mut config_space) = master
|
||||
.get_config(
|
||||
|
||||
@@ -229,8 +229,8 @@ impl Endpoint {
|
||||
})?;
|
||||
|
||||
regions.push(VhostUserMemoryRegionInfo {
|
||||
guest_phys_addr: guest_phys_addr.raw_value() as u64,
|
||||
memory_size: region.len() as u64,
|
||||
guest_phys_addr: guest_phys_addr.raw_value(),
|
||||
memory_size: region.len(),
|
||||
userspace_addr: userspace_addr as *const u8 as u64,
|
||||
mmap_offset: file_offset.start(),
|
||||
mmap_handle: file_offset.file().as_raw_fd(),
|
||||
@@ -330,8 +330,8 @@ impl Endpoint {
|
||||
.map_err(|_| VirtioError::InvalidGuestAddress(guest_phys_addr))?;
|
||||
|
||||
regions.push(VhostUserMemoryRegionInfo {
|
||||
guest_phys_addr: guest_phys_addr.raw_value() as u64,
|
||||
memory_size: region.len() as u64,
|
||||
guest_phys_addr: guest_phys_addr.raw_value(),
|
||||
memory_size: region.len(),
|
||||
userspace_addr: userspace_addr as *const u8 as u64,
|
||||
mmap_offset: file_offset.start(),
|
||||
mmap_handle: file_offset.file().as_raw_fd(),
|
||||
@@ -342,7 +342,7 @@ impl Endpoint {
|
||||
|
||||
// 9. setup vrings
|
||||
for queue_cfg in config.virtio_config.queues.iter() {
|
||||
master.set_vring_num(queue_cfg.index() as usize, queue_cfg.actual_size() as u16)?;
|
||||
master.set_vring_num(queue_cfg.index() as usize, queue_cfg.actual_size())?;
|
||||
info!(
|
||||
"{}: set_vring_num(idx: {}, size: {})",
|
||||
self.name,
|
||||
|
||||
@@ -55,9 +55,9 @@ impl VsockStream for HybridStream {
|
||||
let mut flag = unsafe { libc::fcntl(fd, libc::F_GETFL) };
|
||||
|
||||
if nonblocking {
|
||||
flag = flag | libc::O_NONBLOCK;
|
||||
flag |= libc::O_NONBLOCK;
|
||||
} else {
|
||||
flag = flag & !libc::O_NONBLOCK;
|
||||
flag |= !libc::O_NONBLOCK;
|
||||
}
|
||||
|
||||
let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flag) };
|
||||
|
||||
@@ -729,10 +729,10 @@ mod tests {
|
||||
use vm_memory::{GuestAddress, GuestMemoryMmap, MmapRegion};
|
||||
|
||||
use super::*;
|
||||
use crate::config_manager::DeviceInfoGroup;
|
||||
use crate::config_manager::DeviceConfigInfo;
|
||||
use crate::test_utils::tests::create_vm_for_test;
|
||||
|
||||
type VfioDeviceInfo = DeviceInfoGroup<VfioDeviceConfigInfo, VfioDeviceError>;
|
||||
type VfioDeviceInfo = DeviceConfigInfo<VfioDeviceConfigInfo, VfioDeviceError>;
|
||||
|
||||
fn get_vfio_dev_mgr() -> VfioDeviceMgr {
|
||||
let kvm = Kvm::new().unwrap();
|
||||
|
||||
@@ -320,7 +320,7 @@ pub struct Vcpu {
|
||||
type VcpuCell = Cell<Option<*const Vcpu>>;
|
||||
|
||||
impl Vcpu {
|
||||
thread_local!(static TLS_VCPU_PTR: VcpuCell = Cell::new(None));
|
||||
thread_local!(static TLS_VCPU_PTR: VcpuCell = const { Cell::new(None) });
|
||||
|
||||
/// Associates `self` with the current thread.
|
||||
///
|
||||
|
||||
@@ -484,6 +484,7 @@ impl VcpuManager {
|
||||
/// Get available vcpus to create with target vcpu_count
|
||||
/// Argument:
|
||||
/// * vcpu_count: target vcpu_count online in VcpuManager.
|
||||
///
|
||||
/// Return:
|
||||
/// * return available vcpu ids to create vcpu .
|
||||
fn calculate_available_vcpus(&self, vcpu_count: u8) -> Vec<u8> {
|
||||
|
||||
63
src/libs/Cargo.lock
generated
63
src/libs/Cargo.lock
generated
@@ -400,6 +400,15 @@ dependencies = [
|
||||
"syn 2.0.66",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deranged"
|
||||
version = "0.3.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
|
||||
dependencies = [
|
||||
"powerfmt",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive-new"
|
||||
version = "0.5.9"
|
||||
@@ -676,6 +685,15 @@ version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
|
||||
|
||||
[[package]]
|
||||
name = "home"
|
||||
version = "0.5.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
|
||||
dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "0.2.8"
|
||||
@@ -983,6 +1001,12 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.44"
|
||||
@@ -1120,6 +1144,12 @@ version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "powerfmt"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.16"
|
||||
@@ -1544,9 +1574,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.147"
|
||||
version = "1.0.210"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
|
||||
checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
@@ -1583,13 +1613,13 @@ checksum = "794e44574226fc701e3be5c651feb7939038fc67fb73f6f4dd5c4ba90fd3be70"
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.147"
|
||||
version = "1.0.210"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
|
||||
checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 1.0.91",
|
||||
"syn 2.0.66",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1703,7 +1733,7 @@ dependencies = [
|
||||
"slog",
|
||||
"term",
|
||||
"thread_local",
|
||||
"time 0.3.22",
|
||||
"time 0.3.37",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1889,13 +1919,16 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.3.22"
|
||||
version = "0.3.37"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd"
|
||||
checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
|
||||
dependencies = [
|
||||
"deranged",
|
||||
"itoa",
|
||||
"libc",
|
||||
"num-conv",
|
||||
"num_threads",
|
||||
"powerfmt",
|
||||
"serde",
|
||||
"time-core",
|
||||
"time-macros",
|
||||
@@ -1903,16 +1936,17 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time-core"
|
||||
version = "0.1.1"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb"
|
||||
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
|
||||
|
||||
[[package]]
|
||||
name = "time-macros"
|
||||
version = "0.2.9"
|
||||
version = "0.2.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b"
|
||||
checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
|
||||
dependencies = [
|
||||
"num-conv",
|
||||
"time-core",
|
||||
]
|
||||
|
||||
@@ -2015,14 +2049,15 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
|
||||
|
||||
[[package]]
|
||||
name = "ttrpc"
|
||||
version = "0.8.1"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "55ea338db445bee75c596cf8a478fbfcefad5a943c9e92a7e1c805c65ed39551"
|
||||
checksum = "2c580c498a547b4c083ec758be543e11a0772e03013aef4cdb1fbe77c8b62cae"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"byteorder",
|
||||
"crossbeam",
|
||||
"futures",
|
||||
"home",
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.26.4",
|
||||
|
||||
@@ -9,12 +9,17 @@ use anyhow::anyhow;
|
||||
use anyhow::Result;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
#[cfg(all(target_arch = "powerpc64", target_endian = "little"))]
|
||||
use std::fs;
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use thiserror::Error;
|
||||
|
||||
#[cfg(any(target_arch = "s390x", target_arch = "powerpc64le"))]
|
||||
#[cfg(any(
|
||||
target_arch = "s390x",
|
||||
all(target_arch = "powerpc64", target_endian = "little")
|
||||
))]
|
||||
use nix::unistd::Uid;
|
||||
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
@@ -198,7 +203,6 @@ pub fn arch_guest_protection(
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[allow(dead_code)]
|
||||
// Guest protection is not supported on ARM64.
|
||||
pub fn available_guest_protection() -> Result<GuestProtection, ProtectionError> {
|
||||
if !Uid::effective().is_root() {
|
||||
return Err(ProtectionError::NoPerms)?;
|
||||
@@ -234,18 +238,21 @@ pub fn available_guest_protection() -> Result<GuestProtection, ProtectionError>
|
||||
Ok(GuestProtection::Se)
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "powerpc64le")]
|
||||
pub fn available_guest_protection() -> Result<check::GuestProtection, check::ProtectionError> {
|
||||
#[cfg(all(target_arch = "powerpc64", target_endian = "little"))]
|
||||
const PEF_SYS_FIRMWARE_DIR: &str = "/sys/firmware/ultravisor/";
|
||||
|
||||
#[cfg(all(target_arch = "powerpc64", target_endian = "little"))]
|
||||
pub fn available_guest_protection() -> Result<GuestProtection, ProtectionError> {
|
||||
if !Uid::effective().is_root() {
|
||||
return Err(check::ProtectionError::NoPerms);
|
||||
return Err(ProtectionError::NoPerms);
|
||||
}
|
||||
|
||||
let metadata = fs::metadata(PEF_SYS_FIRMWARE_DIR);
|
||||
if metadata.is_ok() && metadata.unwrap().is_dir() {
|
||||
Ok(check::GuestProtection::Pef)
|
||||
return Ok(GuestProtection::Pef);
|
||||
}
|
||||
|
||||
Ok(check::GuestProtection::NoProtection)
|
||||
Ok(GuestProtection::NoProtection)
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
|
||||
@@ -227,9 +227,6 @@ pub const KATA_ANNO_CFG_HYPERVISOR_FILE_BACKED_MEM_ROOT_DIR: &str =
|
||||
/// A sandbox annotation that is used to enable/disable virtio-mem.
|
||||
pub const KATA_ANNO_CFG_HYPERVISOR_VIRTIO_MEM: &str =
|
||||
"io.katacontainers.config.hypervisor.enable_virtio_mem";
|
||||
/// A sandbox annotation to enable swap of vm memory.
|
||||
pub const KATA_ANNO_CFG_HYPERVISOR_ENABLE_SWAP: &str =
|
||||
"io.katacontainers.config.hypervisor.enable_swap";
|
||||
/// A sandbox annotation to enable swap in the guest.
|
||||
pub const KATA_ANNO_CFG_HYPERVISOR_ENABLE_GUEST_SWAP: &str =
|
||||
"io.katacontainers.config.hypervisor.enable_guest_swap";
|
||||
@@ -786,14 +783,6 @@ impl Annotation {
|
||||
return Err(bool_err);
|
||||
}
|
||||
},
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_SWAP => match self.get_value::<bool>(key) {
|
||||
Ok(r) => {
|
||||
hv.memory_info.enable_swap = r.unwrap_or_default();
|
||||
}
|
||||
Err(_e) => {
|
||||
return Err(bool_err);
|
||||
}
|
||||
},
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_GUEST_SWAP => match self.get_value::<bool>(key)
|
||||
{
|
||||
Ok(r) => {
|
||||
|
||||
@@ -499,7 +499,7 @@ pub struct DeviceInfo {
|
||||
///
|
||||
/// Enabling this will result in the VM balloon device having f_reporting=on set
|
||||
#[serde(default)]
|
||||
pub enable_balloon_f_reporting: bool,
|
||||
pub reclaim_guest_freed_memory: bool,
|
||||
}
|
||||
|
||||
impl DeviceInfo {
|
||||
@@ -708,12 +708,6 @@ pub struct MemoryInfo {
|
||||
#[serde(default)]
|
||||
pub enable_virtio_mem: bool,
|
||||
|
||||
/// Enable swap of vm memory. Default false.
|
||||
///
|
||||
/// The behaviour is undefined if mem_prealloc is also set to true
|
||||
#[serde(default)]
|
||||
pub enable_swap: bool,
|
||||
|
||||
/// Enable swap in the guest. Default false.
|
||||
///
|
||||
/// When enable_guest_swap is enabled, insert a raw file to the guest as the swap device if the
|
||||
|
||||
@@ -22,8 +22,10 @@ pub const DRIVER_VFIO_PCI_GK_TYPE: &str = "vfio-pci-gk";
|
||||
/// VFIO PCI device to be bound to vfio-pci and made available inside the
|
||||
/// container as a VFIO device node
|
||||
pub const DRIVER_VFIO_PCI_TYPE: &str = "vfio-pci";
|
||||
/// DRIVER_VFIO_AP_TYPE is the device driver for vfio-ap.
|
||||
/// DRIVER_VFIO_AP_TYPE is the device driver for vfio-ap hotplug.
|
||||
pub const DRIVER_VFIO_AP_TYPE: &str = "vfio-ap";
|
||||
/// DRIVER_VFIO_AP_COLD_TYPE is the device driver for vfio-ap coldplug.
|
||||
pub const DRIVER_VFIO_AP_COLD_TYPE: &str = "vfio-ap-cold";
|
||||
|
||||
/// DRIVER_9P_TYPE is the driver for 9pfs volume.
|
||||
pub const DRIVER_9P_TYPE: &str = "9p";
|
||||
|
||||
@@ -11,7 +11,7 @@ mod tests {
|
||||
KATA_ANNO_CFG_HYPERVISOR_BLOCK_DEV_DRIVER, KATA_ANNO_CFG_HYPERVISOR_CTLPATH,
|
||||
KATA_ANNO_CFG_HYPERVISOR_DEFAULT_MEMORY, KATA_ANNO_CFG_HYPERVISOR_DEFAULT_VCPUS,
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_GUEST_SWAP, KATA_ANNO_CFG_HYPERVISOR_ENABLE_HUGEPAGES,
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_IO_THREADS, KATA_ANNO_CFG_HYPERVISOR_ENABLE_SWAP,
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_IO_THREADS
|
||||
KATA_ANNO_CFG_HYPERVISOR_FILE_BACKED_MEM_ROOT_DIR,
|
||||
KATA_ANNO_CFG_HYPERVISOR_GUEST_HOOK_PATH, KATA_ANNO_CFG_HYPERVISOR_JAILER_PATH,
|
||||
KATA_ANNO_CFG_HYPERVISOR_KERNEL_PATH, KATA_ANNO_CFG_HYPERVISOR_MEMORY_PREALLOC,
|
||||
@@ -129,10 +129,6 @@ mod tests {
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_IO_THREADS.to_string(),
|
||||
"false".to_string(),
|
||||
);
|
||||
anno_hash.insert(
|
||||
KATA_ANNO_CFG_HYPERVISOR_ENABLE_SWAP.to_string(),
|
||||
"false".to_string(),
|
||||
);
|
||||
anno_hash.insert(
|
||||
KATA_ANNO_CFG_HYPERVISOR_FILE_BACKED_MEM_ROOT_DIR.to_string(),
|
||||
"./test_file_backend_mem_root".to_string(),
|
||||
@@ -201,7 +197,6 @@ mod tests {
|
||||
assert_eq!(hv.memory_info.default_memory, 100);
|
||||
assert!(!hv.enable_iothreads);
|
||||
assert!(!hv.enable_iothreads);
|
||||
assert!(!hv.memory_info.enable_swap);
|
||||
assert_eq!(
|
||||
hv.memory_info.file_mem_backend,
|
||||
"./test_file_backend_mem_root"
|
||||
|
||||
@@ -19,7 +19,7 @@ default_maxvcpus = 64
|
||||
machine_type = "q35"
|
||||
confidential_guest = true
|
||||
rootless = true
|
||||
enable_annotations = ["shared_fs","path", "ctlpath","jailer_path","enable_iothreads","default_memory","memory_slots","enable_mem_prealloc","enable_hugepages","file_mem_backend","enable_virtio_mem","enable_swap","enable_guest_swap","default_vcpus","virtio_fs_extra_args","block_device_driver","vhost_user_store_path","kernel","guest_hook_path","block_device_cache_noflush","virtio_fs_daemon"]
|
||||
enable_annotations = ["shared_fs","path", "ctlpath","jailer_path","enable_iothreads","default_memory","memory_slots","enable_mem_prealloc","enable_hugepages","file_mem_backend","enable_virtio_mem","enable_guest_swap","default_vcpus","virtio_fs_extra_args","block_device_driver","vhost_user_store_path","kernel","guest_hook_path","block_device_cache_noflush","virtio_fs_daemon"]
|
||||
machine_accelerators="noapic"
|
||||
default_bridges = 2
|
||||
default_memory = 128
|
||||
@@ -46,7 +46,6 @@ enable_iommu = true
|
||||
enable_iommu_platform = true
|
||||
file_mem_backend = "/dev/shm"
|
||||
valid_file_mem_backends = ["/dev/shm","/dev/snd","./test_file_backend_mem_root"]
|
||||
enable_swap = true
|
||||
pflashes = ["/proc/mounts"]
|
||||
enable_debug = true
|
||||
msize_9p = 16384
|
||||
|
||||
@@ -45,7 +45,6 @@ enable_iommu = true
|
||||
enable_iommu_platform = true
|
||||
file_mem_backend = "/dev/shm"
|
||||
valid_file_mem_backends = ["/dev/shm"]
|
||||
enable_swap = true
|
||||
pflashes = ["/proc/mounts"]
|
||||
enable_debug = true
|
||||
msize_9p = 16384
|
||||
|
||||
@@ -11,7 +11,7 @@ with-serde = []
|
||||
async = ["ttrpc/async", "async-trait"]
|
||||
|
||||
[dependencies]
|
||||
ttrpc = "0.8"
|
||||
ttrpc = "0.8.4"
|
||||
async-trait = { version = "0.1.42", optional = true }
|
||||
protobuf = { version = "3.2.0" }
|
||||
serde = { version = "1.0.130", features = ["derive"] }
|
||||
|
||||
@@ -229,6 +229,8 @@ fn real_main() -> Result<(), std::io::Error> {
|
||||
false,
|
||||
)?;
|
||||
|
||||
codegen("src", &["protos/cri-api/api.proto"], false)?;
|
||||
|
||||
// There is a message named 'Box' in oci.proto
|
||||
// so there is a struct named 'Box', we should replace Box<Self> to ::std::boxed::Box<Self>
|
||||
// to avoid the conflict.
|
||||
|
||||
1968
src/libs/protocols/protos/cri-api/api.proto
Normal file
1968
src/libs/protocols/protos/cri-api/api.proto
Normal file
File diff suppressed because it is too large
Load Diff
@@ -25,6 +25,7 @@ pub mod remote;
|
||||
pub mod remote_ttrpc;
|
||||
#[cfg(feature = "async")]
|
||||
pub mod remote_ttrpc_async;
|
||||
pub mod api;
|
||||
|
||||
#[cfg(feature = "with-serde")]
|
||||
pub use serde_config::{
|
||||
|
||||
@@ -49,9 +49,9 @@
|
||||
//!
|
||||
//! The work is inspired by:
|
||||
//! - [`filepath-securejoin`](https://github.com/cyphar/filepath-securejoin): secure_join() written
|
||||
//! in Go.
|
||||
//! in Go.
|
||||
//! - [CVE-2021-30465](https://github.com/advisories/GHSA-c3xm-pvg7-gh7r): symlink related TOCTOU
|
||||
//! flaw in `runC`.
|
||||
//! flaw in `runC`.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
|
||||
@@ -37,8 +37,10 @@ fn get_uds_with_sid(short_id: &str, path: &str) -> Result<String> {
|
||||
return Ok(format!("unix://{}", p.display()));
|
||||
}
|
||||
|
||||
let _ = fs::create_dir_all(kata_run_path.join(short_id))
|
||||
.context(format!("failed to create directory {:?}", kata_run_path.join(short_id)));
|
||||
let _ = fs::create_dir_all(kata_run_path.join(short_id)).context(format!(
|
||||
"failed to create directory {:?}",
|
||||
kata_run_path.join(short_id)
|
||||
));
|
||||
|
||||
let target_ids: Vec<String> = fs::read_dir(&kata_run_path)?
|
||||
.filter_map(|e| {
|
||||
@@ -71,8 +73,11 @@ fn get_uds_with_sid(short_id: &str, path: &str) -> Result<String> {
|
||||
}
|
||||
|
||||
// return sandbox's storage path
|
||||
pub fn sb_storage_path() -> String {
|
||||
String::from(KATA_PATH)
|
||||
pub fn sb_storage_path() -> Result<&'static str> {
|
||||
//make sure the path existed
|
||||
std::fs::create_dir_all(KATA_PATH).context(format!("failed to create dir: {}", KATA_PATH))?;
|
||||
|
||||
Ok(KATA_PATH)
|
||||
}
|
||||
|
||||
// returns the address of the unix domain socket(UDS) for communication with shim
|
||||
@@ -85,7 +90,7 @@ pub fn mgmt_socket_addr(sid: &str) -> Result<String> {
|
||||
));
|
||||
}
|
||||
|
||||
get_uds_with_sid(sid, &sb_storage_path())
|
||||
get_uds_with_sid(sid, &sb_storage_path()?)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
38
src/runtime-rs/Cargo.lock
generated
38
src/runtime-rs/Cargo.lock
generated
@@ -513,6 +513,7 @@ dependencies = [
|
||||
"oci-spec",
|
||||
"persist",
|
||||
"protobuf 3.2.0",
|
||||
"protocols",
|
||||
"resource",
|
||||
"runtime-spec",
|
||||
"serde_json",
|
||||
@@ -571,7 +572,7 @@ dependencies = [
|
||||
"signal-hook",
|
||||
"signal-hook-tokio",
|
||||
"thiserror",
|
||||
"time 0.3.31",
|
||||
"time 0.3.37",
|
||||
"tokio",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
@@ -1579,6 +1580,15 @@ dependencies = [
|
||||
"digest 0.10.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "home"
|
||||
version = "0.5.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
|
||||
dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "0.2.9"
|
||||
@@ -2279,6 +2289,12 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.16"
|
||||
@@ -3889,7 +3905,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
"slog",
|
||||
"time 0.3.31",
|
||||
"time 0.3.37",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3924,7 +3940,7 @@ dependencies = [
|
||||
"slog",
|
||||
"term",
|
||||
"thread_local",
|
||||
"time 0.3.31",
|
||||
"time 0.3.37",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4191,13 +4207,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.3.31"
|
||||
version = "0.3.37"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e"
|
||||
checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21"
|
||||
dependencies = [
|
||||
"deranged",
|
||||
"itoa",
|
||||
"libc",
|
||||
"num-conv",
|
||||
"num_threads",
|
||||
"powerfmt",
|
||||
"serde",
|
||||
@@ -4213,10 +4230,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
|
||||
|
||||
[[package]]
|
||||
name = "time-macros"
|
||||
version = "0.2.16"
|
||||
version = "0.2.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f"
|
||||
checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de"
|
||||
dependencies = [
|
||||
"num-conv",
|
||||
"time-core",
|
||||
]
|
||||
|
||||
@@ -4426,14 +4444,15 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"
|
||||
|
||||
[[package]]
|
||||
name = "ttrpc"
|
||||
version = "0.8.1"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "55ea338db445bee75c596cf8a478fbfcefad5a943c9e92a7e1c805c65ed39551"
|
||||
checksum = "2c580c498a547b4c083ec758be543e11a0772e03013aef4cdb1fbe77c8b62cae"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"byteorder",
|
||||
"crossbeam",
|
||||
"futures 0.3.28",
|
||||
"home",
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.26.2",
|
||||
@@ -4629,6 +4648,7 @@ dependencies = [
|
||||
"serde_json",
|
||||
"slog",
|
||||
"slog-scope",
|
||||
"strum 0.24.1",
|
||||
"tokio",
|
||||
"toml 0.4.10",
|
||||
"tracing",
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
#
|
||||
|
||||
MACHINETYPE :=
|
||||
KERNELPARAMS :=
|
||||
KERNELPARAMS := cgroup_no_v1=all systemd.unified_cgroup_hierarchy=1
|
||||
MACHINEACCELERATORS :=
|
||||
CPUFEATURES := pmu=off
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
#
|
||||
|
||||
MACHINETYPE := pseries
|
||||
KERNELPARAMS :=
|
||||
KERNELPARAMS := cgroup_no_v1=all systemd.unified_cgroup_hierarchy=1
|
||||
MACHINEACCELERATORS := "cap-cfpc=broken,cap-sbbc=broken,cap-ibs=broken,cap-large-decr=off,cap-ccf-assist=off"
|
||||
CPUFEATURES := pmu=off
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
#
|
||||
|
||||
MACHINETYPE := s390-ccw-virtio
|
||||
KERNELPARAMS :=
|
||||
KERNELPARAMS := cgroup_no_v1=all systemd.unified_cgroup_hierarchy=1
|
||||
MACHINEACCELERATORS :=
|
||||
CPUFEATURES :=
|
||||
|
||||
|
||||
@@ -5,7 +5,8 @@
|
||||
#
|
||||
|
||||
MACHINETYPE := q35
|
||||
KERNELPARAMS :=
|
||||
KERNELPARAMS := cgroup_no_v1=all systemd.unified_cgroup_hierarchy=1
|
||||
KERNELTDXPARAMS := cgroup_no_v1=all systemd.unified_cgroup_hierarchy=1
|
||||
MACHINEACCELERATORS :=
|
||||
CPUFEATURES := pmu=off
|
||||
|
||||
|
||||
@@ -158,10 +158,25 @@ virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
|
||||
# > 5 --> will be set to 5
|
||||
default_bridges = @DEFBRIDGES@
|
||||
|
||||
# Reclaim guest freed memory.
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set.
|
||||
# Then the hypervisor will use it to reclaim guest freed memory.
|
||||
# This is useful for reducing the amount of memory used by a VM.
|
||||
# Enabling this feature may sometimes reduce the speed of memory access in
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
|
||||
# Block storage driver to be used for the hypervisor in case the container
|
||||
# rootfs is backed by a block device.
|
||||
block_device_driver = "virtio-blk-pci"
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
# being allocated using huge pages.
|
||||
|
||||
@@ -98,11 +98,15 @@ default_maxvcpus = @DEFMAXVCPUS_DB@
|
||||
# > 5 --> will be set to 5
|
||||
default_bridges = @DEFBRIDGES@
|
||||
|
||||
# Enable balloon f_reporting
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set
|
||||
# Reclaim guest freed memory.
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set.
|
||||
# Then the hypervisor will use it to reclaim guest freed memory.
|
||||
# This is useful for reducing the amount of memory used by a VM.
|
||||
# Enabling this feature may sometimes reduce the speed of memory access in
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#enable_balloon_f_reporting = true
|
||||
#reclaim_guest_freed_memory = true
|
||||
|
||||
# Default memory size in MiB for SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSZ@ MiB.
|
||||
@@ -200,6 +204,11 @@ virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
|
||||
# Metadata, data, and pathname lookup are cached in guest and never expire.
|
||||
virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
# being allocated using huge pages.
|
||||
|
||||
@@ -143,6 +143,16 @@ default_maxvcpus = @DEFMAXVCPUS_QEMU@
|
||||
# > 5 --> will be set to 5
|
||||
default_bridges = @DEFBRIDGES@
|
||||
|
||||
# Reclaim guest freed memory.
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set.
|
||||
# Then the hypervisor will use it to reclaim guest freed memory.
|
||||
# This is useful for reducing the amount of memory used by a VM.
|
||||
# Enabling this feature may sometimes reduce the speed of memory access in
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
|
||||
# Default memory size in MiB for SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSZ@ MiB.
|
||||
default_memory = @DEFMEMSZ@
|
||||
|
||||
@@ -121,11 +121,6 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
|
||||
@@ -17,7 +17,7 @@ serde = { version = "^1.0", features = ["derive"] }
|
||||
serde_json = ">=1.0.9"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
ttrpc = "0.8"
|
||||
ttrpc = "0.8.4"
|
||||
tokio = { version = "1.38.0", features = ["fs", "rt"] }
|
||||
tracing = "0.1.36"
|
||||
url = "2.2.2"
|
||||
|
||||
@@ -28,7 +28,7 @@ rand = "0.8.4"
|
||||
path-clean = "1.0.1"
|
||||
lazy_static = "1.4"
|
||||
tracing = "0.1.36"
|
||||
ttrpc = {version = "0.8.1", features = ["async"] }
|
||||
ttrpc = { version = "0.8.4", features = ["async"] }
|
||||
protobuf = "3.1.0"
|
||||
|
||||
kata-sys-util = { path = "../../../libs/kata-sys-util" }
|
||||
@@ -46,15 +46,28 @@ safe-path = "0.1.0"
|
||||
crossbeam-channel = "0.5.6"
|
||||
tempdir = "0.3.7"
|
||||
|
||||
qapi = { version = "0.14", features = [ "qmp", "async-tokio-all" ] }
|
||||
qapi = { version = "0.14", features = ["qmp", "async-tokio-all"] }
|
||||
qapi-spec = "0.3.1"
|
||||
qapi-qmp = "0.14.0"
|
||||
|
||||
[target.'cfg(not(target_arch = "s390x"))'.dependencies]
|
||||
dragonball = { path = "../../../dragonball", features = ["atomic-guest-memory", "virtio-vsock", "hotplug", "virtio-blk", "virtio-net", "virtio-fs", "vhost-net", "dbs-upcall", "virtio-mem", "virtio-balloon", "vhost-user-net", "host-device"], optional = true }
|
||||
dragonball = { path = "../../../dragonball", features = [
|
||||
"atomic-guest-memory",
|
||||
"virtio-vsock",
|
||||
"hotplug",
|
||||
"virtio-blk",
|
||||
"virtio-net",
|
||||
"virtio-fs",
|
||||
"vhost-net",
|
||||
"dbs-upcall",
|
||||
"virtio-mem",
|
||||
"virtio-balloon",
|
||||
"vhost-user-net",
|
||||
"host-device",
|
||||
], optional = true }
|
||||
dbs-utils = { path = "../../../dragonball/src/dbs_utils" }
|
||||
hyperlocal = "0.8.0"
|
||||
hyper = {version = "0.14.18", features = ["client"]}
|
||||
hyper = { version = "0.14.18", features = ["client"] }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
@@ -76,3 +89,8 @@ serial_test = "2.0.0"
|
||||
|
||||
[build-dependencies]
|
||||
ttrpc-codegen = "0.4.2"
|
||||
|
||||
[lints.rust]
|
||||
unexpected_cfgs = { level = "warn", check-cfg = [
|
||||
'cfg(feature, values("enable-vendor"))',
|
||||
] }
|
||||
|
||||
@@ -180,6 +180,15 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
|
||||
|
||||
let platform = get_platform_cfg(guest_protection_to_use);
|
||||
|
||||
let balloon = if cfg.device_info.reclaim_guest_freed_memory {
|
||||
Some(crate::BalloonConfig {
|
||||
free_page_reporting: true,
|
||||
..Default::default()
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let cfg = VmConfig {
|
||||
cpus,
|
||||
memory,
|
||||
@@ -193,6 +202,7 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
|
||||
vsock: Some(vsock),
|
||||
rng,
|
||||
platform,
|
||||
balloon,
|
||||
|
||||
..Default::default()
|
||||
};
|
||||
@@ -1643,7 +1653,6 @@ mod tests {
|
||||
result: Result<VmConfig, VmConfigError>,
|
||||
}
|
||||
|
||||
let u8_max = std::u8::MAX;
|
||||
let sysinfo = nix::sys::sysinfo::sysinfo().unwrap();
|
||||
|
||||
let actual_max_mem_bytes = sysinfo.ram_total();
|
||||
@@ -1669,8 +1678,8 @@ mod tests {
|
||||
let valid_vsock =
|
||||
VsockConfig::try_from((vsock_socket_path.to_string(), DEFAULT_VSOCK_CID)).unwrap();
|
||||
|
||||
let (cpu_info, cpus_config) = make_cpu_objects(7, u8_max, false);
|
||||
let (cpu_info_tdx, cpus_config_tdx) = make_cpu_objects(7, u8_max, true);
|
||||
let (cpu_info, cpus_config) = make_cpu_objects(7, u8::MAX, false);
|
||||
let (cpu_info_tdx, cpus_config_tdx) = make_cpu_objects(7, u8::MAX, true);
|
||||
|
||||
let (memory_info_std, mem_config_std) =
|
||||
make_memory_objects(79, usable_max_mem_bytes, false);
|
||||
|
||||
@@ -446,8 +446,6 @@ pub struct VmConfig {
|
||||
pub numa: Option<Vec<NumaConfig>>,
|
||||
#[serde(default)]
|
||||
pub watchdog: bool,
|
||||
#[cfg(feature = "guest_debug")]
|
||||
pub gdb: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub platform: Option<PlatformConfig>,
|
||||
}
|
||||
|
||||
@@ -27,7 +27,7 @@ pub struct CloudHypervisorInner {
|
||||
pub(crate) api_socket: Option<UnixStream>,
|
||||
pub(crate) extra_args: Option<Vec<String>>,
|
||||
|
||||
pub(crate) config: Option<HypervisorConfig>,
|
||||
pub(crate) config: HypervisorConfig,
|
||||
|
||||
pub(crate) process: Option<Child>,
|
||||
pub(crate) pid: Option<u32>,
|
||||
@@ -101,7 +101,7 @@ impl CloudHypervisorInner {
|
||||
process: None,
|
||||
pid: None,
|
||||
|
||||
config: None,
|
||||
config: Default::default(),
|
||||
state: VmmState::NotReady,
|
||||
timeout_secs: CH_DEFAULT_TIMEOUT_SECS as i32,
|
||||
id: String::default(),
|
||||
@@ -124,11 +124,11 @@ impl CloudHypervisorInner {
|
||||
}
|
||||
|
||||
pub fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
|
||||
self.config = Some(config);
|
||||
self.config = config;
|
||||
}
|
||||
|
||||
pub fn hypervisor_config(&self) -> HypervisorConfig {
|
||||
self.config.clone().unwrap_or_default()
|
||||
self.config.clone()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -168,7 +168,7 @@ impl Persist for CloudHypervisorInner {
|
||||
let (tx, rx) = channel(true);
|
||||
|
||||
let mut ch = Self {
|
||||
config: Some(hypervisor_state.config),
|
||||
config: hypervisor_state.config,
|
||||
state: VmmState::NotReady,
|
||||
id: hypervisor_state.id,
|
||||
vm_path: hypervisor_state.vm_path,
|
||||
|
||||
@@ -315,7 +315,8 @@ impl CloudHypervisorInner {
|
||||
.ok_or("missing socket")
|
||||
.map_err(|e| anyhow!(e))?;
|
||||
|
||||
let disk_config = DiskConfig::try_from(device.config)?;
|
||||
let mut disk_config = DiskConfig::try_from(device.config.clone())?;
|
||||
disk_config.direct = self.config.blockdev_info.block_device_cache_direct;
|
||||
|
||||
let response = cloud_hypervisor_vm_blockdev_add(
|
||||
socket.try_clone().context("failed to clone socket")?,
|
||||
|
||||
@@ -117,11 +117,7 @@ impl CloudHypervisorInner {
|
||||
}
|
||||
|
||||
async fn get_kernel_params(&self) -> Result<String> {
|
||||
let cfg = self
|
||||
.config
|
||||
.as_ref()
|
||||
.ok_or("no hypervisor config for CH")
|
||||
.map_err(|e| anyhow!(e))?;
|
||||
let cfg = &self.config;
|
||||
|
||||
let enable_debug = cfg.debug_info.enable_debug;
|
||||
|
||||
@@ -200,15 +196,10 @@ impl CloudHypervisorInner {
|
||||
|
||||
let vsock_socket_path = get_vsock_path(&self.id)?;
|
||||
|
||||
let hypervisor_config = self
|
||||
.config
|
||||
.as_ref()
|
||||
.ok_or("no hypervisor config for CH")
|
||||
.map_err(|e| anyhow!(e))?;
|
||||
|
||||
debug!(
|
||||
sl!(),
|
||||
"generic Hypervisor configuration: {:?}", hypervisor_config
|
||||
"generic Hypervisor configuration: {:?}",
|
||||
self.config.clone()
|
||||
);
|
||||
|
||||
let kernel_params = self.get_kernel_params().await?;
|
||||
@@ -217,7 +208,7 @@ impl CloudHypervisorInner {
|
||||
kernel_params,
|
||||
sandbox_path,
|
||||
vsock_socket_path,
|
||||
cfg: hypervisor_config.clone(),
|
||||
cfg: self.config.clone(),
|
||||
guest_protection_to_use: self.guest_protection_to_use.clone(),
|
||||
shared_fs_devices,
|
||||
network_devices,
|
||||
@@ -324,11 +315,7 @@ impl CloudHypervisorInner {
|
||||
async fn cloud_hypervisor_launch(&mut self, _timeout_secs: i32) -> Result<()> {
|
||||
self.cloud_hypervisor_ensure_not_launched().await?;
|
||||
|
||||
let cfg = self
|
||||
.config
|
||||
.as_ref()
|
||||
.ok_or("no hypervisor config for CH")
|
||||
.map_err(|e| anyhow!(e))?;
|
||||
let cfg = &self.config;
|
||||
|
||||
let debug = cfg.debug_info.enable_debug;
|
||||
|
||||
@@ -338,13 +325,7 @@ impl CloudHypervisorInner {
|
||||
|
||||
let _ = std::fs::remove_file(api_socket_path.clone());
|
||||
|
||||
let binary_path = self
|
||||
.config
|
||||
.as_ref()
|
||||
.ok_or("no hypervisor config for CH")
|
||||
.map_err(|e| anyhow!(e))?
|
||||
.path
|
||||
.to_string();
|
||||
let binary_path = cfg.path.to_string();
|
||||
|
||||
let path = Path::new(&binary_path).canonicalize()?;
|
||||
|
||||
@@ -567,11 +548,7 @@ impl CloudHypervisorInner {
|
||||
// call, if confidential_guest is set, a confidential
|
||||
// guest will be created.
|
||||
async fn handle_guest_protection(&mut self) -> Result<()> {
|
||||
let cfg = self
|
||||
.config
|
||||
.as_ref()
|
||||
.ok_or("missing hypervisor config")
|
||||
.map_err(|e| anyhow!(e))?;
|
||||
let cfg = &self.config;
|
||||
|
||||
let confidential_guest = cfg.security_info.confidential_guest;
|
||||
|
||||
|
||||
@@ -491,7 +491,7 @@ impl DeviceManager {
|
||||
let id = format!("{:x}", rand_bytes);
|
||||
|
||||
// check collision in devices
|
||||
if self.devices.get(&id).is_none() {
|
||||
if !self.devices.contains_key(&id) {
|
||||
return Ok(id);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -177,6 +177,7 @@ pub struct HostDevice {
|
||||
pub guest_pci_path: Option<PciPath>,
|
||||
|
||||
/// vfio_vendor for vendor's some special cases.
|
||||
#[allow(unexpected_cfgs)]
|
||||
#[cfg(feature = "enable-vendor")]
|
||||
pub vfio_vendor: VfioVendor,
|
||||
}
|
||||
@@ -560,11 +561,8 @@ impl PCIeDevice for VfioDevice {
|
||||
))?;
|
||||
hostdev.guest_pci_path = Some(pci_path.clone());
|
||||
|
||||
self.device_options.push(format!(
|
||||
"0000:{}={}",
|
||||
hostdev.bus_slot_func,
|
||||
pci_path.to_string()
|
||||
));
|
||||
self.device_options
|
||||
.push(format!("0000:{}={}", hostdev.bus_slot_func, pci_path));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -33,9 +33,9 @@ impl PciSlot {
|
||||
}
|
||||
}
|
||||
|
||||
impl ToString for PciSlot {
|
||||
fn to_string(&self) -> String {
|
||||
format!("{:02x}", self.0)
|
||||
impl std::fmt::Display for PciSlot {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{:02x}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,14 +116,17 @@ impl PciPath {
|
||||
}
|
||||
}
|
||||
|
||||
impl ToString for PciPath {
|
||||
// method to format the PciPath into a string
|
||||
fn to_string(&self) -> String {
|
||||
self.slots
|
||||
.iter()
|
||||
.map(|pci_slot| format!("{:02x}", pci_slot.0))
|
||||
.collect::<Vec<String>>()
|
||||
.join("/")
|
||||
impl std::fmt::Display for PciPath {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
self.slots
|
||||
.iter()
|
||||
.map(|pci_slot| format!("{:02x}", pci_slot.0))
|
||||
.collect::<Vec<String>>()
|
||||
.join("/")
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user