Compare commits
116 Commits
saulparede
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a0e99a86cf | ||
|
|
012bf4b333 | ||
|
|
7dce05b5fc | ||
|
|
3c584a474f | ||
|
|
48ef2220e8 | ||
|
|
c96b2034dc | ||
|
|
b8576ef476 | ||
|
|
a747b9f774 | ||
|
|
302b2c8d75 | ||
|
|
7fa68ffd52 | ||
|
|
119a145923 | ||
|
|
9f6bce9517 | ||
|
|
b04260f926 | ||
|
|
26d41b8f6e | ||
|
|
004333ed71 | ||
|
|
8dae67794a | ||
|
|
65b2a75aca | ||
|
|
75ecfe3fe2 | ||
|
|
a923bb2917 | ||
|
|
1163b6581f | ||
|
|
29e5d5d951 | ||
|
|
0cf3243801 | ||
|
|
64735222c6 | ||
|
|
30e030e18e | ||
|
|
8cebcf0113 | ||
|
|
237729d728 | ||
|
|
f0ad9f1709 | ||
|
|
1b8189731a | ||
|
|
4fad88499c | ||
|
|
fb77c357f4 | ||
|
|
de3afd3076 | ||
|
|
cd931d4905 | ||
|
|
911aee5ad7 | ||
|
|
858620d2e7 | ||
|
|
8c2b7ed619 | ||
|
|
af7fdd5cd1 | ||
|
|
0d8186ae16 | ||
|
|
7e0f5e533a | ||
|
|
bcfb2354e0 | ||
|
|
caf6b244e6 | ||
|
|
fb5482f647 | ||
|
|
46aa318b74 | ||
|
|
ec9c57c595 | ||
|
|
8950f1caeb | ||
|
|
814ae53d77 | ||
|
|
27dfb0d06f | ||
|
|
7ae2282a99 | ||
|
|
fd583d833b | ||
|
|
eb4ce0e98b | ||
|
|
6a832dd1f3 | ||
|
|
79efe3e041 | ||
|
|
2728b493d5 | ||
|
|
1ec97d25e7 | ||
|
|
aa6890eae1 | ||
|
|
fe817bb47b | ||
|
|
514a2b1a7c | ||
|
|
2edb588ed9 | ||
|
|
9871256771 | ||
|
|
8de7f29981 | ||
|
|
1c63738b80 | ||
|
|
6e79a9d6ad | ||
|
|
8df9cf35df | ||
|
|
ef32923461 | ||
|
|
85e17c2e77 | ||
|
|
c3868f8e60 | ||
|
|
27417d9d15 | ||
|
|
83f37f4beb | ||
|
|
e44dfccf7a | ||
|
|
1035504492 | ||
|
|
20cb65b1fb | ||
|
|
864f181faf | ||
|
|
642b5661ff | ||
|
|
4403289123 | ||
|
|
d2c2ec6e23 | ||
|
|
608f378bff | ||
|
|
f14895bdc4 | ||
|
|
fd716c017d | ||
|
|
740d380b8e | ||
|
|
6194510e90 | ||
|
|
7e3fd74779 | ||
|
|
f6319da73d | ||
|
|
91d6c39f06 | ||
|
|
5ab0744c25 | ||
|
|
e905b74267 | ||
|
|
5333e45313 | ||
|
|
795869152d | ||
|
|
8903b12d34 | ||
|
|
476f550977 | ||
|
|
ae59cf26a0 | ||
|
|
cfc1836a31 | ||
|
|
7ab6e11e10 | ||
|
|
e475fb2116 | ||
|
|
f62a6b6ab2 | ||
|
|
4afb55154a | ||
|
|
38a655487f | ||
|
|
e1d7d5bef8 | ||
|
|
e4eda5e1d8 | ||
|
|
2f5415d8f5 | ||
|
|
3876a80208 | ||
|
|
5765bc97b4 | ||
|
|
62d74bb1fd | ||
|
|
eabb98ecab | ||
|
|
c1b7069e50 | ||
|
|
fddd1e8b6e | ||
|
|
d6178d78b1 | ||
|
|
1c7b14e282 | ||
|
|
e9bda42b01 | ||
|
|
a66c93caaa | ||
|
|
17454c0969 | ||
|
|
f8617241f4 | ||
|
|
d0f0dc2008 | ||
|
|
3e39c1fad3 | ||
|
|
a6a81124cb | ||
|
|
8d09a0e7e7 | ||
|
|
ce65d17276 | ||
|
|
27bebfb438 |
@@ -32,5 +32,6 @@ ignoreRegExpList:
|
||||
ignorePaths:
|
||||
- "**/vendor/**" # vendor files aren't owned by us
|
||||
- "**/src/runtime/virtcontainers/pkg/cloud-hypervisor/client/**" # Generated files
|
||||
- "**/requirements.txt"
|
||||
|
||||
useGitignore: true
|
||||
|
||||
7
.github/dependabot.yml
vendored
@@ -37,9 +37,9 @@ updates:
|
||||
# create groups for common dependencies, so they can all go in a single PR
|
||||
# We can extend this as we see more frequent groups
|
||||
groups:
|
||||
bit-vec:
|
||||
aws-libcrypto:
|
||||
patterns:
|
||||
- bit-vec
|
||||
- aws-lc-*
|
||||
bumpalo:
|
||||
patterns:
|
||||
- bumpalo
|
||||
@@ -67,6 +67,9 @@ updates:
|
||||
rustix:
|
||||
patterns:
|
||||
- rustix
|
||||
rustls-webpki:
|
||||
patterns:
|
||||
- rustls-webpki
|
||||
slab:
|
||||
patterns:
|
||||
- slab
|
||||
|
||||
@@ -47,6 +47,7 @@ jobs:
|
||||
- coco-guest-components
|
||||
- firecracker
|
||||
- kernel
|
||||
- kernel-debug
|
||||
- kernel-dragonball-experimental
|
||||
- kernel-nvidia-gpu
|
||||
- nydus
|
||||
@@ -347,6 +348,16 @@ jobs:
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
env:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
- name: Check kata tarball size (GitHub release asset limit)
|
||||
run: |
|
||||
# https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases#storage-and-bandwidth-quotas
|
||||
GITHUB_ASSET_MAX_BYTES=2147483648
|
||||
tarball_size=$(stat -c "%s" kata-static.tar.zst)
|
||||
if [[ "${tarball_size}" -ge "${GITHUB_ASSET_MAX_BYTES}" ]]; then
|
||||
echo "::error::tarball size (${tarball_size} bytes) >= GitHub release asset limit (${GITHUB_ASSET_MAX_BYTES} bytes)"
|
||||
exit 1
|
||||
fi
|
||||
echo "tarball size: ${tarball_size} bytes"
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
@@ -447,6 +458,16 @@ jobs:
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-tools-artifacts versions.yaml kata-tools-static.tar.zst
|
||||
env:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
- name: Check kata-tools tarball size (GitHub release asset limit)
|
||||
run: |
|
||||
# https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases#storage-and-bandwidth-quotas
|
||||
GITHUB_ASSET_MAX_BYTES=2147483648
|
||||
tarball_size=$(stat -c "%s" kata-tools-static.tar.zst)
|
||||
if [[ "${tarball_size}" -ge "${GITHUB_ASSET_MAX_BYTES}" ]]; then
|
||||
echo "::error::tarball size (${tarball_size} bytes) >= GitHub release asset limit (${GITHUB_ASSET_MAX_BYTES} bytes)"
|
||||
exit 1
|
||||
fi
|
||||
echo "tarball size: ${tarball_size} bytes"
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
|
||||
@@ -45,6 +45,7 @@ jobs:
|
||||
- cloud-hypervisor
|
||||
- firecracker
|
||||
- kernel
|
||||
- kernel-debug
|
||||
- kernel-dragonball-experimental
|
||||
- kernel-nvidia-gpu
|
||||
- kernel-cca-confidential
|
||||
@@ -326,6 +327,16 @@ jobs:
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
env:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
- name: Check kata tarball size (GitHub release asset limit)
|
||||
run: |
|
||||
# https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases#storage-and-bandwidth-quotas
|
||||
GITHUB_ASSET_MAX_BYTES=2147483648
|
||||
tarball_size=$(stat -c "%s" kata-static.tar.zst)
|
||||
if [[ "${tarball_size}" -ge "${GITHUB_ASSET_MAX_BYTES}" ]]; then
|
||||
echo "::error::tarball size (${tarball_size} bytes) >= GitHub release asset limit (${GITHUB_ASSET_MAX_BYTES} bytes)"
|
||||
exit 1
|
||||
fi
|
||||
echo "tarball size: ${tarball_size} bytes"
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
|
||||
@@ -262,6 +262,16 @@ jobs:
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
env:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
- name: Check kata tarball size (GitHub release asset limit)
|
||||
run: |
|
||||
# https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases#storage-and-bandwidth-quotas
|
||||
GITHUB_ASSET_MAX_BYTES=2147483648
|
||||
tarball_size=$(stat -c "%s" kata-static.tar.zst)
|
||||
if [[ "${tarball_size}" -ge "${GITHUB_ASSET_MAX_BYTES}" ]]; then
|
||||
echo "::error::tarball size (${tarball_size} bytes) >= GitHub release asset limit (${GITHUB_ASSET_MAX_BYTES} bytes)"
|
||||
exit 1
|
||||
fi
|
||||
echo "tarball size: ${tarball_size} bytes"
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
|
||||
@@ -350,6 +350,16 @@ jobs:
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
env:
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
- name: Check kata tarball size (GitHub release asset limit)
|
||||
run: |
|
||||
# https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases#storage-and-bandwidth-quotas
|
||||
GITHUB_ASSET_MAX_BYTES=2147483648
|
||||
tarball_size=$(stat -c "%s" kata-static.tar.zst)
|
||||
if [[ "${tarball_size}" -ge "${GITHUB_ASSET_MAX_BYTES}" ]]; then
|
||||
echo "::error::tarball size (${tarball_size} bytes) >= GitHub release asset limit (${GITHUB_ASSET_MAX_BYTES} bytes)"
|
||||
exit 1
|
||||
fi
|
||||
echo "tarball size: ${tarball_size} bytes"
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
|
||||
43
.github/workflows/docs.yaml
vendored
@@ -4,17 +4,18 @@ on:
|
||||
branches:
|
||||
- main
|
||||
permissions: {}
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
deploy-docs:
|
||||
name: deploy-docs
|
||||
build:
|
||||
runs-on: ubuntu-24.04
|
||||
name: Build docs
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0
|
||||
- uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5.0.1
|
||||
@@ -23,10 +24,30 @@ jobs:
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: 3.x
|
||||
- run: pip install zensical
|
||||
- run: zensical build --clean
|
||||
|
||||
- run: pip install -r docs/requirements.txt
|
||||
- run: python3 -m mkdocs build --config-file ./mkdocs.yaml --site-dir site/
|
||||
id: build
|
||||
|
||||
- uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # v4.0.0
|
||||
with:
|
||||
path: site
|
||||
- uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
|
||||
id: deployment
|
||||
with:
|
||||
path: site/
|
||||
name: github-pages
|
||||
|
||||
deploy:
|
||||
needs: build
|
||||
runs-on: ubuntu-24.04
|
||||
name: Deploy docs
|
||||
permissions:
|
||||
pages: write
|
||||
id-token: write
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4.0.5
|
||||
id: deployment
|
||||
with:
|
||||
artifact_name: github-pages
|
||||
|
||||
3
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
@@ -47,9 +47,6 @@ jobs:
|
||||
vmm: clh
|
||||
instance-type: small
|
||||
genpolicy-pull-method: oci-distribution
|
||||
- host_os: cbl-mariner
|
||||
vmm: cloud-hypervisor
|
||||
instance-type: small
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
instance-type: small
|
||||
|
||||
@@ -49,6 +49,8 @@ jobs:
|
||||
KATA_HYPERVISOR: ${{ matrix.environment.vmm }}
|
||||
KUBERNETES: kubeadm
|
||||
KBS: ${{ matrix.environment.name == 'nvidia-gpu-snp' && 'true' || 'false' }}
|
||||
SNAPSHOTTER: ${{ matrix.environment.name == 'nvidia-gpu-snp' && 'nydus' || '' }}
|
||||
USE_EXPERIMENTAL_SNAPSHOTTER_SETUP: ${{ matrix.environment.name == 'nvidia-gpu-snp' && 'true' || 'false' }}
|
||||
K8S_TEST_HOST_TYPE: baremetal
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
@@ -98,7 +100,7 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Run tests ${{ matrix.environment.vmm }}
|
||||
timeout-minutes: 30
|
||||
timeout-minutes: 60
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-nv-tests
|
||||
env:
|
||||
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
|
||||
|
||||
3314
Cargo.lock
generated
66
Cargo.toml
@@ -6,6 +6,12 @@ rust-version = "1.88"
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
# kata-agent
|
||||
"src/agent",
|
||||
"src/agent/rustjail",
|
||||
"src/agent/policy",
|
||||
"src/agent/vsock-exporter",
|
||||
|
||||
# Dragonball
|
||||
"src/dragonball",
|
||||
"src/dragonball/dbs_acpi",
|
||||
@@ -41,7 +47,6 @@ resolver = "2"
|
||||
|
||||
# TODO: Add all excluded crates to root workspace
|
||||
exclude = [
|
||||
"src/agent",
|
||||
"src/tools",
|
||||
"src/libs",
|
||||
|
||||
@@ -56,19 +61,19 @@ exclude = [
|
||||
|
||||
[workspace.dependencies]
|
||||
# Rust-VMM crates
|
||||
event-manager = "0.2.1"
|
||||
kvm-bindings = "0.6.0"
|
||||
kvm-ioctls = "=0.12.1"
|
||||
linux-loader = "0.8.0"
|
||||
event-manager = "0.4.0"
|
||||
kvm-bindings = "0.14.0"
|
||||
kvm-ioctls = "0.24.0"
|
||||
linux-loader = "0.13.0"
|
||||
seccompiler = "0.5.0"
|
||||
vfio-bindings = "0.3.0"
|
||||
vfio-ioctls = "0.1.0"
|
||||
virtio-bindings = "0.1.0"
|
||||
virtio-queue = "0.7.0"
|
||||
vm-fdt = "0.2.0"
|
||||
vm-memory = "0.10.0"
|
||||
vm-superio = "0.5.0"
|
||||
vmm-sys-util = "0.11.0"
|
||||
vfio-bindings = "0.6.1"
|
||||
vfio-ioctls = "0.5.0"
|
||||
virtio-bindings = "0.2.0"
|
||||
virtio-queue = "0.17.0"
|
||||
vm-fdt = "0.3.0"
|
||||
vm-memory = "=0.17.1"
|
||||
vm-superio = "0.8.0"
|
||||
vmm-sys-util = "0.15.0"
|
||||
|
||||
# Local dependencies from Dragonball Sandbox crates
|
||||
dragonball = { path = "src/dragonball" }
|
||||
@@ -104,6 +109,7 @@ wasm_container = { path = "src/runtime-rs/crates/runtimes/wasm_container" }
|
||||
kata-sys-util = { path = "src/libs/kata-sys-util" }
|
||||
kata-types = { path = "src/libs/kata-types", features = ["safe-path"] }
|
||||
logging = { path = "src/libs/logging" }
|
||||
mem-agent = { path = "src/libs/mem-agent" }
|
||||
protocols = { path = "src/libs/protocols", features = ["async"] }
|
||||
runtime-spec = { path = "src/libs/runtime-spec" }
|
||||
safe-path = { path = "src/libs/safe-path" }
|
||||
@@ -112,35 +118,65 @@ test-utils = { path = "src/libs/test-utils" }
|
||||
|
||||
# Local dependencies from `src/agent`
|
||||
kata-agent-policy = { path = "src/agent/policy" }
|
||||
rustjail = { path = "src/agent/rustjail" }
|
||||
vsock-exporter = { path = "src/agent/vsock-exporter" }
|
||||
|
||||
# Outside dependencies
|
||||
actix-rt = "2.7.0"
|
||||
anyhow = "1.0"
|
||||
async-recursion = "0.3.2"
|
||||
async-trait = "0.1.48"
|
||||
capctl = "0.2.0"
|
||||
cfg-if = "1.0.0"
|
||||
cgroups = { package = "cgroups-rs", git = "https://github.com/kata-containers/cgroups-rs", rev = "v0.3.5" }
|
||||
clap = { version = "4.5.40", features = ["derive"] }
|
||||
const_format = "0.2.30"
|
||||
containerd-shim = { version = "0.10.0", features = ["async"] }
|
||||
containerd-shim-protos = { version = "0.10.0", features = ["async"] }
|
||||
derivative = "2.2.0"
|
||||
futures = "0.3.30"
|
||||
go-flag = "0.1.0"
|
||||
hyper = "0.14.20"
|
||||
hyperlocal = "0.8.0"
|
||||
ipnetwork = "0.17.0"
|
||||
lazy_static = "1.4"
|
||||
libc = "0.2"
|
||||
libc = "0.2.94"
|
||||
log = "0.4.14"
|
||||
netlink-packet-core = "0.7.0"
|
||||
netlink-packet-route = "0.19.0"
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
|
||||
netns-rs = "0.1.0"
|
||||
# Note: nix needs to stay sync'd with libs versions
|
||||
nix = "0.26.4"
|
||||
oci-spec = { version = "0.8.1", features = ["runtime"] }
|
||||
opentelemetry = { version = "0.17.0", features = ["rt-tokio"] }
|
||||
procfs = "0.12.0"
|
||||
prometheus = { version = "0.14.0", features = ["process"] }
|
||||
protobuf = "3.7.2"
|
||||
rand = "0.8.4"
|
||||
regex = "1.10.5"
|
||||
rstest = "0.18.0"
|
||||
rtnetlink = "0.14.0"
|
||||
scan_fmt = "0.2.6"
|
||||
scopeguard = "1.0.0"
|
||||
serde = { version = "1.0.145", features = ["derive"] }
|
||||
serde_json = "1.0.91"
|
||||
serial_test = "0.10.0"
|
||||
sha2 = "0.10.9"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
slog-stdlog = "4.0.0"
|
||||
slog-term = "2.9.0"
|
||||
strum = { version = "0.24.0", features = ["derive"] }
|
||||
strum_macros = "0.26.2"
|
||||
tempfile = "3.19.1"
|
||||
thiserror = "1.0"
|
||||
thiserror = "1.0.26"
|
||||
tokio = "1.46.1"
|
||||
tokio-vsock = "0.3.4"
|
||||
toml = "0.5.8"
|
||||
tracing = "0.1.41"
|
||||
tracing-opentelemetry = "0.18.0"
|
||||
tracing-subscriber = "0.3.20"
|
||||
ttrpc = "0.8.4"
|
||||
url = "2.5.4"
|
||||
which = "4.3.0"
|
||||
|
||||
8
Makefile
@@ -49,8 +49,11 @@ docs-url-alive-check:
|
||||
build-and-publish-kata-debug:
|
||||
bash tools/packaging/kata-debug/kata-debug-build-and-upload-payload.sh ${KATA_DEBUG_REGISTRY} ${KATA_DEBUG_TAG}
|
||||
|
||||
docs-serve:
|
||||
docker run --rm -p 8000:8000 -v ./docs:/docs:ro -v ${PWD}/zensical.toml:/zensical.toml:ro zensical/zensical serve --config-file /zensical.toml -a 0.0.0.0:8000
|
||||
docs-build:
|
||||
docker build -t kata-docs:latest -f ./docs/Dockerfile ./docs
|
||||
|
||||
docs-serve: docs-build
|
||||
docker run --rm -p 8000:8000 -v ${PWD}:/docs:ro kata-docs:latest serve --config-file /docs/mkdocs.yaml -a 0.0.0.0:8000
|
||||
|
||||
.PHONY: \
|
||||
all \
|
||||
@@ -59,4 +62,5 @@ docs-serve:
|
||||
default \
|
||||
static-checks \
|
||||
docs-url-alive-check \
|
||||
docs-build \
|
||||
docs-serve
|
||||
|
||||
@@ -74,7 +74,7 @@ See the [official documentation](docs) including:
|
||||
- [Developer guide](docs/Developer-Guide.md)
|
||||
- [Design documents](docs/design)
|
||||
- [Architecture overview](docs/design/architecture)
|
||||
- [Architecture 3.0 overview](docs/design/architecture_3.0/)
|
||||
- [Architecture 4.0 overview](docs/design/architecture_4.0/)
|
||||
|
||||
## Configuration
|
||||
|
||||
|
||||
18
docs/.nav.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
# https://lukasgeiter.github.io/mkdocs-awesome-nav/
|
||||
nav:
|
||||
- Home: index.md
|
||||
- Getting Started:
|
||||
- prerequisites.md
|
||||
- installation.md
|
||||
- Configuration:
|
||||
- helm-configuration.md
|
||||
- runtime-configuration.md
|
||||
- Platform Support:
|
||||
- hypervisors.md
|
||||
- Guides:
|
||||
- Use Cases:
|
||||
- NVIDIA GPU Passthrough: use-cases/NVIDIA-GPU-passthrough-and-Kata-QEMU.md
|
||||
- NVIDIA vGPU: use-cases/NVIDIA-GPU-passthrough-and-Kata.md
|
||||
- Intel Discrete GPU: use-cases/Intel-Discrete-GPU-passthrough-and-Kata.md
|
||||
- Misc:
|
||||
- Architecture: design/architecture/
|
||||
@@ -522,10 +522,18 @@ $ sudo kata-runtime check
|
||||
If your system is *not* able to run Kata Containers, the previous command will error out and explain why.
|
||||
|
||||
# Run Kata Containers with Containerd
|
||||
|
||||
Refer to the [How to use Kata Containers and Containerd](how-to/containerd-kata.md) how-to guide.
|
||||
|
||||
# Run Kata Containers with Kubernetes
|
||||
Refer to the [Run Kata Containers with Kubernetes](how-to/run-kata-with-k8s.md) how-to guide.
|
||||
|
||||
- Containerd
|
||||
|
||||
Refer to the [How to use Kata Containers and Containerd with Kubernetes](how-to/how-to-use-k8s-with-containerd-and-kata.md) how-to guide.
|
||||
|
||||
- CRI-O
|
||||
|
||||
Refer to the [How to use Kata Containers and CRI-O with Kubernetes](how-to/how-to-use-k8s-with-crio-and-kata.md) how-to guide.
|
||||
|
||||
# Troubleshoot Kata Containers
|
||||
|
||||
|
||||
11
docs/Dockerfile
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright 2026 Kata Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
FROM python:3.12-slim
|
||||
|
||||
WORKDIR /
|
||||
COPY ./requirements.txt requirements.txt
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
ENTRYPOINT ["python3", "-m", "mkdocs"]
|
||||
@@ -32,23 +32,26 @@ their own validation and patch backporting from there.
|
||||
|
||||
## Release Process
|
||||
|
||||
### Bump the `VERSION` and `Chart.yaml` file
|
||||
|
||||
When the `kata-containers/kata-containers` repository is ready for a new
|
||||
release, first create a PR to set the release in the [`VERSION`](./../VERSION)
|
||||
file and update the `version` and `appVersion` in the
|
||||
[`Chart.yaml`](./../tools/packaging/kata-deploy/helm-chart/kata-deploy/Chart.yaml)
|
||||
file and have it merged.
|
||||
|
||||
### Lock the `main` branch
|
||||
### Lock the `main` branch and announce release process
|
||||
|
||||
In order to prevent any PRs getting merged during the release process, and
|
||||
slowing the release process down, by impacting the payload caches, we have
|
||||
recently trialed setting the `main` branch to read only whilst the release
|
||||
action runs.
|
||||
recently trialed setting the `main` branch to read-only.
|
||||
Once the `kata-containers/kata-containers` repository is ready for a new
|
||||
release, lock the main branch until the release action has completed.
|
||||
Notify the #kata-dev Slack channel about the ongoing release process.
|
||||
Ideally, CI usage by others should be reduced to a minimum during the
|
||||
ongoing release process.
|
||||
|
||||
> [!NOTE]
|
||||
> Admin permission is needed to complete this task.
|
||||
> Admin permission is needed to lock/unlock the `main` branch.
|
||||
|
||||
### Bump the `VERSION` and `Chart.yaml` file
|
||||
|
||||
Create a PR to set the release in the [`VERSION`](./../VERSION) file and to
|
||||
update the `version` and `appVersion` fields in the
|
||||
[`Chart.yaml`](./../tools/packaging/kata-deploy/helm-chart/kata-deploy/Chart.yaml)
|
||||
file. Temporarily unlock the main branch to merge the PR.
|
||||
|
||||
### Wait for the `VERSION` bump PR payload publish to complete
|
||||
|
||||
@@ -60,7 +63,7 @@ and are cached, so that the release process can just download these artifacts
|
||||
rather than needing to build them all, which takes time and can reveal errors in
|
||||
infra.
|
||||
|
||||
### Check GitHub Actions
|
||||
### Trigger the `Release Kata Containers` GitHub Action
|
||||
|
||||
We make use of [GitHub actions](https://github.com/features/actions) in the
|
||||
[release](https://github.com/kata-containers/kata-containers/actions/workflows/release.yaml)
|
||||
|
||||
|
Before Width: | Height: | Size: 710 B After Width: | Height: | Size: 710 B |
@@ -32,4 +32,4 @@ runtime. Refer to the following guides on how to set up Kata
|
||||
Containers with Kubernetes:
|
||||
|
||||
- [How to use Kata Containers and containerd](../../how-to/containerd-kata.md)
|
||||
- [Run Kata Containers with Kubernetes](../../how-to/run-kata-with-k8s.md)
|
||||
- [Run Kata Containers with Kubernetes](../../how-to/how-to-use-k8s-with-crio-and-kata.md)
|
||||
|
||||
@@ -1,168 +0,0 @@
|
||||
# Kata 3.0 Architecture
|
||||
## Overview
|
||||
In cloud-native scenarios, there is an increased demand for container startup speed, resource consumption, stability, and security, areas where the present Kata Containers runtime is challenged relative to other runtimes. To achieve this, we propose a solid, field-tested and secure Rust version of the kata-runtime.
|
||||
|
||||
Also, we provide the following designs:
|
||||
|
||||
- Turn key solution with builtin `Dragonball` Sandbox
|
||||
- Async I/O to reduce resource consumption
|
||||
- Extensible framework for multiple services, runtimes and hypervisors
|
||||
- Lifecycle management for sandbox and container associated resources
|
||||
|
||||
### Rationale for choosing Rust
|
||||
|
||||
We chose Rust because it is designed as a system language with a focus on efficiency.
|
||||
In contrast to Go, Rust makes a variety of design trade-offs in order to obtain
|
||||
good execution performance, with innovative techniques that, in contrast to C or
|
||||
C++, provide reasonable protection against common memory errors (buffer
|
||||
overflow, invalid pointers, range errors), error checking (ensuring errors are
|
||||
dealt with), thread safety, ownership of resources, and more.
|
||||
|
||||
These benefits were verified in our project when the Kata Containers guest agent
|
||||
was rewritten in Rust. We notably saw a significant reduction in memory usage
|
||||
with the Rust-based implementation.
|
||||
|
||||
|
||||
## Design
|
||||
### Architecture
|
||||

|
||||
### Built-in VMM
|
||||
#### Current Kata 2.x architecture
|
||||

|
||||
As shown in the figure, runtime and VMM are separate processes. The runtime process forks the VMM process and interacts through the inter-process RPC. Typically, process interaction consumes more resources than peers within the process, and it will result in relatively low efficiency. At the same time, the cost of resource operation and maintenance should be considered. For example, when performing resource recovery under abnormal conditions, the exception of any process must be detected by others and activate the appropriate resource recovery process. If there are additional processes, the recovery becomes even more difficult.
|
||||
#### How To Support Built-in VMM
|
||||
We provide `Dragonball` Sandbox to enable built-in VMM by integrating VMM's function into the Rust library. We could perform VMM-related functionalities by using the library. Because runtime and VMM are in the same process, there is a benefit in terms of message processing speed and API synchronization. It can also guarantee the consistency of the runtime and the VMM life cycle, reducing resource recovery and exception handling maintenance, as shown in the figure:
|
||||

|
||||
### Async Support
|
||||
#### Why Need Async
|
||||
**Async is already in stable Rust and allows us to write async code**
|
||||
|
||||
- Async provides significantly reduced CPU and memory overhead, especially for workloads with a large amount of IO-bound tasks
|
||||
- Async is zero-cost in Rust, which means that you only pay for what you use. Specifically, you can use async without heap allocations and dynamic dispatch, which greatly improves efficiency
|
||||
- For more (see [Why Async?](https://rust-lang.github.io/async-book/01_getting_started/02_why_async.html) and [The State of Asynchronous Rust](https://rust-lang.github.io/async-book/01_getting_started/03_state_of_async_rust.html)).
|
||||
|
||||
**There may be several problems if implementing kata-runtime with Sync Rust**
|
||||
|
||||
- Too many threads with a new TTRPC connection
|
||||
- TTRPC threads: reaper thread(1) + listener thread(1) + client handler(2)
|
||||
- Add 3 I/O threads with a new container
|
||||
- In Sync mode, implementing a timeout mechanism is challenging. For example, in TTRPC API interaction, the timeout mechanism is difficult to align with Golang
|
||||
#### How To Support Async
|
||||
The kata-runtime is controlled by TOKIO_RUNTIME_WORKER_THREADS to run the OS thread, which is 2 threads by default. For TTRPC and container-related threads run in the `tokio` thread in a unified manner, and related dependencies need to be switched to Async, such as Timer, File, Netlink, etc. With the help of Async, we can easily support no-block I/O and timer. Currently, we only utilize Async for kata-runtime. The built-in VMM keeps the OS thread because it can ensure that the threads are controllable.
|
||||
|
||||
**For N `tokio` worker threads and M containers**
|
||||
|
||||
- Sync runtime(both OS thread and `tokio` task are OS thread but without `tokio` worker thread) OS thread number: 4 + 12*M
|
||||
- Async runtime(only OS thread is OS thread) OS thread number: 2 + N
|
||||
```shell
|
||||
├─ main(OS thread)
|
||||
├─ async-logger(OS thread)
|
||||
└─ tokio worker(N * OS thread)
|
||||
├─ agent log forwarder(1 * tokio task)
|
||||
├─ health check thread(1 * tokio task)
|
||||
├─ TTRPC reaper thread(M * tokio task)
|
||||
├─ TTRPC listener thread(M * tokio task)
|
||||
├─ TTRPC client handler thread(7 * M * tokio task)
|
||||
├─ container stdin io thread(M * tokio task)
|
||||
├─ container stdout io thread(M * tokio task)
|
||||
└─ container stderr io thread(M * tokio task)
|
||||
```
|
||||
### Extensible Framework
|
||||
The Kata 3.x runtime is designed with the extension of service, runtime, and hypervisor, combined with configuration to meet the needs of different scenarios. At present, the service provides a register mechanism to support multiple services. Services could interact with runtime through messages. In addition, the runtime handler handles messages from services. To meet the needs of a binary that supports multiple runtimes and hypervisors, the startup must obtain the runtime handler type and hypervisor type through configuration.
|
||||
|
||||

|
||||
### Resource Manager
|
||||
In our case, there will be a variety of resources, and every resource has several subtypes. Especially for `Virt-Container`, every subtype of resource has different operations. And there may be dependencies, such as the share-fs rootfs and the share-fs volume will use share-fs resources to share files to the VM. Currently, network and share-fs are regarded as sandbox resources, while rootfs, volume, and cgroup are regarded as container resources. Also, we abstract a common interface for each resource and use subclass operations to evaluate the differences between different subtypes.
|
||||

|
||||
|
||||
## Roadmap
|
||||
|
||||
- Stage 1 (June): provide basic features (current delivered)
|
||||
- Stage 2 (September): support common features
|
||||
- Stage 3: support full features
|
||||
|
||||
| **Class** | **Sub-Class** | **Development Stage** | **Status** |
|
||||
| -------------------------- | ------------------- | --------------------- |------------|
|
||||
| Service | task service | Stage 1 | ✅ |
|
||||
| | extend service | Stage 3 | 🚫 |
|
||||
| | image service | Stage 3 | 🚫 |
|
||||
| Runtime handler | `Virt-Container` | Stage 1 | ✅ |
|
||||
| Endpoint | VETH Endpoint | Stage 1 | ✅ |
|
||||
| | Physical Endpoint | Stage 2 | ✅ |
|
||||
| | Tap Endpoint | Stage 2 | ✅ |
|
||||
| | `Tuntap` Endpoint | Stage 2 | ✅ |
|
||||
| | `IPVlan` Endpoint | Stage 2 | ✅ |
|
||||
| | `MacVlan` Endpoint | Stage 2 | ✅ |
|
||||
| | MACVTAP Endpoint | Stage 3 | 🚫 |
|
||||
| | `VhostUserEndpoint` | Stage 3 | 🚫 |
|
||||
| Network Interworking Model | Tc filter | Stage 1 | ✅ |
|
||||
| | `MacVtap` | Stage 3 | 🚧 |
|
||||
| Storage | Virtio-fs | Stage 1 | ✅ |
|
||||
| | `nydus` | Stage 2 | 🚧 |
|
||||
| | `device mapper` | Stage 2 | 🚫 |
|
||||
| `Cgroup V2` | | Stage 2 | 🚧 |
|
||||
| Hypervisor | `Dragonball` | Stage 1 | 🚧 |
|
||||
| | QEMU | Stage 2 | 🚫 |
|
||||
| | Cloud Hypervisor | Stage 3 | 🚫 |
|
||||
| | Firecracker | Stage 3 | 🚫 |
|
||||
|
||||
## FAQ
|
||||
|
||||
- Are the "service", "message dispatcher" and "runtime handler" all part of the single Kata 3.x runtime binary?
|
||||
|
||||
Yes. They are components in Kata 3.x runtime. And they will be packed into one binary.
|
||||
1. Service is an interface, which is responsible for handling multiple services like task service, image service and etc.
|
||||
2. Message dispatcher, it is used to match multiple requests from the service module.
|
||||
3. Runtime handler is used to deal with the operation for sandbox and container.
|
||||
- What is the name of the Kata 3.x runtime binary?
|
||||
|
||||
Apparently we can't use `containerd-shim-v2-kata` because it's already used. We are facing the hardest issue of "naming" again. Any suggestions are welcome.
|
||||
Internally we use `containerd-shim-v2-rund`.
|
||||
|
||||
- Is the Kata 3.x design compatible with the containerd shimv2 architecture?
|
||||
|
||||
Yes. It is designed to follow the functionality of go version kata. And it implements the `containerd shim v2` interface/protocol.
|
||||
|
||||
- How will users migrate to the Kata 3.x architecture?
|
||||
|
||||
The migration plan will be provided before Kata 3.x is merged into the main branch.
|
||||
|
||||
- Is `Dragonball` limited to its own built-in VMM? Can the `Dragonball` system be configured to work using an external `Dragonball` VMM/hypervisor?
|
||||
|
||||
`Dragonball` could work as an external hypervisor. However, stability and performance are challenging in this case. A built-in VMM can optimise the container overhead, and it makes stability easier to maintain.
|
||||
|
||||
`runD` is the `containerd-shim-v2` counterpart of `runC` and can run a pod/containers. `Dragonball` is a `microvm`/VMM that is designed to run container workloads. Instead of `microvm`/VMM, we sometimes refer to it as secure sandbox.
|
||||
|
||||
- QEMU, Cloud Hypervisor and Firecracker support are planned, but how that would work. Are they working in separate process?
|
||||
|
||||
Yes. They are unable to work as a built-in VMM.
|
||||
|
||||
- What is `upcall`?
|
||||
|
||||
The `upcall` is used to hotplug CPU/memory/MMIO devices, and it solves two issues.
|
||||
1. avoid dependency on PCI/ACPI
|
||||
2. avoid dependency on `udevd` within guest and get deterministic results for hotplug operations. So `upcall` is an alternative to ACPI based CPU/memory/device hotplug. And we may cooperate with the community to add support for ACPI based CPU/memory/device hotplug if needed.
|
||||
|
||||
`Dbs-upcall` is a `vsock-based` direct communication tool between VMM and guests. The server side of the `upcall` is a driver in guest kernel (kernel patches are needed for this feature) and it'll start to serve the requests once the kernel has started. And the client side is in VMM , it'll be a thread that communicates with VSOCK through `uds`. We have accomplished device hotplug / hot-unplug directly through `upcall` in order to avoid virtualization of ACPI to minimize virtual machine's overhead. And there could be many other usage through this direct communication channel. It's already open source.
|
||||
https://github.com/openanolis/dragonball-sandbox/tree/main/crates/dbs-upcall
|
||||
|
||||
- The URL below says the kernel patches work with 4.19, but do they also work with 5.15+ ?
|
||||
|
||||
Forward compatibility should be achievable; we have ported it to a 5.10-based kernel.
|
||||
|
||||
- Are these patches platform-specific or would they work for any architecture that supports VSOCK?
|
||||
|
||||
It's almost platform-independent, but some messages related to CPU hotplug are platform-dependent.
|
||||
|
||||
- Could the kernel driver be replaced with a userland daemon in the guest using loopback VSOCK?
|
||||
|
||||
We need to create device nodes for hot-added CPU/memory/devices, so it's not easy for a userspace daemon to do these tasks.
|
||||
|
||||
- The fact that `upcall` allows communication between the VMM and the guest suggests that this architecture might be incompatible with https://github.com/confidential-containers where the VMM should have no knowledge of what happens inside the VM.
|
||||
|
||||
1. `TDX` doesn't support CPU/memory hotplug yet.
|
||||
2. For ACPI based device hotplug, it depends on ACPI `DSDT` table, and the guest kernel will execute `ASL` code to handle during handling those hotplug event. And it should be easier to audit VSOCK based communication than ACPI `ASL` methods.
|
||||
|
||||
- What is the security boundary for the monolithic / "Built-in VMM" case?
|
||||
|
||||
It has the security boundary of virtualization. More details will be provided in next stage.
|
||||
|
Before Width: | Height: | Size: 95 KiB |
|
Before Width: | Height: | Size: 66 KiB |
|
Before Width: | Height: | Size: 136 KiB |
|
Before Width: | Height: | Size: 72 KiB |
|
Before Width: | Height: | Size: 139 KiB |
433
docs/design/architecture_4.0/README.md
Normal file
@@ -0,0 +1,433 @@
|
||||
# Kata Containers 4.0 Architecture (Rust Runtime)
|
||||
|
||||
## Overview
|
||||
|
||||
Kata Containers 4.0 represents a significant architectural evolution, moving beyond the limitations of legacy multi-process container runtimes. Driven by a modern Rust-based stack, this release transitions to an asynchronous, unified architecture that drastically reduces resource consumption and latency.
|
||||
|
||||
By consolidating the entire runtime into a single, high-performance binary, Kata 4.0 eliminates the overhead of cross-process communication and streamlines the container lifecycle. The result is a secure, production-tested runtime capable of handling high-density workloads with efficiency. With built-in support for diverse container abstractions and optimized hypervisor integration, Kata 4.0 delivers the agility and robustness required by modern, cloud-native infrastructure.
|
||||
|
||||
---
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
The Kata Containers Rust Runtime is designed to minimize resource overhead and startup latency. It achieves this by shifting from traditional process-based management to a more integrated, Rust-native control flow.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
containerd["containerd"] --> shimv2["containerd-shim-kata-v2 (shimv2)"]
|
||||
|
||||
subgraph BuiltIn["Built-in VMM (Integrated Mode)"]
|
||||
direction TD
|
||||
subgraph shimv2_bi["shimv2 process (Single Process)"]
|
||||
runtime_bi["shimv2 runtime"]
|
||||
subgraph dragonball["Dragonball VMM (library)"]
|
||||
helpers_bi["virtiofs / nydus\n(BuiltIn)"]
|
||||
end
|
||||
runtime_bi -->|"direct function calls"| dragonball
|
||||
end
|
||||
subgraph guestvm_bi["Guest VM"]
|
||||
agent_bi["kata-agent"]
|
||||
end
|
||||
shimv2_bi -->|"hybrid-vsock"| guestvm_bi
|
||||
end
|
||||
|
||||
subgraph OptionalVMM["Optional VMM (External Mode)"]
|
||||
direction TD
|
||||
shimv2_ext["shimv2 process"]
|
||||
imagesrvd_ext["virtiofsd / nydusd\n(Independent Process)"]
|
||||
ext_vmm["External VMM process\n(QEMU / Cloud-Hypervisor / Firecracker)"]
|
||||
subgraph guestvm_ext["Guest VM"]
|
||||
agent_ext["kata-agent"]
|
||||
end
|
||||
shimv2_ext -->|"fork + IPC/RPC"| ext_vmm
|
||||
shimv2_ext -->|"manages"| imagesrvd_ext
|
||||
ext_vmm -->|"vsock / hybrid-vsock"| guestvm_ext
|
||||
end
|
||||
|
||||
shimv2 --> BuiltIn
|
||||
shimv2 --> OptionalVMM
|
||||
|
||||
classDef process fill:#d0e8ff,stroke:#336,stroke-width:1px
|
||||
classDef vm fill:#d4edda,stroke:#155724,stroke-width:1px
|
||||
classDef agent fill:#fff3cd,stroke:#856404,stroke-width:1px
|
||||
class shimv2,runtime_bi,shimv2_ext,helpers_bi,imagesrvd_ext,ext_vmm process
|
||||
class guestvm_bi,guestvm_ext vm
|
||||
class agent_bi,agent_ext agent
|
||||
```
|
||||
|
||||
The runtime employs a **flexible VMM strategy**, supporting both `built-in` and `optional` VMMs. This allows users to choose between a tightly integrated VMM (e.g., Dragonball) for peak performance, or external options (e.g., QEMU, Cloud-Hypervisor, Firecracker) for enhanced compatibility and modularity.
|
||||
|
||||
### A. Built-in VMM (Integrated Mode)
|
||||
|
||||
The built-in VMM mode is the default and recommended configuration for users, as it offers superior performance and resource efficiency.
|
||||
|
||||
In this mode, the VMM (`Dragonball`) is **deeply integrated** into the `shimv2`'s lifecycle. This eliminates the overhead of IPC, enabling lower-latency message processing and tight API synchronization. Moreover, it ensures the runtime and VMM share a unified lifecycle, simplifying exception handling and resource cleanup.
|
||||
|
||||
* **Integrated Management**: The `shimv2` directly controls the VMM and its critical helper services (`virtiofsd` or `nydusd`).
|
||||
* **Performance**: By eliminating external process overhead and complex inter-process communication (IPC), this mode achieves faster container startup and higher resource density.
|
||||
* **Core Technology**: Primarily utilizes **Dragonball**, the native Rust-based VMM optimized and dedicated for cloud-native scenarios.
|
||||
|
||||
> **Note**: The built-in VMM mode is the default and recommended configuration for users, as it offers superior performance and resource efficiency.
|
||||
|
||||
### B. Optional VMM (External Mode)
|
||||
|
||||
The optional VMM mode is available for users with specific requirements that necessitate external hypervisor support.
|
||||
|
||||
In this mode, the runtime and the VMM operate as separate, decoupled processes. The runtime forks the VMM process and interacts with it via inter-process RPC, and the `containerd-shim-kata-v2` (`shimv2` for short) manages the VMM as an **external process**.
|
||||
|
||||
* **Decoupled Lifecycle**: The `shimv2` forks and manages the VMM process (e.g., QEMU, Cloud-Hypervisor, or Firecracker) via IPC/RPC, while the VMM communicates with the guest via vsock/hybrid-vsock.
|
||||
* **Flexibility**: Ideal for environments that require specific hypervisor hardware emulation or legacy compatibility.
|
||||
|
||||
> **Note**: This approach (Optional VMM) introduces overhead due to context switching and cross-process communication. Furthermore, managing resources across process boundaries—especially during abnormal conditions—introduces significant complexity in error detection and recovery.
|
||||
|
||||
---
|
||||
|
||||
## Core Architectural Principles
|
||||
|
||||
* **Safety via Rust**: Leveraging Rust's ownership and type systems to eliminate memory-related vulnerabilities (buffer overflows, dangling pointers) by design.
|
||||
* **Performance via Async**: Utilizing Tokio to handle high-concurrency I/O, reducing the OS thread footprint by an order of magnitude.
|
||||
* **Built-in VMM**: A modular, library-based approach to virtualization, enabling tighter integration with the runtime.
|
||||
* **Pluggable Framework**: A clean abstraction layer allowing seamless swapping of hypervisors, network interfaces, and storage backends.
|
||||
|
||||
---
|
||||
|
||||
## Design Deep Dive
|
||||
|
||||
### Built-in VMM Integration (Dragonball)
|
||||
|
||||
The legacy Kata 2.x architecture relied on inter-process communication (IPC) between the runtime and the VMM. This introduced context-switching latency and complex error-recovery requirements across process boundaries. In contrast, the built-in VMM approach embeds the VMM directly within the runtime's process space. This eliminates IPC overhead, allowing for direct function calls and shared memory access, resulting in significantly reduced startup times and improved performance.
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph HostProcess["Host Process:containerd-shim-kata-v2 (shimv2)"]
|
||||
shimv2["shimv2 runtime"]
|
||||
end
|
||||
|
||||
imagesrvd["virtiofsd / nydusd\n(Independent Process)"]
|
||||
|
||||
subgraph ExtVMMProc["External VMM Process (e.g., QEMU)"]
|
||||
vmm["VMM\n(QEMU / Cloud-Hypervisor\n/ Firecracker)"]
|
||||
end
|
||||
|
||||
subgraph GuestVM["Guest VM"]
|
||||
agent["kata-agent"]
|
||||
end
|
||||
|
||||
shimv2 -->|"fork + IPC / RPC"| vmm
|
||||
shimv2 -->|"manages"| imagesrvd
|
||||
vmm -->|"vsock / hybrid-vsock"| GuestVM
|
||||
|
||||
classDef proc fill:#d0e8ff,stroke:#336,stroke-width:1px
|
||||
classDef vm fill:#d4edda,stroke:#155724,stroke-width:1px
|
||||
classDef ag fill:#fff3cd,stroke:#856404,stroke-width:1px
|
||||
class shimv2,imagesrvd,vmm proc
|
||||
class agent ag
|
||||
```
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph SingleProcess["Single Process: containerd-shim-kata-v2 (shimv2)"]
|
||||
shimv2["shimv2 runtime"]
|
||||
subgraph dragonball["Dragonball VMM (library)"]
|
||||
helpers["virtiofs / nydus\n(BuiltIn)"]
|
||||
end
|
||||
shimv2 -->|"direct function calls"| dragonball
|
||||
end
|
||||
|
||||
subgraph GuestVM["Guest VM"]
|
||||
agent["kata-agent"]
|
||||
end
|
||||
|
||||
dragonball -->|"hybrid-vsock"| GuestVM
|
||||
|
||||
classDef proc fill:#d0e8ff,stroke:#336,stroke-width:1px
|
||||
classDef vm fill:#d4edda,stroke:#155724,stroke-width:1px
|
||||
classDef ag fill:#fff3cd,stroke:#856404,stroke-width:1px
|
||||
class shimv2,helpers proc
|
||||
class agent ag
|
||||
```
|
||||
|
||||
By integrating Dragonball directly as a library, we eliminate the need for heavy IPC.
|
||||
|
||||
* **API Synchronization**: Direct function calls replace RPCs, reducing latency.
|
||||
* **Unified Lifecycle**: The runtime and VMM share a single process lifecycle, significantly simplifying resource cleanup and fault isolation.
|
||||
|
||||
### Layered Architecture
|
||||
|
||||
The Kata 4.0 runtime utilizes a highly modular, layered architecture designed to decouple high-level service requests from low-level infrastructure execution. This design facilitates extensibility, allowing the system to support diverse container types and the built-in Dragonball VMM within a single, unified Rust binary, while also supporting other hypervisors as optional VMMs.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph L1["Layer 1 — Service & Orchestration Layer"]
|
||||
TaskSvc["Task Service"]
|
||||
ImageSvc["Image Service"]
|
||||
OtherSvc["Other Services"]
|
||||
Dispatcher["Message Dispatcher"]
|
||||
TaskSvc --> Dispatcher
|
||||
ImageSvc --> Dispatcher
|
||||
OtherSvc --> Dispatcher
|
||||
end
|
||||
|
||||
subgraph L2["Layer 2 — Management & Handler Layer"]
|
||||
subgraph RuntimeHandler["Runtime Handler"]
|
||||
SandboxMgr["Sandbox Manager"]
|
||||
ContainerMgr["Container Manager"]
|
||||
end
|
||||
subgraph ContainerAbstractions["Container Abstractions"]
|
||||
LinuxContainer["LinuxContainer"]
|
||||
VirtContainer["VirtContainer"]
|
||||
WasmContainer["WasmContainer"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph L3["Layer 3 — Infrastructure Abstraction Layer"]
|
||||
subgraph HypervisorIface["Hypervisor Interface"]
|
||||
Qemu["Qemu"]
|
||||
CloudHV["Cloud Hypervisor"]
|
||||
Firecracker["Firecracker"]
|
||||
Dragonball["Dragonball"]
|
||||
end
|
||||
subgraph ResourceMgr["Resource Manager"]
|
||||
Sharedfs["Sharedfs"]
|
||||
Network["Network"]
|
||||
Rootfs["Rootfs"]
|
||||
Volume["Volume"]
|
||||
Cgroup["Cgroup"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph L4["Layer 4 — Built-in Dragonball VMM Layer"]
|
||||
BuiltinDB["Builtin Dragonball"]
|
||||
end
|
||||
|
||||
Dispatcher --> RuntimeHandler
|
||||
RuntimeHandler --> ContainerAbstractions
|
||||
ContainerAbstractions --> HypervisorIface
|
||||
ContainerAbstractions --> ResourceMgr
|
||||
Dragonball --> BuiltinDB
|
||||
|
||||
classDef svc fill:#cce5ff,stroke:#004085,stroke-width:1px
|
||||
classDef handler fill:#d4edda,stroke:#155724,stroke-width:1px
|
||||
classDef infra fill:#fff3cd,stroke:#856404,stroke-width:1px
|
||||
classDef builtin fill:#f8d7da,stroke:#721c24,stroke-width:1px
|
||||
class TaskSvc,ImageSvc,OtherSvc,Dispatcher svc
|
||||
class SandboxMgr,ContainerMgr,LinuxContainer,VirtContainer,WasmContainer handler
|
||||
class Qemu,CloudHV,Firecracker,Dragonball,Sharedfs,Network,Rootfs,Volume,Cgroup infra
|
||||
class BuiltinDB builtin
|
||||
```
|
||||
|
||||
#### Service & Orchestration Layer
|
||||
|
||||
* **Service Layer**: The entry point for the runtime, providing specialized interfaces for external callers (e.g., `containerd`). It includes:
|
||||
* **Task Service**: Manages the lifecycle of containerized processes.
|
||||
* **Image Service**: Handles container image operations.
|
||||
* **Other Services**: An extensible framework allowing for custom modules.
|
||||
|
||||
* **Message Dispatcher**: Acts as a centralized traffic controller. It parses requests from the Service layer and routes them to the appropriate **Runtime Handler**, ensuring efficient message multiplexing.
|
||||
|
||||
#### Management & Handler Layer
|
||||
|
||||
* **Runtime Handler**: The core processing engine. It abstracts the underlying workload, enabling the runtime to handle various container types through:
|
||||
* **Sandbox Manager**: Orchestrates the lifecycle of the entire Pod (Sandbox).
|
||||
* **Container Manager**: Manages individual containers within a Sandbox.
|
||||
|
||||
* **Container Abstractions**: The framework is agnostic to the container implementation, with explicit support paths for:
|
||||
* **LinuxContainer** (Standard/OCI)
|
||||
* **VirtContainer** (Virtualization-based)
|
||||
* **WasmContainer** (WebAssembly-based)
|
||||
|
||||
#### Infrastructure Abstraction Layer
|
||||
|
||||
This layer provides standardized interfaces for hardware and resource management, regardless of the underlying backend.
|
||||
|
||||
* **Hypervisor Interface**: A pluggable architecture supporting multiple virtualization backends, including **Qemu**, **Cloud Hypervisor**, **Firecracker**, and **Dragonball**.
|
||||
|
||||
* **Resource Manager**: A unified interface for managing critical infrastructure components:
|
||||
* **Sharedfs, Network, Rootfs, Volume, and cgroup management**.
|
||||
|
||||
#### Built-in Dragonball VMM Layer
|
||||
|
||||
Representing the core of the high-performance runtime, the `Builtin Dragonball` block demonstrates deep integration between the runtime and the hypervisor.
|
||||
|
||||
#### Key Architectural Advantages
|
||||
|
||||
* **Uniformity**: By consolidating these layers into a single binary, the runtime ensures a consistent state across all sub-modules, preventing the "split-brain" scenarios common in multi-process runtimes.
|
||||
* **Modularity**: The clear separation between the **Message Dispatcher** and the **Runtime Handler** allows developers to introduce new container types (e.g., WASM) or hypervisors without modifying existing core logic.
|
||||
* **Efficiency**: The direct integration of `Dragonball` as a library allows for "Zero-Copy" resource management and direct API access, which drastically improves performance compared to traditional RPC-based hypervisor interaction.
|
||||
|
||||
### Extensible Framework
|
||||
|
||||
The Kata Rust runtime features a modular design that supports diverse services, runtimes, and hypervisors. We utilize a registration mechanism to decouple service logic from the core runtime. At startup, the runtime resolves the required runtime handler and hypervisor types based on configuration.
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
API["API"]
|
||||
|
||||
subgraph Services["Configurable Services"]
|
||||
TaskSvc["Task Service"]
|
||||
ImageSvc["Image Service"]
|
||||
OtherSvc["Other Service"]
|
||||
end
|
||||
|
||||
Msg(["Message Dispatcher"])
|
||||
|
||||
subgraph Handlers["Configurable Runtime Handlers"]
|
||||
WasmC["WasmContainer"]
|
||||
VirtC["VirtContainer"]
|
||||
LinuxC["LinuxContainer"]
|
||||
end
|
||||
|
||||
subgraph HVs["Configurable Hypervisors"]
|
||||
DB["Dragonball"]
|
||||
QEMU["QEMU"]
|
||||
CH["Cloud Hypervisor"]
|
||||
FC["Firecracker"]
|
||||
end
|
||||
|
||||
API --> Services
|
||||
Services --> Msg
|
||||
Msg --> Handlers
|
||||
Handlers --> HVs
|
||||
|
||||
classDef api fill:#d0e8ff,stroke:#336,stroke-width:1px
|
||||
classDef svc fill:#e2d9f3,stroke:#6610f2,stroke-width:1px
|
||||
classDef msg fill:#fff3cd,stroke:#856404,stroke-width:1px
|
||||
classDef handler fill:#d4edda,stroke:#155724,stroke-width:1px
|
||||
classDef hv fill:#f8d7da,stroke:#721c24,stroke-width:1px
|
||||
class API api
|
||||
class TaskSvc,ImageSvc,OtherSvc svc
|
||||
class Msg msg
|
||||
class WasmC,VirtC,LinuxC handler
|
||||
class DB,QEMU,CH,FC hv
|
||||
```
|
||||
|
||||
### Modular Resource Manager
|
||||
|
||||
Managing diverse resources—from Virtio-fs volumes to Cgroup V2—is handled by an abstracted resource manager. Each resource type implements a common trait, enabling uniform lifecycle hooks and deterministic dependency resolution.
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
RM["Resource Manager"]
|
||||
|
||||
subgraph SandboxRes["Sandbox Resources"]
|
||||
Network["Network Entity"]
|
||||
SharedFs["Shared FS"]
|
||||
end
|
||||
|
||||
subgraph ContainerRes["Container Resources"]
|
||||
Rootfs["Rootfs"]
|
||||
Cgroup["Cgroup"]
|
||||
Volume["Volume"]
|
||||
end
|
||||
|
||||
RM --> Network
|
||||
RM --> SharedFs
|
||||
RM --> Rootfs
|
||||
RM --> Cgroup
|
||||
RM --> Volume
|
||||
|
||||
Network --> Endpoint["endpoint\n(veth / physical)"]
|
||||
Network --> NetModel["model\n(tcfilter / route)"]
|
||||
SharedFs --> InlineVirtioFs["inline virtiofs"]
|
||||
SharedFs --> StandaloneVirtioFs["standalone virtiofs"]
|
||||
|
||||
Rootfs --> RootfsTypes["block / virtiofs / nydus"]
|
||||
Cgroup --> CgroupVers["v1 / v2"]
|
||||
Volume --> VolumeTypes["sharefs / shm / local\nephemeral / direct / block"]
|
||||
|
||||
classDef rm fill:#e2d9f3,stroke:#6610f2,stroke-width:2px
|
||||
classDef sandbox fill:#d0e8ff,stroke:#336,stroke-width:1px
|
||||
classDef container fill:#d4edda,stroke:#155724,stroke-width:1px
|
||||
classDef impl fill:#fff3cd,stroke:#856404,stroke-width:1px
|
||||
class RM rm
|
||||
class Network,SharedFs sandbox
|
||||
class Rootfs,Cgroup,Volume container
|
||||
class Endpoint,NetModel,InlineVirtioFs,StandaloneVirtioFs,RootfsTypes,CgroupVers,VolumeTypes impl
|
||||
```
|
||||
|
||||
### Asynchronous I/O Model
|
||||
|
||||
Synchronous runtimes are often limited by "thread bloat," where each container or connection spawns multiple OS threads.
|
||||
|
||||
#### Why Async Rust?
|
||||
|
||||
**The Rust async ecosystem is stable and highly efficient, providing several key benefits:**
|
||||
|
||||
- Reduced Overhead: Significantly lower CPU and memory consumption, particularly for I/O-bound workloads.
|
||||
- Zero-Cost Abstractions: Rust's async model allows developers to "pay only for what they use," avoiding heap allocations and dynamic dispatch where possible.
|
||||
- For further reading, see [Why Async?](https://rust-lang.github.io/async-book/01_getting_started/02_why_async.html) and [The State of Asynchronous Rust](https://rust-lang.github.io/async-book/01_getting_started/03_state_of_async_rust.html).
|
||||
|
||||
**Limitations of Synchronous Rust in kata-runtime:**
|
||||
|
||||
- Thread Proliferation: Every TTRPC connection creates multiple threads (Reaper, Listener, Handler), and each container adds 3 additional I/O threads, leading to high thread count and memory pressure.
|
||||
- Timeout Complexity: Implementing reliable, cross-platform timeout mechanisms in synchronous code is difficult, especially when aligning with Golang-based components.
|
||||
|
||||
#### Implementation
|
||||
|
||||
The kata-runtime utilizes Tokio to manage asynchronous tasks. By offloading TTRPC and container-related I/O to a unified Tokio executor and switching dependencies (Timer, File, Netlink) to their asynchronous counterparts, we achieve non-blocking I/O. The built-in VMM remains on a dedicated OS thread to ensure control and real-time performance.
|
||||
|
||||
**Comparison of OS Thread usage (for N tokio worker threads and M containers)**
|
||||
|
||||
- Sync Runtime: OS thread count scales as 4 + 12*M.
|
||||
- Async Runtime: OS thread count scales as 2 + N.
|
||||
|
||||
```shell
|
||||
├─ main(OS thread)
|
||||
├─ async-logger(OS thread)
|
||||
└─ tokio worker(N * OS thread)
|
||||
├─ agent log forwarder(1 * tokio task)
|
||||
├─ health check thread(1 * tokio task)
|
||||
├─ TTRPC reaper thread(M * tokio task)
|
||||
├─ TTRPC listener thread(M * tokio task)
|
||||
├─ TTRPC client handler thread(7 * M * tokio task)
|
||||
├─ container stdin io thread(M * tokio task)
|
||||
├─ container stdout io thread(M * tokio task)
|
||||
└─ container stderr io thread(M * tokio task)
|
||||
```
|
||||
|
||||
The Async Advantage:
|
||||
We move away from thread-per-task to a Tokio-driven task model.
|
||||
|
||||
* **Scalability**: The OS thread count is reduced from 4 + 12*M (Sync) to 2 + N (Async), where N is the worker thread count.
|
||||
* **Efficiency**: Non-blocking I/O allows a single thread to handle multiplexed container operations, significantly lowering memory consumption for high-density pod deployments.
|
||||
|
||||
---
|
||||
|
||||
## 2. Getting Started
|
||||
To configure your preferred VMM strategy, locate the `[hypervisor]` block in your runtime configuration file:
|
||||
|
||||
- Install Kata Containers with the Rust Runtime and Dragonball as the built-in VMM by following the [containerd-kata](../../how-to/containerd-kata.md).
|
||||
- Run a Kata container with the built-in VMM Dragonball
|
||||
|
||||
```shell
|
||||
$ sudo ctr run --runtime io.containerd.kata.v2 -d docker.io/library/ubuntu:latest hello
|
||||
```
|
||||
|
||||
As the VMM and its image service are built in, you should only see a single containerd-shim-kata-v2 process.
|
||||
|
||||
---
|
||||
|
||||
## FAQ
|
||||
|
||||
* **Q1**: Is the architecture compatible with containerd?
|
||||
|
||||
Yes. It implements the containerd-shim-v2 interface, ensuring drop-in compatibility with standard cloud-native tooling.
|
||||
|
||||
* **Q2**: What is the security boundary for the "Built-in VMM" model?
|
||||
|
||||
The security boundary remains established by the hypervisor (hardware virtualization). The shift to a monolithic process model does not compromise isolation; rather, it improves the integrity of the control plane by reducing the attack surface typically associated with complex IPC mechanisms.
|
||||
|
||||
* **Q3**: What is the migration path?
|
||||
|
||||
Migration is managed via configuration policies. The containerd shim configuration will allow users to toggle between the legacy runtime and the runtime-rs (internally `RunD`) binary, facilitating canary deployments and gradual migration.
|
||||
|
||||
* **Q4**: Why upcall instead of ACPI?
|
||||
|
||||
Standard ACPI-based hotplugging requires heavy guest-side kernel emulation and udevd interaction. Dbs-upcall utilizes a vsock-based direct channel to trigger hotplug events, providing:
|
||||
|
||||
Deterministic execution: Bypassing complex guest-side ACPI state machines.
|
||||
Lower overhead: Minimizing guest kernel footprint.
|
||||
|
||||
* **Q5**: How does upcall work?
|
||||
|
||||
The `Dbs-upcall` architecture consists of a server-side driver in the guest kernel and a client-side thread within the VMM. Once the guest kernel initializes, it establishes a communication channel via vsock (using uds). This allows the VMM to directly request device hot-add/hot-remove operations. We have already open-sourced this implementation: [dbs-upcall](https://github.com/openanolis/dragonball-sandbox/tree/main/crates/dbs-upcall).
|
||||
@@ -1,137 +1,324 @@
|
||||
# Virtualization in Kata Containers
|
||||
|
||||
In Kata Containers, a second layer of isolation is created on top of those provided by traditional namespace-containers. The
|
||||
hardware virtualization interface is the basis of this additional layer. Kata will launch a lightweight virtual machine,
|
||||
and use the guest’s Linux kernel to create a container workload, or workloads in the case of multi-container pods. In Kubernetes
|
||||
and in the Kata implementation, the sandbox is carried out at the pod level. In Kata, this sandbox is created using a virtual machine.
|
||||
## Overview
|
||||
|
||||
This document describes how Kata Containers maps container technologies to virtual machines technologies, and how this is realized in
|
||||
the multiple hypervisors and virtual machine monitors that Kata supports.
|
||||
Kata Containers creates a second layer of isolation on top of traditional namespace-based containers using hardware virtualization. Kata launches a lightweight virtual machine (VM) and uses the guest Linux kernel to create container workloads. In Kubernetes, the sandbox is implemented at the pod level using VMs.
|
||||
|
||||
## Mapping container concepts to virtual machine technologies
|
||||
This document describes:
|
||||
|
||||
A typical deployment of Kata Containers will be in Kubernetes by way of a Container Runtime Interface (CRI) implementation. On every node,
|
||||
Kubelet will interact with a CRI implementer (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
|
||||
- How Kata Containers maps container technologies to virtualization technologies
|
||||
- The multiple hypervisors and Virtual Machine Monitors (VMMs) supported by Kata
|
||||
- Guidance for selecting the appropriate hypervisor for your use case
|
||||
|
||||
The CRI API, as defined at the [Kubernetes CRI-API repo](https://github.com/kubernetes/cri-api/), implies a few constructs being supported by the
|
||||
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementer, Kata must provide the following constructs:
|
||||
### Architecture
|
||||
|
||||

|
||||
A typical Kata Containers deployment integrates with Kubernetes through a Container Runtime Interface (CRI) implementation:
|
||||
|
||||
These constructs can then be further mapped to what devices are necessary for interfacing with the virtual machine:
|
||||
```
|
||||
Kubelet → CRI (containerd/CRI-O) → Kata Containers (OCI runtime) → VM → Containers
|
||||
```
|
||||
|
||||

|
||||
The CRI API requires Kata to support the following constructs:
|
||||
|
||||
Ultimately, these concepts map to specific para-virtualized devices or virtualization technologies.
|
||||
| CRI Construct | VM Equivalent | Virtualization Technology |
|
||||
|---------------|---------------|---------------------------|
|
||||
| Pod Sandbox | VM | Hypervisor/VMM |
|
||||
| Container | Process in VM | Namespace/Cgroup in guest |
|
||||
| Network | Network Interface | virtio-net, vhost-net, physical, etc. |
|
||||
| Storage | Block/File Device | virtio-block, virtio-scsi, virtio-fs |
|
||||
| Compute | vCPU/Memory | KVM, ACPI hotplug |
|
||||
|
||||

|
||||
### Mapping Container Concepts to Virtualization Technologies
|
||||
|
||||
Each hypervisor or VMM varies in how, or whether, it handles each of these.
|
||||
Kata Containers implements the Kubernetes Container Runtime Interface (CRI) to provide pod and container lifecycle management. The CRI API defines abstractions that Kata must translate into virtualization primitives.
|
||||
|
||||
## Kata Containers Hypervisor and VMM support
|
||||
The mapping from CRI constructs to virtualization technologies follows a three-layer model:
|
||||
|
||||
Kata Containers [supports multiple hypervisors](../hypervisors.md).
|
||||
```
|
||||
CRI API Constructs → VM Abstractions → Para-virtualized Devices
|
||||
```
|
||||
|
||||
Details of each solution and a summary are provided below.
|
||||
**Layer 1: CRI API Constructs**
|
||||
|
||||
The CRI API ([kubernetes/cri-api](https://github.com/kubernetes/cri-api)) defines the following abstractions that Kata must implement:
|
||||
|
||||
| Construct | Description |
|
||||
|-----------|-------------|
|
||||
| Pod Sandbox | Isolated execution environment for containers |
|
||||
| Container | Process workload within a sandbox |
|
||||
| Network | Pod and container networking interfaces |
|
||||
| Storage | Volume mounts and image storage |
|
||||
| RuntimeConfig | Resource constraints (CPU, memory, cgroups) |
|
||||
|
||||

|
||||
|
||||
**Layer 2: VM Abstractions**
|
||||
|
||||
Kata translates CRI constructs into VM-level concepts:
|
||||
|
||||
| CRI Construct | VM Equivalent |
|
||||
|---------------|---------------|
|
||||
| Pod Sandbox | Virtual Machine |
|
||||
| Container | Process/namespace in guest OS |
|
||||
| Network | Virtual NIC (vNIC) |
|
||||
| Storage | Virtual block device or filesystem |
|
||||
| RuntimeConfig | VM resources (vCPU, memory) |
|
||||
|
||||

|
||||
|
||||
**Layer 3: Para-virtualized Devices**
|
||||
|
||||
VM abstractions are realized through para-virtualized drivers for optimal performance:
|
||||
|
||||
| VM Concept | Device Technology |
|
||||
|------------|-------------------|
|
||||
| vNIC | virtio-net, vhost-net, macvtap |
|
||||
| Block Storage | virtio-block, virtio-scsi |
|
||||
| Shared Filesystem | virtio-fs |
|
||||
| Agent Communication | virtio-vsock |
|
||||
| Device Passthrough | VFIO with IOMMU |
|
||||
|
||||

|
||||
|
||||
> **Note:** Each hypervisor implements these mappings differently based on its device model and feature set. See the [Hypervisor Details](#hypervisor-details) section for specific implementations.
|
||||
|
||||
### Device Mapping
|
||||
|
||||
Container constructs map to para-virtualized devices:
|
||||
|
||||
| Construct | Device Type | Technology |
|
||||
|-----------|-------------|------------|
|
||||
| Network | Network Interface | virtio-net, vhost-net |
|
||||
| Storage (ephemeral) | Block Device | virtio-block, virtio-scsi |
|
||||
| Storage (shared) | Filesystem | virtio-fs |
|
||||
| Communication | Socket | virtio-vsock |
|
||||
| GPU/Passthrough | PCI Device | VFIO, IOMMU |
|
||||
|
||||
## Supported Hypervisors and VMMs
|
||||
|
||||
Kata Containers supports multiple hypervisors, each with different characteristics:
|
||||
|
||||
| Hypervisor | Language | Architectures | Type |
|
||||
|------------|----------|---------------|------|
|
||||
| [QEMU] | C | x86_64, aarch64, ppc64le, s390x, risc-v | Type 2 (KVM) |
|
||||
| [Cloud Hypervisor] | Rust | x86_64, aarch64 | Type 2 (KVM) |
|
||||
| [Firecracker] | Rust | x86_64, aarch64 | Type 2 (KVM) |
|
||||
| `Dragonball` | Rust | x86_64, aarch64 | Type 2 (KVM) Built-in |
|
||||
|
||||
> **Note:** All supported hypervisors use KVM (Kernel-based Virtual Machine) as the underlying hardware virtualization interface on Linux.
|
||||
|
||||
## Hypervisor Details
|
||||
|
||||
### QEMU/KVM
|
||||
|
||||
Kata Containers with QEMU has complete compatibility with Kubernetes.
|
||||
QEMU is the most mature and feature-complete hypervisor option for Kata Containers.
|
||||
|
||||
Depending on the host architecture, Kata Containers supports various machine types,
|
||||
for example `q35` on x86 systems, `virt` on ARM systems and `pseries` on IBM Power systems. The default Kata Containers
|
||||
machine type is `q35`. The machine type and its [`Machine accelerators`](#machine-accelerators) can
|
||||
be changed by editing the runtime [`configuration`](architecture/README.md#configuration) file.
|
||||
**Machine Types:**
|
||||
|
||||
Devices and features used:
|
||||
- virtio VSOCK or virtio serial
|
||||
- virtio block or virtio SCSI
|
||||
- [virtio net](https://www.redhat.com/en/virtio-networking-series)
|
||||
- virtio fs or virtio 9p (recommend: virtio fs)
|
||||
- VFIO
|
||||
- hotplug
|
||||
- machine accelerators
|
||||
- `q35` (x86_64, default)
|
||||
- `s390x` (s390x)
|
||||
- `virt` (aarch64)
|
||||
- `pseries` (ppc64le)
|
||||
- `risc-v` (riscv64, experimental)
|
||||
|
||||
Machine accelerators and hotplug are used in Kata Containers to manage resource constraints, improve boot time and reduce memory footprint. These are documented below.
|
||||
**Devices and Features:**
|
||||
|
||||
#### Machine accelerators
|
||||
- virtio-vsock (agent communication)
|
||||
- virtio-block or virtio-scsi (storage)
|
||||
- virtio-net/vhost-net/vhost-user-net (networking)
|
||||
- virtio-fs (shared filesystem, virtio-fs recommended)
|
||||
- VFIO (device passthrough)
|
||||
- CPU and memory hotplug
|
||||
- NVDIMM (x86_64, for rootfs as persistent memory)
|
||||
|
||||
Machine accelerators are architecture specific and can be used to improve the performance
|
||||
and enable specific features of the machine types. The following machine accelerators
|
||||
are used in Kata Containers:
|
||||
**Use Cases:**
|
||||
|
||||
- NVDIMM: This machine accelerator is x86 specific and only supported by `q35` machine types.
|
||||
`nvdimm` is used to provide the root filesystem as a persistent memory device to the Virtual Machine.
|
||||
- Production workloads requiring full CRI API compatibility
|
||||
- Scenarios requiring device passthrough (VFIO)
|
||||
- Multi-architecture deployments
|
||||
|
||||
#### Hotplug devices
|
||||
**Configuration:** See [`configuration-qemu.toml`](../../src/runtime/config/configuration-qemu.toml.in)
|
||||
|
||||
The Kata Containers VM starts with a minimum amount of resources, allowing for faster boot time and a reduction in memory footprint. As the container launch progresses,
|
||||
devices are hotplugged to the VM. For example, when a CPU constraint is specified which includes additional CPUs, they can be hot added. Kata Containers has support
|
||||
for hot-adding the following devices:
|
||||
- Virtio block
|
||||
- Virtio SCSI
|
||||
- VFIO
|
||||
- CPU
|
||||
### Dragonball (Built-in VMM)
|
||||
|
||||
### Firecracker/KVM
|
||||
Dragonball is a Rust-based VMM integrated directly into the Kata Containers Rust runtime as a library.
|
||||
|
||||
Firecracker, built on many rust crates that are within [rust-VMM](https://github.com/rust-vmm), has a very limited device model, providing a lighter
|
||||
footprint and attack surface, focusing on function-as-a-service like use cases. As a result, Kata Containers with Firecracker VMM supports a subset of the CRI API.
|
||||
Firecracker does not support file-system sharing, and as a result only block-based storage drivers are supported. Firecracker does not support device
|
||||
hotplug nor does it support VFIO. As a result, Kata Containers with Firecracker VMM does not support updating container resources after boot, nor
|
||||
does it support device passthrough.
|
||||
**Advantages:**
|
||||
|
||||
Devices used:
|
||||
- virtio VSOCK
|
||||
- virtio block
|
||||
- virtio net
|
||||
- **Zero IPC overhead**: VMM runs in the same process as the runtime
|
||||
- **Unified lifecycle**: Simplified resource management and error handling
|
||||
- **Optimized for containers**: Purpose-built for container workloads
|
||||
- **Upcall support**: Direct VMM-to-Guest communication for efficient hotplug operations
|
||||
- **Low resource overhead**: Minimal CPU and memory footprint
|
||||
|
||||
**Architecture:**
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Kata Containers Runtime (Rust) │
|
||||
│ ┌─────────────────────────────────┐ │
|
||||
│ │ Dragonball VMM Library │ │
|
||||
│ └─────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**Features:**
|
||||
|
||||
- Built-in virtio-fs/nydus support
|
||||
- Async I/O via Tokio
|
||||
- Single binary deployment
|
||||
- Optimized startup latency
|
||||
|
||||
**Use Cases:**
|
||||
|
||||
- Default choice for most container workloads
|
||||
- High-density container deployments and low resource overhead scenarios
|
||||
- Scenarios requiring optimal startup performance
|
||||
|
||||
**Configuration:** See [`configuration-dragonball.toml`](../../src/runtime-rs/config/configuration-dragonball.toml.in)
|
||||
|
||||
### Cloud Hypervisor/KVM
|
||||
|
||||
[Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor), based
|
||||
on [rust-vmm](https://github.com/rust-vmm), is designed to have a
|
||||
lighter footprint and smaller attack surface for running modern cloud
|
||||
workloads. Kata Containers with Cloud
|
||||
Hypervisor provides mostly complete compatibility with Kubernetes
|
||||
comparable to the QEMU configuration. As of the 1.12 and 2.0.0 releases
|
||||
of Kata Containers, the Cloud Hypervisor configuration supports both CPU
|
||||
and memory resize, device hotplug (disk and VFIO), file-system sharing through virtio-fs,
|
||||
block-based volumes, booting from VM images backed by pmem device, and
|
||||
fine-grained seccomp filters for each VMM thread (e.g. all virtio
|
||||
device worker threads).
|
||||
Cloud Hypervisor is a Rust-based VMM designed for modern cloud workloads with a focus on performance and security.
|
||||
|
||||
Devices and features used:
|
||||
- virtio VSOCK or virtio serial
|
||||
- virtio block
|
||||
- virtio net
|
||||
- virtio fs
|
||||
- virtio pmem
|
||||
- VFIO
|
||||
- hotplug
|
||||
- seccomp filters
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
**Features:**
|
||||
|
||||
### StratoVirt/KVM
|
||||
- CPU and memory resize
|
||||
- Device hotplug (disk, VFIO)
|
||||
- virtio-fs (shared filesystem)
|
||||
- virtio-pmem (persistent memory)
|
||||
- virtio-block (block storage)
|
||||
- virtio-vsock (agent communication)
|
||||
- Fine-grained seccomp filters per VMM thread
|
||||
- HTTP OpenAPI for management
|
||||
|
||||
[StratoVirt](https://gitee.com/openeuler/stratovirt) is an enterprise-level open source VMM oriented to cloud data centers, implementing a unified architecture to support Standard-VMs, containers and serverless (Micro-VM). StratoVirt has some competitive advantages, such as lightweight and low resource overhead, fast boot, hardware acceleration, and language-level security with Rust.
|
||||
**Use Cases:**
|
||||
|
||||
Currently, StratoVirt in Kata supports the Micro-VM machine type, mainly focusing on FaaS cases, supporting device hotplug (virtio block), file-system sharing through virtio fs and so on. Kata Containers with StratoVirt now uses the virtio-mmio bus as the driver, and doesn't support CPU/memory resize nor VFIO, thus doesn't support updating container resources after boot.
|
||||
- High-performance cloud-native workloads
|
||||
- Applications requiring memory/CPU resizing
|
||||
- Security-sensitive deployments (seccomp isolation)
|
||||
|
||||
Devices and features used currently:
|
||||
- Micro-VM machine type for FaaS (mmio, no ACPI)
|
||||
- Virtual Socket (vhost VSOCK, virtio console)
|
||||
- Virtual Storage (virtio block, mmio)
|
||||
- Virtual Networking (virtio net, mmio)
|
||||
- Shared Filesystem (virtio fs)
|
||||
- Device Hotplugging (virtio block hotplug)
|
||||
- Entropy Source (virtio RNG)
|
||||
- QMP API
|
||||
**Configuration:** See [`configuration-cloud-hypervisor.toml`](../../src/runtime-rs/config/configuration-cloud-hypervisor.toml.in)
|
||||
|
||||
### Summary
|
||||
### Firecracker/KVM
|
||||
|
||||
| Solution | release introduced | brief summary |
|
||||
|-|-|-|
|
||||
| Cloud Hypervisor | 1.10 | upstream Cloud Hypervisor with rich feature support, e.g. hotplug, VFIO and FS sharing|
|
||||
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
|
||||
| QEMU | 1.0 | upstream QEMU, with support for hotplug and filesystem sharing |
|
||||
| StratoVirt | 3.3 | upstream StratoVirt with FS sharing and virtio block hotplug, no VFIO, no CPU/memory resize |
|
||||
Firecracker is a minimalist VMM built on rust-vmm crates, optimized for serverless and FaaS workloads.
|
||||
|
||||
**Devices:**
|
||||
|
||||
- virtio-vsock (agent communication)
|
||||
- virtio-block (block storage)
|
||||
- virtio-net (networking)
|
||||
|
||||
**Limitations:**
|
||||
|
||||
- No filesystem sharing (virtio-fs not supported)
|
||||
- No device hotplug
|
||||
- No VFIO/passthrough support
|
||||
- No CPU/memory hotplug
|
||||
- Limited CRI API support
|
||||
|
||||
**Use Cases:**
|
||||
|
||||
- Serverless/FaaS workloads
|
||||
- Single-tenant microVMs
|
||||
- Scenarios prioritizing minimal attack surface
|
||||
|
||||
**Configuration:** See [`configuration-fc.toml`](../../src/runtime/config/configuration-fc.toml.in)
|
||||
|
||||
## Hypervisor Comparison Summary
|
||||
|
||||
| Feature | QEMU | Cloud Hypervisor | Firecracker | Dragonball |
|
||||
|---------|------|------------------|-------------|------------|
|
||||
| Maturity | Excellent | Good | Good | Good |
|
||||
| CRI Compatibility | Full | Full | Partial | Full |
|
||||
| Filesystem Sharing | ✓ | ✓ | ✗ | ✓ |
|
||||
| Device Hotplug | ✓ | ✓ | ✗ | ✓ |
|
||||
| VFIO/Passthrough | ✓ | ✓ | ✗ | ✓ |
|
||||
| CPU/Memory Hotplug | ✓ | ✓ | ✗ | ✓ |
|
||||
| Security Isolation | Good | Excellent (seccomp) | Excellent | Excellent |
|
||||
| Startup Latency | Good | Excellent | Excellent | Best |
|
||||
| Resource Overhead | Medium | Low | Lowest | Lowest |
|
||||
|
||||
## Choosing a Hypervisor
|
||||
|
||||
### Decision Matrix
|
||||
|
||||
| Requirement | Recommended Hypervisor |
|
||||
|-------------|------------------------|
|
||||
| Full CRI API compatibility | QEMU, Cloud Hypervisor, Dragonball |
|
||||
| Device passthrough (VFIO) | QEMU, Cloud Hypervisor, Dragonball |
|
||||
| Minimal resource overhead | Dragonball, Firecracker |
|
||||
| Fastest startup time | Dragonball, Firecracker |
|
||||
| Serverless/FaaS | Dragonball, Firecracker |
|
||||
| Production workloads | Dragonball, QEMU |
|
||||
| Memory/CPU resizing | Dragonball, Cloud Hypervisor, QEMU |
|
||||
| Maximum security isolation | Cloud Hypervisor (seccomp), Firecracker, Dragonball |
|
||||
| Multi-architecture | QEMU |
|
||||
|
||||
### Recommendations
|
||||
|
||||
**For Most Users:** Use the default Dragonball VMM with the Kata Containers Rust runtime. It provides the best balance of performance, security, and container density.
|
||||
|
||||
**For Device Passthrough:** Use QEMU, Cloud Hypervisor, or Dragonball if you require VFIO device assignment.
|
||||
|
||||
**For Serverless:** Use Dragonball or Firecracker for ultra-lightweight, single-tenant microVMs.
|
||||
|
||||
**For Legacy/Ecosystem Compatibility:** Use QEMU for its extensive hardware emulation and multi-architecture support.
|
||||
|
||||
## Hypervisor Configuration
|
||||
|
||||
### Configuration Files
|
||||
|
||||
Each hypervisor has a dedicated configuration file:
|
||||
|
||||
| Hypervisor | Rust Runtime Configuration | Go Runtime Configuration |
|
||||
|------------|----------------|-----------------|
|
||||
| QEMU |`configuration-qemu-runtime-rs.toml` |`configuration-qemu.toml` |
|
||||
| Cloud Hypervisor | `configuration-cloud-hypervisor.toml` | `configuration-clh.toml` |
|
||||
| Firecracker | `configuration-rs-fc.toml` | `configuration-fc.toml` |
|
||||
| Dragonball | `configuration-dragonball.toml` (default) | Not supported |
|
||||
|
||||
> **Note:** Configuration files are typically installed in `/opt/kata/share/defaults/kata-containers/` or `/opt/kata/share/defaults/kata-containers/runtime-rs/` or `/usr/share/defaults/kata-containers/`.
|
||||
|
||||
### Switching Hypervisors
|
||||
|
||||
Use the `kata-manager` tool to switch the configured hypervisor:
|
||||
|
||||
```bash
|
||||
# List available hypervisors
|
||||
$ kata-manager -L
|
||||
|
||||
# Switch to a different hypervisor
|
||||
$ sudo kata-manager -S <hypervisor-name>
|
||||
```
|
||||
|
||||
For detailed instructions, see the [`kata-manager` documentation](../../utils/README.md).
|
||||
|
||||
## Hypervisor Versions
|
||||
|
||||
The following versions are used in this release (from [versions.yaml](../../versions.yaml)):
|
||||
|
||||
| Hypervisor | Version | Repository |
|
||||
|------------|---------|------------|
|
||||
| Cloud Hypervisor | v51.1 | https://github.com/cloud-hypervisor/cloud-hypervisor |
|
||||
| Firecracker | v1.12.1 | https://github.com/firecracker-microvm/firecracker |
|
||||
| QEMU | v10.2.1 | https://github.com/qemu/qemu |
|
||||
| Dragonball | builtin | https://github.com/kata-containers/kata-containers/tree/main/src/dragonball |
|
||||
|
||||
> **Note:** Dragonball is integrated into the Kata Containers Rust runtime and does not have a separate version number.
|
||||
> For the latest hypervisor versions, see the [versions.yaml](../../versions.yaml) file in the Kata Containers repository.
|
||||
|
||||
## References
|
||||
|
||||
- [Kata Containers Architecture](./architecture/README.md)
|
||||
- [Configuration Guide](../../src/runtime/README.md#configuration)
|
||||
- [QEMU Documentation](https://www.qemu.org/documentation/)
|
||||
- [Cloud Hypervisor Documentation](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/api.md)
|
||||
- [Firecracker Documentation](https://github.com/firecracker-microvm/firecracker/tree/main/docs)
|
||||
- [Dragonball Source](https://github.com/kata-containers/kata-containers/tree/main/src/dragonball)
|
||||
|
||||
[KVM]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
|
||||
[QEMU]: https://www.qemu.org
|
||||
[Cloud Hypervisor]: https://github.com/cloud-hypervisor/cloud-hypervisor
|
||||
[Firecracker]: https://github.com/firecracker-microvm/firecracker
|
||||
[`Dragonball`]: https://github.com/kata-containers/kata-containers/tree/main/src/dragonball
|
||||
|
||||
264
docs/helm-configuration.md
Normal file
@@ -0,0 +1,264 @@
|
||||
# Helm Configuration
|
||||
|
||||
## Parameters
|
||||
|
||||
The helm chart provides a comprehensive set of configuration options. You may view the parameters and their descriptions by going to the [GitHub source](https://github.com/kata-containers/kata-containers/blob/main/tools/packaging/kata-deploy/helm-chart/kata-deploy/values.yaml) or by using helm:
|
||||
|
||||
```sh
|
||||
# List available kata-deploy chart versions:
|
||||
# helm search repo kata-deploy-charts/kata-deploy --versions
|
||||
#
|
||||
# Then replace X.Y.Z below with the desired chart version:
|
||||
helm show values --version X.Y.Z oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy
|
||||
```
|
||||
|
||||
### shims
|
||||
|
||||
Kata ships with a number of pre-built artifacts and runtimes. You may selectively enable or disable specific shims. For example:
|
||||
|
||||
```yaml title="values.yaml"
|
||||
shims:
|
||||
disableAll: true
|
||||
qemu:
|
||||
enabled: true
|
||||
qemu-nvidia-gpu:
|
||||
enabled: true
|
||||
qemu-nvidia-gpu-snp:
|
||||
enabled: false
|
||||
|
||||
```
|
||||
|
||||
Shims can also have configuration options specific to them:
|
||||
|
||||
```yaml
|
||||
qemu-nvidia-gpu:
|
||||
enabled: ~
|
||||
supportedArches:
|
||||
- amd64
|
||||
- arm64
|
||||
allowedHypervisorAnnotations: []
|
||||
containerd:
|
||||
snapshotter: ""
|
||||
runtimeClass:
|
||||
# This label is automatically added by gpu-operator. Override it
|
||||
# if you want to use a different label.
|
||||
# Uncomment once GPU Operator v26.3 is out
|
||||
# nodeSelector:
|
||||
# nvidia.com/cc.ready.state: "false"
|
||||
```
|
||||
|
||||
It's best to reference the default `values.yaml` file above for more details.
|
||||
|
||||
### Custom Runtimes
|
||||
|
||||
Kata allows you to create custom runtime configurations. This is done by overlaying one of the pre-existing runtime configs with user-provided configs. For example, we can use the `qemu-nvidia-gpu` as a base config and overlay our own parameters to it:
|
||||
|
||||
```yaml
|
||||
customRuntimes:
|
||||
enabled: false
|
||||
runtimes:
|
||||
my-gpu-runtime:
|
||||
baseConfig: "qemu-nvidia-gpu" # Required: existing config to use as base
|
||||
dropIn: | # Optional: overrides via config.d mechanism
|
||||
[hypervisor.qemu]
|
||||
default_memory = 1024
|
||||
default_vcpus = 4
|
||||
runtimeClass: |
|
||||
kind: RuntimeClass
|
||||
apiVersion: node.k8s.io/v1
|
||||
metadata:
|
||||
name: kata-my-gpu-runtime
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: kata-deploy
|
||||
handler: kata-my-gpu-runtime
|
||||
overhead:
|
||||
podFixed:
|
||||
memory: "640Mi"
|
||||
cpu: "500m"
|
||||
scheduling:
|
||||
nodeSelector:
|
||||
katacontainers.io/kata-runtime: "true"
|
||||
# Optional: CRI-specific configuration
|
||||
containerd:
|
||||
snapshotter: "nydus" # Configure containerd snapshotter (nydus, erofs, etc.)
|
||||
crio:
|
||||
pullType: "guest-pull" # Configure CRI-O runtime_pull_image = true
|
||||
```
|
||||
|
||||
Again, view the default [`values.yaml`](#parameters) file for more details.
|
||||
|
||||
## Examples
|
||||
|
||||
We provide a few examples that you can pass to helm via the `-f`/`--values` flag.
|
||||
|
||||
### [`try-kata-tee.values.yaml`](https://github.com/kata-containers/kata-containers/blob/main/tools/packaging/kata-deploy/helm-chart/kata-deploy/try-kata-tee.values.yaml)
|
||||
|
||||
This file enables only the TEE (Trusted Execution Environment) shims for confidential computing:
|
||||
|
||||
```sh
|
||||
helm install kata-deploy oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy \
|
||||
--version VERSION \
|
||||
-f try-kata-tee.values.yaml
|
||||
```
|
||||
|
||||
Includes:
|
||||
|
||||
- `qemu-snp` - AMD SEV-SNP (amd64)
|
||||
- `qemu-tdx` - Intel TDX (amd64)
|
||||
- `qemu-se` - IBM Secure Execution for Linux (SEL) (s390x)
|
||||
- `qemu-se-runtime-rs` - IBM Secure Execution for Linux (SEL) Rust runtime (s390x)
|
||||
- `qemu-cca` - Arm Confidential Compute Architecture (arm64)
|
||||
- `qemu-coco-dev` - Confidential Containers development (amd64, s390x)
|
||||
- `qemu-coco-dev-runtime-rs` - Confidential Containers development Rust runtime (amd64, s390x)
|
||||
|
||||
### [`try-kata-nvidia-gpu.values.yaml`](https://github.com/kata-containers/kata-containers/blob/main/tools/packaging/kata-deploy/helm-chart/kata-deploy/try-kata-nvidia-gpu.values.yaml)
|
||||
|
||||
This file enables only the NVIDIA GPU-enabled shims:
|
||||
|
||||
```sh
|
||||
helm install kata-deploy oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy \
|
||||
--version VERSION \
|
||||
-f try-kata-nvidia-gpu.values.yaml
|
||||
```
|
||||
|
||||
Includes:
|
||||
|
||||
- `qemu-nvidia-gpu` - Standard NVIDIA GPU support (amd64, arm64)
|
||||
- `qemu-nvidia-gpu-snp` - NVIDIA GPU with AMD SEV-SNP (amd64)
|
||||
- `qemu-nvidia-gpu-tdx` - NVIDIA GPU with Intel TDX (amd64)
|
||||
|
||||
### `nodeSelector`
|
||||
|
||||
We can deploy Kata only to specific nodes using `nodeSelector`:
|
||||
|
||||
```sh
|
||||
# First, label the nodes where you want kata-containers to be installed
|
||||
$ kubectl label nodes worker-node-1 kata-containers=enabled
|
||||
$ kubectl label nodes worker-node-2 kata-containers=enabled
|
||||
|
||||
# Then install the chart with `nodeSelector`
|
||||
$ helm install kata-deploy \
|
||||
--set nodeSelector.kata-containers="enabled" \
|
||||
"${CHART}" --version "${VERSION}"
|
||||
```
|
||||
|
||||
You can also use a values file:
|
||||
|
||||
```yaml title="values.yaml"
|
||||
nodeSelector:
|
||||
kata-containers: "enabled"
|
||||
node-type: "worker"
|
||||
```
|
||||
|
||||
```sh
|
||||
$ helm install kata-deploy -f values.yaml "${CHART}" --version "${VERSION}"
|
||||
```
|
||||
|
||||
### Multiple Kata installations on the Same Node
|
||||
|
||||
For debugging, testing and other use-cases it is possible to deploy multiple
|
||||
versions of Kata on the very same node. All the needed artifacts get the
|
||||
`multiInstallSuffix` appended to distinguish each installation. **BEWARE** that one
|
||||
needs at least **containerd-2.0** since this version has drop-in conf support
|
||||
which is a prerequisite for the `multiInstallSuffix` to work properly.
|
||||
|
||||
```sh
|
||||
$ helm install kata-deploy-cicd \
|
||||
-n kata-deploy-cicd \
|
||||
--set env.multiInstallSuffix=cicd \
|
||||
--set env.debug=true \
|
||||
"${CHART}" --version "${VERSION}"
|
||||
```
|
||||
|
||||
Note: `runtimeClasses` are automatically created by Helm (via
|
||||
`runtimeClasses.enabled=true`, which is the default).
|
||||
|
||||
Now verify the installation by examining the `runtimeClasses`:
|
||||
|
||||
```sh
|
||||
$ kubectl get runtimeClasses
|
||||
NAME HANDLER AGE
|
||||
kata-clh-cicd kata-clh-cicd 77s
|
||||
kata-cloud-hypervisor-cicd kata-cloud-hypervisor-cicd 77s
|
||||
kata-dragonball-cicd kata-dragonball-cicd 77s
|
||||
kata-fc-cicd kata-fc-cicd 77s
|
||||
kata-qemu-cicd kata-qemu-cicd 77s
|
||||
kata-qemu-coco-dev-cicd kata-qemu-coco-dev-cicd 77s
|
||||
kata-qemu-nvidia-gpu-cicd kata-qemu-nvidia-gpu-cicd 77s
|
||||
kata-qemu-nvidia-gpu-snp-cicd kata-qemu-nvidia-gpu-snp-cicd 77s
|
||||
kata-qemu-nvidia-gpu-tdx-cicd kata-qemu-nvidia-gpu-tdx-cicd 76s
|
||||
kata-qemu-runtime-rs-cicd kata-qemu-runtime-rs-cicd 77s
|
||||
kata-qemu-se-runtime-rs-cicd kata-qemu-se-runtime-rs-cicd 77s
|
||||
kata-qemu-snp-cicd kata-qemu-snp-cicd 77s
|
||||
kata-qemu-tdx-cicd kata-qemu-tdx-cicd 77s
|
||||
kata-stratovirt-cicd kata-stratovirt-cicd 77s
|
||||
```
|
||||
|
||||
## RuntimeClass Node Selectors for TEE Shims
|
||||
|
||||
**Manual configuration:** Any `nodeSelector` you set under `shims.<shim>.runtimeClass.nodeSelector`
|
||||
is **always applied** to that shim's RuntimeClass, whether or not NFD is present. Use this when
|
||||
you want to pin TEE workloads to specific nodes (e.g. without NFD, or with custom labels).
|
||||
|
||||
**Auto-inject when NFD is present:** If you do *not* set a `runtimeClass.nodeSelector` for a
|
||||
TEE shim, the chart can **automatically inject** NFD-based labels when NFD is detected in the
|
||||
cluster (deployed by this chart with `node-feature-discovery.enabled=true` or found externally):
|
||||
|
||||
- AMD SEV-SNP shims: `amd.feature.node.kubernetes.io/snp: "true"`
|
||||
- Intel TDX shims: `intel.feature.node.kubernetes.io/tdx: "true"`
|
||||
- IBM Secure Execution for Linux (SEL) shims (s390x): `feature.node.kubernetes.io/cpu-security.se.enabled: "true"`
|
||||
|
||||
The chart uses Helm's `lookup` function to detect NFD (by looking for the
|
||||
`node-feature-discovery-worker` DaemonSet). Auto-inject only runs when NFD is detected and
|
||||
no manual `runtimeClass.nodeSelector` is set for that shim.
|
||||
|
||||
**Note**: NFD detection requires cluster access. During `helm template` (dry-run without a
|
||||
cluster), external NFD is not seen, so auto-injected labels are not added. Manual
|
||||
`runtimeClass.nodeSelector` values are still applied in all cases.
|
||||
|
||||
## Customizing Configuration with Drop-in Files
|
||||
|
||||
When kata-deploy installs Kata Containers, the base configuration files should not
|
||||
be modified directly. Instead, use drop-in configuration files to customize
|
||||
settings. This approach ensures your customizations survive kata-deploy upgrades.
|
||||
|
||||
### How Drop-in Files Work
|
||||
|
||||
The Kata runtime reads the base configuration file and then applies any `.toml`
|
||||
files found in the `config.d/` directory alongside it. Files are processed in
|
||||
alphabetical order, with later files overriding earlier settings.
|
||||
|
||||
### Creating Custom Drop-in Files
|
||||
|
||||
To add custom settings, create a `.toml` file in the appropriate `config.d/`
|
||||
directory. Use a numeric prefix to control the order of application.
|
||||
|
||||
**Reserved prefixes** (used by kata-deploy):
|
||||
|
||||
- `10-*`: Core kata-deploy settings
|
||||
- `20-*`: Debug settings
|
||||
- `30-*`: Kernel parameters
|
||||
|
||||
**Recommended prefixes for custom settings**: `50-89`
|
||||
|
||||
### Drop-In Config Examples
|
||||
|
||||
#### Adding Custom Kernel Parameters
|
||||
|
||||
```bash
|
||||
# SSH into the node or use kubectl exec
|
||||
sudo mkdir -p /opt/kata/share/defaults/kata-containers/runtimes/qemu/config.d/
|
||||
sudo cat > /opt/kata/share/defaults/kata-containers/runtimes/qemu/config.d/50-custom.toml << 'EOF'
|
||||
[hypervisor.qemu]
|
||||
kernel_params = "my_param=value"
|
||||
EOF
|
||||
```
|
||||
|
||||
#### Changing Default Memory Size
|
||||
|
||||
```bash
|
||||
sudo cat > /opt/kata/share/defaults/kata-containers/runtimes/qemu/config.d/50-memory.toml << 'EOF'
|
||||
[hypervisor.qemu]
|
||||
default_memory = 4096
|
||||
EOF
|
||||
```
|
||||
@@ -3,9 +3,9 @@
|
||||
## Kubernetes Integration
|
||||
|
||||
- [Run Kata containers with `crictl`](run-kata-with-crictl.md)
|
||||
- [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
|
||||
- [How to use Kata Containers and Containerd](containerd-kata.md)
|
||||
- [How to use Kata Containers and containerd with Kubernetes](how-to-use-k8s-with-containerd-and-kata.md)
|
||||
- [How to use Kata Containers and CRI-O with Kubernetes](how-to-use-k8s-with-crio-and-kata.md)
|
||||
- [Kata Containers and service mesh for Kubernetes](service-mesh.md)
|
||||
- [How to import Kata Containers logs into Fluentd](how-to-import-kata-logs-with-fluentd.md)
|
||||
|
||||
@@ -50,3 +50,4 @@
|
||||
- [How to pull images in the guest](how-to-pull-images-in-guest-with-kata.md)
|
||||
- [How to use mem-agent to decrease the memory usage of Kata container](how-to-use-memory-agent.md)
|
||||
- [How to use seccomp with runtime-rs](how-to-use-seccomp-with-runtime-rs.md)
|
||||
- [How to use passthroughfd-IO with runtime-rs and Dragonball](how-to-use-passthroughfd-io-within-runtime-rs.md)
|
||||
|
||||
@@ -5,7 +5,7 @@ and [Kata Containers](https://katacontainers.io). The containerd provides not on
|
||||
command line tool, but also the [CRI](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/)
|
||||
interface for [Kubernetes](https://kubernetes.io) and other CRI clients.
|
||||
|
||||
This document is primarily written for Kata Containers v1.5.0-rc2 or above, and containerd v1.2.0 or above.
|
||||
This document is primarily written for Kata Containers v3.28 or above, and containerd v1.7.0 or above.
|
||||
Previous versions are addressed here, but we suggest users upgrade to the newer versions for better support.
|
||||
|
||||
## Concepts
|
||||
@@ -14,7 +14,7 @@ Previous versions are addressed here, but we suggest users upgrade to the newer
|
||||
|
||||
[`RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/) is a Kubernetes feature first
|
||||
introduced in Kubernetes 1.12 as alpha. It is the feature for selecting the container runtime configuration to
|
||||
use to run a pod’s containers. This feature is supported in `containerd` since [v1.2.0](https://github.com/containerd/containerd/releases/tag/v1.2.0).
|
||||
use to run a pod's containers. This feature is supported in `containerd` since [v1.2.0](https://github.com/containerd/containerd/releases/tag/v1.2.0).
|
||||
|
||||
Before the `RuntimeClass` was introduced, Kubernetes was not aware of the difference of runtimes on the node. `kubelet`
|
||||
creates Pod sandboxes and containers through CRI implementations, and treats all the Pods equally. However, there
|
||||
@@ -123,18 +123,56 @@ The following sections outline how to add Kata Containers to the configurations.
|
||||
|
||||
#### Kata Containers as a `RuntimeClass`
|
||||
|
||||
For
|
||||
- Kata Containers v1.5.0 or above (including `1.5.0-rc`)
|
||||
- Containerd v1.2.0 or above
|
||||
- Kubernetes v1.12.0 or above
|
||||
For Kubernetes users, we suggest using `RuntimeClass` to select Kata Containers as the runtime for untrusted workloads. The configuration is as follows:
|
||||
|
||||
- Kata Containers v3.28.0 or above
|
||||
- Containerd v1.7.0 or above
|
||||
- Kubernetes v1.33 or above
|
||||
|
||||
The `RuntimeClass` is suggested.
|
||||
|
||||
The following example registers custom runtimes into containerd:
|
||||
|
||||
You can check the detailed information about the configuration of containerd in the [Containerd config documentation](https://github.com/containerd/containerd/blob/main/docs/cri/config.md).
|
||||
|
||||
+ In containerd 2.x
|
||||
|
||||
```toml
|
||||
version = 3
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd]
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes]
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
```
|
||||
|
||||
+ In containerd 1.7.x
|
||||
|
||||
```toml
|
||||
version = 2
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
```
|
||||
|
||||
The following configuration includes two runtime classes:
|
||||
- `plugins.cri.containerd.runtimes.runc`: the runc, and it is the default runtime.
|
||||
- `plugins.cri.containerd.runtimes.kata`: The function in containerd (reference [the document here](https://github.com/containerd/containerd/tree/main/core/runtime/v2))
|
||||
|
||||
- `plugins.<X>.containerd.runtimes.runc`: the runc, and it is the default runtime.
|
||||
- `plugins.<X>.containerd.runtimes.kata`: The function in containerd (reference [the document here](https://github.com/containerd/containerd/tree/main/core/runtime/v2))
|
||||
where the dot-connected string `io.containerd.kata.v2` is translated to `containerd-shim-kata-v2` (i.e. the
|
||||
binary name of the Kata implementation of [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/core/runtime/v2)).
|
||||
binary name of the Kata implementation of [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/main/core/runtime/v2)). By default, the `containerd-shim-kata-v2` (short of `shimv2`) binary will be installed under the path of `/usr/local/bin/`.
|
||||
|
||||
And `<X>` is `io.containerd.cri.v1.runtime` for containerd v2.x and `io.containerd.grpc.v1.cri` for containerd v1.7.x.
|
||||
|
||||
+ In containerd 1.7.x
|
||||
|
||||
```toml
|
||||
[plugins.cri.containerd]
|
||||
@@ -149,7 +187,7 @@ The following configuration includes two runtime classes:
|
||||
CriuPath = ""
|
||||
CriuWorkPath = ""
|
||||
IoGid = 0
|
||||
[plugins.cri.containerd.runtimes.kata]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
privileged_without_host_devices = true
|
||||
pod_annotations = ["io.katacontainers.*"]
|
||||
@@ -158,13 +196,71 @@ The following configuration includes two runtime classes:
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
```
|
||||
|
||||
+ In containerd 2.x
|
||||
|
||||
```toml
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd]
|
||||
no_pivot = false
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes]
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc]
|
||||
privileged_without_host_devices = false
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.runc.options]
|
||||
BinaryName = ""
|
||||
CriuImagePath = ""
|
||||
CriuPath = ""
|
||||
CriuWorkPath = ""
|
||||
IoGid = 0
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
privileged_without_host_devices = true
|
||||
pod_annotations = ["io.katacontainers.*"]
|
||||
container_annotations = ["io.katacontainers.*"]
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
```
|
||||
|
||||
`privileged_without_host_devices` tells containerd that a privileged Kata container should not have direct access to all host devices. If unset, containerd will pass all host devices to Kata container, which may cause security issues.
|
||||
|
||||
`pod_annotations` is the list of pod annotations passed to both the pod sandbox as well as container through the OCI config.
|
||||
|
||||
`container_annotations` is the list of container annotations passed through to the OCI config of the containers.
|
||||
|
||||
This `ConfigPath` option is optional. If you do not specify it, shimv2 first tries to get the configuration file from the environment variable `KATA_CONF_FILE`. If neither are set, shimv2 will use the default Kata configuration file paths (`/etc/kata-containers/configuration.toml` and `/usr/share/defaults/kata-containers/configuration.toml`).
|
||||
This `ConfigPath` option is optional. If you want to use a different configuration file, you can specify the path of the configuration file with `ConfigPath` in the containerd configuration file. We use containerd 2.x configuration as an example here, and the configuration for containerd 1.7.x is similar, just replace `io.containerd.cri.v1.runtime` with `io.containerd.grpc.v1.cri`.
|
||||
|
||||
```toml
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
```
|
||||
|
||||
> **Note:** In this example, the specified `ConfigPath` is valid in Kubernetes/Containerd workflow with containerd v1.7+ but doesn't work with ctr and nerdctl.
|
||||
|
||||
If you do not specify it, `shimv2` first tries to get the configuration file from the environment variable `KATA_CONF_FILE`. If you want to use this approach, you should first create a shell script, similar to `containerd-shim-kata-v2`, and place it under `/usr/local/bin/`. The following is an example of the shell script `containerd-shim-kata-qemu-v2`, which specifies the configuration file with `KATA_CONF_FILE`:
|
||||
|
||||
> **Note:** Just use containerd 2.x configuration as an example, the configuration for containerd 1.7.x is similar, just replace `io.containerd.cri.v1.runtime` with `io.containerd.grpc.v1.cri`
|
||||
|
||||
```shell
|
||||
~$ cat /usr/local/bin/containerd-shim-kata-qemu-v2
|
||||
#!/bin/bash
|
||||
KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-qemu.toml /opt/kata/bin/containerd-shim-kata-v2 "$@"
|
||||
```
|
||||
|
||||
And then just reference it in the configuration of containerd:
|
||||
|
||||
```toml
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata-qemu]
|
||||
runtime_type = "io.containerd.kata-qemu.v2"
|
||||
```
|
||||
|
||||
Finally, you can run a Kata container with the runtime `io.containerd.kata-qemu.v2`:
|
||||
|
||||
```shell
|
||||
$ sudo ctr run --cni --runtime io.containerd.kata-qemu.v2 -t --rm docker.io/library/busybox:latest hello sh
|
||||
```
|
||||
|
||||
> **Note:** The `KATA_CONF_FILE` environment variable is valid in both Kubernetes/Containerd workflow with containerd and containerd tools(ctr, nerdctl, etc.) scenarios.
|
||||
|
||||
If neither are set, shimv2 will use the default Kata configuration file paths (`/etc/kata-containers/configuration.toml` and `/usr/share/defaults/kata-containers/configuration.toml` and `/opt/kata/share/defaults/kata-containers/configuration.toml`).
|
||||
|
||||
#### Kata Containers as the runtime for untrusted workload
|
||||
|
||||
@@ -173,18 +269,20 @@ for an untrusted workload. With the following configuration, you can run trusted
|
||||
and then, run an untrusted workload with Kata Containers:
|
||||
|
||||
```toml
|
||||
[plugins.cri.containerd]
|
||||
# "plugins.cri.containerd.default_runtime" is the runtime to use in containerd.
|
||||
[plugins.cri.containerd.default_runtime]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
# "plugins."io.containerd.grpc.v1.cri".containerd.default_runtime" is the runtime to use in containerd.
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
|
||||
# runtime_type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux
|
||||
runtime_type = "io.containerd.runtime.v1.linux"
|
||||
|
||||
# "plugins.cri.containerd.untrusted_workload_runtime" is a runtime to run untrusted workloads on it.
|
||||
[plugins.cri.containerd.untrusted_workload_runtime]
|
||||
# "plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime" is a runtime to run untrusted workloads on it.
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
|
||||
# runtime_type is the runtime type to use in containerd e.g. io.containerd.runtime.v1.linux
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
```
|
||||
|
||||
> **Note:** The `untrusted_workload_runtime` is deprecated since containerd v1.7.0, and it is recommended to use `RuntimeClass` instead.
|
||||
|
||||
You can find more information on the [Containerd config documentation](https://github.com/containerd/containerd/blob/main/docs/cri/config.md)
|
||||
|
||||
#### Kata Containers as the default runtime
|
||||
@@ -192,8 +290,8 @@ You can find more information on the [Containerd config documentation](https://g
|
||||
If you want to set Kata Containers as the only runtime in the deployment, you can simply configure as follows:
|
||||
|
||||
```toml
|
||||
[plugins.cri.containerd]
|
||||
[plugins.cri.containerd.default_runtime]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
```
|
||||
|
||||
@@ -246,11 +344,14 @@ debug: true
|
||||
|
||||
### Launch containers with `ctr` command line
|
||||
|
||||
> **Note:** With containerd command tool `ctr`, the `ConfigPath` is not supported, and the configuration file should be explicitly specified with the option `--runtime-config-path`, otherwise, it'll use the default configurations.
|
||||
|
||||
To run a container with Kata Containers through the containerd command line, you can run the following:
|
||||
|
||||
```bash
|
||||
$ sudo ctr image pull docker.io/library/busybox:latest
|
||||
$ sudo ctr run --cni --runtime io.containerd.run.kata.v2 -t --rm docker.io/library/busybox:latest hello sh
|
||||
$ CONFIG_PATH="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
$ sudo ctr run --cni --runtime io.containerd.kata.v2 --runtime-config-path $CONFIG_PATH -t --rm docker.io/library/busybox:latest hello sh
|
||||
```
|
||||
|
||||
This launches a BusyBox container named `hello`, and it will be removed by `--rm` after it quits.
|
||||
@@ -260,7 +361,9 @@ loopback interface is created.
|
||||
### Launch containers using `ctr` command line with rootfs bundle
|
||||
|
||||
#### Get rootfs
|
||||
|
||||
Use the following script to create a rootfs:
|
||||
|
||||
```bash
|
||||
ctr i pull quay.io/prometheus/busybox:latest
|
||||
ctr i export rootfs.tar quay.io/prometheus/busybox:latest
|
||||
@@ -278,7 +381,9 @@ for ((i=0;i<$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers | length");i++
|
||||
tar -C ${rootfs_dir} -xf ${layers_dir}/$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers[${i}]")
|
||||
done
|
||||
```
|
||||
|
||||
#### Get `config.json`
|
||||
|
||||
Use `runc spec` to generate `config.json`:
|
||||
```bash
|
||||
cd ./bundle/rootfs
|
||||
@@ -295,10 +400,13 @@ Change the root `path` in `config.json` to the absolute path of rootfs
|
||||
```
|
||||
|
||||
#### Run container
|
||||
|
||||
```bash
|
||||
sudo ctr run -d --runtime io.containerd.run.kata.v2 --config bundle/config.json hello
|
||||
CONFIG_PATH="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
sudo ctr run -d --runtime io.containerd.kata.v2 --runtime-config-path $CONFIG_PATH --config bundle/config.json hello
|
||||
sudo ctr t exec --exec-id ${ID} -t hello sh
|
||||
```
|
||||
|
||||
### Launch Pods with `crictl` command line
|
||||
|
||||
With the `crictl` command line of `cri-tools`, you can specify runtime class with `-r` or `--runtime` flag.
|
||||
|
||||
@@ -96,6 +96,10 @@ path = "/path/to/qemu/build/qemu-system-x86_64"
|
||||
```toml
|
||||
shared_fs = "virtio-9p"
|
||||
```
|
||||
- Use `blockfile` snapshotter: Since virtio-fs remains unsupported due to bugs in QEMU snp-v3, and virtio-9p is no longer supported in runtime-rs, it is recommended to use the blockfile snapshotter. This allows container images to be managed via block devices without relying on a shared file system. To enable this, set the `snapshotter` to `blockfile` in the containerd config file, please refer to [blockfile guide](https://github.com/containerd/containerd/blob/main/docs/snapshotters/blockfile.md) for more information. Additionally, `shared_fs` should be set to `"none"` since no shared file system is used.
|
||||
```toml
|
||||
shared_fs = "none"
|
||||
```
|
||||
- Disable `virtiofsd` since it is no longer required (comment out)
|
||||
```toml
|
||||
# virtio_fs_daemon = "/usr/libexec/virtiofsd"
|
||||
|
||||
@@ -12,11 +12,11 @@ Currently, there is no widely applicable and convenient method available for use
|
||||
|
||||
According to the proposal, you need to use the `kata-ctl direct-volume` command to add a directly assigned block volume device to the Kata Containers runtime.
|
||||
|
||||
And then with the help of method [get_volume_mount_info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L95), get information from JSON file: `(mountinfo.json)` and parse them into structure [Direct Volume Info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L70) which is used to save device-related information.
|
||||
Then, with the help of the method [get_volume_mount_info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L95), the information is read from the JSON file (`mountInfo.json`) and parsed into the structure [Direct Volume Info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L70), which is used to save device-related information.
|
||||
|
||||
We only fill the `mountinfo.json`, such as `device` ,`volume_type`, `fs_type`, `metadata` and `options`, which correspond to the fields in [Direct Volume Info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L70), to describe a device.
|
||||
We only fill in the fields of `mountInfo.json`, such as `device`, `volume-type`, `fstype`, `metadata` and `options`, which correspond to the fields in [Direct Volume Info](https://github.com/kata-containers/kata-containers/blob/099b4b0d0e3db31b9054e7240715f0d7f51f9a1c/src/libs/kata-types/src/mount.rs#L70), to describe a device.
|
||||
|
||||
The JSON file `mountinfo.json` placed in a sub-path `/kubelet/kata-test-vol-001/volume001` which under fixed path `/run/kata-containers/shared/direct-volumes/`.
|
||||
The JSON file `mountInfo.json` is placed in a sub-path `/kubelet/kata-test-vol-001/volume001` under the fixed path `/run/kata-containers/shared/direct-volumes/`.
|
||||
And the full path looks like `/run/kata-containers/shared/direct-volumes/kubelet/kata-test-vol-001/volume001`. But for security reasons, it is
|
||||
encoded as `/run/kata-containers/shared/direct-volumes/L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx`.
|
||||
|
||||
@@ -47,18 +47,18 @@ $ sudo mkfs.ext4 /tmp/stor/rawdisk01.20g
|
||||
```json
|
||||
{
|
||||
"device": "/tmp/stor/rawdisk01.20g",
|
||||
"volume_type": "directvol",
|
||||
"fs_type": "ext4",
|
||||
"volume-type": "directvol",
|
||||
"fstype": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-direct-vol-002/directvol002 "{\"device\": \"/tmp/stor/rawdisk01.20g\", \"volume_type\": \"directvol\", \"fs_type\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-direct-vol-002/directvol002 "{\"device\": \"/tmp/stor/rawdisk01.20g\", \"volume-type\": \"directvol\", \"fstype\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$# /kubelet/kata-direct-vol-002/directvol002 <==> /run/kata-containers/shared/direct-volumes/W1lMa2F0ZXQva2F0YS10a2F0DAxvbC0wMDEvdm9sdW1lMDAx
|
||||
$ cat W1lMa2F0ZXQva2F0YS10a2F0DAxvbC0wMDEvdm9sdW1lMDAx/mountInfo.json
|
||||
{"volume_type":"directvol","device":"/tmp/stor/rawdisk01.20g","fs_type":"ext4","metadata":{},"options":[]}
|
||||
{"volume-type":"directvol","device":"/tmp/stor/rawdisk01.20g","fstype":"ext4","metadata":{},"options":[]}
|
||||
```
|
||||
|
||||
#### Run a Kata container with direct block device volume
|
||||
@@ -76,7 +76,7 @@ $ sudo ctr run -t --rm --runtime io.containerd.kata.v2 --mount type=directvol,sr
|
||||
> **Tip:** It only supports `vfio-pci` based PCI device passthrough mode.
|
||||
|
||||
In this scenario, the device's host kernel driver will be replaced by `vfio-pci`, and IOMMU group ID generated.
|
||||
And either device's BDF or its VFIO IOMMU group ID in `/dev/vfio/` is fine for "device" in `mountinfo.json`.
|
||||
And either device's BDF or its VFIO IOMMU group ID in `/dev/vfio/` is fine for "device" in `mountInfo.json`.
|
||||
|
||||
```bash
|
||||
$ lspci -nn -k -s 45:00.1
|
||||
@@ -92,15 +92,15 @@ $ ls /sys/kernel/iommu_groups/110/devices/
|
||||
|
||||
#### setup VFIO device for kata-containers
|
||||
|
||||
First, configure the `mountinfo.json`, as below:
|
||||
First, configure the `mountInfo.json`, as below:
|
||||
|
||||
- (1) device with `BB:DD:F`
|
||||
|
||||
```json
|
||||
{
|
||||
"device": "45:00.1",
|
||||
"volume_type": "vfiovol",
|
||||
"fs_type": "ext4",
|
||||
"volume-type": "vfiovol",
|
||||
"fstype": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
@@ -111,8 +111,8 @@ First, configure the `mountinfo.json`, as below:
|
||||
```json
|
||||
{
|
||||
"device": "0000:45:00.1",
|
||||
"volume_type": "vfiovol",
|
||||
"fs_type": "ext4",
|
||||
"volume-type": "vfiovol",
|
||||
"fstype": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
@@ -123,8 +123,8 @@ First, configure the `mountinfo.json`, as below:
|
||||
```json
|
||||
{
|
||||
"device": "/dev/vfio/110",
|
||||
"volume_type": "vfiovol",
|
||||
"fs_type": "ext4",
|
||||
"volume-type": "vfiovol",
|
||||
"fstype": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
@@ -133,10 +133,10 @@ First, configure the `mountinfo.json`, as below:
|
||||
Second, run kata-containers with the device (`/dev/vfio/110`) as an example:
|
||||
|
||||
```bash
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-vfio-vol-003/vfiovol003 "{\"device\": \"/dev/vfio/110\", \"volume_type\": \"vfiovol\", \"fs_type\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-vfio-vol-003/vfiovol003 "{\"device\": \"/dev/vfio/110\", \"volume-type\": \"vfiovol\", \"fstype\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ # /kubelet/kata-vfio-vol-003/directvol003 <==> /run/kata-containers/shared/direct-volumes/F0va22F0ZvaS12F0YS10a2F0DAxvbC0F0ZXvdm9sdF0Z0YSx
|
||||
$ cat F0va22F0ZvaS12F0YS10a2F0DAxvbC0F0ZXvdm9sdF0Z0YSx/mountInfo.json
|
||||
{"volume_type":"vfiovol","device":"/dev/vfio/110","fs_type":"ext4","metadata":{},"options":[]}
|
||||
{"volume-type":"vfiovol","device":"/dev/vfio/110","fstype":"ext4","metadata":{},"options":[]}
|
||||
```
|
||||
|
||||
#### Run a Kata container with VFIO block device based volume
|
||||
@@ -190,25 +190,25 @@ be passed to Hypervisor, such as Dragonball, Cloud-Hypervisor, Firecracker or QE
|
||||
|
||||
First, `mkdir` a sub-path `kubelet/kata-test-vol-001/` under `/run/kata-containers/shared/direct-volumes/`.
|
||||
|
||||
Second, fill fields in `mountinfo.json`, it looks like as below:
|
||||
Second, fill in the fields of `mountInfo.json`, as shown below:
|
||||
```json
|
||||
{
|
||||
"device": "/tmp/vhu-targets/vhost-blk-rawdisk01.sock",
|
||||
"volume_type": "spdkvol",
|
||||
"fs_type": "ext4",
|
||||
"volume-type": "spdkvol",
|
||||
"fstype": "ext4",
|
||||
"metadata":"{}",
|
||||
"options": []
|
||||
}
|
||||
```
|
||||
|
||||
Third, with the help of `kata-ctl direct-volume` to add block device to generate `mountinfo.json`, and run a kata container with `--mount`.
|
||||
Third, use `kata-ctl direct-volume` to add the block device and generate `mountInfo.json`, then run a Kata container with `--mount`.
|
||||
|
||||
```bash
|
||||
$ # kata-ctl direct-volume add
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-test-vol-001/volume001 "{\"device\": \"/tmp/vhu-targets/vhost-blk-rawdisk01.sock\", \"volume_type\":\"spdkvol\", \"fs_type\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ sudo kata-ctl direct-volume add /kubelet/kata-test-vol-001/volume001 "{\"device\": \"/tmp/vhu-targets/vhost-blk-rawdisk01.sock\", \"volume-type\":\"spdkvol\", \"fstype\": \"ext4\", \"metadata\":"{}", \"options\": []}"
|
||||
$ # /kubelet/kata-test-vol-001/volume001 <==> /run/kata-containers/shared/direct-volumes/L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx
|
||||
$ cat L2t1YmVsZXQva2F0YS10ZXN0LXZvbC0wMDEvdm9sdW1lMDAx/mountInfo.json
|
||||
$ {"volume_type":"spdkvol","device":"/tmp/vhu-targets/vhost-blk-rawdisk01.sock","fs_type":"ext4","metadata":{},"options":[]}
|
||||
$ {"volume-type":"spdkvol","device":"/tmp/vhu-targets/vhost-blk-rawdisk01.sock","fstype":"ext4","metadata":{},"options":[]}
|
||||
```
|
||||
|
||||
As `/run/kata-containers/shared/direct-volumes/` is a fixed path, we will be able to run a Kata pod with `--mount` and set
|
||||
|
||||
@@ -17,7 +17,7 @@ You must have a running Kubernetes cluster first. If not, [install a Kubernetes
|
||||
Also you should ensure that `kubectl` working correctly.
|
||||
|
||||
> **Note**: More information about Kubernetes integrations:
|
||||
> - [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
|
||||
> - [Run Kata Containers with Kubernetes](how-to-use-k8s-with-crio-and-kata.md)
|
||||
> - [How to use Kata Containers and Containerd](containerd-kata.md)
|
||||
> - [How to use Kata Containers and containerd with Kubernetes](how-to-use-k8s-with-containerd-and-kata.md)
|
||||
|
||||
|
||||
@@ -46,6 +46,8 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_noflush` | `boolean` | Denotes whether flush requests for the device are ignored |
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_set` | `boolean` | cache-related options will be set to block devices or not |
|
||||
| `io.katacontainers.config.hypervisor.block_device_driver` | string | the driver to be used for block device, valid values are `virtio-blk`, `virtio-scsi`, `nvdimm`|
|
||||
| `io.katacontainers.config.hypervisor.blk_logical_sector_size` | uint32 | logical sector size in bytes reported by block devices to the guest (0 = hypervisor default, must be a power of 2 between 512 and 65536) |
|
||||
| `io.katacontainers.config.hypervisor.blk_physical_sector_size` | uint32 | physical sector size in bytes reported by block devices to the guest (0 = hypervisor default, must be a power of 2 between 512 and 65536) |
|
||||
| `io.katacontainers.config.hypervisor.cpu_features` | `string` | Comma-separated list of CPU features to pass to the CPU (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` |
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Run Kata Containers with Kubernetes
|
||||
# How to use Kata Containers and CRI-O with Kubernetes
|
||||
|
||||
## Prerequisites
|
||||
|
||||
This guide requires Kata Containers available on your system, install-able by following [this guide](../install/README.md).
|
||||
|
||||
## Install a CRI implementation
|
||||
@@ -9,22 +10,16 @@ Kubernetes CRI (Container Runtime Interface) implementations allow using any
|
||||
OCI-compatible runtime with Kubernetes, such as the Kata Containers runtime.
|
||||
|
||||
Kata Containers support both the [CRI-O](https://github.com/kubernetes-incubator/cri-o) and
|
||||
[containerd](https://github.com/containerd/containerd) CRI implementations.
|
||||
|
||||
After choosing one CRI implementation, you must make the appropriate configuration
|
||||
to ensure it integrates with Kata Containers.
|
||||
|
||||
Kata Containers 1.5 introduced the `shimv2` for containerd 1.2.0, reducing the components
|
||||
required to spawn pods and containers, and this is the preferred way to run Kata Containers with Kubernetes ([as documented here](../how-to/how-to-use-k8s-with-containerd-and-kata.md#configure-containerd-to-use-kata-containers)).
|
||||
|
||||
An equivalent shim implementation for CRI-O is planned.
|
||||
[containerd](https://github.com/containerd/containerd) CRI implementations. We choose `CRI-O` for our examples in this guide.
|
||||
|
||||
### CRI-O
|
||||
|
||||
For CRI-O installation instructions, refer to the [CRI-O Tutorial](https://github.com/cri-o/cri-o/blob/main/tutorial.md) page.
|
||||
|
||||
The following sections show how to set up the CRI-O snippet configuration file (default path: `/etc/crio/crio.conf`) for Kata.
|
||||
|
||||
Unless otherwise stated, all the following settings are specific to the `crio.runtime` table:
|
||||
|
||||
```toml
|
||||
# The "crio.runtime" table contains settings pertaining to the OCI
|
||||
# runtime used and options for how to set up and manage the OCI runtime.
|
||||
@@ -33,16 +28,17 @@ Unless otherwise stated, all the following settings are specific to the `crio.ru
|
||||
A comprehensive documentation of the configuration file can be found [here](https://github.com/cri-o/cri-o/blob/main/docs/crio.conf.5.md).
|
||||
|
||||
> **Note**: After any change to this file, the CRI-O daemon has to be restarted with:
|
||||
|
||||
>````
|
||||
>$ sudo systemctl restart crio
|
||||
>````
|
||||
|
||||
#### Kubernetes Runtime Class (CRI-O v1.12+)
|
||||
|
||||
The [Kubernetes Runtime Class](https://kubernetes.io/docs/concepts/containers/runtime-class/)
|
||||
is the preferred way of specifying the container runtime configuration to run a Pod's containers.
|
||||
To use this feature, Kata must be added as a runtime handler. This can be done by
|
||||
dropping a `50-kata` snippet file into `/etc/crio/crio.conf.d`, with the
|
||||
content shown below:
|
||||
To use this feature, Kata must be added as a runtime handler. This can be done by dropping a `50-kata`
|
||||
snippet file into `/etc/crio/crio.conf.d`, with the content shown below:
|
||||
|
||||
```toml
|
||||
[crio.runtime.runtimes.kata]
|
||||
@@ -52,13 +48,6 @@ content shown below:
|
||||
privileged_without_host_devices = true
|
||||
```
|
||||
|
||||
|
||||
### containerd
|
||||
|
||||
To customize containerd to select Kata Containers runtime, follow our
|
||||
"Configure containerd to use Kata Containers" internal documentation
|
||||
[here](../how-to/how-to-use-k8s-with-containerd-and-kata.md#configure-containerd-to-use-kata-containers).
|
||||
|
||||
## Install Kubernetes
|
||||
|
||||
Depending on what your needs are and what you expect to do with Kubernetes,
|
||||
@@ -72,25 +61,16 @@ implementation you chose, and the Kubelet service has to be updated accordingly.
|
||||
### Configure for CRI-O
|
||||
|
||||
`/etc/systemd/system/kubelet.service.d/0-crio.conf`
|
||||
|
||||
```
|
||||
[Service]
|
||||
Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///var/run/crio/crio.sock"
|
||||
```
|
||||
|
||||
### Configure for containerd
|
||||
|
||||
`/etc/systemd/system/kubelet.service.d/0-cri-containerd.conf`
|
||||
```
|
||||
[Service]
|
||||
Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
|
||||
```
|
||||
For more information about containerd see the "Configure Kubelet to use containerd"
|
||||
documentation [here](../how-to/how-to-use-k8s-with-containerd-and-kata.md#configure-kubelet-to-use-containerd).
|
||||
|
||||
## Run a Kubernetes pod with Kata Containers
|
||||
|
||||
After you update your Kubelet service based on the CRI implementation you
|
||||
are using, reload and restart Kubelet. Then, start your cluster:
|
||||
After you update your Kubelet service based on the CRI implementation you are using, reload and restart Kubelet. Then, start your cluster:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart kubelet
|
||||
@@ -98,12 +78,6 @@ $ sudo systemctl restart kubelet
|
||||
# If using CRI-O
|
||||
$ sudo kubeadm init --ignore-preflight-errors=all --cri-socket /var/run/crio/crio.sock --pod-network-cidr=10.244.0.0/16
|
||||
|
||||
# If using containerd
|
||||
$ cat <<EOF | tee kubeadm-config.yaml
|
||||
apiVersion: kubeadm.k8s.io/v1beta3
|
||||
kind: InitConfiguration
|
||||
nodeRegistration:
|
||||
criSocket: "/run/containerd/containerd.sock"
|
||||
---
|
||||
kind: KubeletConfiguration
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
@@ -118,6 +92,7 @@ $ export KUBECONFIG=/etc/kubernetes/admin.conf
|
||||
### Allow pods to run in the control-plane node
|
||||
|
||||
By default, the cluster will not schedule pods in the control-plane node. To enable control-plane node scheduling:
|
||||
|
||||
```bash
|
||||
$ sudo -E kubectl taint nodes --all node-role.kubernetes.io/control-plane-
|
||||
```
|
||||
@@ -161,6 +136,7 @@ If a pod has the `runtimeClassName` set to `kata`, the CRI plugin runs the pod w
|
||||
```
|
||||
|
||||
- Create the pod
|
||||
|
||||
```bash
|
||||
$ sudo -E kubectl apply -f nginx-kata.yaml
|
||||
```
|
||||
@@ -172,6 +148,7 @@ If a pod has the `runtimeClassName` set to `kata`, the CRI plugin runs the pod w
|
||||
```
|
||||
|
||||
- Check hypervisor is running
|
||||
|
||||
```bash
|
||||
$ ps aux | grep qemu
|
||||
```
|
||||
159
docs/how-to/how-to-use-passthroughfd-io-within-runtime-rs.md
Normal file
@@ -0,0 +1,159 @@
|
||||
# How to Use Passthrough-FD IO within Runtime-rs and Dragonball
|
||||
|
||||
This document describes the Passthrough-FD (pass-fd) technology implemented in Kata Containers to optimize IO performance. By bypassing the intermediate proxy layers, this technology significantly reduces latency and CPU overhead for container IO streams.
|
||||
|
||||
## Important Limitation
|
||||
|
||||
Before diving into the technical details, please note the following restriction:
|
||||
|
||||
- Exclusive Support for Dragonball VMM: This feature is currently implemented only for Kata Containers' built-in VMM, Dragonball.
|
||||
- Unsupported VMMs: Other VMMs such as QEMU, Cloud Hypervisor, and Firecracker do not support this feature at this time.
|
||||
|
||||
## Overview
|
||||
|
||||
The original IO implementation in Kata Containers suffered from an excessively long data path, leading to poor efficiency. For instance, copying a 10GB file could take as long as 10 minutes.
|
||||
|
||||
To address this, Kata AC member @lifupan and @frezcirno introduced a series of optimizations using passthrough-fd technology. This approach allows the VMM to directly handle file descriptors (FDs), dramatically improving IO throughput.
|
||||
|
||||
## Traditional IO Path
|
||||
|
||||
Before the introduction of Passthrough-FD, Kata's IO streams were implemented using `ttrpc + virtio-vsock`.
|
||||
|
||||
The data flow was as follows:
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph Host ["Host"]
|
||||
direction LR
|
||||
Containerd["Containerd"]
|
||||
|
||||
subgraph KS ["kata-shim"]
|
||||
buffer(("buffer"))
|
||||
end
|
||||
|
||||
Vsock["vsock"]
|
||||
|
||||
subgraph VM ["vm"]
|
||||
Agent["kata-agent"]
|
||||
Container["container"]
|
||||
end
|
||||
end
|
||||
|
||||
Containerd -->|stdin| buffer
|
||||
buffer --> Vsock
|
||||
Vsock --> Agent
|
||||
Agent -.-> Container
|
||||
|
||||
%% Style Rendering
|
||||
style Host fill:#f0f8ff,stroke:#333,stroke-dasharray: 5 5
|
||||
style VM fill:#fff9c4,stroke:#e0e0e0
|
||||
style buffer fill:#c8e6c9,stroke:#ff9800,stroke-dasharray: 5 5
|
||||
style Vsock fill:#bbdefb,stroke:#2196f3
|
||||
style Containerd fill:#f5f5f5,stroke:#333
|
||||
style Agent fill:#fff,stroke:#333
|
||||
style Container fill:#fff,stroke:#333
|
||||
|
||||
```
|
||||
|
||||
The kata-shim (containerd-shim-kata-v2) on the Host opens the FIFO pipes provided by containerd via the shimv2 interface.
|
||||
This results in three FDs (stdin, stdout, and stderr).
|
||||
The kata-shim manages three separate threads to handle these streams.
|
||||
The Bottleneck: kata-shim acts as a "middleman," maintaining three internal buffers. It must read data from the FDs into its own buffers before forwarding them via ttrpc over vsock to the destination.
|
||||
This multi-threaded proxying and buffering in the shim layer introduced significant overhead.
|
||||
|
||||
|
||||
## What is Passthrough-FD?
|
||||
|
||||
Passthrough-FD technology enhances the Dragonball VMM's hybrid-vsock implementation with support for recv-fd.
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph Host ["Host"]
|
||||
direction LR
|
||||
Containerd["Containerd"]
|
||||
|
||||
Vsock["vsock"]
|
||||
|
||||
subgraph VM ["vm"]
|
||||
Agent["kata-agent"]
|
||||
Container["container"]
|
||||
end
|
||||
end
|
||||
|
||||
Containerd -->|stdin| Vsock
|
||||
Vsock --> Agent
|
||||
Agent -.-> Container
|
||||
|
||||
%% Style Rendering
|
||||
style Host fill:#f0f8ff,stroke:#333,stroke-dasharray: 5 5
|
||||
style VM fill:#fff9c4,stroke:#e0e0e0
|
||||
style Vsock fill:#bbdefb,stroke:#2196f3
|
||||
style Containerd fill:#f5f5f5,stroke:#333
|
||||
style Agent fill:#fff,stroke:#333
|
||||
style Container fill:#fff,stroke:#333
|
||||
```
|
||||
|
||||
Instead of requiring an intermediate layer to read and forward data, the hybrid-vsock module can now directly receive file descriptors from the Host. This allows the system to "pass through" the host's FDs directly to the kata-agent. By eliminating the proxying logic in kata-shim, the IO stream is effectively connected directly to the guest environment.
|
||||
|
||||
## Technical Details
|
||||
|
||||
The end-to-end process follows these steps:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
|
||||
box rgb(220,235,255) Guest (VM)
|
||||
participant Agent as kata-agent<br/>(Server)
|
||||
participant VSOCK as AF_VSOCK socket<br/>(Hybrid Vsock)
|
||||
end
|
||||
|
||||
box rgb(255,240,220) Host
|
||||
participant Shim as kata-shim<br/>(Client)
|
||||
participant FIFO as File or FIFO<br/>(stdin/stdout/stderr)
|
||||
end
|
||||
|
||||
Note over Agent: Agent Initialization:<br/>listen() on passfd_listener_port
|
||||
|
||||
Shim->>FIFO: open() to acquire Fd<br/>(for stdin / stdout / stderr)
|
||||
|
||||
Shim->>VSOCK: connect() + send("passfd\n")<br/>+ send_with_fd(Fd, PortA)
|
||||
|
||||
Note over VSOCK,Agent: FD Transfer via Hybrid Vsock<br/>(repeat for stdin-port, stdout-port, stderr-port)
|
||||
|
||||
VSOCK->>Agent: forward connection + Fd + PortA
|
||||
|
||||
Agent->>Agent: accept() → get conn_fd + host-port<br/>save: map[host-port] = conn_fd<br/>(3 entries: stdin-port, stdout-port, stderr-port)
|
||||
|
||||
Shim->>Agent: create_container RPC<br/>(includes stdin-port, stdout-port, stderr-port)
|
||||
|
||||
Agent->>Agent: lookup map[stdin-port] → bind to container stdin<br/>lookup map[stdout-port] → bind to container stdout<br/>lookup map[stderr-port] → bind to container stderr
|
||||
|
||||
Agent-->>Shim: create_container RPC response (OK)
|
||||
```
|
||||
|
||||
1. Agent Initialization: The kata-agent starts a server listening on the port specified by passfd_listener_port.
|
||||
2. FD Transfer: During the container creation phase, the kata-shim sends the FDs for stdin, stdout, and stderr to the Dragonball hybrid-vsock module using the sendfd mechanism.
|
||||
3. Connection Establishment: Through hybrid-vsock, these FDs connect to the server started by the agent in Step 1.
|
||||
4. Identification: The agent's server calls accept() to obtain the connection FD and a corresponding host-port. It saves the connection using the host-port as a unique identifier. At this stage, the agent has three established connections (identified by stdin-port, stdout-port, and stderr-port).
|
||||
5. RPC Mapping: When kata-shim invokes the create_container RPC, it includes these three port identifiers in the request.
|
||||
6. Final Binding: Upon receiving the RPC, the agent retrieves the saved connections using the provided ports and binds them directly to the container's standard IO streams.
|
||||
|
||||
|
||||
## How to enable PassthroughFD IO within Configuration?
|
||||
|
||||
The Passthrough-FD feature is controlled by two main parameters in the Kata configuration file:
|
||||
|
||||
- `use_passfd_io`: A boolean flag to enable or disable the Passthrough-FD IO feature.
|
||||
- `passfd_listener_port`: Specifies the port on which the kata-agent listens for FD connections. The default value is 1027.
|
||||
To enable Passthrough-FD IO, set `use_passfd_io` to `true` in the configuration file:
|
||||
|
||||
```toml
|
||||
...
|
||||
# If enabled, the runtime will attempt to use fd passthrough feature for process io.
|
||||
# Note: this feature is only supported by the Dragonball hypervisor.
|
||||
use_passfd_io = true
|
||||
|
||||
# If fd passthrough io is enabled, the runtime will attempt to use the specified port instead of the default port.
|
||||
passfd_listener_port = 1027
|
||||
```
|
||||
@@ -73,5 +73,5 @@ See below example config:
|
||||
privileged_without_host_devices = true
|
||||
```
|
||||
|
||||
- [Kata Containers with CRI-O](../how-to/run-kata-with-k8s.md#cri-o)
|
||||
- [Kata Containers with CRI-O](../how-to/how-to-use-k8s-with-crio-and-kata.md#cri-o)
|
||||
|
||||
|
||||
@@ -16,83 +16,38 @@ which hypervisors you may wish to investigate further.
|
||||
|
||||
## Types
|
||||
|
||||
| Hypervisor | Written in | Architectures | Type |
|
||||
|-|-|-|-|
|
||||
|[Cloud Hypervisor] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) |
|
||||
|[Firecracker] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) |
|
||||
|[QEMU] | C | all | Type 2 ([KVM]) | `configuration-qemu.toml` |
|
||||
|[`Dragonball`] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) |
|
||||
|[StratoVirt] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) |
|
||||
| Hypervisor | Written in | Architectures | GPU Support | Intel TDX | AMD SEV-SNP |
|
||||
|-|-|-|-|-|-|
|
||||
|[Cloud Hypervisor](#cloud-hypervisor) | rust | `aarch64`, `x86_64` | :x: | :x: | :x: |
|
||||
|[Firecracker](#firecracker) | rust | `aarch64`, `x86_64` | :x: | :x: | :x: |
|
||||
|[QEMU](#qemu) | C | all | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
||||
|[Dragonball](#dragonball) | rust | `aarch64`, `x86_64` | :x: | :x: | :x: |
|
||||
|StratoVirt | rust | `aarch64`, `x86_64` | :x: | :x: | :x: |
|
||||
|
||||
## Determine currently configured hypervisor
|
||||
Each Kata runtime is configured for a specific hypervisor through the runtime's configuration file. For example:
|
||||
|
||||
```bash
|
||||
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/' | grep Path
|
||||
```toml title="/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
[hypervisor.qemu]
|
||||
path = "/opt/kata/bin/qemu-system-x86_64"
|
||||
```
|
||||
|
||||
## Choose a Hypervisor
|
||||
```toml title="/opt/kata/share/defaults/kata-containers/configuration-clh.toml"
|
||||
[hypervisor.clh]
|
||||
path = "/opt/kata/bin/cloud-hypervisor"
|
||||
```
|
||||
|
||||
The table below provides a brief summary of some of the differences between
|
||||
the hypervisors:
|
||||
## Cloud Hypervisor
|
||||
|
||||
| Hypervisor | Summary | Features | Limitations | Container Creation speed | Memory density | Use cases | Comment |
|
||||
|-|-|-|-|-|-|-|-|
|
||||
|[Cloud Hypervisor] | Low latency, small memory footprint, small attack surface | Minimal | | excellent | excellent | High performance modern cloud workloads | |
|
||||
|[Firecracker] | Very slimline | Extremely minimal | Doesn't support all device types | excellent | excellent | Serverless / FaaS | |
|
||||
|[QEMU] | Lots of features | Lots | | good | good | Good option for most users | |
|
||||
|[`Dragonball`] | Built-in VMM, low CPU and memory overhead| Minimal | | excellent | excellent | Optimized for most container workloads | `out-of-the-box` Kata Containers experience |
|
||||
|[StratoVirt] | Unified architecture supporting three scenarios: VM, container, and serverless | Extremely minimal(`MicroVM`) to Lots(`StandardVM`) | | excellent | excellent | Common container workloads | `StandardVM` type of StratoVirt for Kata is under development |
|
||||
[Cloud Hypervisor](https://www.cloudhypervisor.org/) is a more modern hypervisor written in Rust.
|
||||
|
||||
For further details, see the [Virtualization in Kata Containers](design/virtualization.md) document and the official documentation for each hypervisor.
|
||||
## Firecracker
|
||||
|
||||
## Hypervisor configuration files
|
||||
[Firecracker](https://firecracker-microvm.github.io/) is a minimal and lightweight hypervisor created for the AWS Lambda product.
|
||||
|
||||
Since each hypervisor offers different features and options, Kata Containers
|
||||
provides a separate
|
||||
[configuration file](../src/runtime/README.md#configuration)
|
||||
for each. The configuration files contain comments explaining which options
|
||||
are available, their default values and how each setting can be used.
|
||||
## QEMU
|
||||
|
||||
| Hypervisor | Golang runtime config file | golang runtime short name | golang runtime default | rust runtime config file | rust runtime short name | rust runtime default |
|
||||
|-|-|-|-|-|-|-|
|
||||
| [Cloud Hypervisor] | [`configuration-clh.toml`](../src/runtime/config/configuration-clh.toml.in) | `clh` | | [`configuration-cloud-hypervisor.toml`](../src/runtime-rs/config/configuration-cloud-hypervisor.toml.in) | `cloud-hypervisor` | |
|
||||
| [Firecracker] | [`configuration-fc.toml`](../src/runtime/config/configuration-fc.toml.in) | `fc` | | | | |
|
||||
| [QEMU] | [`configuration-qemu.toml`](../src/runtime/config/configuration-qemu.toml.in) | `qemu` | yes | [`configuration-qemu.toml`](../src/runtime-rs/config/configuration-qemu-runtime-rs.toml.in) | `qemu` | |
|
||||
| [`Dragonball`] | | | | [`configuration-dragonball.toml`](../src/runtime-rs/config/configuration-dragonball.toml.in) | `dragonball` | yes |
|
||||
| [StratoVirt] | [`configuration-stratovirt.toml`](../src/runtime/config/configuration-stratovirt.toml.in) | `stratovirt` | | | | |
|
||||
QEMU is the best supported hypervisor for NVIDIA-based GPUs and for confidential computing use-cases (such as Intel TDX and AMD SEV-SNP). Runtimes that use this are normally named `kata-qemu-nvidia-gpu-*`. The Kata project focuses primarily on QEMU runtimes for GPU support.
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - The short names specified are used by the [`kata-manager`](../utils/README.md) tool.
|
||||
> - As shown by the default columns, each runtime type has its own default hypervisor.
|
||||
> - The [golang runtime](../src/runtime) is the current default runtime.
|
||||
> - The [rust runtime](../src/runtime-rs), also known as `runtime-rs`,
|
||||
> is the newer runtime written in the rust language.
|
||||
> - See the [Configuration](../README.md#configuration) for further details.
|
||||
> - The configuration file links in the table link to the "source"
|
||||
> versions: these are not usable configuration files as they contain
|
||||
> variables that need to be expanded:
|
||||
> - The links are provided for reference only.
|
||||
> - The final (installed) versions, where all variables have been
|
||||
> expanded, are built from these source configuration files.
|
||||
> - The pristine configuration files are usually installed in the
|
||||
> `/opt/kata/share/defaults/kata-containers/` or
|
||||
> `/usr/share/defaults/kata-containers/` directories.
|
||||
> - Some hypervisors may have the same name for both golang and rust
|
||||
> runtimes, but the file contents may differ.
|
||||
> - If there is no configuration file listed for the golang or
|
||||
> rust runtimes, this either means the hypervisor cannot be run with
|
||||
> a particular runtime, or that a driver has not yet been made
|
||||
> available for that runtime.
|
||||
## Dragonball
|
||||
|
||||
## Switch configured hypervisor
|
||||
|
||||
To switch the configured hypervisor, you only need to run a single command.
|
||||
See [the `kata-manager` documentation](../utils/README.md#choose-a-hypervisor) for further details.
|
||||
|
||||
[Cloud Hypervisor]: https://github.com/cloud-hypervisor/cloud-hypervisor
|
||||
[Firecracker]: https://github.com/firecracker-microvm/firecracker
|
||||
[KVM]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
|
||||
[QEMU]: http://www.qemu.org
|
||||
[`Dragonball`]: https://github.com/kata-containers/kata-containers/blob/main/src/dragonball
|
||||
[StratoVirt]: https://gitee.com/openeuler/stratovirt
|
||||
Dragonball is a special hypervisor created by the Ant Group that runs in the same process as the Rust-based containerd shim.
|
||||
|
||||
94
docs/index.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# Kata Containers
|
||||
|
||||
Kata Containers is an open source community working to build a secure container runtime with lightweight virtual machines (VMs) that feel and perform like standard Linux containers, but provide stronger workload isolation using hardware virtualization technology as a second layer of defense.
|
||||
|
||||
## How it Works
|
||||
|
||||
Kata implements the [Open Containers Runtime Specification](https://github.com/opencontainers/runtime-spec). More specifically, it implements a containerd shim that implements the expected interface for managing container lifecycles. The default containerd runtime of `runc` spawns a container like this:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Host
|
||||
containerd
|
||||
runc
|
||||
process[Container Process]
|
||||
containerd --> runc --> process
|
||||
end
|
||||
```
|
||||
|
||||
When containerd receives a request to spawn a container, it will pull the container image down and then call out to the runc shim (usually located at `/usr/local/bin/containerd-shim-runc-v2`). runc will then create various process isolation resources like Linux namespaces (networking, PIDs, mounts etc), seccomp filters, Linux capability reductions, and then spawn the process inside of those resources. This process runs in the host kernel.
|
||||
|
||||
Kata spawns containers like this:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Host
|
||||
containerdOuter[containerd]
|
||||
kata
|
||||
|
||||
containerdOuter --> kata
|
||||
kata --> kataAgent
|
||||
|
||||
subgraph VM
|
||||
kataAgent[Kata Agent]
|
||||
process[Container Process]
|
||||
kataAgent --> process
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
The container process spawned inside of the VM allows us to isolate the guest kernel from the host system. This is the fundamental principle of how Kata achieves its isolation boundaries.
|
||||
|
||||
## Example
|
||||
|
||||
When Kata is installed in a system, a number of artifacts are laid down. containerd's config will be modified as such:
|
||||
|
||||
```toml title="/etc/containerd/config.toml"
|
||||
imports = ["/opt/kata/containerd/config.d/kata-deploy.toml"]
|
||||
```
|
||||
|
||||
This file will contain configuration for various flavors of Kata runtimes. We can see the vanilla CPU runtime config here:
|
||||
|
||||
```toml title="/opt/kata/containerd/config.d/kata-deploy.toml"
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata-qemu]
|
||||
runtime_type = "io.containerd.kata-qemu.v2"
|
||||
runtime_path = "/opt/kata/bin/containerd-shim-kata-v2"
|
||||
privileged_without_host_devices = true
|
||||
pod_annotations = ["io.katacontainers.*"]
|
||||
|
||||
[plugins."io.containerd.cri.v1.runtime".containerd.runtimes.kata-qemu.options]
|
||||
ConfigPath = "/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"
|
||||
```
|
||||
|
||||
Because containerd's CRI is aware of the Kata runtimes, we can spawn Kubernetes pods:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test
|
||||
spec:
|
||||
runtimeClassName: kata-qemu
|
||||
containers:
|
||||
- name: test
|
||||
image: "quay.io/libpod/ubuntu:latest"
|
||||
command: ["/bin/bash", "-c"]
|
||||
args: ["echo hello"]
|
||||
```
|
||||
|
||||
We can also spawn a Kata container by submitting a request to containerd like so:
|
||||
|
||||
<div class="annotate" markdown>
|
||||
|
||||
```sh
|
||||
$ ctr image pull quay.io/libpod/ubuntu:latest
|
||||
$ ctr run --runtime "io.containerd.kata.v2" --runtime-config-path /opt/kata/share/defaults/kata-containers/configuration-qemu.toml --rm -t "quay.io/libpod/ubuntu:latest" foo sh
|
||||
# echo hello
|
||||
hello
|
||||
```
|
||||
|
||||
</div>
|
||||
|
||||
!!! tip
|
||||
|
||||
`ctr` is not aware of the CRI config in `/etc/containerd/config.toml`. This is why you must specify the `--runtime-config-path`. Additionally, the `--runtime` value is converted into a specific binary name which containerd then searches for in its `PATH`. See the [containerd docs](https://github.com/containerd/containerd/blob/release/2.2/core/runtime/v2/README.md#usage) for more details.
|
||||
@@ -18,6 +18,3 @@ artifacts required to run Kata Containers on Kubernetes.
|
||||
* [upgrading document](../Upgrading.md)
|
||||
* [developer guide](../Developer-Guide.md)
|
||||
* [runtime documentation](../../src/runtime/README.md)
|
||||
|
||||
## Kata Containers 3.0 rust runtime installation
|
||||
* [installation guide](../install/kata-containers-3.0-rust-runtime-installation-guide.md)
|
||||
|
||||
@@ -1,116 +0,0 @@
|
||||
# Kata Containers 3.0 rust runtime installation
|
||||
The following is an overview of the different installation methods available.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Kata Containers 3.0 rust runtime requires nested virtualization or bare metal. Check
|
||||
[hardware requirements](/src/runtime/README.md#hardware-requirements) to see if your system is capable of running Kata
|
||||
Containers.
|
||||
|
||||
### Platform support
|
||||
|
||||
Kata Containers 3.0 rust runtime currently runs on 64-bit systems supporting the following
|
||||
architectures:
|
||||
|
||||
> **Notes:**
|
||||
> For other architectures, see https://github.com/kata-containers/kata-containers/issues/4320
|
||||
|
||||
| Architecture | Virtualization technology |
|
||||
|-|-|
|
||||
| `x86_64`| [Intel](https://www.intel.com) VT-x |
|
||||
| `aarch64` ("`arm64`")| [ARM](https://www.arm.com) Hyp |
|
||||
|
||||
## Packaged installation methods
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case | Availability
|
||||
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------|----------- |
|
||||
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give it a try on kata-containers on an already up and running Kubernetes cluster. | Yes |
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. | No |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. | No |
|
||||
| [Manual](#manual-installation) | Follow a guide step-by-step to install a working system | **No!** | For those who want the latest release with more control. | No |
|
||||
| [Build from source](#build-from-source-installation) | Build the software components manually | **No!** | Power users and developers only. | Yes |
|
||||
|
||||
### Kata Deploy Installation
|
||||
|
||||
Follow the [`kata-deploy` installation guide](../../tools/packaging/kata-deploy/helm-chart/README.md).
|
||||
### Official packages
|
||||
`ToDo`
|
||||
### Automatic Installation
|
||||
`ToDo`
|
||||
### Manual Installation
|
||||
`ToDo`
|
||||
|
||||
## Build from source installation
|
||||
|
||||
### Rust Environment Set Up
|
||||
|
||||
* Download `Rustup` and install `Rust`
|
||||
> **Notes:**
|
||||
> For Rust version, please set `RUST_VERSION` to the value of the `languages.rust.meta.newest-version` key in [`versions.yaml`](../../versions.yaml) or, if `yq` is available on your system, run `export RUST_VERSION=$(yq read versions.yaml languages.rust.meta.newest-version)`.
|
||||
|
||||
Example for `x86_64`
|
||||
```
|
||||
$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
$ source $HOME/.cargo/env
|
||||
$ rustup install ${RUST_VERSION}
|
||||
$ rustup default ${RUST_VERSION}-x86_64-unknown-linux-gnu
|
||||
```
|
||||
|
||||
* Musl support for fully static binary
|
||||
|
||||
Example for `x86_64`
|
||||
```
|
||||
$ rustup target add x86_64-unknown-linux-musl
|
||||
```
|
||||
* [Musl `libc`](http://musl.libc.org/) install
|
||||
|
||||
Example for musl 1.2.3
|
||||
```
|
||||
$ curl -O https://git.musl-libc.org/cgit/musl/snapshot/musl-1.2.3.tar.gz
|
||||
$ tar vxf musl-1.2.3.tar.gz
|
||||
$ cd musl-1.2.3/
|
||||
$ ./configure --prefix=/usr/local/
|
||||
$ make && sudo make install
|
||||
```
|
||||
|
||||
|
||||
### Install Kata 3.0 Rust Runtime Shim
|
||||
|
||||
```
|
||||
$ git clone https://github.com/kata-containers/kata-containers.git
|
||||
$ cd kata-containers/src/runtime-rs
|
||||
$ make && sudo make install
|
||||
```
|
||||
After running the command above, the default config file `configuration.toml` will be installed under `/usr/share/defaults/kata-containers/`, and the binary file `containerd-shim-kata-v2` will be installed under `/usr/local/bin/`.
|
||||
|
||||
### Install Shim Without Builtin Dragonball VMM
|
||||
|
||||
By default, runtime-rs includes the `Dragonball` VMM. To build without the built-in `Dragonball` hypervisor, use `make USE_BUILDIN_DB=false`:
|
||||
```bash
|
||||
$ cd kata-containers/src/runtime-rs
|
||||
$ make USE_BUILDIN_DB=false
|
||||
```
|
||||
After building, specify the desired hypervisor during installation using `HYPERVISOR`. For example, to use `qemu` or `cloud-hypervisor`:
|
||||
|
||||
```
|
||||
sudo make install HYPERVISOR=qemu
|
||||
```
|
||||
or
|
||||
```
|
||||
sudo make install HYPERVISOR=cloud-hypervisor
|
||||
```
|
||||
|
||||
### Build Kata Containers Kernel
|
||||
Follow the [Kernel installation guide](/tools/packaging/kernel/README.md).
|
||||
|
||||
### Build Kata Rootfs
|
||||
Follow the [Rootfs installation guide](../../tools/osbuilder/rootfs-builder/README.md).
|
||||
|
||||
### Build Kata Image
|
||||
Follow the [Image installation guide](../../tools/osbuilder/image-builder/README.md).
|
||||
|
||||
### Install Containerd
|
||||
|
||||
Follow the [Containerd installation guide](container-manager/containerd/containerd-install.md).
|
||||
|
||||
|
||||
64
docs/installation.md
Normal file
@@ -0,0 +1,64 @@
|
||||
# Installation
|
||||
|
||||
## Helm Chart
|
||||
|
||||
[helm](https://helm.sh/docs/intro/install/) can be used to install templated kubernetes manifests.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Kubernetes ≥ v1.22** – v1.22 is the first release where the CRI v1 API
|
||||
became the default and `RuntimeClass` left alpha. The chart depends on those
|
||||
stable interfaces; earlier clusters need `feature-gates` or CRI shims that are
|
||||
out of scope.
|
||||
|
||||
- **Kata Release 3.12** - v3.12.0 introduced publishing the helm-chart on the
|
||||
release page for easier consumption, since v3.8.0 we shipped the helm-chart
|
||||
via source code in the kata-containers `GitHub` repository.
|
||||
|
||||
- CRI-compatible runtime (containerd or CRI-O). If one wants to use the
|
||||
`multiInstallSuffix` feature one needs at least **containerd-2.0** which
|
||||
supports drop-in config files
|
||||
|
||||
- Nodes must allow loading kernel modules and installing Kata artifacts (the
|
||||
chart runs privileged containers to do so)
|
||||
|
||||
### `helm install`
|
||||
|
||||
```sh
|
||||
# Install directly from the official ghcr.io OCI registry
|
||||
# update the VERSION X.YY.Z to your needs or just use the latest
|
||||
|
||||
export VERSION=$(curl -sSL https://api.github.com/repos/kata-containers/kata-containers/releases/latest | jq .tag_name | tr -d '"')
|
||||
export CHART="oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy"
|
||||
|
||||
$ helm install kata-deploy "${CHART}" --version "${VERSION}"
|
||||
|
||||
# See everything you can configure
|
||||
$ helm show values "${CHART}" --version "${VERSION}"
|
||||
```
|
||||
|
||||
This installs the `kata-deploy` DaemonSet and the default Kata `RuntimeClass`
|
||||
resources on your cluster.
|
||||
|
||||
To see what versions of the chart are available:
|
||||
|
||||
```sh
|
||||
$ helm show chart oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy
|
||||
```
|
||||
|
||||
### `helm uninstall`
|
||||
|
||||
```sh
|
||||
$ helm uninstall kata-deploy -n kube-system
|
||||
```
|
||||
|
||||
During uninstall, Helm will report that some resources were kept due to the
|
||||
resource policy (`ServiceAccount`, `ClusterRole`, `ClusterRoleBinding`). This
|
||||
is **normal**. A post-delete hook Job runs after uninstall and removes those
|
||||
resources so no cluster-wide `RBAC` is left behind.
|
||||
|
||||
## Pre-Built Release
|
||||
|
||||
Kata can also be installed using the pre-built releases: https://github.com/kata-containers/kata-containers/releases
|
||||
|
||||
This method does not have any facilities for artifact lifecycle management.
|
||||
116
docs/prerequisites.md
Normal file
@@ -0,0 +1,116 @@
|
||||
# Prerequisites
|
||||
|
||||
## Kubernetes
|
||||
|
||||
If using Kubernetes, at least version `v1.22` is recommended. This is the first release in which the CRI v1 API became the default and `RuntimeClass` left alpha.
|
||||
|
||||
## containerd
|
||||
|
||||
Kata requires a [CRI](https://kubernetes.io/docs/concepts/containers/cri/)-compatible container runtime. containerd is commonly used for Kata. We recommend installing containerd using your platform's package distribution mechanism, using at least the latest version of containerd v2.1.x.[^1]
|
||||
|
||||
|
||||
### Debian/Ubuntu
|
||||
|
||||
To install on Debian-based systems:
|
||||
|
||||
```sh
|
||||
$ apt update
|
||||
$ apt install containerd
|
||||
$ systemctl status containerd
|
||||
● containerd.service - containerd container runtime
|
||||
Loaded: loaded (/etc/systemd/system/containerd.service; enabled; preset: enabled)
|
||||
Drop-In: /etc/systemd/system/containerd.service.d
|
||||
└─http-proxy.conf
|
||||
Active: active (running) since Wed 2026-02-25 22:58:13 UTC; 5 days ago
|
||||
Docs: https://containerd.io
|
||||
Main PID: 3767885 (containerd)
|
||||
Tasks: 540
|
||||
Memory: 70.7G (peak: 70.8G)
|
||||
CPU: 4h 9min 26.153s
|
||||
CGroup: /runtime.slice/containerd.service
|
||||
├─ 12694 /usr/local/bin/container
|
||||
```
|
||||
|
||||
### Fedora/RedHat
|
||||
|
||||
To install on Fedora-based systems:
|
||||
|
||||
```
|
||||
$ yum install containerd
|
||||
```
|
||||
|
||||
??? help
|
||||
|
||||
Documentation assistance is requested for more specific instructions on Fedora systems.
|
||||
|
||||
### Pre-Built Releases
|
||||
|
||||
Many Linux distributions will not package the latest versions of containerd. If you find that your distribution provides very old versions of containerd, it's recommended to upgrade with the [pre-built releases](https://github.com/containerd/containerd/releases).
|
||||
|
||||
#### Executable
|
||||
|
||||
Download the latest release of containerd:
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/containerd/containerd/releases/download/v${VERSION}/containerd-${VERSION}-linux-${PLATFORM}.tar.gz
|
||||
|
||||
# Extract to the current directory
|
||||
$ tar -xf ./containerd*.tar.gz
|
||||
|
||||
# Extract to root if you want it installed to its final location.
|
||||
$ tar -C / -xf ./*.tar.gz
|
||||
```
|
||||
|
||||
### Containerd Config
|
||||
|
||||
Containerd requires a config file at `/etc/containerd/config.toml`. This needs to be populated with a simple default config:
|
||||
|
||||
```sh
|
||||
$ /usr/local/bin/containerd config default > /etc/containerd/config.toml
|
||||
```
|
||||
|
||||
### Systemd Unit File
|
||||
|
||||
Install the systemd unit file:
|
||||
|
||||
```sh
|
||||
$ wget -O /etc/systemd/system/containerd.service https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
|
||||
```
|
||||
|
||||
!!! info
|
||||
|
||||
- You must modify the `ExecStart` line to the location of the installed containerd executable.
|
||||
    - containerd's `PATH` variable must allow it to find `containerd-shim-kata-v2`. You can do this by either creating a symlink from `/usr/local/bin/containerd-shim-kata-v2` to `/opt/kata/bin/containerd-shim-kata-v2` or by modifying containerd's `PATH` variable to search in `/opt/kata/bin/`. See the `Environment=` directive in `systemd.exec(5)` for further details.
|
||||
|
||||
|
||||
Reload systemd and start containerd:
|
||||
|
||||
```sh
|
||||
$ systemctl daemon-reload
|
||||
$ systemctl enable --now containerd
|
||||
$ systemctl start containerd
|
||||
$ systemctl status containerd
|
||||
```
|
||||
|
||||
More details can be found on the [containerd installation docs](https://github.com/containerd/containerd/blob/main/docs/getting-started.md).
|
||||
|
||||
### Enable CRI
|
||||
|
||||
If you're using Kubernetes, you must enable the containerd Container Runtime Interface (CRI) plugin:
|
||||
|
||||
```sh
|
||||
$ ctr plugins ls | grep cri
|
||||
io.containerd.cri.v1 images - ok
|
||||
io.containerd.cri.v1 runtime linux/amd64 ok
|
||||
io.containerd.grpc.v1 cri - ok
|
||||
```
|
||||
|
||||
If these are not enabled, you'll need to remove them from the `disabled_plugins` section of the containerd config.
|
||||
|
||||
|
||||
[^1]: Kata makes use of containerd's drop-in config merging in `/etc/containerd/config.d/` which is only available starting from containerd v2. containerd v1 may work, but some Kata features will not work as expected.
|
||||
|
||||
|
||||
## runc
|
||||
|
||||
The default `runc` runtime needs to be installed for non-kata containers. More details can be found at the [containerd docs](https://github.com/containerd/containerd/blob/979c80d8a5d7fc7be34102a1ada53ae5a0ff09e8/docs/RUNC.md).
|
||||
9
docs/requirements.txt
Normal file
@@ -0,0 +1,9 @@
|
||||
mkdocs-materialx==10.0.9
|
||||
mkdocs-glightbox==0.4.0
|
||||
mkdocs-macros-plugin==1.5.0
|
||||
mkdocs-awesome-nav==3.3.0
|
||||
mkdocs-open-in-new-tab==1.0.8
|
||||
mkdocs-redirects==1.2.2
|
||||
CairoSVG==2.9.0
|
||||
pillow==12.1.1
|
||||
click==8.2.1
|
||||
56
docs/runtime-configuration.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# Runtime Configuration
|
||||
|
||||
The containerd shims (both the Rust and Go implementations) take configuration files to control their behavior. These files are in `/opt/kata/share/defaults/kata-containers/`. An example excerpt:
|
||||
|
||||
```toml title="/opt/kata/share/defaults/kata-containers/configuration.toml"
|
||||
[hypervisor.qemu]
|
||||
path = "/opt/kata/bin/qemu-system-x86_64"
|
||||
kernel = "/opt/kata/share/kata-containers/vmlinux.container"
|
||||
image = "/opt/kata/share/kata-containers/kata-containers.img"
|
||||
machine_type = "q35"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type = "ext4"
|
||||
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
|
||||
enable_annotations = ["enable_iommu", "virtio_fs_extra_args", "kernel_params"]
|
||||
```
|
||||
|
||||
These files should never be modified directly. If you wish to create a modified version of these files, you may create your own [custom runtime](helm-configuration.md#custom-runtimes). For example, to modify the image path, we provide these values to helm:
|
||||
|
||||
```yaml title="values.yaml"
|
||||
customRuntimes:
|
||||
enabled: true
|
||||
runtimes:
|
||||
my-gpu-runtime:
|
||||
baseConfig: "qemu-nvidia-gpu"
|
||||
dropIn: |
|
||||
[hypervisor.qemu]
|
||||
image = "/path/to/custom-image.img"
|
||||
runtimeClass: |
|
||||
kind: RuntimeClass
|
||||
apiVersion: node.k8s.io/v1
|
||||
metadata:
|
||||
name: kata-my-gpu-runtime
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: kata-deploy
|
||||
handler: kata-my-gpu-runtime
|
||||
overhead:
|
||||
podFixed:
|
||||
memory: "640Mi"
|
||||
cpu: "500m"
|
||||
scheduling:
|
||||
nodeSelector:
|
||||
katacontainers.io/kata-runtime: "true"
|
||||
```
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
# Enabling NVIDIA GPU workloads using GPU passthrough with Kata Containers
|
||||
|
||||
This page provides:
|
||||
|
||||
1. A description of the components involved when running GPU workloads with
|
||||
Kata Containers using the NVIDIA TEE and non-TEE GPU runtime classes.
|
||||
1. An explanation of the orchestration flow on a Kubernetes node for this
|
||||
scenario.
|
||||
1. A deployment guide enabling to utilize these runtime classes.
|
||||
1. A deployment guide to utilize these runtime classes.
|
||||
|
||||
The goal is to educate readers familiar with Kubernetes and Kata Containers
|
||||
on NVIDIA's reference implementation which is reflected in Kata CI's build
|
||||
@@ -18,58 +19,56 @@ Confidential Containers.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> The current supported mode for enabling GPU workloads in the TEE scenario
|
||||
> is single GPU passthrough (one GPU per pod) on AMD64 platforms (AMD SEV-SNP
|
||||
> being the only supported TEE scenario so far with support for Intel TDX being
|
||||
> on the way).
|
||||
> The currently supported modes for enabling GPU workloads in the TEE
|
||||
> scenario are: (1) single-GPU passthrough (one physical GPU per pod) and
|
||||
> (2) multi-GPU passthrough on NVSwitch (NVLink) based HGX systems
|
||||
> (for example, HGX Hopper (SXM) and HGX Blackwell / HGX B200).
|
||||
|
||||
## Component Overview
|
||||
|
||||
Before providing deployment guidance, we describe the components involved to
|
||||
support running GPU workloads. We start from a top to bottom perspective
|
||||
from the NVIDIA GPU operator via the Kata runtime to the components within
|
||||
from the NVIDIA GPU Operator via the Kata runtime to the components within
|
||||
the NVIDIA GPU Utility Virtual Machine (UVM) root filesystem.
|
||||
|
||||
### NVIDIA GPU Operator
|
||||
|
||||
A central component is the
|
||||
[NVIDIA GPU operator](https://github.com/NVIDIA/gpu-operator) which can be
|
||||
deployed onto your cluster as a helm chart. Installing the GPU operator
|
||||
[NVIDIA GPU Operator](https://github.com/NVIDIA/gpu-operator) which can be
|
||||
deployed onto your cluster as a helm chart. Installing the GPU Operator
|
||||
delivers various operands on your nodes in the form of Kubernetes DaemonSets.
|
||||
These operands are vital to support the flow of orchestrating pod manifests
|
||||
using NVIDIA GPU runtime classes with GPU passthrough on your nodes. Without
|
||||
getting into the details, the most important operands and their
|
||||
responsibilities are:
|
||||
|
||||
- **nvidia-vfio-manager:** Binding discovered NVIDIA GPUs to the `vfio-pci`
|
||||
driver for VFIO passthrough.
|
||||
- **nvidia-vfio-manager:** Binding discovered NVIDIA GPUs and nvswitches to
|
||||
the `vfio-pci` driver for VFIO passthrough.
|
||||
- **nvidia-cc-manager:** Transitioning GPUs into confidential computing (CC)
|
||||
and non-CC mode (see the
|
||||
[NVIDIA/k8s-cc-manager](https://github.com/NVIDIA/k8s-cc-manager)
|
||||
repository).
|
||||
- **nvidia-kata-manager:** Creating host-side CDI specifications for GPU
|
||||
passthrough, resulting in the file `/var/run/cdi/nvidia.yaml`, containing
|
||||
`kind: nvidia.com/pgpu` (see the
|
||||
[NVIDIA/k8s-kata-manager](https://github.com/NVIDIA/k8s-kata-manager)
|
||||
repository).
|
||||
- **nvidia-sandbox-device-plugin** (see the
|
||||
[NVIDIA/sandbox-device-plugin](https://github.com/NVIDIA/sandbox-device-plugin)
|
||||
repository):
|
||||
- Creating host-side CDI specifications for GPU passthrough,
|
||||
resulting in the file `/var/run/cdi/nvidia.yaml`, containing
|
||||
`kind: nvidia.com/pgpu`
|
||||
- Allocating GPUs during pod deployment.
|
||||
- Discovering NVIDIA GPUs, their capabilities, and advertising these to
|
||||
the Kubernetes control plane (allocatable resources as type
|
||||
`nvidia.com/pgpu` resources will appear for the node and GPU Device IDs
|
||||
will be registered with Kubelet). These GPUs can thus be allocated as
|
||||
container resources in your pod manifests. See below GPU operator
|
||||
container resources in your pod manifests. See below GPU Operator
|
||||
deployment instructions for the use of the key `pgpu`, controlled via a
|
||||
variable.
|
||||
|
||||
To summarize, the GPU operator manages the GPUs on each node, allowing for
|
||||
To summarize, the GPU Operator manages the GPUs on each node, allowing for
|
||||
simple orchestration of pod manifests using Kata Containers. Once the cluster
|
||||
with GPU operator and Kata bits is up and running, the end user can schedule
|
||||
with GPU Operator and Kata bits is up and running, the end user can schedule
|
||||
Kata NVIDIA GPU workloads, using resource limits and the
|
||||
`kata-qemu-nvidia-gpu` or `kata-qemu-nvidia-gpu-snp` runtime classes, for
|
||||
example:
|
||||
`kata-qemu-nvidia-gpu`, `kata-qemu-nvidia-gpu-tdx` or
|
||||
`kata-qemu-nvidia-gpu-snp` runtime classes, for example:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
@@ -213,7 +212,7 @@ API and kernel drivers, interacting with the pass-through GPU device.
|
||||
|
||||
An additional step is exercised in our CI samples: when using images from an
|
||||
authenticated registry, the guest-pull mechanism triggers attestation using
|
||||
trustee's Key Broker Service (KBS) for secure release of the NGC API
|
||||
Trustee's Key Broker Service (KBS) for secure release of the NGC API
|
||||
authentication key used to access the NVCR container registry. As part of
|
||||
this, the attestation agent exercises composite attestation and transitions
|
||||
the GPU into `Ready` state (without this, the GPU has to explicitly be
|
||||
@@ -232,24 +231,40 @@ NVIDIA GPU CI validation jobs. Note that, this setup:
|
||||
- uses the genpolicy tool to attach Kata agent security policies to the pod
|
||||
manifest
|
||||
- has dedicated (composite) attestation tests, a CUDA vectorAdd test, and a
|
||||
NIM/RA test sample with secure API key release
|
||||
NIM/RA test sample with secure API key release using sealed secrets.
|
||||
|
||||
A similar deployment guide and scenario description can be found in NVIDIA resources
|
||||
under
|
||||
[Early Access: NVIDIA GPU Operator with Confidential Containers based on Kata](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/latest/confidential-containers.html).
|
||||
[NVIDIA Confidential Containers Overview (Early Access)](https://docs.nvidia.com/datacenter/cloud-native/confidential-containers/latest/overview.html).
|
||||
|
||||
### Feature Set
|
||||
|
||||
The NVIDIA stack for Kata Containers leverages features for the confidential
|
||||
computing scenario from both the confidential containers open source project
|
||||
and from the Kata Containers source tree, such as:
|
||||
- composite attestation using Trustee and the NVIDIA Remote Attestation
|
||||
Service NRAS
|
||||
- generating kata agent security policies using the genpolicy tool
|
||||
- use of signed sealed secrets
|
||||
- access to authenticated registries for container image guest-pull
|
||||
- container image signature verification and encrypted container images
|
||||
- ephemeral container data and image layer storage
|
||||
|
||||
### Requirements
|
||||
|
||||
The requirements for the TEE scenario are:
|
||||
|
||||
- Ubuntu 25.10 as host OS
|
||||
- CPU with AMD SEV-SNP support with proper BIOS/UEFI version and settings
|
||||
- CPU with AMD SEV-SNP or Intel TDX support with proper BIOS/UEFI version
|
||||
and settings
|
||||
- CC-capable Hopper/Blackwell GPU with proper VBIOS version.
|
||||
|
||||
BIOS and VBIOS configuration is out of scope for this guide. Other resources,
|
||||
such as the documentation found on the
|
||||
[NVIDIA Trusted Computing Solutions](https://docs.nvidia.com/nvtrust/index.html)
|
||||
page and the above linked NVIDIA documentation, provide guidance on
|
||||
page, on the
|
||||
[Secure AI Compatibility Matrix](https://www.nvidia.com/en-us/data-center/solutions/confidential-computing/secure-ai-compatibility-matrix/)
|
||||
page, and on the above linked NVIDIA documentation, provide guidance on
|
||||
selecting proper hardware and on properly configuring its firmware and OS.
|
||||
|
||||
### Installation
|
||||
@@ -257,12 +272,16 @@ selecting proper hardware and on properly configuring its firmware and OS.
|
||||
#### Containerd and Kubernetes
|
||||
|
||||
First, set up your Kubernetes cluster. For instance, in Kata CI, our NVIDIA
|
||||
jobs use a single-node vanilla Kubernetes cluster with a 2.x containerd
|
||||
version and Kata's current supported Kubernetes version. We set this cluster
|
||||
up using the `deploy_k8s` function from `tests/integration/kubernetes/gha-run.sh`
|
||||
as follows:
|
||||
|
||||
jobs use a single-node vanilla Kubernetes cluster with a 2.1 containerd
|
||||
version and Kata's current supported Kubernetes version. This cluster is
|
||||
being set up using the `deploy_k8s` function from the script file
|
||||
`tests/integration/kubernetes/gha-run.sh`. If you intend to run this script,
|
||||
follow these steps, and make sure you have `yq` and `helm` installed. Note
|
||||
that, these scripts query the GitHub API, so creating and declaring a
|
||||
personal access token prevents rate limiting issues.
|
||||
You can execute the function as follows:
|
||||
```bash
|
||||
$ export GH_TOKEN="<your-gh-pat>"
|
||||
$ export KUBERNETES="vanilla"
|
||||
$ export CONTAINER_ENGINE="containerd"
|
||||
$ export CONTAINER_ENGINE_VERSION="v2.1"
|
||||
@@ -276,8 +295,11 @@ $ deploy_k8s
|
||||
> `runtimeRequestTimeout` timeout value than the two minute default timeout.
|
||||
> Using the guest-pull mechanism, pulling large images may take a significant
|
||||
> amount of time and may delay container start, possibly leading your Kubelet
|
||||
> to de-allocate your pod before it transitions from the *container created*
|
||||
> to the *container running* state.
|
||||
> to de-allocate your pod before it transitions from the *container creating*
|
||||
> to the *container running* state. The NVIDIA shim configurations use a
|
||||
> `create_container_timeout` of 1200s, which is the equivalent value on shim
|
||||
> side, controlling the time the shim allows for a container to remain in
|
||||
> *container creating* state.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
@@ -291,7 +313,7 @@ $ deploy_k8s
|
||||
#### GPU Operator
|
||||
|
||||
Assuming you have the helm tools installed, deploy the latest version of the
|
||||
GPU Operator as a helm chart (minimum version: `v25.10.0`):
|
||||
GPU Operator as a helm chart (minimum version: `v26.3.0`):
|
||||
|
||||
```bash
|
||||
$ helm repo add nvidia https://helm.ngc.nvidia.com/nvidia && helm repo update
|
||||
@@ -300,33 +322,27 @@ $ helm install --wait --generate-name \
|
||||
nvidia/gpu-operator \
|
||||
--set sandboxWorkloads.enabled=true \
|
||||
--set sandboxWorkloads.defaultWorkload=vm-passthrough \
|
||||
--set kataManager.enabled=true \
|
||||
--set kataManager.config.runtimeClasses=null \
|
||||
--set kataManager.repository=nvcr.io/nvidia/cloud-native \
|
||||
--set kataManager.image=k8s-kata-manager \
|
||||
--set kataManager.version=v0.2.4 \
|
||||
--set ccManager.enabled=true \
|
||||
--set ccManager.defaultMode=on \
|
||||
--set ccManager.repository=nvcr.io/nvidia/cloud-native \
|
||||
--set ccManager.image=k8s-cc-manager \
|
||||
--set ccManager.version=v0.2.0 \
|
||||
--set sandboxDevicePlugin.repository=nvcr.io/nvidia/cloud-native \
|
||||
--set sandboxDevicePlugin.image=nvidia-sandbox-device-plugin \
|
||||
--set sandboxDevicePlugin.version=v0.0.1 \
|
||||
--set 'sandboxDevicePlugin.env[0].name=P_GPU_ALIAS' \
|
||||
--set 'sandboxDevicePlugin.env[0].value=pgpu' \
|
||||
--set sandboxWorkloads.mode=kata \
|
||||
--set nfd.enabled=true \
|
||||
--set nfd.nodefeaturerules=true
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> For heterogeneous clusters with different GPU types, you can omit
|
||||
> the `P_GPU_ALIAS` environment variable lines. This will cause the sandbox
|
||||
> device plugin to create GPU model-specific resource types (e.g.,
|
||||
> `nvidia.com/GH100_H100L_94GB`) instead of the generic `nvidia.com/pgpu`,
|
||||
> which in turn can be used by pods through respective resource limits.
|
||||
> For simplicity, this guide uses the generic alias.
|
||||
> For heterogeneous clusters with different GPU types, you can specify an
|
||||
> empty `P_GPU_ALIAS` environment variable for the sandbox device plugin:
|
||||
> `- --set 'sandboxDevicePlugin.env[0].name=P_GPU_ALIAS' \`
|
||||
> `- --set 'sandboxDevicePlugin.env[0].value=""' \`
|
||||
> This will cause the sandbox device plugin to create GPU model-specific
|
||||
> resource types (e.g., `nvidia.com/GH100_H100L_94GB`) instead of the
|
||||
> default `pgpu` type, which usually results in advertising a resource of
|
||||
> type `nvidia.com/pgpu`
|
||||
> The exposed device resource types can be used for pods by specifying
|
||||
> respective resource limits.
|
||||
> Your node's nvswitches are exposed as resources of type
|
||||
> `nvidia.com/nvswitch` by default. Using the variable `NVSWITCH_ALIAS`
|
||||
> allows to control the advertising behavior similar to the `P_GPU_ALIAS`
|
||||
> variable.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
@@ -351,8 +367,7 @@ $ helm install kata-deploy \
|
||||
--create-namespace \
|
||||
-f "https://raw.githubusercontent.com/kata-containers/kata-containers/refs/tags/${VERSION}/tools/packaging/kata-deploy/helm-chart/kata-deploy/try-kata-nvidia-gpu.values.yaml" \
|
||||
--set nfd.enabled=false \
|
||||
--set shims.qemu-nvidia-gpu-tdx.enabled=false \
|
||||
--wait --timeout 10m --atomic \
|
||||
--wait --timeout 10m \
|
||||
"${CHART}" --version "${VERSION}"
|
||||
```
|
||||
|
||||
@@ -382,31 +397,22 @@ mode which requires entering a licensing agreement with NVIDIA, see the
|
||||
### Cluster validation and preparation
|
||||
|
||||
If you did not use the `sandboxWorkloads.defaultWorkload=vm-passthrough`
|
||||
parameter during GPU operator deployment, label your nodes for GPU VM
|
||||
parameter during GPU Operator deployment, label your nodes for GPU VM
|
||||
passthrough, for the example of using all nodes for GPU passthrough, run:
|
||||
|
||||
```bash
|
||||
$ kubectl label nodes --all nvidia.com/gpu.workload.config=vm-passthrough --overwrite
|
||||
```
|
||||
|
||||
Check if the `nvidia-cc-manager` pod is running if you intend to run GPU TEE
|
||||
scenarios. If not, you need to manually label the node as CC capable. Current
|
||||
GPU Operator node feature rules do not yet recognize all CC capable GPU PCI
|
||||
IDs. Run the following command:
|
||||
|
||||
```bash
|
||||
$ kubectl label nodes --all nvidia.com/cc.capable=true
|
||||
```
|
||||
|
||||
After this, assure the `nvidia-cc-manager` pod is running. With the suggested
|
||||
parameters for GPU Operator deployment, the `nvidia-cc-manager` will
|
||||
automatically transition the GPU into CC mode.
|
||||
With the suggested parameters for GPU Operator deployment, the
|
||||
`nvidia-cc-manager` operand will automatically transition the GPU into CC
|
||||
mode.
|
||||
|
||||
After deployment, you can transition your node(s) to the desired CC state,
|
||||
using either the `on` or `off` value, depending on your scenario. For the
|
||||
non-CC scenario, transition to the `off` state via:
|
||||
using either the `on`, `ppcie`, or `off` value, depending on your scenario.
|
||||
For the non-CC scenario, transition to the `off` state via:
|
||||
`kubectl label nodes --all nvidia.com/cc.mode=off` and wait until all pods
|
||||
are back running. When an actual change is exercised, various GPU operator
|
||||
are back running. When an actual change is exercised, various GPU Operator
|
||||
operands will be restarted.
|
||||
|
||||
Ensure all pods are running:
|
||||
@@ -425,9 +431,10 @@ $ lspci -nnk -d 10de:
|
||||
|
||||
### Run the CUDA vectorAdd sample
|
||||
|
||||
Create the following file:
|
||||
Create the pod manifest with:
|
||||
|
||||
```yaml
|
||||
```bash
|
||||
$ cat > cuda-vectoradd-kata.yaml.in << 'EOF'
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
@@ -445,6 +452,7 @@ spec:
|
||||
limits:
|
||||
nvidia.com/pgpu: "1"
|
||||
memory: 16Gi
|
||||
EOF
|
||||
```
|
||||
|
||||
Depending on your scenario and on the CC state, export your desired runtime
|
||||
@@ -477,6 +485,17 @@ To stop the pod, run: `kubectl delete pod cuda-vectoradd-kata`.
|
||||
|
||||
### Next steps
|
||||
|
||||
#### Use multi-GPU passthrough
|
||||
|
||||
If you have machines supporting multi-GPU passthrough, use a pod deployment
|
||||
manifest which uses 8 pgpu and 4 nvswitch resources.
|
||||
On the NVIDIA Hopper architecture multi-GPU passthrough uses protected PCIe
|
||||
(PPCIE) which claims exclusive use of the nvswitches for a single CVM. In
|
||||
this case, transition your relevant node(s) GPU mode to `ppcie` mode.
|
||||
The NVIDIA Blackwell architecture uses NVLink encryption which places the
|
||||
switches outside of the Trusted Computing Base (TCB) and so does not
|
||||
require a separate switch setting.
|
||||
|
||||
#### Transition between CC and non-CC mode
|
||||
|
||||
Use the previously described node labeling approach to transition between
|
||||
@@ -492,7 +511,7 @@ and a basic NIM/RAG deployment. Running CI tests for the TEE GPU scenario
|
||||
requires KBS to be deployed (except for the CUDA vectorAdd test). The best
|
||||
place to get started running these tests locally is to look into our
|
||||
[NVIDIA CI workflow manifest](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/run-k8s-tests-on-nvidia-gpu.yaml)
|
||||
and into the underling
|
||||
and into the underlying
|
||||
[run_kubernetes_nv_tests.sh](https://github.com/kata-containers/kata-containers/blob/main/tests/integration/kubernetes/run_kubernetes_nv_tests.sh)
|
||||
script. For example, to run the CUDA vectorAdd scenario against the TEE GPU
|
||||
runtime class use the following commands:
|
||||
@@ -547,6 +566,22 @@ With GPU passthrough being supported by the
|
||||
you can use the tool to create a Kata agent security policy. Our CI deploys
|
||||
all sample pod manifests with a Kata agent security policy.
|
||||
|
||||
Note that, using containerd 2.1 in upstream's CI, we use the following
|
||||
modification to the genpolicy default settings:
|
||||
```bash
|
||||
[
|
||||
{
|
||||
"op": "replace",
|
||||
"path": "/kata_config/oci_version",
|
||||
"value": "1.2.1"
|
||||
}
|
||||
]
|
||||
```
|
||||
This modification is applied via the genpolicy drop-in configuration file
|
||||
`src\tools\genpolicy\drop-in-examples\20-oci-1.2.1-drop-in.json`.
|
||||
When using a newer containerd version, such as containerd 2.2, the OCI
|
||||
version field needs to be adjusted to "1.3.0", for instance.
|
||||
|
||||
#### Deploy pods using your own containers and manifests
|
||||
|
||||
You can author pod manifests leveraging your own containers, for instance,
|
||||
@@ -564,6 +599,3 @@ following annotation in the manifest:
|
||||
>
|
||||
> - musl-based container images (e.g., using Alpine), or distro-less
|
||||
> containers are not supported.
|
||||
> - for the TEE scenario, only single-GPU passthrough per pod is supported,
|
||||
> so your pod resource limit must be: `nvidia.com/pgpu: "1"` (on a system
|
||||
> with multiple GPUs, you can thus pass through one GPU per pod).
|
||||
|
||||
91
mkdocs.yaml
Normal file
@@ -0,0 +1,91 @@
|
||||
site_name: "Kata Containers Docs"
|
||||
site_description: "Developer and user documentation for the Kata Containers project."
|
||||
site_author: "Kata Containers Community"
|
||||
|
||||
repo_url: "https://github.com/kata-containers/kata-containers"
|
||||
site_url: "https://kata-containers.github.io/kata-containers"
|
||||
edit_uri: "edit/main/docs/"
|
||||
repo_name: kata-containers
|
||||
|
||||
theme:
|
||||
name: materialx
|
||||
favicon: "assets/images/favicon.svg"
|
||||
logo: "assets/images/favicon.svg"
|
||||
topbar_style: glass
|
||||
palette:
|
||||
- media: "(prefers-color-scheme)"
|
||||
toggle:
|
||||
icon: material/brightness-auto
|
||||
name: Switch to light mode
|
||||
- media: "(prefers-color-scheme: light)"
|
||||
scheme: default
|
||||
primary: blue
|
||||
accent: light blue
|
||||
toggle:
|
||||
icon: material/weather-sunny
|
||||
name: Switch to dark mode
|
||||
- media: "(prefers-color-scheme: dark)"
|
||||
scheme: slate
|
||||
primary: cyan
|
||||
accent: cyan
|
||||
toggle:
|
||||
icon: material/brightness-4
|
||||
name: Switch to system preference
|
||||
features:
|
||||
- content.action.edit
|
||||
- content.action.view
|
||||
- content.code.annotate
|
||||
- content.code.copy
|
||||
- content.code.select
|
||||
- content.footnote.tooltips
|
||||
- content.tabs.link
|
||||
- content.tooltips
|
||||
- navigation.expand
|
||||
- navigation.indexes
|
||||
- navigation.path
|
||||
- navigation.sections
|
||||
- navigation.tabs
|
||||
- navigation.tracking
|
||||
- navigation.top
|
||||
- navigation.instant
|
||||
- navigation.instant.prefetch
|
||||
- navigation.instant.progress
|
||||
- toc.follow
|
||||
markdown_extensions:
|
||||
- abbr
|
||||
- admonition
|
||||
- attr_list
|
||||
- def_list
|
||||
- footnotes
|
||||
- md_in_html
|
||||
- pymdownx.arithmatex:
|
||||
generic: true
|
||||
- pymdownx.emoji:
|
||||
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||
emoji_generator: !!python/name:material.extensions.emoji.to_svg
|
||||
- pymdownx.details
|
||||
- pymdownx.highlight:
|
||||
anchor_linenums: true
|
||||
line_spans: __span
|
||||
pygments_lang_class: true
|
||||
auto_title: true
|
||||
- pymdownx.keys
|
||||
- pymdownx.magiclink
|
||||
- pymdownx.superfences:
|
||||
custom_fences:
|
||||
- name: mermaid
|
||||
class: mermaid
|
||||
format: !!python/name:pymdownx.superfences.fence_code_format
|
||||
- pymdownx.inlinehilite
|
||||
- pymdownx.tabbed:
|
||||
alternate_style: true
|
||||
- pymdownx.tilde
|
||||
- pymdownx.caret
|
||||
- pymdownx.mark
|
||||
- toc:
|
||||
permalink: true
|
||||
|
||||
plugins:
|
||||
- search
|
||||
- awesome-nav
|
||||
|
||||
1830
src/agent/Cargo.lock
generated
@@ -1,103 +1,3 @@
|
||||
[workspace]
|
||||
members = ["rustjail", "policy", "vsock-exporter"]
|
||||
|
||||
[workspace.package]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
rust-version = "1.88.0"
|
||||
|
||||
[workspace.dependencies]
|
||||
oci-spec = { version = "0.8.1", features = ["runtime"] }
|
||||
lazy_static = "1.3.0"
|
||||
ttrpc = { version = "0.8.4", features = ["async"], default-features = false }
|
||||
protobuf = "3.7.2"
|
||||
libc = "0.2.94"
|
||||
|
||||
# Notes:
|
||||
# - Needs to stay in sync with libs
|
||||
# - Upgrading to 0.27+ will require code changes (see #11842)
|
||||
nix = "0.26.4"
|
||||
|
||||
capctl = "0.2.0"
|
||||
scan_fmt = "0.2.6"
|
||||
scopeguard = "1.0.0"
|
||||
thiserror = "1.0.26"
|
||||
regex = "1.10.5"
|
||||
serial_test = "0.10.0"
|
||||
url = "2.5.0"
|
||||
derivative = "2.2.0"
|
||||
const_format = "0.2.30"
|
||||
|
||||
# Async helpers
|
||||
async-trait = "0.1.50"
|
||||
async-recursion = "0.3.2"
|
||||
futures = "0.3.30"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.46.1", features = ["full"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
|
||||
rtnetlink = "0.14.0"
|
||||
netlink-packet-route = "0.19.0"
|
||||
netlink-packet-core = "0.7.0"
|
||||
ipnetwork = "0.17.0"
|
||||
|
||||
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.1.2"
|
||||
slog-term = "2.9.0"
|
||||
|
||||
# Redirect ttrpc log calls
|
||||
slog-stdlog = "4.0.0"
|
||||
log = "0.4.11"
|
||||
|
||||
cfg-if = "1.0.0"
|
||||
prometheus = { version = "0.14.0", features = ["process"] }
|
||||
procfs = "0.12.0"
|
||||
|
||||
anyhow = "1"
|
||||
|
||||
cgroups = { package = "cgroups-rs", git = "https://github.com/kata-containers/cgroups-rs", rev = "v0.3.5" }
|
||||
|
||||
# Tracing
|
||||
tracing = "0.1.41"
|
||||
tracing-subscriber = "0.2.18"
|
||||
tracing-opentelemetry = "0.13.0"
|
||||
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"] }
|
||||
|
||||
# Configuration
|
||||
serde = { version = "1.0.129", features = ["derive"] }
|
||||
serde_json = "1.0.39"
|
||||
toml = "0.5.8"
|
||||
clap = { version = "4.5.40", features = ["derive"] }
|
||||
strum = "0.26.2"
|
||||
strum_macros = "0.26.2"
|
||||
|
||||
tempfile = "3.19.1"
|
||||
which = "4.3.0"
|
||||
rstest = "0.18.0"
|
||||
async-std = { version = "1.12.0", features = ["attributes"] }
|
||||
|
||||
# Local dependencies
|
||||
kata-agent-policy = { path = "policy" }
|
||||
rustjail = { path = "rustjail" }
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
mem-agent = { path = "../libs/mem-agent" }
|
||||
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types", features = ["safe-path"] }
|
||||
# Note: this crate sets the slog 'max_*' features which allows the log level
|
||||
# to be modified at runtime.
|
||||
logging = { path = "../libs/logging" }
|
||||
protocols = { path = "../libs/protocols" }
|
||||
runtime-spec = { path = "../libs/runtime-spec" }
|
||||
safe-path = { path = "../libs/safe-path" }
|
||||
test-utils = { path = "../libs/test-utils" }
|
||||
|
||||
|
||||
[package]
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
@@ -157,7 +57,8 @@ cgroups.workspace = true
|
||||
# Tracing
|
||||
tracing.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
# TODO: bump tracing-opentelemetry to sync with version in workspace
|
||||
tracing-opentelemetry = "0.17.0"
|
||||
opentelemetry.workspace = true
|
||||
|
||||
# Configuration
|
||||
@@ -195,7 +96,6 @@ pv_core = { git = "https://github.com/ibm-s390-linux/s390-tools", rev = "4942504
|
||||
tempfile.workspace = true
|
||||
which.workspace = true
|
||||
rstest.workspace = true
|
||||
async-std.workspace = true
|
||||
|
||||
test-utils.workspace = true
|
||||
|
||||
@@ -207,7 +107,3 @@ seccomp = ["rustjail/seccomp"]
|
||||
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
|
||||
agent-policy = ["kata-agent-policy"]
|
||||
init-data = []
|
||||
|
||||
[[bin]]
|
||||
name = "kata-agent"
|
||||
path = "src/main.rs"
|
||||
|
||||
@@ -63,7 +63,7 @@ ifneq ($(EXTRA_RUSTFEATURES),)
|
||||
override EXTRA_RUSTFEATURES := --features "$(EXTRA_RUSTFEATURES)"
|
||||
endif
|
||||
|
||||
TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
|
||||
TARGET_PATH = ../../target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
|
||||
|
||||
##VAR DESTDIR=<path> is a directory prepended to each installed target file
|
||||
DESTDIR ?=
|
||||
@@ -153,7 +153,7 @@ vendor:
|
||||
|
||||
#TARGET test: run cargo tests
|
||||
test: $(GENERATED_FILES)
|
||||
@RUST_LIB_BACKTRACE=0 RUST_BACKTRACE=1 cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
@RUST_LIB_BACKTRACE=0 RUST_BACKTRACE=1 cargo test -p kata-agent --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
|
||||
##TARGET check: run test
|
||||
check: $(GENERATED_FILES) standard_rust_check
|
||||
|
||||
@@ -89,7 +89,7 @@ pub fn baremount(
|
||||
let destination_str = destination.to_string_lossy();
|
||||
if let Ok(m) = get_linux_mount_info(destination_str.deref()) {
|
||||
if m.fs_type == fs_type && !flags.contains(MsFlags::MS_REMOUNT) {
|
||||
slog_info!(logger, "{source:?} is already mounted at {destination:?}");
|
||||
slog::info!(logger, "{source:?} is already mounted at {destination:?}");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2322,6 +2322,7 @@ async fn cdh_handler_trusted_storage(oci: &mut Spec) -> Result<()> {
|
||||
&dev_major_minor,
|
||||
"luks2",
|
||||
KATA_IMAGE_WORK_DIR,
|
||||
"-E lazy_journal_init",
|
||||
)
|
||||
.await?;
|
||||
break;
|
||||
@@ -2336,6 +2337,7 @@ pub(crate) async fn cdh_secure_mount(
|
||||
device_id: &str,
|
||||
encrypt_type: &str,
|
||||
mount_point: &str,
|
||||
mkfs_opts: &str,
|
||||
) -> Result<()> {
|
||||
if !confidential_data_hub::is_cdh_client_initialized() {
|
||||
return Ok(());
|
||||
@@ -2345,11 +2347,12 @@ pub(crate) async fn cdh_secure_mount(
|
||||
|
||||
info!(
|
||||
sl(),
|
||||
"cdh_secure_mount: device_type {}, device_id {}, encrypt_type {}, integrity {}",
|
||||
"cdh_secure_mount: device_type {}, device_id {}, encrypt_type {}, integrity {}, mkfs_opts {}",
|
||||
device_type,
|
||||
device_id,
|
||||
encrypt_type,
|
||||
integrity
|
||||
integrity,
|
||||
mkfs_opts
|
||||
);
|
||||
|
||||
let options = std::collections::HashMap::from([
|
||||
@@ -2357,7 +2360,7 @@ pub(crate) async fn cdh_secure_mount(
|
||||
("sourceType".to_string(), "empty".to_string()),
|
||||
("targetType".to_string(), "fileSystem".to_string()),
|
||||
("filesystemType".to_string(), "ext4".to_string()),
|
||||
("mkfsOpts".to_string(), "-E lazy_journal_init".to_string()),
|
||||
("mkfsOpts".to_string(), mkfs_opts.to_string()),
|
||||
("encryptionType".to_string(), encrypt_type.to_string()),
|
||||
("dataIntegrity".to_string(), integrity),
|
||||
]);
|
||||
|
||||
@@ -59,8 +59,14 @@ async fn handle_block_storage(
|
||||
.contains(&"encryption_key=ephemeral".to_string());
|
||||
|
||||
if has_ephemeral_encryption {
|
||||
crate::rpc::cdh_secure_mount("block-device", dev_num, "luks2", &storage.mount_point)
|
||||
.await?;
|
||||
crate::rpc::cdh_secure_mount(
|
||||
"block-device",
|
||||
dev_num,
|
||||
"luks2",
|
||||
&storage.mount_point,
|
||||
"-O ^has_journal -m 0 -i 163840 -I 128",
|
||||
)
|
||||
.await?;
|
||||
set_ownership(logger, storage)?;
|
||||
new_device(storage.mount_point.clone())
|
||||
} else {
|
||||
|
||||
@@ -5,7 +5,8 @@
|
||||
|
||||
use anyhow::Result;
|
||||
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||
use opentelemetry::{global, sdk::trace::Config, trace::TracerProvider};
|
||||
use opentelemetry::trace::TracerProvider;
|
||||
use opentelemetry::{global, sdk::trace::Config};
|
||||
use slog::{info, o, Logger};
|
||||
use std::collections::HashMap;
|
||||
use tracing_opentelemetry::OpenTelemetryLayer;
|
||||
@@ -23,15 +24,12 @@ pub fn setup_tracing(name: &'static str, logger: &Logger) -> Result<()> {
|
||||
let config = Config::default();
|
||||
|
||||
let builder = opentelemetry::sdk::trace::TracerProvider::builder()
|
||||
.with_batch_exporter(exporter, opentelemetry::runtime::TokioCurrentThread)
|
||||
.with_batch_exporter(exporter, opentelemetry::runtime::Tokio)
|
||||
.with_config(config);
|
||||
|
||||
let provider = builder.build();
|
||||
|
||||
// We don't need a versioned tracer.
|
||||
let version = None;
|
||||
|
||||
let tracer = provider.get_tracer(name, version);
|
||||
let tracer = provider.tracer(name);
|
||||
|
||||
let _global_provider = global::set_tracer_provider(provider);
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ libc.workspace = true
|
||||
thiserror.workspace = true
|
||||
opentelemetry = { workspace = true, features = ["serialize"] }
|
||||
tokio-vsock.workspace = true
|
||||
bincode = "1.3.3"
|
||||
serde_json = "1.0"
|
||||
byteorder = "1.4.3"
|
||||
slog = { workspace = true, features = [
|
||||
"dynamic-keys",
|
||||
|
||||
@@ -58,7 +58,7 @@ pub enum Error {
|
||||
#[error("connection error: {0}")]
|
||||
ConnectionError(String),
|
||||
#[error("serialisation error: {0}")]
|
||||
SerialisationError(#[from] bincode::Error),
|
||||
SerialisationError(#[from] serde_json::Error),
|
||||
#[error("I/O error: {0}")]
|
||||
IOError(#[from] std::io::Error),
|
||||
}
|
||||
@@ -81,8 +81,7 @@ async fn write_span(
|
||||
let mut writer = writer.lock().await;
|
||||
|
||||
let encoded_payload: Vec<u8> =
|
||||
bincode::serialize(&span).map_err(|e| make_io_error(e.to_string()))?;
|
||||
|
||||
serde_json::to_vec(span).map_err(|e| make_io_error(e.to_string()))?;
|
||||
let payload_len: u64 = encoded_payload.len() as u64;
|
||||
|
||||
let mut payload_len_as_bytes: [u8; HEADER_SIZE_BYTES as usize] =
|
||||
|
||||
@@ -50,6 +50,7 @@ vm-memory = { workspace = true, features = ["backend-mmap"] }
|
||||
crossbeam-channel = "0.5.6"
|
||||
vfio-bindings = { workspace = true, optional = true }
|
||||
vfio-ioctls = { workspace = true, optional = true }
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
|
||||
[dev-dependencies]
|
||||
slog-async = "2.7.0"
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use std::io::{Read, Write};
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
use vm_memory::bitmap::{Bitmap, BS};
|
||||
use vm_memory::guest_memory::GuestMemoryIterator;
|
||||
use vm_memory::mmap::{Error, NewBitmap};
|
||||
use vm_memory::mmap::NewBitmap;
|
||||
use vm_memory::{
|
||||
guest_memory, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion,
|
||||
GuestRegionMmap, GuestUsize, MemoryRegionAddress, VolatileSlice,
|
||||
GuestRegionCollectionError, GuestRegionMmap, GuestUsize, MemoryRegionAddress, ReadVolatile,
|
||||
VolatileSlice, WriteVolatile,
|
||||
};
|
||||
|
||||
use crate::GuestRegionRaw;
|
||||
@@ -67,63 +66,63 @@ impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionHybrid<B> {
|
||||
}
|
||||
}
|
||||
|
||||
fn read_from<F>(
|
||||
fn read_volatile_from<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
src: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<usize>
|
||||
where
|
||||
F: Read,
|
||||
F: ReadVolatile,
|
||||
{
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.read_from(addr, src, count),
|
||||
GuestRegionHybrid::Raw(region) => region.read_from(addr, src, count),
|
||||
GuestRegionHybrid::Mmap(region) => region.read_volatile_from(addr, src, count),
|
||||
GuestRegionHybrid::Raw(region) => region.read_volatile_from(addr, src, count),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_exact_from<F>(
|
||||
fn read_exact_volatile_from<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
src: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<()>
|
||||
where
|
||||
F: Read,
|
||||
F: ReadVolatile,
|
||||
{
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.read_exact_from(addr, src, count),
|
||||
GuestRegionHybrid::Raw(region) => region.read_exact_from(addr, src, count),
|
||||
GuestRegionHybrid::Mmap(region) => region.read_exact_volatile_from(addr, src, count),
|
||||
GuestRegionHybrid::Raw(region) => region.read_exact_volatile_from(addr, src, count),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_to<F>(
|
||||
fn write_volatile_to<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
dst: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<usize>
|
||||
where
|
||||
F: Write,
|
||||
F: WriteVolatile,
|
||||
{
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.write_to(addr, dst, count),
|
||||
GuestRegionHybrid::Raw(region) => region.write_to(addr, dst, count),
|
||||
GuestRegionHybrid::Mmap(region) => region.write_volatile_to(addr, dst, count),
|
||||
GuestRegionHybrid::Raw(region) => region.write_volatile_to(addr, dst, count),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_all_to<F>(
|
||||
fn write_all_volatile_to<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
dst: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<()>
|
||||
where
|
||||
F: Write,
|
||||
F: WriteVolatile,
|
||||
{
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.write_all_to(addr, dst, count),
|
||||
GuestRegionHybrid::Raw(region) => region.write_all_to(addr, dst, count),
|
||||
GuestRegionHybrid::Mmap(region) => region.write_all_volatile_to(addr, dst, count),
|
||||
GuestRegionHybrid::Raw(region) => region.write_all_volatile_to(addr, dst, count),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -168,7 +167,7 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionHybrid<B> {
|
||||
}
|
||||
}
|
||||
|
||||
fn bitmap(&self) -> &Self::B {
|
||||
fn bitmap(&self) -> BS<'_, Self::B> {
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.bitmap(),
|
||||
GuestRegionHybrid::Raw(region) => region.bitmap(),
|
||||
@@ -189,20 +188,6 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionHybrid<B> {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn as_slice(&self) -> Option<&[u8]> {
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.as_slice(),
|
||||
GuestRegionHybrid::Raw(region) => region.as_slice(),
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => region.as_mut_slice(),
|
||||
GuestRegionHybrid::Raw(region) => region.as_mut_slice(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_slice(
|
||||
&self,
|
||||
offset: MemoryRegionAddress,
|
||||
@@ -223,6 +208,39 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionHybrid<B> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: Bitmap> GuestRegionHybrid<B> {
|
||||
/// Returns a slice corresponding to the region.
|
||||
///
|
||||
/// # Safety
|
||||
/// This is safe because we mapped the area at addr ourselves, so this slice will not
|
||||
/// overflow. However, it is possible to alias.
|
||||
pub unsafe fn as_slice(&self) -> Option<&[u8]> {
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => {
|
||||
let addr = region.get_host_address(MemoryRegionAddress(0)).ok()?;
|
||||
Some(std::slice::from_raw_parts(addr, region.len() as usize))
|
||||
}
|
||||
GuestRegionHybrid::Raw(region) => region.as_slice(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a mutable slice corresponding to the region.
|
||||
///
|
||||
/// # Safety
|
||||
/// This is safe because we mapped the area at addr ourselves, so this slice will not
|
||||
/// overflow. However, it is possible to alias.
|
||||
#[allow(clippy::mut_from_ref)]
|
||||
pub unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
|
||||
match self {
|
||||
GuestRegionHybrid::Mmap(region) => {
|
||||
let addr = region.get_host_address(MemoryRegionAddress(0)).ok()?;
|
||||
Some(std::slice::from_raw_parts_mut(addr, region.len() as usize))
|
||||
}
|
||||
GuestRegionHybrid::Raw(region) => region.as_mut_slice(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// [`GuestMemory`](trait.GuestMemory.html) implementation that manage hybrid types of guest memory
|
||||
/// regions.
|
||||
///
|
||||
@@ -248,7 +266,9 @@ impl<B: Bitmap> GuestMemoryHybrid<B> {
|
||||
/// * `regions` - The vector of regions.
|
||||
/// The regions shouldn't overlap and they should be sorted
|
||||
/// by the starting address.
|
||||
pub fn from_regions(mut regions: Vec<GuestRegionHybrid<B>>) -> Result<Self, Error> {
|
||||
pub fn from_regions(
|
||||
mut regions: Vec<GuestRegionHybrid<B>>,
|
||||
) -> Result<Self, GuestRegionCollectionError> {
|
||||
Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
|
||||
}
|
||||
|
||||
@@ -264,9 +284,11 @@ impl<B: Bitmap> GuestMemoryHybrid<B> {
|
||||
/// * `regions` - The vector of `Arc` regions.
|
||||
/// The regions shouldn't overlap and they should be sorted
|
||||
/// by the starting address.
|
||||
pub fn from_arc_regions(regions: Vec<Arc<GuestRegionHybrid<B>>>) -> Result<Self, Error> {
|
||||
pub fn from_arc_regions(
|
||||
regions: Vec<Arc<GuestRegionHybrid<B>>>,
|
||||
) -> Result<Self, GuestRegionCollectionError> {
|
||||
if regions.is_empty() {
|
||||
return Err(Error::NoMemoryRegion);
|
||||
return Err(GuestRegionCollectionError::NoMemoryRegion);
|
||||
}
|
||||
|
||||
for window in regions.windows(2) {
|
||||
@@ -274,11 +296,11 @@ impl<B: Bitmap> GuestMemoryHybrid<B> {
|
||||
let next = &window[1];
|
||||
|
||||
if prev.start_addr() > next.start_addr() {
|
||||
return Err(Error::UnsortedMemoryRegions);
|
||||
return Err(GuestRegionCollectionError::UnsortedMemoryRegions);
|
||||
}
|
||||
|
||||
if prev.last_addr() >= next.start_addr() {
|
||||
return Err(Error::MemoryRegionOverlap);
|
||||
return Err(GuestRegionCollectionError::MemoryRegionOverlap);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -292,7 +314,7 @@ impl<B: Bitmap> GuestMemoryHybrid<B> {
|
||||
pub fn insert_region(
|
||||
&self,
|
||||
region: Arc<GuestRegionHybrid<B>>,
|
||||
) -> Result<GuestMemoryHybrid<B>, Error> {
|
||||
) -> Result<GuestMemoryHybrid<B>, GuestRegionCollectionError> {
|
||||
let mut regions = self.regions.clone();
|
||||
regions.push(region);
|
||||
regions.sort_by_key(|x| x.start_addr());
|
||||
@@ -310,7 +332,7 @@ impl<B: Bitmap> GuestMemoryHybrid<B> {
|
||||
&self,
|
||||
base: GuestAddress,
|
||||
size: GuestUsize,
|
||||
) -> Result<(GuestMemoryHybrid<B>, Arc<GuestRegionHybrid<B>>), Error> {
|
||||
) -> Result<(GuestMemoryHybrid<B>, Arc<GuestRegionHybrid<B>>), GuestRegionCollectionError> {
|
||||
if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
|
||||
if self.regions.get(region_index).unwrap().len() as GuestUsize == size {
|
||||
let mut regions = self.regions.clone();
|
||||
@@ -319,32 +341,13 @@ impl<B: Bitmap> GuestMemoryHybrid<B> {
|
||||
}
|
||||
}
|
||||
|
||||
Err(Error::InvalidGuestRegion)
|
||||
Err(GuestRegionCollectionError::NoMemoryRegion)
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over the elements of `GuestMemoryHybrid`.
|
||||
///
|
||||
/// This struct is created by `GuestMemory::iter()`. See its documentation for more.
|
||||
pub struct Iter<'a, B>(std::slice::Iter<'a, Arc<GuestRegionHybrid<B>>>);
|
||||
|
||||
impl<'a, B> Iterator for Iter<'a, B> {
|
||||
type Item = &'a GuestRegionHybrid<B>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(AsRef::as_ref)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionHybrid<B>> for GuestMemoryHybrid<B> {
|
||||
type Iter = Iter<'a, B>;
|
||||
}
|
||||
|
||||
impl<B: Bitmap + 'static> GuestMemory for GuestMemoryHybrid<B> {
|
||||
type R = GuestRegionHybrid<B>;
|
||||
|
||||
type I = Self;
|
||||
|
||||
fn num_regions(&self) -> usize {
|
||||
self.regions.len()
|
||||
}
|
||||
@@ -359,15 +362,15 @@ impl<B: Bitmap + 'static> GuestMemory for GuestMemoryHybrid<B> {
|
||||
index.map(|x| self.regions[x].as_ref())
|
||||
}
|
||||
|
||||
fn iter(&self) -> Iter<'_, B> {
|
||||
Iter(self.regions.iter())
|
||||
fn iter(&self) -> impl Iterator<Item = &GuestRegionHybrid<B>> {
|
||||
self.regions.iter().map(AsRef::as_ref)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::io::Seek;
|
||||
use std::io::{Read, Seek, Write};
|
||||
use vm_memory::{GuestMemoryError, MmapRegion};
|
||||
use vmm_sys_util::tempfile::TempFile;
|
||||
|
||||
@@ -654,14 +657,14 @@ mod tests {
|
||||
// Rewind file pointer after write operation.
|
||||
file_to_write_mmap_region.rewind().unwrap();
|
||||
guest_region
|
||||
.read_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.read_volatile_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.unwrap();
|
||||
let mut file_read_from_mmap_region = TempFile::new().unwrap().into_file();
|
||||
file_read_from_mmap_region
|
||||
.set_len(size_of_file as u64)
|
||||
.unwrap();
|
||||
guest_region
|
||||
.write_all_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.write_all_volatile_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.unwrap();
|
||||
// Rewind file pointer after write operation.
|
||||
file_read_from_mmap_region.rewind().unwrap();
|
||||
@@ -679,7 +682,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_region
|
||||
.read_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.read_volatile_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -689,7 +692,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_region
|
||||
.write_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.write_volatile_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -719,14 +722,14 @@ mod tests {
|
||||
// Rewind file pointer after write operation.
|
||||
file_to_write_mmap_region.rewind().unwrap();
|
||||
guest_region
|
||||
.read_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.read_volatile_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.unwrap();
|
||||
let mut file_read_from_mmap_region = TempFile::new().unwrap().into_file();
|
||||
file_read_from_mmap_region
|
||||
.set_len(size_of_file as u64)
|
||||
.unwrap();
|
||||
guest_region
|
||||
.write_all_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.write_all_volatile_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.unwrap();
|
||||
// Rewind file pointer after write operation.
|
||||
file_read_from_mmap_region.rewind().unwrap();
|
||||
@@ -744,7 +747,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_region
|
||||
.read_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.read_volatile_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -754,7 +757,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_region
|
||||
.write_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.write_volatile_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -788,14 +791,14 @@ mod tests {
|
||||
.unwrap();
|
||||
file_to_write_mmap_region.rewind().unwrap();
|
||||
guest_mmap_region
|
||||
.read_exact_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.read_exact_volatile_from(write_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.unwrap();
|
||||
let mut file_read_from_mmap_region = TempFile::new().unwrap().into_file();
|
||||
file_read_from_mmap_region
|
||||
.set_len(size_of_file as u64)
|
||||
.unwrap();
|
||||
guest_mmap_region
|
||||
.write_all_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.write_all_volatile_to(write_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.unwrap();
|
||||
file_read_from_mmap_region.rewind().unwrap();
|
||||
let mut content = String::new();
|
||||
@@ -818,14 +821,14 @@ mod tests {
|
||||
.unwrap();
|
||||
file_to_write_raw_region.rewind().unwrap();
|
||||
guest_raw_region
|
||||
.read_exact_from(write_addr, &mut file_to_write_raw_region, size_of_file)
|
||||
.read_exact_volatile_from(write_addr, &mut file_to_write_raw_region, size_of_file)
|
||||
.unwrap();
|
||||
let mut file_read_from_raw_region = TempFile::new().unwrap().into_file();
|
||||
file_read_from_raw_region
|
||||
.set_len(size_of_file as u64)
|
||||
.unwrap();
|
||||
guest_raw_region
|
||||
.write_all_to(write_addr, &mut file_read_from_raw_region, size_of_file)
|
||||
.write_all_volatile_to(write_addr, &mut file_read_from_raw_region, size_of_file)
|
||||
.unwrap();
|
||||
file_read_from_raw_region.rewind().unwrap();
|
||||
let mut content = String::new();
|
||||
@@ -842,7 +845,11 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_mmap_region
|
||||
.read_exact_from(invalid_addr, &mut file_to_write_mmap_region, size_of_file)
|
||||
.read_exact_volatile_from(
|
||||
invalid_addr,
|
||||
&mut file_to_write_mmap_region,
|
||||
size_of_file
|
||||
)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -852,7 +859,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_mmap_region
|
||||
.write_all_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.write_all_volatile_to(invalid_addr, &mut file_read_from_mmap_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -862,7 +869,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_raw_region
|
||||
.read_exact_from(invalid_addr, &mut file_to_write_raw_region, size_of_file)
|
||||
.read_exact_volatile_from(invalid_addr, &mut file_to_write_raw_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -872,7 +879,7 @@ mod tests {
|
||||
let invalid_addr = MemoryRegionAddress(0x900);
|
||||
assert!(matches!(
|
||||
guest_raw_region
|
||||
.write_all_to(invalid_addr, &mut file_read_from_raw_region, size_of_file)
|
||||
.write_all_volatile_to(invalid_addr, &mut file_read_from_raw_region, size_of_file)
|
||||
.err()
|
||||
.unwrap(),
|
||||
GuestMemoryError::InvalidBackendAddress
|
||||
@@ -1076,13 +1083,16 @@ mod tests {
|
||||
let guest_region = GuestMemoryHybrid::<()>::from_regions(regions);
|
||||
assert!(matches!(
|
||||
guest_region.err().unwrap(),
|
||||
Error::UnsortedMemoryRegions
|
||||
GuestRegionCollectionError::UnsortedMemoryRegions
|
||||
));
|
||||
|
||||
// Error no memory region case.
|
||||
let regions = Vec::<GuestRegionHybrid<()>>::new();
|
||||
let guest_region = GuestMemoryHybrid::<()>::from_regions(regions);
|
||||
assert!(matches!(guest_region.err().unwrap(), Error::NoMemoryRegion));
|
||||
assert!(matches!(
|
||||
guest_region.err().unwrap(),
|
||||
GuestRegionCollectionError::NoMemoryRegion
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use std::io::{Read, Write};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use vm_memory::bitmap::{Bitmap, BS};
|
||||
@@ -9,7 +8,7 @@ use vm_memory::mmap::NewBitmap;
|
||||
use vm_memory::volatile_memory::compute_offset;
|
||||
use vm_memory::{
|
||||
guest_memory, volatile_memory, Address, AtomicAccess, Bytes, FileOffset, GuestAddress,
|
||||
GuestMemoryRegion, GuestUsize, MemoryRegionAddress, VolatileSlice,
|
||||
GuestMemoryRegion, GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile,
|
||||
};
|
||||
|
||||
/// Guest memory region for virtio-fs DAX window.
|
||||
@@ -73,67 +72,67 @@ impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionRaw<B> {
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn read_from<F>(
|
||||
fn read_volatile_from<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
src: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<usize>
|
||||
where
|
||||
F: Read,
|
||||
F: ReadVolatile,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.read_from::<F>(maddr, src, count)
|
||||
.read_volatile_from::<F>(maddr, src, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn read_exact_from<F>(
|
||||
fn read_exact_volatile_from<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
src: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<()>
|
||||
where
|
||||
F: Read,
|
||||
F: ReadVolatile,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.read_exact_from::<F>(maddr, src, count)
|
||||
.read_exact_volatile_from::<F>(maddr, src, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn write_to<F>(
|
||||
fn write_volatile_to<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
dst: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<usize>
|
||||
where
|
||||
F: Write,
|
||||
F: WriteVolatile,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.write_to::<F>(maddr, dst, count)
|
||||
.write_volatile_to::<F>(maddr, dst, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn write_all_to<F>(
|
||||
fn write_all_volatile_to<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
dst: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<()>
|
||||
where
|
||||
F: Write,
|
||||
F: WriteVolatile,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.write_all_to::<F>(maddr, dst, count)
|
||||
.write_all_volatile_to::<F>(maddr, dst, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
@@ -170,8 +169,8 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
|
||||
self.guest_base
|
||||
}
|
||||
|
||||
fn bitmap(&self) -> &Self::B {
|
||||
&self.bitmap
|
||||
fn bitmap(&self) -> BS<'_, Self::B> {
|
||||
self.bitmap.slice_at(0)
|
||||
}
|
||||
|
||||
fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
|
||||
@@ -186,18 +185,6 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
|
||||
None
|
||||
}
|
||||
|
||||
unsafe fn as_slice(&self) -> Option<&[u8]> {
|
||||
// This is safe because we mapped the area at addr ourselves, so this slice will not
|
||||
// overflow. However, it is possible to alias.
|
||||
Some(std::slice::from_raw_parts(self.addr, self.size))
|
||||
}
|
||||
|
||||
unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
|
||||
// This is safe because we mapped the area at addr ourselves, so this slice will not
|
||||
// overflow. However, it is possible to alias.
|
||||
Some(std::slice::from_raw_parts_mut(self.addr, self.size))
|
||||
}
|
||||
|
||||
fn get_slice(
|
||||
&self,
|
||||
offset: MemoryRegionAddress,
|
||||
@@ -216,6 +203,7 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
|
||||
(self.addr as usize + offset) as *mut _,
|
||||
count,
|
||||
self.bitmap.slice_at(offset),
|
||||
None,
|
||||
)
|
||||
})
|
||||
}
|
||||
@@ -226,6 +214,27 @@ impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: Bitmap> GuestRegionRaw<B> {
|
||||
/// Returns a slice corresponding to the region.
|
||||
///
|
||||
/// # Safety
|
||||
/// This is safe because we mapped the area at addr ourselves, so this slice will not
|
||||
/// overflow. However, it is possible to alias.
|
||||
pub unsafe fn as_slice(&self) -> Option<&[u8]> {
|
||||
Some(std::slice::from_raw_parts(self.addr, self.size))
|
||||
}
|
||||
|
||||
/// Returns a mutable slice corresponding to the region.
|
||||
///
|
||||
/// # Safety
|
||||
/// This is safe because we mapped the area at addr ourselves, so this slice will not
|
||||
/// overflow. However, it is possible to alias.
|
||||
#[allow(clippy::mut_from_ref)]
|
||||
pub unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
|
||||
Some(std::slice::from_raw_parts_mut(self.addr, self.size))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
extern crate vmm_sys_util;
|
||||
@@ -348,7 +357,7 @@ mod tests {
|
||||
unsafe { GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), &mut buf as *mut _, 1024) };
|
||||
|
||||
let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
|
||||
assert_eq!(s.as_ptr(), &mut buf[2] as *mut _);
|
||||
assert_eq!(s.ptr_guard().as_ptr(), &buf[2] as *const _);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -600,7 +609,7 @@ mod tests {
|
||||
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
|
||||
};
|
||||
gm.write_obj(!0u32, addr).unwrap();
|
||||
gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
|
||||
gm.read_exact_volatile_from(addr, &mut file, mem::size_of::<u32>())
|
||||
.unwrap();
|
||||
let value: u32 = gm.read_obj(addr).unwrap();
|
||||
if cfg!(unix) {
|
||||
@@ -610,7 +619,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let mut sink = Vec::new();
|
||||
gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
|
||||
gm.write_all_volatile_to(addr, &mut sink, mem::size_of::<u32>())
|
||||
.unwrap();
|
||||
if cfg!(unix) {
|
||||
assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
|
||||
|
||||
@@ -113,20 +113,23 @@ arm64_sys_reg!(MPIDR_EL1, 3, 0, 0, 0, 5);
|
||||
/// * `mem` - Reserved DRAM for current VM.
|
||||
pub fn setup_regs(vcpu: &VcpuFd, cpu_id: u8, boot_ip: u64, fdt_address: u64) -> Result<()> {
|
||||
// Get the register index of the PSTATE (Processor State) register.
|
||||
vcpu.set_one_reg(arm64_core_reg!(pstate), PSTATE_FAULT_BITS_64 as u128)
|
||||
.map_err(Error::SetCoreRegister)?;
|
||||
vcpu.set_one_reg(
|
||||
arm64_core_reg!(pstate),
|
||||
&(PSTATE_FAULT_BITS_64 as u128).to_le_bytes(),
|
||||
)
|
||||
.map_err(Error::SetCoreRegister)?;
|
||||
|
||||
// Other vCPUs are powered off initially awaiting PSCI wakeup.
|
||||
if cpu_id == 0 {
|
||||
// Setting the PC (Processor Counter) to the current program address (kernel address).
|
||||
vcpu.set_one_reg(arm64_core_reg!(pc), boot_ip as u128)
|
||||
vcpu.set_one_reg(arm64_core_reg!(pc), &(boot_ip as u128).to_le_bytes())
|
||||
.map_err(Error::SetCoreRegister)?;
|
||||
|
||||
// Last mandatory thing to set -> the address pointing to the FDT (also called DTB).
|
||||
// "The device tree blob (dtb) must be placed on an 8-byte boundary and must
|
||||
// not exceed 2 megabytes in size." -> https://www.kernel.org/doc/Documentation/arm64/booting.txt.
|
||||
// We are choosing to place it the end of DRAM. See `get_fdt_addr`.
|
||||
vcpu.set_one_reg(arm64_core_reg!(regs), fdt_address as u128)
|
||||
vcpu.set_one_reg(arm64_core_reg!(regs), &(fdt_address as u128).to_le_bytes())
|
||||
.map_err(Error::SetCoreRegister)?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -157,9 +160,10 @@ pub fn is_system_register(regid: u64) -> bool {
|
||||
///
|
||||
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
|
||||
pub fn read_mpidr(vcpu: &VcpuFd) -> Result<u64> {
|
||||
vcpu.get_one_reg(MPIDR_EL1)
|
||||
.map(|value| value as u64)
|
||||
.map_err(Error::GetSysRegister)
|
||||
let mut reg_data = 0u128.to_le_bytes();
|
||||
vcpu.get_one_reg(MPIDR_EL1, &mut reg_data)
|
||||
.map_err(Error::GetSysRegister)?;
|
||||
Ok(u128::from_le_bytes(reg_data) as u64)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
|
||||
use libc::c_char;
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::mem;
|
||||
use std::result;
|
||||
use std::slice;
|
||||
@@ -205,7 +204,7 @@ pub fn setup_mptable<M: GuestMemory>(
|
||||
return Err(Error::AddressOverflow);
|
||||
}
|
||||
|
||||
mem.read_from(base_mp, &mut io::repeat(0), mp_size)
|
||||
mem.write_slice(&vec![0u8; mp_size], base_mp)
|
||||
.map_err(|_| Error::Clear)?;
|
||||
|
||||
{
|
||||
@@ -452,23 +451,11 @@ mod tests {
|
||||
let mpc_offset = GuestAddress(u64::from(mpf_intel.0.physptr));
|
||||
let mpc_table: MpcTableWrapper = mem.read_obj(mpc_offset).unwrap();
|
||||
|
||||
struct Sum(u8);
|
||||
impl io::Write for Sum {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
for v in buf.iter() {
|
||||
self.0 = self.0.wrapping_add(*v);
|
||||
}
|
||||
Ok(buf.len())
|
||||
}
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
let mut sum = Sum(0);
|
||||
mem.write_to(mpc_offset, &mut sum, mpc_table.0.length as usize)
|
||||
let mut buf = Vec::new();
|
||||
mem.write_volatile_to(mpc_offset, &mut buf, mpc_table.0.length as usize)
|
||||
.unwrap();
|
||||
assert_eq!(sum.0, 0);
|
||||
let sum: u8 = buf.iter().fold(0u8, |acc, &v| acc.wrapping_add(v));
|
||||
assert_eq!(sum, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -25,7 +25,7 @@ use std::collections::HashMap;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use kvm_bindings::{kvm_irq_routing, kvm_irq_routing_entry};
|
||||
use kvm_bindings::{kvm_irq_routing_entry, KvmIrqRouting as KvmIrqRoutingWrapper};
|
||||
use kvm_ioctls::VmFd;
|
||||
|
||||
use super::*;
|
||||
@@ -196,26 +196,18 @@ impl KvmIrqRouting {
|
||||
}
|
||||
|
||||
fn set_routing(&self, routes: &HashMap<u64, kvm_irq_routing_entry>) -> Result<()> {
|
||||
// Allocate enough buffer memory.
|
||||
let elem_sz = std::mem::size_of::<kvm_irq_routing>();
|
||||
let total_sz = std::mem::size_of::<kvm_irq_routing_entry>() * routes.len() + elem_sz;
|
||||
let elem_cnt = total_sz.div_ceil(elem_sz);
|
||||
let mut irq_routings = Vec::<kvm_irq_routing>::with_capacity(elem_cnt);
|
||||
irq_routings.resize_with(elem_cnt, Default::default);
|
||||
let mut irq_routing = KvmIrqRoutingWrapper::new(routes.len())
|
||||
.map_err(|_| Error::other("Failed to create KvmIrqRouting"))?;
|
||||
|
||||
// Prepare the irq_routing header.
|
||||
let irq_routing = &mut irq_routings[0];
|
||||
irq_routing.nr = routes.len() as u32;
|
||||
irq_routing.flags = 0;
|
||||
|
||||
// Safe because we have just allocated enough memory above.
|
||||
let irq_routing_entries = unsafe { irq_routing.entries.as_mut_slice(routes.len()) };
|
||||
for (idx, entry) in routes.values().enumerate() {
|
||||
irq_routing_entries[idx] = *entry;
|
||||
{
|
||||
let irq_routing_entries = irq_routing.as_mut_slice();
|
||||
for (idx, entry) in routes.values().enumerate() {
|
||||
irq_routing_entries[idx] = *entry;
|
||||
}
|
||||
}
|
||||
|
||||
self.vm_fd
|
||||
.set_gsi_routing(irq_routing)
|
||||
.set_gsi_routing(&irq_routing)
|
||||
.map_err(from_sys_util_errno)?;
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -11,7 +11,7 @@ use kvm_bindings::{CpuId, __IncompleteArrayField, KVMIO};
|
||||
use thiserror::Error;
|
||||
use vmm_sys_util::fam::{FamStruct, FamStructWrapper};
|
||||
use vmm_sys_util::ioctl::ioctl_with_val;
|
||||
use vmm_sys_util::{generate_fam_struct_impl, ioctl_ioc_nr, ioctl_iowr_nr};
|
||||
use vmm_sys_util::{generate_fam_struct_impl, ioctl_iowr_nr};
|
||||
|
||||
/// Tdx capability list.
|
||||
pub type TdxCaps = FamStructWrapper<TdxCapabilities>;
|
||||
|
||||
@@ -13,7 +13,7 @@ use std::os::raw::*;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
|
||||
|
||||
use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ref, ioctl_with_val};
|
||||
use vmm_sys_util::{ioctl_ioc_nr, ioctl_iow_nr};
|
||||
use vmm_sys_util::ioctl_iow_nr;
|
||||
|
||||
use crate::net::net_gen;
|
||||
|
||||
|
||||
@@ -23,15 +23,15 @@ dbs-address-space = { workspace = true }
|
||||
dbs-boot = { workspace = true }
|
||||
epoll = ">=4.3.1, <4.3.2"
|
||||
io-uring = "0.5.2"
|
||||
fuse-backend-rs = { version = "0.10.5", optional = true }
|
||||
fuse-backend-rs = { version = "0.14.0", optional = true }
|
||||
kvm-bindings = { workspace = true }
|
||||
kvm-ioctls = { workspace = true }
|
||||
libc = "0.2.119"
|
||||
log = "0.4.14"
|
||||
nix = "0.24.3"
|
||||
nydus-api = "0.3.1"
|
||||
nydus-rafs = "0.3.2"
|
||||
nydus-storage = "0.6.4"
|
||||
nydus-api = "0.4.1"
|
||||
nydus-rafs = "0.4.1"
|
||||
nydus-storage = "0.7.2"
|
||||
rlimit = "0.7.0"
|
||||
serde = "1.0.27"
|
||||
serde_json = "1.0.9"
|
||||
@@ -42,8 +42,9 @@ virtio-queue = { workspace = true }
|
||||
vmm-sys-util = { workspace = true }
|
||||
vm-memory = { workspace = true, features = ["backend-mmap"] }
|
||||
sendfd = "0.4.3"
|
||||
vhost-rs = { version = "0.6.1", package = "vhost", optional = true }
|
||||
vhost-rs = { version = "0.15.0", package = "vhost", optional = true }
|
||||
timerfd = "1.0"
|
||||
kata-sys-util = { workspace = true}
|
||||
|
||||
[dev-dependencies]
|
||||
vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
|
||||
@@ -63,7 +64,7 @@ virtio-fs-pro = [
|
||||
]
|
||||
virtio-mem = ["virtio-mmio"]
|
||||
virtio-balloon = ["virtio-mmio"]
|
||||
vhost = ["virtio-mmio", "vhost-rs/vhost-user-master", "vhost-rs/vhost-kern"]
|
||||
vhost = ["virtio-mmio", "vhost-rs/vhost-user-frontend", "vhost-rs/vhost-kern"]
|
||||
vhost-net = ["vhost", "vhost-rs/vhost-net"]
|
||||
vhost-user = ["vhost"]
|
||||
vhost-user-fs = ["vhost-user"]
|
||||
|
||||
@@ -34,7 +34,7 @@ use dbs_utils::epoll_manager::{
|
||||
use dbs_utils::metric::{IncMetric, SharedIncMetric, SharedStoreMetric, StoreMetric};
|
||||
use log::{debug, error, info, trace};
|
||||
use serde::Serialize;
|
||||
use virtio_bindings::bindings::virtio_blk::VIRTIO_F_VERSION_1;
|
||||
use virtio_bindings::bindings::virtio_config::VIRTIO_F_VERSION_1;
|
||||
use virtio_queue::{QueueOwnedT, QueueSync, QueueT};
|
||||
use vm_memory::{
|
||||
ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryRegion,
|
||||
|
||||
@@ -20,6 +20,7 @@ use dbs_utils::{
|
||||
};
|
||||
use log::{debug, error, info, warn};
|
||||
use virtio_bindings::bindings::virtio_blk::*;
|
||||
use virtio_bindings::bindings::virtio_config::VIRTIO_F_VERSION_1;
|
||||
use virtio_queue::QueueT;
|
||||
use vm_memory::GuestMemoryRegion;
|
||||
use vmm_sys_util::eventfd::{EventFd, EFD_NONBLOCK};
|
||||
|
||||
@@ -2,13 +2,13 @@
|
||||
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use std::io::{self, Seek, SeekFrom, Write};
|
||||
use std::io::{self, Read, Seek, SeekFrom, Write};
|
||||
use std::ops::Deref;
|
||||
use std::result;
|
||||
|
||||
use log::error;
|
||||
use virtio_bindings::bindings::virtio_blk::*;
|
||||
use virtio_queue::{Descriptor, DescriptorChain};
|
||||
use virtio_queue::{desc::split::Descriptor, DescriptorChain};
|
||||
use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError};
|
||||
|
||||
use crate::{
|
||||
@@ -231,13 +231,19 @@ impl Request {
|
||||
for io in data_descs {
|
||||
match self.request_type {
|
||||
RequestType::In => {
|
||||
mem.read_from(GuestAddress(io.data_addr), disk, io.data_len)
|
||||
let mut buf = vec![0u8; io.data_len];
|
||||
disk.read_exact(&mut buf)
|
||||
.map_err(|e| ExecuteError::Read(GuestMemoryError::IOError(e)))?;
|
||||
mem.write_slice(&buf, GuestAddress(io.data_addr))
|
||||
.map_err(ExecuteError::Read)?;
|
||||
len += io.data_len;
|
||||
}
|
||||
RequestType::Out => {
|
||||
mem.write_to(GuestAddress(io.data_addr), disk, io.data_len)
|
||||
let mut buf = vec![0u8; io.data_len];
|
||||
mem.read_slice(&mut buf, GuestAddress(io.data_addr))
|
||||
.map_err(ExecuteError::Write)?;
|
||||
disk.write_all(&buf)
|
||||
.map_err(|e| ExecuteError::Write(GuestMemoryError::IOError(e)))?;
|
||||
}
|
||||
RequestType::Flush => match disk.flush() {
|
||||
Ok(_) => {}
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
|
||||
|
||||
use kata_sys_util::netns::NetnsGuard;
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::CString;
|
||||
@@ -29,7 +30,7 @@ use nydus_api::ConfigV2;
|
||||
use nydus_rafs::blobfs::{BlobFs, Config as BlobfsConfig};
|
||||
use nydus_rafs::{fs::Rafs, RafsIoRead};
|
||||
use rlimit::Resource;
|
||||
use virtio_bindings::bindings::virtio_blk::VIRTIO_F_VERSION_1;
|
||||
use virtio_bindings::bindings::virtio_config::VIRTIO_F_VERSION_1;
|
||||
use virtio_queue::QueueT;
|
||||
use vm_memory::{
|
||||
FileOffset, GuestAddress, GuestAddressSpace, GuestRegionMmap, GuestUsize, MmapRegion,
|
||||
@@ -233,6 +234,7 @@ impl<AS: GuestAddressSpace> VirtioFs<AS> {
|
||||
CachePolicy::Always => Duration::from_secs(CACHE_ALWAYS_TIMEOUT),
|
||||
CachePolicy::Never => Duration::from_secs(CACHE_NONE_TIMEOUT),
|
||||
CachePolicy::Auto => Duration::from_secs(CACHE_AUTO_TIMEOUT),
|
||||
CachePolicy::Metadata => Duration::from_secs(CACHE_AUTO_TIMEOUT),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -453,6 +455,11 @@ impl<AS: GuestAddressSpace> VirtioFs<AS> {
|
||||
prefetch_list_path: Option<String>,
|
||||
) -> FsResult<()> {
|
||||
debug!("http_server rafs");
|
||||
// We need to make sure the nydus worker thread in the runD main process's network namespace
|
||||
// instead of the vmm thread's netns, which wouldn't access the host network.
|
||||
let _netns_guard =
|
||||
NetnsGuard::new("/proc/self/ns/net").map_err(|e| FsError::BackendFs(e.to_string()))?;
|
||||
|
||||
let file = Path::new(&source);
|
||||
let (mut rafs, rafs_cfg) = match config.as_ref() {
|
||||
Some(cfg) => {
|
||||
@@ -541,7 +548,7 @@ impl<AS: GuestAddressSpace> VirtioFs<AS> {
|
||||
)));
|
||||
}
|
||||
};
|
||||
let any_fs = rootfs.deref().as_any();
|
||||
let any_fs = rootfs.0.deref().as_any();
|
||||
if let Some(fs_swap) = any_fs.downcast_ref::<Rafs>() {
|
||||
let mut file = <dyn RafsIoRead>::from_file(&source)
|
||||
.map_err(|e| FsError::BackendFs(format!("RafsIoRead failed: {e:?}")))?;
|
||||
@@ -611,8 +618,7 @@ impl<AS: GuestAddressSpace> VirtioFs<AS> {
|
||||
};
|
||||
|
||||
let region = Arc::new(
|
||||
GuestRegionMmap::new(mmap_region, GuestAddress(guest_addr))
|
||||
.map_err(Error::InsertMmap)?,
|
||||
GuestRegionMmap::new(mmap_region, GuestAddress(guest_addr)).ok_or(Error::InsertMmap)?,
|
||||
);
|
||||
self.handler.insert_region(region.clone())?;
|
||||
|
||||
|
||||
@@ -245,8 +245,8 @@ pub enum Error {
|
||||
#[error("set user memory region failed: {0}")]
|
||||
SetUserMemoryRegion(kvm_ioctls::Error),
|
||||
/// Inserting mmap region failed.
|
||||
#[error("inserting mmap region failed: {0}")]
|
||||
InsertMmap(vm_memory::mmap::Error),
|
||||
#[error("inserting mmap region failed")]
|
||||
InsertMmap,
|
||||
/// Failed to set madvise on guest memory region.
|
||||
#[error("failed to set madvice() on guest memory region")]
|
||||
Madvise(#[source] nix::Error),
|
||||
|
||||
@@ -30,7 +30,7 @@ use dbs_utils::epoll_manager::{
|
||||
};
|
||||
use kvm_ioctls::VmFd;
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use virtio_bindings::bindings::virtio_blk::VIRTIO_F_VERSION_1;
|
||||
use virtio_bindings::bindings::virtio_config::VIRTIO_F_VERSION_1;
|
||||
use virtio_queue::{DescriptorChain, QueueOwnedT, QueueSync, QueueT};
|
||||
use vm_memory::{
|
||||
ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryError,
|
||||
@@ -1389,7 +1389,7 @@ pub(crate) mod tests {
|
||||
.map_err(Error::NewMmapRegion)?;
|
||||
|
||||
let region =
|
||||
Arc::new(GuestRegionMmap::new(mmap_region, guest_addr).map_err(Error::InsertMmap)?);
|
||||
Arc::new(GuestRegionMmap::new(mmap_region, guest_addr).ok_or(Error::InsertMmap)?);
|
||||
|
||||
Ok(region)
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ use dbs_utils::net::{net_gen, MacAddr, Tap};
|
||||
use dbs_utils::rate_limiter::{BucketUpdate, RateLimiter, TokenType};
|
||||
use libc;
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use virtio_bindings::bindings::virtio_config::VIRTIO_F_VERSION_1;
|
||||
use virtio_bindings::bindings::virtio_net::*;
|
||||
use virtio_queue::{QueueOwnedT, QueueSync, QueueT};
|
||||
use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryRegion, GuestRegionMmap};
|
||||
|
||||
@@ -6,7 +6,7 @@ use log::{debug, error, warn};
|
||||
use virtio_bindings::bindings::virtio_net::{
|
||||
virtio_net_ctrl_hdr, virtio_net_ctrl_mq, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
|
||||
};
|
||||
use virtio_queue::{Descriptor, DescriptorChain};
|
||||
use virtio_queue::{desc::split::Descriptor, DescriptorChain};
|
||||
use vm_memory::{Bytes, GuestMemory};
|
||||
|
||||
use crate::{DbsGuestAddressSpace, Error as VirtioError, Result as VirtioResult};
|
||||
|
||||
@@ -26,6 +26,7 @@ use vhost_rs::vhost_user::message::VhostUserVringAddrFlags;
|
||||
#[cfg(not(test))]
|
||||
use vhost_rs::VhostBackend;
|
||||
use vhost_rs::{VhostUserMemoryRegionInfo, VringConfigData};
|
||||
use virtio_bindings::bindings::virtio_config::{VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_F_VERSION_1};
|
||||
use virtio_bindings::bindings::virtio_net::*;
|
||||
use virtio_bindings::bindings::virtio_ring::*;
|
||||
use virtio_queue::{DescriptorChain, QueueT};
|
||||
|
||||
@@ -25,7 +25,7 @@ use vhost_rs::vhost_user::message::{
|
||||
VhostUserConfigFlags, VhostUserProtocolFeatures, VhostUserVirtioFeatures,
|
||||
VHOST_USER_CONFIG_OFFSET,
|
||||
};
|
||||
use vhost_rs::vhost_user::{Master, VhostUserMaster};
|
||||
use vhost_rs::vhost_user::{Frontend, VhostUserFrontend};
|
||||
use vhost_rs::{Error as VhostError, VhostBackend};
|
||||
use virtio_bindings::bindings::virtio_blk::{VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_SEG_MAX};
|
||||
use virtio_queue::QueueT;
|
||||
@@ -231,7 +231,7 @@ impl VhostUserBlockDevice {
|
||||
|
||||
info!("vhost-user-blk: try to connect to {vhost_socket:?}");
|
||||
// Connect to the vhost-user socket.
|
||||
let mut master = Master::connect(&vhost_socket, 1).map_err(VirtIoError::VhostError)?;
|
||||
let mut master = Frontend::connect(&vhost_socket, 1).map_err(VirtIoError::VhostError)?;
|
||||
|
||||
info!("vhost-user-blk: get features");
|
||||
let avail_features = master.get_features().map_err(VirtIoError::VhostError)?;
|
||||
@@ -290,11 +290,11 @@ impl VhostUserBlockDevice {
|
||||
})
|
||||
}
|
||||
|
||||
fn reconnect_to_server(&mut self) -> VirtIoResult<Master> {
|
||||
fn reconnect_to_server(&mut self) -> VirtIoResult<Frontend> {
|
||||
if !Path::new(self.vhost_socket.as_str()).exists() {
|
||||
return Err(VirtIoError::InternalError);
|
||||
}
|
||||
let master = Master::connect(&self.vhost_socket, 1).map_err(VirtIoError::VhostError)?;
|
||||
let master = Frontend::connect(&self.vhost_socket, 1).map_err(VirtIoError::VhostError)?;
|
||||
|
||||
Ok(master)
|
||||
}
|
||||
@@ -360,7 +360,7 @@ impl VhostUserBlockDevice {
|
||||
if !Path::new(self.vhost_socket.as_str()).exists() {
|
||||
return Err(ActivateError::InternalError);
|
||||
}
|
||||
let master = Master::connect(String::from(self.vhost_socket.as_str()), 1)
|
||||
let master = Frontend::connect(String::from(self.vhost_socket.as_str()), 1)
|
||||
.map_err(VirtIoError::VhostError)?;
|
||||
|
||||
self.endpoint.set_master(master);
|
||||
@@ -388,7 +388,7 @@ impl VhostUserBlockDevice {
|
||||
R: GuestMemoryRegion + Send + Sync + 'static,
|
||||
>(
|
||||
&mut self,
|
||||
master: Master,
|
||||
master: Frontend,
|
||||
config: EndpointParam<AS, Q, R>,
|
||||
ops: &mut EventOps,
|
||||
) -> std::result::Result<(), VirtIoError> {
|
||||
|
||||
@@ -10,10 +10,10 @@ use dbs_utils::epoll_manager::{EventOps, EventSet, Events};
|
||||
use log::*;
|
||||
use vhost_rs::vhost_user::message::{VhostUserProtocolFeatures, VhostUserVringAddrFlags};
|
||||
use vhost_rs::vhost_user::{
|
||||
Error as VhostUserError, Listener as VhostUserListener, Master, VhostUserMaster,
|
||||
Error as VhostUserError, Frontend, Listener as VhostUserListener, VhostUserFrontend,
|
||||
};
|
||||
use vhost_rs::{Error as VhostError, VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
|
||||
use virtio_bindings::bindings::virtio_net::VIRTIO_F_RING_PACKED;
|
||||
use virtio_bindings::bindings::virtio_config::VIRTIO_F_RING_PACKED;
|
||||
use virtio_queue::QueueT;
|
||||
use vm_memory::{
|
||||
Address, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryRegion, MemoryRegionAddress,
|
||||
@@ -50,7 +50,7 @@ impl Listener {
|
||||
}
|
||||
|
||||
// Wait for an incoming connection until success.
|
||||
pub fn accept(&self) -> VirtioResult<(Master, u64)> {
|
||||
pub fn accept(&self) -> VirtioResult<(Frontend, u64)> {
|
||||
loop {
|
||||
match self.try_accept() {
|
||||
Ok(Some((master, mut feature))) => {
|
||||
@@ -65,14 +65,14 @@ impl Listener {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn try_accept(&self) -> VirtioResult<Option<(Master, u64)>> {
|
||||
pub fn try_accept(&self) -> VirtioResult<Option<(Frontend, u64)>> {
|
||||
let sock = match self.listener.accept() {
|
||||
Ok(Some(conn)) => conn,
|
||||
Ok(None) => return Ok(None),
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
let mut master = Master::from_stream(sock, 1);
|
||||
let mut master = Frontend::from_stream(sock, 1);
|
||||
info!("{}: try to get virtio features from slave.", self.name);
|
||||
match Endpoint::initialize(&mut master) {
|
||||
Ok(Some(features)) => Ok(Some((master, features))),
|
||||
@@ -159,8 +159,8 @@ impl<AS: GuestAddressSpace, Q: QueueT, R: GuestMemoryRegion> EndpointParam<'_, A
|
||||
/// Caller needs to ensure mutual exclusive access to the object.
|
||||
pub(super) struct Endpoint {
|
||||
/// Underlying vhost-user communication endpoint.
|
||||
conn: Option<Master>,
|
||||
old: Option<Master>,
|
||||
conn: Option<Frontend>,
|
||||
old: Option<Frontend>,
|
||||
/// Token to register epoll event for the underlying socket.
|
||||
slot: u32,
|
||||
/// Identifier string for logs.
|
||||
@@ -168,7 +168,7 @@ pub(super) struct Endpoint {
|
||||
}
|
||||
|
||||
impl Endpoint {
|
||||
pub fn new(master: Master, slot: u32, name: String) -> Self {
|
||||
pub fn new(master: Frontend, slot: u32, name: String) -> Self {
|
||||
Endpoint {
|
||||
conn: Some(master),
|
||||
old: None,
|
||||
@@ -186,7 +186,7 @@ impl Endpoint {
|
||||
/// * - Ok(Some(avial_features)): virtio features from the slave
|
||||
/// * - Ok(None): underlying communicaiton channel gets broken during negotiation
|
||||
/// * - Err(e): error conditions
|
||||
fn initialize(master: &mut Master) -> VirtioResult<Option<u64>> {
|
||||
fn initialize(master: &mut Frontend) -> VirtioResult<Option<u64>> {
|
||||
// 1. Seems that some vhost-user slaves depend on the get_features request to driver its
|
||||
// internal state machine.
|
||||
// N.B. it's really TDD, we just found it works in this way. Any spec about this?
|
||||
@@ -242,7 +242,7 @@ impl Endpoint {
|
||||
pub fn negotiate<AS: GuestAddressSpace, Q: QueueT, R: GuestMemoryRegion>(
|
||||
&mut self,
|
||||
config: &EndpointParam<AS, Q, R>,
|
||||
mut old: Option<&mut Master>,
|
||||
mut old: Option<&mut Frontend>,
|
||||
) -> VirtioResult<()> {
|
||||
let guard = config.virtio_config.lock_guest_memory();
|
||||
let mem = guard.deref();
|
||||
@@ -286,19 +286,19 @@ impl Endpoint {
|
||||
);
|
||||
|
||||
// Setup slave channel if SLAVE_REQ protocol feature is set
|
||||
if protocol_features.contains(VhostUserProtocolFeatures::SLAVE_REQ) {
|
||||
if protocol_features.contains(VhostUserProtocolFeatures::BACKEND_REQ) {
|
||||
match config.slave_req_fd {
|
||||
Some(fd) => master.set_slave_request_fd(&fd)?,
|
||||
Some(fd) => master.set_backend_request_fd(&fd)?,
|
||||
None => {
|
||||
error!(
|
||||
"{}: Protocol feature SLAVE_REQ is set but not slave channel fd",
|
||||
"{}: Protocol feature BACKEND_REQ is set but not slave channel fd",
|
||||
self.name
|
||||
);
|
||||
return Err(VhostError::VhostUserProtocol(VhostUserError::InvalidParam).into());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("{}: has no SLAVE_REQ protocol feature set", self.name);
|
||||
info!("{}: has no BACKEND_REQ protocol feature set", self.name);
|
||||
}
|
||||
|
||||
// 6. check number of queues supported
|
||||
@@ -454,7 +454,7 @@ impl Endpoint {
|
||||
/// Restore communication with the vhost-user slave on reconnect.
|
||||
pub fn reconnect<AS: GuestAddressSpace, Q: QueueT, R: GuestMemoryRegion>(
|
||||
&mut self,
|
||||
master: Master,
|
||||
master: Frontend,
|
||||
config: &EndpointParam<AS, Q, R>,
|
||||
ops: &mut EventOps,
|
||||
) -> VirtioResult<()> {
|
||||
@@ -515,7 +515,11 @@ impl Endpoint {
|
||||
}
|
||||
|
||||
/// Deregister the underlying socket from the epoll controller.
|
||||
pub fn deregister_epoll_event(&self, master: &Master, ops: &mut EventOps) -> VirtioResult<()> {
|
||||
pub fn deregister_epoll_event(
|
||||
&self,
|
||||
master: &Frontend,
|
||||
ops: &mut EventOps,
|
||||
) -> VirtioResult<()> {
|
||||
info!(
|
||||
"{}: unregister epoll event for fd {}.",
|
||||
self.name,
|
||||
@@ -529,7 +533,7 @@ impl Endpoint {
|
||||
.map_err(VirtioError::EpollMgr)
|
||||
}
|
||||
|
||||
pub fn set_master(&mut self, master: Master) {
|
||||
pub fn set_master(&mut self, master: Frontend) {
|
||||
self.conn = Some(master);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,10 +3,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use std::any::Any;
|
||||
use std::io;
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::Deref;
|
||||
use std::os::fd::AsRawFd;
|
||||
use std::sync::{Arc, Mutex, MutexGuard};
|
||||
|
||||
use dbs_device::resources::{DeviceResources, ResourceConstraint};
|
||||
@@ -15,18 +12,15 @@ use dbs_utils::epoll_manager::{
|
||||
};
|
||||
use kvm_bindings::kvm_userspace_memory_region;
|
||||
use kvm_ioctls::VmFd;
|
||||
use libc::{c_void, off64_t, pread64, pwrite64};
|
||||
use log::*;
|
||||
use vhost_rs::vhost_user::message::{
|
||||
VhostUserFSSlaveMsg, VhostUserFSSlaveMsgFlags, VhostUserProtocolFeatures,
|
||||
VhostUserVirtioFeatures, VHOST_USER_FS_SLAVE_ENTRIES,
|
||||
use vhost_rs::vhost_user::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures};
|
||||
use vhost_rs::vhost_user::{
|
||||
Frontend, FrontendReqHandler, HandlerResult, VhostUserFrontendReqHandler,
|
||||
};
|
||||
use vhost_rs::vhost_user::{HandlerResult, Master, MasterReqHandler, VhostUserMasterReqHandler};
|
||||
use vhost_rs::VhostBackend;
|
||||
use virtio_queue::QueueT;
|
||||
use vm_memory::{
|
||||
GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryRegion, GuestRegionMmap, GuestUsize,
|
||||
MmapRegion,
|
||||
GuestAddress, GuestAddressSpace, GuestMemoryRegion, GuestRegionMmap, GuestUsize, MmapRegion,
|
||||
};
|
||||
|
||||
use crate::ConfigResult;
|
||||
@@ -50,6 +44,7 @@ const NUM_QUEUE_OFFSET: usize = 1;
|
||||
const MASTER_SLOT: u32 = 0;
|
||||
const SLAVE_REQ_SLOT: u32 = 1;
|
||||
|
||||
#[allow(dead_code)]
|
||||
struct SlaveReqHandler<AS: GuestAddressSpace> {
|
||||
/// the address of memory region allocated for virtiofs
|
||||
cache_offset: u64,
|
||||
@@ -69,6 +64,7 @@ struct SlaveReqHandler<AS: GuestAddressSpace> {
|
||||
|
||||
impl<AS: GuestAddressSpace> SlaveReqHandler<AS> {
|
||||
// Make sure request is within cache range
|
||||
#[allow(dead_code)]
|
||||
fn is_req_valid(&self, offset: u64, len: u64) -> bool {
|
||||
// TODO: do we need to validate alignment here?
|
||||
match offset.checked_add(len) {
|
||||
@@ -78,274 +74,24 @@ impl<AS: GuestAddressSpace> SlaveReqHandler<AS> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<AS: GuestAddressSpace> VhostUserMasterReqHandler for SlaveReqHandler<AS> {
|
||||
impl<AS: GuestAddressSpace> VhostUserFrontendReqHandler for SlaveReqHandler<AS> {
|
||||
fn handle_config_change(&self) -> HandlerResult<u64> {
|
||||
trace!(target: "vhost-fs", "{}: SlaveReqHandler::handle_config_change()", self.id);
|
||||
debug!("{}: unhandle device_config_change event", self.id);
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn fs_slave_map(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
|
||||
trace!(target: "vhost-fs", "{}: SlaveReqHandler::fs_slave_map()", self.id);
|
||||
|
||||
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
|
||||
let offset = fs.cache_offset[i];
|
||||
let len = fs.len[i];
|
||||
|
||||
// Ignore if the length is 0.
|
||||
if len == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
debug!(
|
||||
"{}: fs_slave_map: offset={:x} len={:x} cache_size={:x}",
|
||||
self.id, offset, len, self.cache_size
|
||||
);
|
||||
|
||||
if !self.is_req_valid(offset, len) {
|
||||
debug!(
|
||||
"{}: fs_slave_map: Wrong offset or length, offset={:x} len={:x} cache_size={:x}",
|
||||
self.id, offset, len, self.cache_size
|
||||
);
|
||||
return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
|
||||
}
|
||||
|
||||
let addr = self.mmap_cache_addr + offset;
|
||||
let flags = fs.flags[i];
|
||||
let ret = unsafe {
|
||||
libc::mmap(
|
||||
addr as *mut libc::c_void,
|
||||
len as usize,
|
||||
flags.bits() as i32,
|
||||
libc::MAP_SHARED | libc::MAP_FIXED,
|
||||
fd.as_raw_fd(),
|
||||
fs.fd_offset[i] as libc::off_t,
|
||||
)
|
||||
};
|
||||
if ret == libc::MAP_FAILED {
|
||||
let e = std::io::Error::last_os_error();
|
||||
error!("{}: fs_slave_map: mmap failed, {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
let ret = unsafe { libc::close(fd.as_raw_fd()) };
|
||||
if ret == -1 {
|
||||
let e = std::io::Error::last_os_error();
|
||||
error!("{}: fs_slave_map: close failed, {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn fs_slave_unmap(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
|
||||
trace!(target: "vhost-fs", "{}: SlaveReqHandler::fs_slave_map()", self.id);
|
||||
|
||||
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
|
||||
let offset = fs.cache_offset[i];
|
||||
let mut len = fs.len[i];
|
||||
|
||||
// Ignore if the length is 0.
|
||||
if len == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
debug!(
|
||||
"{}: fs_slave_unmap: offset={:x} len={:x} cache_size={:x}",
|
||||
self.id, offset, len, self.cache_size
|
||||
);
|
||||
|
||||
// Need to handle a special case where the slave ask for the unmapping
|
||||
// of the entire mapping.
|
||||
if len == 0xffff_ffff_ffff_ffff {
|
||||
len = self.cache_size;
|
||||
}
|
||||
|
||||
if !self.is_req_valid(offset, len) {
|
||||
error!(
|
||||
"{}: fs_slave_map: Wrong offset or length, offset={:x} len={:x} cache_size={:x}",
|
||||
self.id, offset, len, self.cache_size
|
||||
);
|
||||
return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
|
||||
}
|
||||
|
||||
let addr = self.mmap_cache_addr + offset;
|
||||
#[allow(clippy::unnecessary_cast)]
|
||||
let ret = unsafe {
|
||||
libc::mmap(
|
||||
addr as *mut libc::c_void,
|
||||
len as usize,
|
||||
libc::PROT_NONE,
|
||||
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_FIXED,
|
||||
-1,
|
||||
0 as libc::off_t,
|
||||
)
|
||||
};
|
||||
if ret == libc::MAP_FAILED {
|
||||
let e = std::io::Error::last_os_error();
|
||||
error!("{}: fs_slave_map: mmap failed, {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn fs_slave_sync(&self, fs: &VhostUserFSSlaveMsg) -> HandlerResult<u64> {
|
||||
trace!(target: "vhost-fs", "{}: SlaveReqHandler::fs_slave_sync()", self.id);
|
||||
|
||||
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
|
||||
let offset = fs.cache_offset[i];
|
||||
let len = fs.len[i];
|
||||
|
||||
// Ignore if the length is 0.
|
||||
if len == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
debug!(
|
||||
"{}: fs_slave_sync: offset={:x} len={:x} cache_size={:x}",
|
||||
self.id, offset, len, self.cache_size
|
||||
);
|
||||
|
||||
if !self.is_req_valid(offset, len) {
|
||||
error!(
|
||||
"{}: fs_slave_map: Wrong offset or length, offset={:x} len={:x} cache_size={:x}",
|
||||
self.id, offset, len, self.cache_size
|
||||
);
|
||||
return Err(std::io::Error::from_raw_os_error(libc::EINVAL));
|
||||
}
|
||||
|
||||
let addr = self.mmap_cache_addr + offset;
|
||||
let ret =
|
||||
unsafe { libc::msync(addr as *mut libc::c_void, len as usize, libc::MS_SYNC) };
|
||||
if ret == -1 {
|
||||
let e = std::io::Error::last_os_error();
|
||||
error!("{}: fs_slave_sync: msync failed, {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn fs_slave_io(&self, fs: &VhostUserFSSlaveMsg, fd: &dyn AsRawFd) -> HandlerResult<u64> {
|
||||
trace!(target: "vhost-fs", "{}: SlaveReqHandler::fs_slave_io()", self.id);
|
||||
|
||||
let guard = self.mem.memory();
|
||||
let mem = guard.deref();
|
||||
let mut done: u64 = 0;
|
||||
for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
|
||||
// Ignore if the length is 0.
|
||||
if fs.len[i] == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut foffset = fs.fd_offset[i];
|
||||
let mut len = fs.len[i] as usize;
|
||||
let gpa = fs.cache_offset[i];
|
||||
let cache_end = self.cache_offset + self.cache_size;
|
||||
let efault = libc::EFAULT;
|
||||
|
||||
debug!(
|
||||
"{}: fs_slave_io: gpa={:x} len={:x} foffset={:x} cache_offset={:x} cache_size={:x}",
|
||||
self.id, gpa, len, foffset, self.cache_offset, self.cache_size
|
||||
);
|
||||
|
||||
let mut ptr = if gpa >= self.cache_offset && gpa < cache_end {
|
||||
let offset = gpa
|
||||
.checked_sub(self.cache_offset)
|
||||
.ok_or_else(|| io::Error::from_raw_os_error(efault))?;
|
||||
let end = gpa
|
||||
.checked_add(fs.len[i])
|
||||
.ok_or_else(|| io::Error::from_raw_os_error(efault))?;
|
||||
|
||||
if end >= cache_end {
|
||||
error!( "{}: fs_slave_io: Wrong gpa or len (gpa={:x} len={:x} cache_offset={:x}, cache_size={:x})", self.id, gpa, len, self.cache_offset, self.cache_size );
|
||||
return Err(io::Error::from_raw_os_error(efault));
|
||||
}
|
||||
self.mmap_cache_addr + offset
|
||||
} else {
|
||||
// gpa is a RAM addr.
|
||||
mem.get_host_address(GuestAddress(gpa))
|
||||
.map_err(|e| {
|
||||
error!(
|
||||
"{}: fs_slave_io: Failed to find RAM region associated with gpa 0x{:x}: {:?}",
|
||||
self.id, gpa, e
|
||||
);
|
||||
io::Error::from_raw_os_error(efault)
|
||||
})? as u64
|
||||
};
|
||||
|
||||
while len > 0 {
|
||||
let ret = if (fs.flags[i] & VhostUserFSSlaveMsgFlags::MAP_W)
|
||||
== VhostUserFSSlaveMsgFlags::MAP_W
|
||||
{
|
||||
debug!("{}: write: foffset={:x}, len={:x}", self.id, foffset, len);
|
||||
unsafe {
|
||||
pwrite64(
|
||||
fd.as_raw_fd(),
|
||||
ptr as *const c_void,
|
||||
len,
|
||||
foffset as off64_t,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
debug!("{}: read: foffset={:x}, len={:x}", self.id, foffset, len);
|
||||
unsafe { pread64(fd.as_raw_fd(), ptr as *mut c_void, len, foffset as off64_t) }
|
||||
};
|
||||
|
||||
if ret < 0 {
|
||||
let e = std::io::Error::last_os_error();
|
||||
if (fs.flags[i] & VhostUserFSSlaveMsgFlags::MAP_W)
|
||||
== VhostUserFSSlaveMsgFlags::MAP_W
|
||||
{
|
||||
error!("{}: fs_slave_io: pwrite failed, {}", self.id, e);
|
||||
} else {
|
||||
error!("{}: fs_slave_io: pread failed, {}", self.id, e);
|
||||
}
|
||||
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
if ret == 0 {
|
||||
// EOF
|
||||
let e = io::Error::new(
|
||||
io::ErrorKind::UnexpectedEof,
|
||||
"failed to access whole buffer",
|
||||
);
|
||||
error!("{}: fs_slave_io: IO error, {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
len -= ret as usize;
|
||||
foffset += ret as u64;
|
||||
ptr += ret as u64;
|
||||
done += ret as u64;
|
||||
}
|
||||
|
||||
let ret = unsafe { libc::close(fd.as_raw_fd()) };
|
||||
if ret == -1 {
|
||||
let e = std::io::Error::last_os_error();
|
||||
error!("{}: fs_slave_io: close failed, {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(done)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct VhostUserFsHandler<
|
||||
AS: GuestAddressSpace,
|
||||
Q: QueueT,
|
||||
R: GuestMemoryRegion,
|
||||
S: VhostUserMasterReqHandler,
|
||||
S: VhostUserFrontendReqHandler,
|
||||
> {
|
||||
config: VirtioDeviceConfig<AS, Q, R>,
|
||||
device: Arc<Mutex<VhostUserFsDevice>>,
|
||||
slave_req_handler: Option<MasterReqHandler<S>>,
|
||||
slave_req_handler: Option<FrontendReqHandler<S>>,
|
||||
id: String,
|
||||
}
|
||||
|
||||
@@ -354,7 +100,7 @@ where
|
||||
AS: 'static + GuestAddressSpace + Send + Sync,
|
||||
Q: QueueT + Send + 'static,
|
||||
R: GuestMemoryRegion + Send + Sync + 'static,
|
||||
S: 'static + Send + VhostUserMasterReqHandler,
|
||||
S: 'static + Send + VhostUserFrontendReqHandler,
|
||||
{
|
||||
fn process(&mut self, events: Events, _ops: &mut EventOps) {
|
||||
trace!(target: "vhost-fs", "{}: VhostUserFsHandler::process({})", self.id, events.data());
|
||||
@@ -425,7 +171,7 @@ impl VhostUserFsDevice {
|
||||
// Connect to the vhost-user socket.
|
||||
info!("{VHOST_USER_FS_NAME}: try to connect to {path:?}");
|
||||
let num_queues = NUM_QUEUE_OFFSET + req_num_queues;
|
||||
let master = Master::connect(path, num_queues as u64).map_err(VirtioError::VhostError)?;
|
||||
let master = Frontend::connect(path, num_queues as u64).map_err(VirtioError::VhostError)?;
|
||||
|
||||
info!("{VHOST_USER_FS_NAME}: get features");
|
||||
let avail_features = master.get_features().map_err(VirtioError::VhostError)?;
|
||||
@@ -475,7 +221,7 @@ impl VhostUserFsDevice {
|
||||
let mut features = VhostUserProtocolFeatures::MQ | VhostUserProtocolFeatures::REPLY_ACK;
|
||||
if self.is_dax_on() {
|
||||
features |=
|
||||
VhostUserProtocolFeatures::SLAVE_REQ | VhostUserProtocolFeatures::SLAVE_SEND_FD;
|
||||
VhostUserProtocolFeatures::BACKEND_REQ | VhostUserProtocolFeatures::BACKEND_SEND_FD;
|
||||
}
|
||||
features
|
||||
}
|
||||
@@ -484,7 +230,7 @@ impl VhostUserFsDevice {
|
||||
AS: GuestAddressSpace,
|
||||
Q: QueueT,
|
||||
R: GuestMemoryRegion,
|
||||
S: VhostUserMasterReqHandler,
|
||||
S: VhostUserFrontendReqHandler,
|
||||
>(
|
||||
&mut self,
|
||||
handler: &VhostUserFsHandler<AS, Q, R, S>,
|
||||
@@ -621,7 +367,7 @@ where
|
||||
mem: config.vm_as.clone(),
|
||||
id: device.device_info.driver_name.clone(),
|
||||
});
|
||||
let req_handler = MasterReqHandler::new(vu_master_req_handler)
|
||||
let req_handler = FrontendReqHandler::new(vu_master_req_handler)
|
||||
.map_err(|e| ActivateError::VhostActivate(vhost_rs::Error::VhostUserProtocol(e)))?;
|
||||
|
||||
Some(req_handler)
|
||||
@@ -748,7 +494,7 @@ where
|
||||
|
||||
let guest_mmap_region = Arc::new(
|
||||
GuestRegionMmap::new(mmap_region, GuestAddress(guest_addr))
|
||||
.map_err(VirtioError::InsertMmap)?,
|
||||
.ok_or(VirtioError::InsertMmap)?,
|
||||
);
|
||||
|
||||
Ok(Some(VirtioSharedMemoryList {
|
||||
|
||||
@@ -12,7 +12,7 @@ use dbs_utils::epoll_manager::{EpollManager, EventOps, Events, MutEventSubscribe
|
||||
use dbs_utils::net::MacAddr;
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use vhost_rs::vhost_user::{
|
||||
Error as VhostUserError, Master, VhostUserProtocolFeatures, VhostUserVirtioFeatures,
|
||||
Error as VhostUserError, Frontend, VhostUserProtocolFeatures, VhostUserVirtioFeatures,
|
||||
};
|
||||
use vhost_rs::Error as VhostError;
|
||||
use virtio_bindings::bindings::virtio_net::{
|
||||
@@ -59,7 +59,7 @@ struct VhostUserNetDevice {
|
||||
|
||||
impl VhostUserNetDevice {
|
||||
fn new(
|
||||
master: Master,
|
||||
master: Frontend,
|
||||
mut avail_features: u64,
|
||||
listener: Listener,
|
||||
guest_mac: Option<&MacAddr>,
|
||||
|
||||
@@ -14,13 +14,14 @@ use vhost_rs::vhost_user::message::{
|
||||
VhostUserVringAddr, VhostUserVringState, MAX_MSG_SIZE,
|
||||
};
|
||||
use vhost_rs::vhost_user::Error;
|
||||
use vm_memory::ByteValued;
|
||||
use vmm_sys_util::sock_ctrl_msg::ScmSocket;
|
||||
use vmm_sys_util::tempfile::TempFile;
|
||||
|
||||
pub const MAX_ATTACHED_FD_ENTRIES: usize = 32;
|
||||
|
||||
pub(crate) trait Req:
|
||||
Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Into<u32>
|
||||
Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Into<u32> + Send + Sync
|
||||
{
|
||||
fn is_valid(&self) -> bool;
|
||||
}
|
||||
@@ -215,6 +216,10 @@ impl<R: Req> Default for VhostUserMsgHeader<R> {
|
||||
}
|
||||
}
|
||||
|
||||
// SAFETY: VhostUserMsgHeader is a packed struct with only primitive (u32) fields and PhantomData.
|
||||
// All bit patterns are valid, and it has no padding bytes.
|
||||
unsafe impl<R: Req> ByteValued for VhostUserMsgHeader<R> {}
|
||||
|
||||
/// Unix domain socket endpoint for vhost-user connection.
|
||||
pub(crate) struct Endpoint<R: Req> {
|
||||
sock: UnixStream,
|
||||
|
||||
@@ -99,13 +99,13 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_tcp_backend_bind() {
|
||||
let tcp_sock_addr = String::from("127.0.0.2:9000");
|
||||
let tcp_sock_addr = String::from("127.0.0.1:9000");
|
||||
assert!(VsockTcpBackend::new(tcp_sock_addr).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tcp_backend_accept() {
|
||||
let tcp_sock_addr = String::from("127.0.0.2:9001");
|
||||
let tcp_sock_addr = String::from("127.0.0.1:9001");
|
||||
|
||||
let mut vsock_backend = VsockTcpBackend::new(tcp_sock_addr.clone()).unwrap();
|
||||
let _stream = TcpStream::connect(&tcp_sock_addr).unwrap();
|
||||
@@ -115,7 +115,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_tcp_backend_communication() {
|
||||
let tcp_sock_addr = String::from("127.0.0.2:9002");
|
||||
let tcp_sock_addr = String::from("127.0.0.1:9002");
|
||||
let test_string = String::from("TEST");
|
||||
let mut buffer = [0; 10];
|
||||
|
||||
@@ -139,7 +139,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_tcp_backend_connect() {
|
||||
let tcp_sock_addr = String::from("127.0.0.2:9003");
|
||||
let tcp_sock_addr = String::from("127.0.0.1:9003");
|
||||
let vsock_backend = VsockTcpBackend::new(tcp_sock_addr).unwrap();
|
||||
// tcp backend don't support peer connection
|
||||
assert!(vsock_backend.connect(0).is_err());
|
||||
@@ -147,14 +147,14 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_tcp_backend_type() {
|
||||
let tcp_sock_addr = String::from("127.0.0.2:9004");
|
||||
let tcp_sock_addr = String::from("127.0.0.1:9004");
|
||||
let vsock_backend = VsockTcpBackend::new(tcp_sock_addr).unwrap();
|
||||
assert_eq!(vsock_backend.r#type(), VsockBackendType::Tcp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_tcp_backend_vsock_stream() {
|
||||
let tcp_sock_addr = String::from("127.0.0.2:9005");
|
||||
let tcp_sock_addr = String::from("127.0.0.1:9005");
|
||||
let _vsock_backend = VsockTcpBackend::new(tcp_sock_addr.clone()).unwrap();
|
||||
let vsock_stream = TcpStream::connect(&tcp_sock_addr).unwrap();
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
/// backend.
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use virtio_queue::{Descriptor, DescriptorChain};
|
||||
use virtio_queue::{desc::split::Descriptor, DescriptorChain};
|
||||
use vm_memory::{Address, GuestMemory};
|
||||
|
||||
use super::defs;
|
||||
|
||||
@@ -118,11 +118,15 @@ pub enum AddressManagerError {
|
||||
|
||||
/// Failure in accessing the memory located at some address.
|
||||
#[error("address manager failed to access guest memory located at 0x{0:x}")]
|
||||
AccessGuestMemory(u64, #[source] vm_memory::mmap::Error),
|
||||
AccessGuestMemory(u64, #[source] vm_memory::GuestMemoryError),
|
||||
|
||||
/// Failed to create GuestMemory
|
||||
#[error("address manager failed to create guest memory object")]
|
||||
CreateGuestMemory(#[source] vm_memory::Error),
|
||||
CreateGuestMemory(#[source] vm_memory::GuestMemoryError),
|
||||
|
||||
/// Failed to insert/manage guest memory region collection
|
||||
#[error("address manager failed to manage guest memory region collection")]
|
||||
GuestRegionCollection(#[source] vm_memory::GuestRegionCollectionError),
|
||||
|
||||
/// Failure in initializing guest memory.
|
||||
#[error("address manager failed to initialize guest memory")]
|
||||
@@ -328,7 +332,7 @@ impl AddressSpaceMgr {
|
||||
|
||||
vm_memory = vm_memory
|
||||
.insert_region(mmap_reg.clone())
|
||||
.map_err(AddressManagerError::CreateGuestMemory)?;
|
||||
.map_err(AddressManagerError::GuestRegionCollection)?;
|
||||
self.map_to_kvm(res_mgr, ¶m, reg, mmap_reg)?;
|
||||
}
|
||||
|
||||
@@ -488,8 +492,11 @@ impl AddressSpaceMgr {
|
||||
self.configure_thp_and_prealloc(®ion, &mmap_reg)?;
|
||||
}
|
||||
|
||||
let reg = GuestRegionImpl::new(mmap_reg, region.start_addr())
|
||||
.map_err(AddressManagerError::CreateGuestMemory)?;
|
||||
let reg = GuestRegionImpl::new(mmap_reg, region.start_addr()).ok_or(
|
||||
AddressManagerError::GuestRegionCollection(
|
||||
vm_memory::GuestRegionCollectionError::NoMemoryRegion,
|
||||
),
|
||||
)?;
|
||||
Ok(Arc::new(reg))
|
||||
}
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ pub enum BalloonDeviceError {
|
||||
|
||||
/// guest memory error
|
||||
#[error("failed to access guest memory, {0}")]
|
||||
GuestMemoryError(#[source] vm_memory::mmap::Error),
|
||||
GuestMemoryError(#[source] vm_memory::GuestMemoryError),
|
||||
|
||||
/// create balloon device error
|
||||
#[error("failed to create virtio-balloon device, {0}")]
|
||||
|
||||
@@ -557,15 +557,14 @@ impl MemRegionFactory for MemoryRegionFactory {
|
||||
);
|
||||
|
||||
// All value should be valid.
|
||||
let memory_region = Arc::new(
|
||||
GuestRegionMmap::new(mmap_region, guest_addr).map_err(VirtioError::InsertMmap)?,
|
||||
);
|
||||
let memory_region =
|
||||
Arc::new(GuestRegionMmap::new(mmap_region, guest_addr).ok_or(VirtioError::InsertMmap)?);
|
||||
|
||||
let vm_as_new = self
|
||||
.vm_as
|
||||
.memory()
|
||||
.insert_region(memory_region.clone())
|
||||
.map_err(VirtioError::InsertMmap)?;
|
||||
.map_err(|_| VirtioError::InsertMmap)?;
|
||||
self.vm_as.lock().unwrap().replace(vm_as_new);
|
||||
self.address_space.insert_region(region).map_err(|e| {
|
||||
error!(self.logger, "failed to insert address space region: {}", e);
|
||||
|
||||
@@ -78,7 +78,7 @@ impl DeviceVirtioRegionHandler {
|
||||
) -> std::result::Result<(), VirtioError> {
|
||||
let vm_as_new = self.vm_as.memory().insert_region(region).map_err(|e| {
|
||||
error!("DeviceVirtioRegionHandler failed to insert guest memory region: {e:?}.");
|
||||
VirtioError::InsertMmap(e)
|
||||
VirtioError::InsertMmap
|
||||
})?;
|
||||
// Do not expect poisoned lock here, so safe to unwrap().
|
||||
self.vm_as.lock().unwrap().replace(vm_as_new);
|
||||
|
||||
@@ -13,6 +13,7 @@ use arc_swap::ArcSwap;
|
||||
use dbs_address_space::AddressSpace;
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
use dbs_arch::{DeviceType, MMIODeviceInfo};
|
||||
#[cfg(feature = "host-device")]
|
||||
use dbs_boot::layout::MMIO_LOW_END;
|
||||
use dbs_device::device_manager::{Error as IoManagerError, IoManager, IoManagerContext};
|
||||
use dbs_device::resources::DeviceResources;
|
||||
@@ -24,7 +25,6 @@ use dbs_legacy_devices::ConsoleHandler;
|
||||
use dbs_pci::CAPABILITY_BAR_SIZE;
|
||||
use dbs_utils::epoll_manager::EpollManager;
|
||||
use kvm_ioctls::VmFd;
|
||||
use virtio_queue::QueueSync;
|
||||
|
||||
#[cfg(feature = "dbs-virtio-devices")]
|
||||
use dbs_device::resources::ResourceConstraint;
|
||||
@@ -41,6 +41,7 @@ use dbs_virtio_devices::{
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
use dbs_pci::VfioPciDevice;
|
||||
#[cfg(feature = "host-device")]
|
||||
use dbs_pci::VirtioPciDevice;
|
||||
#[cfg(all(feature = "hotplug", feature = "dbs-upcall"))]
|
||||
use dbs_upcall::{
|
||||
@@ -59,6 +60,7 @@ use crate::resource_manager::ResourceManager;
|
||||
use crate::vm::{KernelConfigInfo, Vm, VmConfigInfo};
|
||||
use crate::IoManagerCached;
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
use vm_memory::GuestRegionMmap;
|
||||
|
||||
/// Virtual machine console device manager.
|
||||
@@ -187,18 +189,23 @@ pub enum DeviceMgrError {
|
||||
/// Error from Vfio Pci
|
||||
#[error("failed to do vfio pci operation: {0:?}")]
|
||||
VfioPci(#[source] dbs_pci::VfioPciError),
|
||||
#[cfg(feature = "host-device")]
|
||||
/// Error from Virtio Pci
|
||||
#[error("failed to do virtio pci operation")]
|
||||
VirtioPci,
|
||||
#[cfg(feature = "host-device")]
|
||||
/// PCI system manager error
|
||||
#[error("Pci system manager error")]
|
||||
PciSystemManager,
|
||||
#[cfg(feature = "host-device")]
|
||||
/// Dragonball pci system error
|
||||
#[error("pci error: {0:?}")]
|
||||
PciError(#[source] dbs_pci::Error),
|
||||
#[cfg(feature = "host-device")]
|
||||
/// Virtio Pci system error
|
||||
#[error("virtio pci error: {0:?}")]
|
||||
VirtioPciError(#[source] dbs_pci::VirtioPciDeviceError),
|
||||
#[cfg(feature = "host-device")]
|
||||
/// Unsupported pci device type
|
||||
#[error("unsupported pci device type")]
|
||||
InvalidPciDeviceType,
|
||||
@@ -315,6 +322,7 @@ pub struct DeviceOpContext {
|
||||
virtio_devices: Vec<Arc<dyn DeviceIo>>,
|
||||
#[cfg(feature = "host-device")]
|
||||
vfio_manager: Option<Arc<Mutex<VfioDeviceMgr>>>,
|
||||
#[cfg(feature = "host-device")]
|
||||
pci_system_manager: Arc<Mutex<PciSystemManager>>,
|
||||
vm_config: Option<VmConfigInfo>,
|
||||
shared_info: Arc<RwLock<InstanceInfo>>,
|
||||
@@ -366,6 +374,7 @@ impl DeviceOpContext {
|
||||
shared_info,
|
||||
#[cfg(feature = "host-device")]
|
||||
vfio_manager: None,
|
||||
#[cfg(feature = "host-device")]
|
||||
pci_system_manager: device_mgr.pci_system_manager.clone(),
|
||||
}
|
||||
}
|
||||
@@ -659,6 +668,7 @@ pub struct DeviceManager {
|
||||
vhost_user_net_manager: VhostUserNetDeviceMgr,
|
||||
#[cfg(feature = "host-device")]
|
||||
pub(crate) vfio_manager: Arc<Mutex<VfioDeviceMgr>>,
|
||||
#[cfg(feature = "host-device")]
|
||||
pub(crate) pci_system_manager: Arc<Mutex<PciSystemManager>>,
|
||||
}
|
||||
|
||||
@@ -674,15 +684,21 @@ impl DeviceManager {
|
||||
let irq_manager = Arc::new(KvmIrqManager::new(vm_fd.clone()));
|
||||
let io_manager = Arc::new(ArcSwap::new(Arc::new(IoManager::new())));
|
||||
let io_lock = Arc::new(Mutex::new(()));
|
||||
#[cfg(feature = "host-device")]
|
||||
let io_context = DeviceManagerContext::new(io_manager.clone(), io_lock.clone());
|
||||
#[cfg(feature = "host-device")]
|
||||
let mut mgr = PciSystemManager::new(irq_manager.clone(), io_context, res_manager.clone())?;
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
let requirements = mgr.resource_requirements();
|
||||
#[cfg(feature = "host-device")]
|
||||
let resources = res_manager
|
||||
.allocate_device_resources(&requirements, USE_SHARED_IRQ)
|
||||
.map_err(DeviceMgrError::ResourceError)?;
|
||||
#[cfg(feature = "host-device")]
|
||||
mgr.activate(resources)?;
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
let pci_system_manager = Arc::new(Mutex::new(mgr));
|
||||
|
||||
Ok(DeviceManager {
|
||||
@@ -720,6 +736,7 @@ impl DeviceManager {
|
||||
pci_system_manager.clone(),
|
||||
logger,
|
||||
))),
|
||||
#[cfg(feature = "host-device")]
|
||||
pci_system_manager,
|
||||
})
|
||||
}
|
||||
@@ -1251,6 +1268,7 @@ impl DeviceManager {
|
||||
}
|
||||
|
||||
/// Create an Virtio PCI transport layer device for the virtio backend device.
|
||||
#[cfg(feature = "host-device")]
|
||||
pub fn create_virtio_pci_device(
|
||||
mut device: DbsVirtioDevice,
|
||||
ctx: &mut DeviceOpContext,
|
||||
@@ -1366,6 +1384,7 @@ impl DeviceManager {
|
||||
}
|
||||
|
||||
/// Create an Virtio PCI transport layer device for the virtio backend device.
|
||||
#[cfg(feature = "host-device")]
|
||||
pub fn register_virtio_pci_device(
|
||||
device: Arc<dyn DeviceIo>,
|
||||
ctx: &DeviceOpContext,
|
||||
@@ -1385,6 +1404,7 @@ impl DeviceManager {
|
||||
}
|
||||
|
||||
/// Deregister Virtio device from IoManager
|
||||
#[cfg(feature = "host-device")]
|
||||
pub fn deregister_virtio_device(
|
||||
device: &Arc<dyn DeviceIo>,
|
||||
ctx: &mut DeviceOpContext,
|
||||
@@ -1405,11 +1425,15 @@ impl DeviceManager {
|
||||
}
|
||||
|
||||
/// Destroy/Deregister resources for a Virtio PCI
|
||||
#[cfg(feature = "host-device")]
|
||||
fn destroy_pci_device(
|
||||
device: Arc<dyn DeviceIo>,
|
||||
ctx: &mut DeviceOpContext,
|
||||
dev_id: u8,
|
||||
) -> std::result::Result<(), DeviceMgrError> {
|
||||
use virtio_queue::QueueSync;
|
||||
use vm_memory::GuestRegionMmap;
|
||||
|
||||
// unregister IoManager
|
||||
Self::deregister_virtio_device(&device, ctx)?;
|
||||
// unregister Resource manager
|
||||
@@ -1489,6 +1513,7 @@ impl DeviceManager {
|
||||
}
|
||||
|
||||
/// Teardown the Virtio PCI or MMIO transport layer device associated with the virtio backend device.
|
||||
#[cfg(feature = "dbs-virtio-devices")]
|
||||
pub fn destroy_virtio_device(
|
||||
device: Arc<dyn DeviceIo>,
|
||||
ctx: &mut DeviceOpContext,
|
||||
@@ -1496,12 +1521,18 @@ impl DeviceManager {
|
||||
if let Some(mmio_dev) = device.as_any().downcast_ref::<DbsMmioV2Device>() {
|
||||
Self::destroy_mmio_device(device.clone(), ctx)?;
|
||||
mmio_dev.remove();
|
||||
} else if let Some(pci_dev) = device.as_any().downcast_ref::<VirtioPciDevice<
|
||||
GuestAddressSpaceImpl,
|
||||
QueueSync,
|
||||
GuestRegionMmap,
|
||||
>>() {
|
||||
Self::destroy_pci_device(device.clone(), ctx, pci_dev.device_id())?;
|
||||
}
|
||||
#[cfg(feature = "host-device")]
|
||||
{
|
||||
use virtio_queue::QueueSync;
|
||||
use vm_memory::GuestRegionMmap;
|
||||
if let Some(pci_dev) = device.as_any().downcast_ref::<VirtioPciDevice<
|
||||
GuestAddressSpaceImpl,
|
||||
QueueSync,
|
||||
GuestRegionMmap,
|
||||
>>() {
|
||||
Self::destroy_pci_device(device.clone(), ctx, pci_dev.device_id())?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -1572,18 +1603,25 @@ mod tests {
|
||||
let irq_manager = Arc::new(KvmIrqManager::new(vm_fd.clone()));
|
||||
let io_manager = Arc::new(ArcSwap::new(Arc::new(IoManager::new())));
|
||||
let io_lock = Arc::new(Mutex::new(()));
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
let io_context = DeviceManagerContext::new(io_manager.clone(), io_lock.clone());
|
||||
#[cfg(feature = "host-device")]
|
||||
let mut mgr =
|
||||
PciSystemManager::new(irq_manager.clone(), io_context, res_manager.clone())
|
||||
.unwrap();
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
let requirements = mgr.resource_requirements();
|
||||
#[cfg(feature = "host-device")]
|
||||
let resources = res_manager
|
||||
.allocate_device_resources(&requirements, USE_SHARED_IRQ)
|
||||
.map_err(DeviceMgrError::ResourceError)
|
||||
.unwrap();
|
||||
#[cfg(feature = "host-device")]
|
||||
mgr.activate(resources).unwrap();
|
||||
|
||||
#[cfg(feature = "host-device")]
|
||||
let pci_system_manager = Arc::new(Mutex::new(mgr));
|
||||
|
||||
DeviceManager {
|
||||
@@ -1619,6 +1657,7 @@ mod tests {
|
||||
pci_system_manager.clone(),
|
||||
&logger,
|
||||
))),
|
||||
#[cfg(feature = "host-device")]
|
||||
pci_system_manager,
|
||||
|
||||
logger,
|
||||
|
||||
@@ -406,9 +406,11 @@ impl VfioDeviceMgr {
|
||||
if let Some(vfio_container) = self.vfio_container.as_ref() {
|
||||
Ok(vfio_container.clone())
|
||||
} else {
|
||||
let kvm_dev_fd = Arc::new(self.get_kvm_dev_fd()?);
|
||||
let vfio_container =
|
||||
Arc::new(VfioContainer::new(kvm_dev_fd).map_err(VfioDeviceError::VfioIoctlError)?);
|
||||
let kvm_dev_fd = self.get_kvm_dev_fd()?;
|
||||
let vfio_dev_fd = Arc::new(vfio_ioctls::VfioDeviceFd::new_from_kvm(kvm_dev_fd));
|
||||
let vfio_container = Arc::new(
|
||||
VfioContainer::new(Some(vfio_dev_fd)).map_err(VfioDeviceError::VfioIoctlError)?,
|
||||
);
|
||||
self.vfio_container = Some(vfio_container.clone());
|
||||
|
||||
Ok(vfio_container)
|
||||
|
||||
@@ -43,7 +43,7 @@ impl Vcpu {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new_aarch64(
|
||||
id: u8,
|
||||
vcpu_fd: Arc<VcpuFd>,
|
||||
vcpu_fd: VcpuFd,
|
||||
io_mgr: IoManagerCached,
|
||||
exit_evt: EventFd,
|
||||
vcpu_state_event: EventFd,
|
||||
|
||||
@@ -274,7 +274,7 @@ enum VcpuEmulation {
|
||||
/// A wrapper around creating and using a kvm-based VCPU.
|
||||
pub struct Vcpu {
|
||||
// vCPU fd used by the vCPU
|
||||
fd: Arc<VcpuFd>,
|
||||
fd: VcpuFd,
|
||||
// vCPU id info
|
||||
id: u8,
|
||||
// Io manager Cached for facilitating IO operations
|
||||
@@ -317,7 +317,7 @@ pub struct Vcpu {
|
||||
}
|
||||
|
||||
// Using this for easier explicit type-casting to help IDEs interpret the code.
|
||||
type VcpuCell = Cell<Option<*const Vcpu>>;
|
||||
type VcpuCell = Cell<Option<*mut Vcpu>>;
|
||||
|
||||
impl Vcpu {
|
||||
thread_local!(static TLS_VCPU_PTR: VcpuCell = const { Cell::new(None) });
|
||||
@@ -332,7 +332,7 @@ impl Vcpu {
|
||||
if cell.get().is_some() {
|
||||
return Err(VcpuError::VcpuTlsInit);
|
||||
}
|
||||
cell.set(Some(self as *const Vcpu));
|
||||
cell.set(Some(self as *mut Vcpu));
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
@@ -369,13 +369,13 @@ impl Vcpu {
|
||||
/// dereferencing from pointer an already borrowed `Vcpu`.
|
||||
unsafe fn run_on_thread_local<F>(func: F) -> Result<()>
|
||||
where
|
||||
F: FnOnce(&Vcpu),
|
||||
F: FnOnce(&mut Vcpu),
|
||||
{
|
||||
Self::TLS_VCPU_PTR.with(|cell: &VcpuCell| {
|
||||
if let Some(vcpu_ptr) = cell.get() {
|
||||
// Dereferencing here is safe since `TLS_VCPU_PTR` is populated/non-empty,
|
||||
// and it is being cleared on `Vcpu::drop` so there is no dangling pointer.
|
||||
let vcpu_ref: &Vcpu = &*vcpu_ptr;
|
||||
let vcpu_ref: &mut Vcpu = &mut *vcpu_ptr;
|
||||
func(vcpu_ref);
|
||||
Ok(())
|
||||
} else {
|
||||
@@ -436,7 +436,7 @@ impl Vcpu {
|
||||
|
||||
/// Extract the vcpu running logic for test mocking.
|
||||
#[cfg(not(test))]
|
||||
pub fn emulate(fd: &VcpuFd) -> std::result::Result<VcpuExit<'_>, kvm_ioctls::Error> {
|
||||
pub fn emulate(fd: &mut VcpuFd) -> std::result::Result<VcpuExit<'_>, kvm_ioctls::Error> {
|
||||
fd.run()
|
||||
}
|
||||
|
||||
@@ -444,7 +444,7 @@ impl Vcpu {
|
||||
///
|
||||
/// Returns error or enum specifying whether emulation was handled or interrupted.
|
||||
fn run_emulation(&mut self) -> Result<VcpuEmulation> {
|
||||
match Vcpu::emulate(&self.fd) {
|
||||
match Vcpu::emulate(&mut self.fd) {
|
||||
Ok(run) => {
|
||||
match run {
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
@@ -455,8 +455,9 @@ impl Vcpu {
|
||||
}
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
VcpuExit::IoOut(addr, data) => {
|
||||
if !self.check_io_port_info(addr, data)? {
|
||||
let _ = self.io_mgr.pio_write(addr, data);
|
||||
let data = data.to_vec();
|
||||
if !self.check_io_port_info(addr, &data)? {
|
||||
let _ = self.io_mgr.pio_write(addr, &data);
|
||||
}
|
||||
self.metrics.exit_io_out.inc();
|
||||
Ok(VcpuEmulation::Handled)
|
||||
@@ -493,14 +494,14 @@ impl Vcpu {
|
||||
VcpuExit::SystemEvent(event_type, event_flags) => match event_type {
|
||||
KVM_SYSTEM_EVENT_RESET | KVM_SYSTEM_EVENT_SHUTDOWN => {
|
||||
info!(
|
||||
"Received KVM_SYSTEM_EVENT: type: {event_type}, event: {event_flags}"
|
||||
"Received KVM_SYSTEM_EVENT: type: {event_type}, event: {event_flags:?}"
|
||||
);
|
||||
Ok(VcpuEmulation::Stopped)
|
||||
}
|
||||
_ => {
|
||||
self.metrics.failures.inc();
|
||||
error!(
|
||||
"Received KVM_SYSTEM_EVENT signal type: {event_type}, flag: {event_flags}"
|
||||
"Received KVM_SYSTEM_EVENT signal type: {event_type}, flag: {event_flags:?}"
|
||||
);
|
||||
Err(VcpuError::VcpuUnhandledKvmExit)
|
||||
}
|
||||
@@ -765,7 +766,7 @@ impl Vcpu {
|
||||
|
||||
/// Get vcpu file descriptor.
|
||||
pub fn vcpu_fd(&self) -> &VcpuFd {
|
||||
self.fd.as_ref()
|
||||
&self.fd
|
||||
}
|
||||
|
||||
pub fn metrics(&self) -> Arc<VcpuMetrics> {
|
||||
@@ -804,7 +805,7 @@ pub mod tests {
|
||||
FailEntry(u64, u32),
|
||||
InternalError,
|
||||
Unknown,
|
||||
SystemEvent(u32, u64),
|
||||
SystemEvent(u32, Vec<u64>),
|
||||
Error(i32),
|
||||
}
|
||||
|
||||
@@ -813,7 +814,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
impl Vcpu {
|
||||
pub fn emulate(_fd: &VcpuFd) -> std::result::Result<VcpuExit<'_>, kvm_ioctls::Error> {
|
||||
pub fn emulate(_fd: &mut VcpuFd) -> std::result::Result<VcpuExit<'_>, kvm_ioctls::Error> {
|
||||
let res = &*EMULATE_RES.lock().unwrap();
|
||||
match res {
|
||||
EmulationCase::IoIn => Ok(VcpuExit::IoIn(0, &mut [])),
|
||||
@@ -828,7 +829,8 @@ pub mod tests {
|
||||
EmulationCase::InternalError => Ok(VcpuExit::InternalError),
|
||||
EmulationCase::Unknown => Ok(VcpuExit::Unknown),
|
||||
EmulationCase::SystemEvent(event_type, event_flags) => {
|
||||
Ok(VcpuExit::SystemEvent(*event_type, *event_flags))
|
||||
let flags = event_flags.clone().into_boxed_slice();
|
||||
Ok(VcpuExit::SystemEvent(*event_type, Box::leak(flags)))
|
||||
}
|
||||
EmulationCase::Error(e) => Err(kvm_ioctls::Error::new(*e)),
|
||||
}
|
||||
@@ -839,7 +841,7 @@ pub mod tests {
|
||||
fn create_vcpu() -> (Vcpu, Receiver<VcpuStateEvent>) {
|
||||
let kvm_context = KvmContext::new(None).unwrap();
|
||||
let vm = kvm_context.kvm().create_vm().unwrap();
|
||||
let vcpu_fd = Arc::new(vm.create_vcpu(0).unwrap());
|
||||
let vcpu_fd = vm.create_vcpu(0).unwrap();
|
||||
let io_manager = IoManagerCached::new(Arc::new(ArcSwap::new(Arc::new(IoManager::new()))));
|
||||
let supported_cpuid = kvm_context
|
||||
.supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)
|
||||
@@ -875,7 +877,7 @@ pub mod tests {
|
||||
let kvm = Kvm::new().unwrap();
|
||||
let vm = Arc::new(kvm.create_vm().unwrap());
|
||||
let _kvm_context = KvmContext::new(Some(kvm.as_raw_fd())).unwrap();
|
||||
let vcpu_fd = Arc::new(vm.create_vcpu(0).unwrap());
|
||||
let vcpu_fd = vm.create_vcpu(0).unwrap();
|
||||
let io_manager = IoManagerCached::new(Arc::new(ArcSwap::new(Arc::new(IoManager::new()))));
|
||||
let reset_event_fd = EventFd::new(libc::EFD_NONBLOCK).unwrap();
|
||||
let vcpu_state_event = EventFd::new(libc::EFD_NONBLOCK).unwrap();
|
||||
@@ -947,17 +949,19 @@ pub mod tests {
|
||||
assert!(matches!(res, Err(VcpuError::VcpuUnhandledKvmExit)));
|
||||
|
||||
// KVM_SYSTEM_EVENT_RESET
|
||||
*(EMULATE_RES.lock().unwrap()) = EmulationCase::SystemEvent(KVM_SYSTEM_EVENT_RESET, 0);
|
||||
*(EMULATE_RES.lock().unwrap()) =
|
||||
EmulationCase::SystemEvent(KVM_SYSTEM_EVENT_RESET, vec![0]);
|
||||
let res = vcpu.run_emulation();
|
||||
assert!(matches!(res, Ok(VcpuEmulation::Stopped)));
|
||||
|
||||
// KVM_SYSTEM_EVENT_SHUTDOWN
|
||||
*(EMULATE_RES.lock().unwrap()) = EmulationCase::SystemEvent(KVM_SYSTEM_EVENT_SHUTDOWN, 0);
|
||||
*(EMULATE_RES.lock().unwrap()) =
|
||||
EmulationCase::SystemEvent(KVM_SYSTEM_EVENT_SHUTDOWN, vec![0]);
|
||||
let res = vcpu.run_emulation();
|
||||
assert!(matches!(res, Ok(VcpuEmulation::Stopped)));
|
||||
|
||||
// Other system event
|
||||
*(EMULATE_RES.lock().unwrap()) = EmulationCase::SystemEvent(0, 0);
|
||||
*(EMULATE_RES.lock().unwrap()) = EmulationCase::SystemEvent(0, vec![0]);
|
||||
let res = vcpu.run_emulation();
|
||||
assert!(matches!(res, Err(VcpuError::VcpuUnhandledKvmExit)));
|
||||
|
||||
|
||||
@@ -189,7 +189,7 @@ pub struct VcpuResizeInfo {
|
||||
#[derive(Default)]
|
||||
pub(crate) struct VcpuInfo {
|
||||
pub(crate) vcpu: Option<Vcpu>,
|
||||
vcpu_fd: Option<Arc<VcpuFd>>,
|
||||
vcpu_fd: Option<VcpuFd>,
|
||||
handle: Option<VcpuHandle>,
|
||||
tid: u32,
|
||||
}
|
||||
@@ -541,18 +541,13 @@ impl VcpuManager {
|
||||
}
|
||||
// We will reuse the kvm's vcpufd after first creation, for we can't
|
||||
// create vcpufd with same id in one kvm instance.
|
||||
let kvm_vcpu = match &self.vcpu_infos[cpu_index as usize].vcpu_fd {
|
||||
Some(vcpu_fd) => vcpu_fd.clone(),
|
||||
None => {
|
||||
let vcpu_fd = Arc::new(
|
||||
self.vm_fd
|
||||
.create_vcpu(cpu_index as u64)
|
||||
.map_err(VcpuError::VcpuFd)
|
||||
.map_err(VcpuManagerError::Vcpu)?,
|
||||
);
|
||||
self.vcpu_infos[cpu_index as usize].vcpu_fd = Some(vcpu_fd.clone());
|
||||
vcpu_fd
|
||||
}
|
||||
let kvm_vcpu = match self.vcpu_infos[cpu_index as usize].vcpu_fd.take() {
|
||||
Some(vcpu_fd) => vcpu_fd,
|
||||
None => self
|
||||
.vm_fd
|
||||
.create_vcpu(cpu_index as u64)
|
||||
.map_err(VcpuError::VcpuFd)
|
||||
.map_err(VcpuManagerError::Vcpu)?,
|
||||
};
|
||||
|
||||
let mut vcpu = self.create_vcpu_arch(cpu_index, kvm_vcpu, request_ts)?;
|
||||
@@ -777,7 +772,7 @@ impl VcpuManager {
|
||||
fn create_vcpu_arch(
|
||||
&self,
|
||||
cpu_index: u8,
|
||||
vcpu_fd: Arc<VcpuFd>,
|
||||
vcpu_fd: VcpuFd,
|
||||
request_ts: TimestampUs,
|
||||
) -> Result<Vcpu> {
|
||||
// It's safe to unwrap because guest_kernel always exist until vcpu manager done
|
||||
@@ -806,7 +801,7 @@ impl VcpuManager {
|
||||
fn create_vcpu_arch(
|
||||
&self,
|
||||
cpu_index: u8,
|
||||
vcpu_fd: Arc<VcpuFd>,
|
||||
vcpu_fd: VcpuFd,
|
||||
request_ts: TimestampUs,
|
||||
) -> Result<Vcpu> {
|
||||
Vcpu::new_aarch64(
|
||||
|
||||
@@ -45,7 +45,7 @@ impl Vcpu {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new_x86_64(
|
||||
id: u8,
|
||||
vcpu_fd: Arc<VcpuFd>,
|
||||
vcpu_fd: VcpuFd,
|
||||
io_mgr: IoManagerCached,
|
||||
cpuid: CpuId,
|
||||
exit_evt: EventFd,
|
||||
|
||||
@@ -642,7 +642,7 @@ impl Vm {
|
||||
image: &mut F,
|
||||
) -> std::result::Result<InitrdConfig, LoadInitrdError>
|
||||
where
|
||||
F: Read + Seek,
|
||||
F: Read + Seek + vm_memory::ReadVolatile,
|
||||
{
|
||||
use crate::error::LoadInitrdError::*;
|
||||
|
||||
@@ -666,7 +666,7 @@ impl Vm {
|
||||
|
||||
// Load the image into memory
|
||||
vm_memory
|
||||
.read_from(GuestAddress(address), image, size)
|
||||
.read_volatile_from(GuestAddress(address), image, size)
|
||||
.map_err(|_| LoadInitrd)?;
|
||||
|
||||
Ok(InitrdConfig {
|
||||
@@ -1132,7 +1132,7 @@ pub mod tests {
|
||||
let vm_memory = vm.address_space.vm_memory().unwrap();
|
||||
vm_memory.write_obj(code, load_addr).unwrap();
|
||||
|
||||
let vcpu_fd = vm.vm_fd().create_vcpu(0).unwrap();
|
||||
let mut vcpu_fd = vm.vm_fd().create_vcpu(0).unwrap();
|
||||
let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
|
||||
assert_ne!(vcpu_sregs.cs.base, 0);
|
||||
assert_ne!(vcpu_sregs.cs.selector, 0);
|
||||
|
||||
@@ -1,13 +1,114 @@
|
||||
The `src/libs` directory hosts library crates which may be shared by multiple Kata Containers components
|
||||
or published to [`crates.io`](https://crates.io/index.html).
|
||||
# Kata Containers Library Crates
|
||||
|
||||
### Library Crates
|
||||
Currently it provides following library crates:
|
||||
The `src/libs` directory hosts library crates shared by multiple Kata Containers components. These libraries provide common utilities, data types, and protocol definitions to facilitate development and maintain consistency across the project.
|
||||
|
||||
## Library Crates
|
||||
|
||||
| Library | Description |
|
||||
|-|-|
|
||||
| [logging](logging/) | Facilities to setup logging subsystem based on slog. |
|
||||
| [system utilities](kata-sys-util/) | Collection of facilities and helpers to access system services. |
|
||||
| [types](kata-types/) | Collection of constants and data types shared by multiple Kata Containers components. |
|
||||
| [safe-path](safe-path/) | Utilities to safely resolve filesystem paths. |
|
||||
| [test utilities](test-utils/) | Utilities to share test code. |
|
||||
|---------|-------------|
|
||||
| [kata-types](kata-types/) | Constants, data types, and configuration structures shared by Kata Containers components |
|
||||
| [kata-sys-util](kata-sys-util/) | System utilities: CPU, device, filesystem, hooks, K8s, mount, netns, NUMA, PCI, protection, spec validation |
|
||||
| [protocols](protocols/) | ttrpc protocol definitions for agent, health, remote, CSI, OCI, confidential data hub |
|
||||
| [runtime-spec](runtime-spec/) | OCI runtime spec data structures and constants |
|
||||
| [shim-interface](shim-interface/) | Shim management interface with RESTful API over Unix domain socket |
|
||||
| [logging](logging/) | Slog-based logging with JSON output and systemd journal support |
|
||||
| [safe-path](safe-path/) | Safe path resolution to prevent symlink and TOCTOU attacks |
|
||||
| [mem-agent](mem-agent/) | Memory management agent: memcg, compact, PSI monitoring |
|
||||
| [test-utils](test-utils/) | Test macros for root/non-root privileges and KVM accessibility |
|
||||
|
||||
## Details
|
||||
|
||||
### kata-types
|
||||
|
||||
Core types and configurations including:
|
||||
|
||||
- Annotations for CRI-containerd, CRI-O, dockershim
|
||||
- Hypervisor configurations (QEMU, Cloud Hypervisor, Firecracker, Dragonball)
|
||||
- Agent and runtime configurations
|
||||
- Kubernetes-specific utilities
|
||||
|
||||
### kata-sys-util
|
||||
|
||||
System-level utilities:
|
||||
|
||||
- `cpu`: CPU information and affinity
|
||||
- `device`: Device management
|
||||
- `fs`: Filesystem operations
|
||||
- `hooks`: Hook execution
|
||||
- `k8s`: Kubernetes utilities
|
||||
- `mount`: Mount operations
|
||||
- `netns`: Network namespace handling
|
||||
- `numa`: NUMA topology
|
||||
- `pcilibs`: PCI device access
|
||||
- `protection`: Hardware protection features
|
||||
- `spec`: OCI spec loading
|
||||
- `validate`: Input validation
|
||||
|
||||
### protocols
|
||||
|
||||
Generated ttrpc protocol bindings:
|
||||
|
||||
- `agent`: Kata agent API
|
||||
- `health`: Health check service
|
||||
- `remote`: Remote hypervisor API
|
||||
- `csi`: Container storage interface
|
||||
- `oci`: OCI specifications
|
||||
- `confidential_data_hub`: Confidential computing support
|
||||
|
||||
Features: `async` for async ttrpc, `with-serde` for serde support.
|
||||
|
||||
### runtime-spec
|
||||
|
||||
OCI runtime specification types:
|
||||
|
||||
- `ContainerState`: Creating, Created, Running, Stopped, Paused
|
||||
- `State`: Container state with version, id, status, pid, bundle, annotations
|
||||
- Namespace constants: pid, network, mount, ipc, user, uts, cgroup
|
||||
|
||||
### shim-interface
|
||||
|
||||
Shim management service interface:
|
||||
|
||||
- RESTful API over Unix domain socket (`/run/kata/<sid>/shim-monitor.sock`)
|
||||
- `MgmtClient` for HTTP requests to shim management server
|
||||
- Sandbox ID resolution with prefix matching
|
||||
|
||||
### logging
|
||||
|
||||
Slog-based logging framework:
|
||||
|
||||
- JSON output to file or stdout
|
||||
- systemd journal support
|
||||
- Runtime log level filtering per component/subsystem
|
||||
- Async drain for thread safety
|
||||
|
||||
### safe-path
|
||||
|
||||
Secure filesystem path handling:
|
||||
|
||||
- `scoped_join()`: Safely join paths under a root directory
|
||||
- `scoped_resolve()`: Resolve paths constrained by root
|
||||
- `PinnedPathBuf`: TOCTOU-safe path reference
|
||||
- `ScopedDirBuilder`: Safe directory creation
|
||||
|
||||
### mem-agent
|
||||
|
||||
Memory management for containers:
|
||||
|
||||
- `memcg`: Memory cgroup configuration and monitoring
|
||||
- `compact`: Memory compaction control
|
||||
- `psi`: Pressure stall information monitoring
|
||||
- Async runtime with configurable policies
|
||||
|
||||
### test-utils
|
||||
|
||||
Testing utilities:
|
||||
|
||||
- `skip_if_root!`: Skip test if running as root
|
||||
- `skip_if_not_root!`: Skip test if not running as root
|
||||
- `skip_if_kvm_unaccessable!`: Skip test if KVM is unavailable
|
||||
- `assert_result!`: Assert expected vs actual results
|
||||
|
||||
## License
|
||||
|
||||
All crates are licensed under Apache-2.0.
|
||||
|
||||
@@ -1,16 +1,100 @@
|
||||
# `kata-sys-util`
|
||||
|
||||
This crate is a collection of utilities and helpers for
|
||||
[Kata Containers](https://github.com/kata-containers/kata-containers/) components to access system services.
|
||||
System utilities and helpers for [Kata Containers](https://github.com/kata-containers/kata-containers/) components to access Linux system services.
|
||||
|
||||
It provides safe wrappers over system services, such as:
|
||||
- file systems
|
||||
- mount
|
||||
- NUMA
|
||||
## Overview
|
||||
|
||||
## Support
|
||||
This crate provides safe wrappers and utility functions for interacting with various Linux system services and kernel interfaces. It is designed specifically for the Kata Containers ecosystem.
|
||||
|
||||
## Features
|
||||
|
||||
### File System Operations (`fs`)
|
||||
|
||||
- Path canonicalization and basename extraction
|
||||
- Filesystem type detection (FUSE, OverlayFS)
|
||||
- Symlink detection
|
||||
- Reflink copy with fallback to regular copy
|
||||
|
||||
### Mount Operations (`mount`)
|
||||
|
||||
- Bind mount and remount operations
|
||||
- Mount propagation type management (SHARED, PRIVATE, SLAVE, UNBINDABLE)
|
||||
- Overlay filesystem mount option compression
|
||||
- Safe mount destination creation
|
||||
- Umount with timeout support
|
||||
- `/proc/mounts` parsing utilities
|
||||
|
||||
### CPU Utilities (`cpu`)
|
||||
|
||||
- CPU information parsing from `/proc/cpuinfo`
|
||||
- CPU flags detection and validation
|
||||
- Architecture-specific support (x86_64, s390x)
|
||||
|
||||
### NUMA Support (`numa`)
|
||||
|
||||
- CPU to NUMA node mapping
|
||||
- NUMA node information retrieval from sysfs
|
||||
- NUMA CPU validation
|
||||
|
||||
### Device Management (`device`)
|
||||
|
||||
- Block device major/minor number detection
|
||||
- Device ID resolution for cgroup operations
|
||||
|
||||
### Kubernetes Support (`k8s`)
|
||||
|
||||
- Ephemeral volume detection
|
||||
- EmptyDir volume handling
|
||||
- Kubernetes-specific mount type identification
|
||||
|
||||
### Network Namespace (`netns`)
|
||||
|
||||
- Network namespace switching with RAII guard pattern
|
||||
- Network namespace name generation
|
||||
|
||||
### OCI Specification Utilities (`spec`)
|
||||
|
||||
- Container type detection (PodSandbox, PodContainer)
|
||||
- Sandbox ID extraction from OCI annotations
|
||||
- OCI spec loading utilities
|
||||
|
||||
### Validation (`validate`)
|
||||
|
||||
- Container/exec ID validation
|
||||
- Environment variable validation
|
||||
|
||||
### Hooks (`hooks`)
|
||||
|
||||
- OCI hook execution and management
|
||||
- Hook state tracking
|
||||
- Timeout handling for hook execution
|
||||
|
||||
### Guest Protection (`protection`)
|
||||
|
||||
- Confidential computing detection (TDX, SEV, SNP, PEF, SE, ARM CCA , etc.)
|
||||
- Architecture-specific protection checking (x86_64, s390x, aarch64, powerpc64)
|
||||
|
||||
### Random Generation (`rand`)
|
||||
|
||||
- Secure random byte generation
|
||||
- UUID generation
|
||||
|
||||
### PCI Device Management (`pcilibs`)
|
||||
|
||||
- PCI device enumeration and management
|
||||
- PCI configuration space access
|
||||
- Memory resource allocation for PCI devices
|
||||
|
||||
## Supported Architectures
|
||||
|
||||
- x86_64
|
||||
- aarch64
|
||||
- s390x
|
||||
- powerpc64 (little-endian)
|
||||
- riscv64
|
||||
|
||||
## Supported Operating Systems
|
||||
|
||||
**Operating Systems**:
|
||||
- Linux
|
||||
|
||||
## License
|
||||
|
||||
@@ -177,7 +177,7 @@ pub fn get_linux_mount_info(mount_point: &str) -> Result<LinuxMountInfo> {
|
||||
///
|
||||
/// To ensure security, the `create_mount_destination()` function takes an extra parameter `root`,
|
||||
/// which is used to ensure that `dst` is within the specified directory. And a safe version of
|
||||
/// `PathBuf` is returned to avoid TOCTTOU type of flaws.
|
||||
/// `PathBuf` is returned to avoid TOCTOU type of flaws.
|
||||
pub fn create_mount_destination<S: AsRef<Path>, D: AsRef<Path>, R: AsRef<Path>>(
|
||||
src: S,
|
||||
dst: D,
|
||||
|
||||
@@ -1,18 +1,53 @@
|
||||
# kata-types
|
||||
|
||||
This crate is a collection of constants and data types shared by multiple
|
||||
[Kata Containers](https://github.com/kata-containers/kata-containers/) components.
|
||||
Constants and data types shared by Kata Containers components.
|
||||
|
||||
It defines constants and data types used by multiple Kata Containers components. Those constants
|
||||
and data types may be defined by Kata Containers or by other projects/specifications, such as:
|
||||
## Overview
|
||||
|
||||
This crate provides common constants, data types, and configuration structures used across multiple [Kata Containers](https://github.com/kata-containers/kata-containers/) components. It includes definitions from:
|
||||
|
||||
- Kata Containers project
|
||||
- [Containerd](https://github.com/containerd/containerd)
|
||||
- [Kubelet](https://github.com/kubernetes/kubelet)
|
||||
- [Kubelet](https://github.com/kubernetes/kubernetes)
|
||||
|
||||
## Support
|
||||
## Modules
|
||||
|
||||
**Operating Systems**:
|
||||
- Linux
|
||||
| Module | Description |
|
||||
|--------|-------------|
|
||||
| `annotations` | Annotation keys for CRI-containerd, CRI-O, dockershim, and third-party integrations |
|
||||
| `capabilities` | Hypervisor capability flags (block device, multi-queue, filesystem sharing, etc.) |
|
||||
| `config` | Configuration structures for agent, hypervisor (QEMU, Cloud Hypervisor, Firecracker, Dragonball), and runtime |
|
||||
| `container` | Container-related constants and types |
|
||||
| `cpu` | CPU resource management types |
|
||||
| `device` | Device-related definitions |
|
||||
| `fs` | Filesystem constants |
|
||||
| `handler` | Handler-related types |
|
||||
| `initdata` | Initdata specification for TEE data injection |
|
||||
| `k8s` | Kubernetes-specific paths and utilities (empty-dir, configmap, secret, projected volumes) |
|
||||
| `machine_type` | Machine type definitions |
|
||||
| `mount` | Mount point structures and validation |
|
||||
| `rootless` | Rootless VMM support utilities |
|
||||
|
||||
## Configuration
|
||||
|
||||
The `config` module supports:
|
||||
|
||||
- TOML-based configuration loading
|
||||
- Drop-in configuration files
|
||||
- Hypervisor-specific configurations (QEMU, Cloud Hypervisor, Firecracker, Dragonball, Remote)
|
||||
- Agent configuration
|
||||
- Runtime configuration
|
||||
- Shared mount definitions
|
||||
|
||||
## Features
|
||||
|
||||
- `enable-vendor`: Enable vendor-specific extensions
|
||||
- `safe-path`: Enable safe path resolution (platform-specific)
|
||||
|
||||
## Platform Support
|
||||
|
||||
- **Linux**: Fully supported
|
||||
|
||||
## License
|
||||
|
||||
This code is licensed under [Apache-2.0](../../../LICENSE).
|
||||
Apache-2.0 - See [LICENSE](../../../LICENSE)
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
//! information and ensure data out of the container rootfs directory won't be affected
|
||||
//! by the container. There are several types of attacks related to container mount namespace:
|
||||
//! - symlink based attack
|
||||
//! - Time of check to time of use (TOCTTOU)
|
||||
//! - Time of check to time of use (TOCTOU)
|
||||
//!
|
||||
//! This crate provides several mechanisms for container runtimes to safely handle filesystem paths
|
||||
//! when preparing mount namespace for containers.
|
||||
@@ -35,13 +35,13 @@
|
||||
//! - [scoped_resolve()](crate::scoped_resolve()): resolve `unsafe_path` to a relative path,
|
||||
//! rooted at and constrained by `root`.
|
||||
//! - [struct PinnedPathBuf](crate::PinnedPathBuf): safe version of `PathBuf` to protect from
|
||||
//! TOCTTOU style of attacks, which ensures:
|
||||
//! TOCTOU style of attacks, which ensures:
|
||||
//! - the value of [`PinnedPathBuf::as_path()`] never changes.
|
||||
//! - the path returned by [`PinnedPathBuf::as_path()`] is always a symlink.
|
||||
//! - the filesystem object referenced by the symlink [`PinnedPathBuf::as_path()`] never changes.
|
||||
//! - the value of [`PinnedPathBuf::target()`] never changes.
|
||||
//! - [struct ScopedDirBuilder](crate::ScopedDirBuilder): safe version of `DirBuilder` to protect
|
||||
//! from symlink race and TOCTTOU style of attacks, which enhances security by:
|
||||
//! from symlink race and TOCTOU style of attacks, which enhances security by:
|
||||
//! - ensuring the new directories are created under a specified `root` directory.
|
||||
//! - avoiding symlink race attacks during making directories.
|
||||
//! - returning a [PinnedPathBuf] for the last level of directory, so it could be used for other
|
||||
|
||||
@@ -15,7 +15,7 @@ use std::path::{Component, Path, PathBuf};
|
||||
use crate::scoped_join;
|
||||
|
||||
/// A safe version of [`PathBuf`] pinned to an underlying filesystem object to protect from
|
||||
/// `TOCTTOU` style of attacks.
|
||||
/// `TOCTOU` style of attacks.
|
||||
///
|
||||
/// A [`PinnedPathBuf`] is a resolved path buffer pinned to an underlying filesystem object, which
|
||||
/// guarantees:
|
||||
|
||||
@@ -117,7 +117,7 @@ pub fn scoped_resolve<R: AsRef<Path>, U: AsRef<Path>>(root: R, unsafe_path: U) -
|
||||
/// Note that the guarantees provided by this function only apply if the path components in the
|
||||
/// returned string are not modified (in other words are not replaced with symlinks on the
|
||||
/// filesystem) after this function has returned. You may use [crate::PinnedPathBuf] to protect
|
||||
/// from such TOCTTOU attacks.
|
||||
/// from such TOCTOU attacks.
|
||||
pub fn scoped_join<R: AsRef<Path>, U: AsRef<Path>>(root: R, unsafe_path: U) -> Result<PathBuf> {
|
||||
do_scoped_resolve(root, unsafe_path).map(|(root, path)| root.join(path))
|
||||
}
|
||||
|
||||