mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-03-16 17:52:20 +00:00
Compare commits
83 Commits
2.1-alpha0
...
2.0.0-rc1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9b969bb7da | ||
|
|
fb2f3cfce2 | ||
|
|
f32a741c76 | ||
|
|
512e79f61a | ||
|
|
aa70080423 | ||
|
|
34015bae12 | ||
|
|
93b60a8327 | ||
|
|
aa9951f2cd | ||
|
|
9d8c72998b | ||
|
|
033ed13202 | ||
|
|
c058d04b94 | ||
|
|
9d2bb0c452 | ||
|
|
627d062fb2 | ||
|
|
96afe62576 | ||
|
|
d946016eb7 | ||
|
|
37f1a77a6a | ||
|
|
450a81cc54 | ||
|
|
c09f02e6f6 | ||
|
|
58c7469110 | ||
|
|
c36ea0968d | ||
|
|
ba197302e2 | ||
|
|
725ad067c1 | ||
|
|
9858c23c59 | ||
|
|
fc8f1ff03c | ||
|
|
f7b4f76082 | ||
|
|
4fd66fa689 | ||
|
|
e6ff42b8ad | ||
|
|
6710d87c6a | ||
|
|
178b79f122 | ||
|
|
bc545c6549 | ||
|
|
585481990a | ||
|
|
0057f86cfa | ||
|
|
fa0401793f | ||
|
|
60b7265961 | ||
|
|
57b53dbae8 | ||
|
|
ddf1a545d1 | ||
|
|
cbdf6400ae | ||
|
|
ceeecf9c66 | ||
|
|
7c53baea8a | ||
|
|
b549d354bf | ||
|
|
9f3113e1f6 | ||
|
|
ef94742320 | ||
|
|
d71764985d | ||
|
|
0fc04a269d | ||
|
|
8d7ac5f01c | ||
|
|
612acbe319 | ||
|
|
f3a487cd41 | ||
|
|
3a559521d1 | ||
|
|
567daf5a42 | ||
|
|
c7d913f436 | ||
|
|
7bd410c725 | ||
|
|
7fbc789855 | ||
|
|
7fc41a771a | ||
|
|
a31d82fec2 | ||
|
|
9ef4c80340 | ||
|
|
6a4e413758 | ||
|
|
678d4d189d | ||
|
|
718f718764 | ||
|
|
d860ded3f0 | ||
|
|
a141da8a20 | ||
|
|
aaaaee7a4b | ||
|
|
21efaf1fca | ||
|
|
2056623e13 | ||
|
|
34126ee704 | ||
|
|
980a338454 | ||
|
|
e14f766895 | ||
|
|
2e0731f479 | ||
|
|
addf62087c | ||
|
|
c24b68dc4f | ||
|
|
24677d7484 | ||
|
|
9e74c28158 | ||
|
|
b7aae33cc1 | ||
|
|
6d9d58278e | ||
|
|
1bc6fbda8c | ||
|
|
d39f5a85e6 | ||
|
|
d90a0eefbe | ||
|
|
2618c014a0 | ||
|
|
5c4878f37e | ||
|
|
bd6b169e98 | ||
|
|
5770336572 | ||
|
|
45daec7b37 | ||
|
|
ed5a7dc022 | ||
|
|
6fc7c77721 |
2
.github/workflows/commit-message-check.yaml
vendored
2
.github/workflows/commit-message-check.yaml
vendored
@@ -10,7 +10,7 @@ env:
|
||||
error_msg: |+
|
||||
See the document below for help on formatting commits for the project.
|
||||
|
||||
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-format
|
||||
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-forma
|
||||
|
||||
jobs:
|
||||
commit-message-check:
|
||||
|
||||
58
.github/workflows/main.yaml
vendored
58
.github/workflows/main.yaml
vendored
@@ -38,12 +38,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -66,12 +66,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -92,12 +92,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -118,12 +118,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -145,12 +145,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -172,12 +172,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -199,12 +199,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -226,12 +226,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -253,12 +253,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -303,7 +303,9 @@ jobs:
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
|
||||
echo "##[set-output name=PKG_SHA;]${pkg_sha}"
|
||||
echo ::set-env name=TAG::$tag
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./packaging/kata-deploy/action
|
||||
with:
|
||||
|
||||
52
.github/workflows/release.yaml
vendored
52
.github/workflows/release.yaml
vendored
@@ -39,12 +39,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -67,12 +67,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -93,12 +93,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -119,12 +119,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -145,12 +145,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -171,12 +171,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -198,12 +198,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -224,12 +224,12 @@ jobs:
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo "artifact-built=true" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo "artifact-built=false" >> $GITHUB_ENV
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: ${{ env.artifact-built }} == 'true'
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
@@ -275,9 +275,11 @@ jobs:
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
|
||||
echo "##[set-output name=PKG_SHA;]${pkg_sha}"
|
||||
echo ::set-env name=TAG::$tag
|
||||
mkdir -p packaging/kata-deploy
|
||||
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
|
||||
echo "::set-output name=PKG_SHA::${pkg_sha}"
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./packaging/kata-deploy/action
|
||||
with:
|
||||
|
||||
37
.github/workflows/snap-release.yaml
vendored
37
.github/workflows/snap-release.yaml
vendored
@@ -1,37 +0,0 @@
|
||||
name: Release Kata 2.x in snapcraft store
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '2.*'
|
||||
jobs:
|
||||
release-snap:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Check out Git repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Install Snapcraft
|
||||
uses: samuelmeuli/action-snapcraft@v1
|
||||
with:
|
||||
snapcraft_token: ${{ secrets.snapcraft_token }}
|
||||
|
||||
- name: Build snap
|
||||
run: |
|
||||
sudo apt-get install -y git git-extras
|
||||
kata_url="https://github.com/kata-containers/kata-containers"
|
||||
latest_version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
|
||||
current_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
|
||||
# Check if the current tag is the latest tag
|
||||
if echo -e "$latest_version\n$current_version" | sort -C -V; then
|
||||
# Current version is the latest version, build it
|
||||
snapcraft -d snap --destructive-mode
|
||||
fi
|
||||
|
||||
- name: Upload snap
|
||||
run: |
|
||||
snap_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
|
||||
snap_file="kata-containers_${snap_version}_amd64.snap"
|
||||
# Upload the snap if it exists
|
||||
if [ -f ${snap_file} ]; then
|
||||
snapcraft upload --release=candidate ${snap_file}
|
||||
fi
|
||||
25
.github/workflows/snap.yaml
vendored
25
.github/workflows/snap.yaml
vendored
@@ -1,25 +0,0 @@
|
||||
name: snap CI
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "**/Makefile"
|
||||
- "**/*.go"
|
||||
- "**/*.mk"
|
||||
- "**/*.rs"
|
||||
- "**/*.sh"
|
||||
- "**/*.toml"
|
||||
- "**/*.yaml"
|
||||
- "**/*.yml"
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Check out
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Install Snapcraft
|
||||
uses: samuelmeuli/action-snapcraft@v1
|
||||
|
||||
- name: Build snap
|
||||
run: |
|
||||
snapcraft -d snap --destructive-mode
|
||||
68
.github/workflows/static-checks.yaml
vendored
68
.github/workflows/static-checks.yaml
vendored
@@ -1,68 +0,0 @@
|
||||
on: ["pull_request"]
|
||||
name: Static checks
|
||||
jobs:
|
||||
test:
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.13.x, 1.14.x, 1.15.x]
|
||||
os: [ubuntu-18.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
GO111MODULE: off
|
||||
TRAVIS: "true"
|
||||
TRAVIS_BRANCH: ${{ github.base_ref }}
|
||||
TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
|
||||
TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
|
||||
RUST_BACKTRACE: "1"
|
||||
RUST_AGENT: "yes"
|
||||
target_branch: ${TRAVIS_BRANCH}
|
||||
steps:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Setup GOPATH
|
||||
run: |
|
||||
gopath_org=$(go env GOPATH)/src/github.com/kata-containers/
|
||||
mkdir -p ${gopath_org}
|
||||
ln -s ${PWD} ${gopath_org}
|
||||
echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
|
||||
echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
|
||||
echo "TRAVIS: ${TRAVIS}"
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
|
||||
echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Setup travis references
|
||||
run: |
|
||||
echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
|
||||
target_branch=${TRAVIS_BRANCH}
|
||||
- name: Setup
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/kata-containers/kata-containers && ./ci/setup.sh
|
||||
env:
|
||||
GOPATH: ${{ runner.workspace }}/kata-containers
|
||||
- name: Building rust
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/kata-containers/kata-containers && ./ci/install_rust.sh
|
||||
PATH=$PATH:"$HOME/.cargo/bin"
|
||||
- name: Make clippy
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/kata-containers/kata-containers/src/agent && rustup target add x86_64-unknown-linux-musl && rustup component add rustfmt && rustup component add clippy && make clippy
|
||||
- name: Static checks
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/kata-containers/kata-containers && ./ci/static-checks.sh
|
||||
- name: Build agent
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/kata-containers/kata-containers/src/agent && make
|
||||
- name: Run agent unit tests
|
||||
run: |
|
||||
cd ${GOPATH}/src/github.com/kata-containers/kata-containers/src/agent && make check
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -3,9 +3,5 @@
|
||||
**/*.rej
|
||||
**/target
|
||||
**/.vscode
|
||||
pkg/logging/Cargo.lock
|
||||
src/agent/src/version.rs
|
||||
src/agent/kata-agent.service
|
||||
src/agent/protocols/src/*.rs
|
||||
!src/agent/protocols/src/lib.rs
|
||||
|
||||
|
||||
62
.travis.yml
Normal file
62
.travis.yml
Normal file
@@ -0,0 +1,62 @@
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
dist: bionic
|
||||
os: linux
|
||||
|
||||
# set cache directories manually, because
|
||||
# we are using a non-standard directory struct
|
||||
# cargo root is in srs/agent
|
||||
#
|
||||
# If needed, caches can be cleared
|
||||
# by ways documented in
|
||||
# https://docs.travis-ci.com/user/caching#clearing-caches
|
||||
language: rust
|
||||
rust:
|
||||
- 1.44.1
|
||||
cache:
|
||||
cargo: true
|
||||
directories:
|
||||
- src/agent/target
|
||||
|
||||
before_install:
|
||||
- git remote set-branches --add origin "${TRAVIS_BRANCH}"
|
||||
- git fetch
|
||||
- export RUST_BACKTRACE=1
|
||||
- export target_branch=$TRAVIS_BRANCH
|
||||
- "ci/setup.sh"
|
||||
|
||||
# we use install to run check agent
|
||||
# so that it is easy to skip for non-amd64 platform
|
||||
install:
|
||||
- export PATH=$PATH:"$HOME/.cargo/bin"
|
||||
- export RUST_AGENT=yes
|
||||
- rustup target add x86_64-unknown-linux-musl
|
||||
- sudo ln -sf /usr/bin/g++ /bin/musl-g++
|
||||
- rustup component add rustfmt
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
|
||||
before_script:
|
||||
- "ci/install_go.sh"
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
|
||||
script:
|
||||
- "ci/static-checks.sh"
|
||||
|
||||
jobs:
|
||||
include:
|
||||
- name: x86_64 test
|
||||
os: linux
|
||||
- name: ppc64le test
|
||||
os: linux-ppc64le
|
||||
install: skip
|
||||
script: skip
|
||||
allow_failures:
|
||||
- name: ppc64le test
|
||||
fast_finish: true
|
||||
207
README.md
207
README.md
@@ -2,143 +2,136 @@
|
||||
|
||||
# Kata Containers
|
||||
|
||||
* [Kata Containers](#kata-containers)
|
||||
* [Introduction](#introduction)
|
||||
* [Getting started](#getting-started)
|
||||
* [Documentation](#documentation)
|
||||
* [Raising issues](#raising-issues)
|
||||
* [Kata Containers repositories](#kata-containers-repositories)
|
||||
* [Code Repositories](#code-repositories)
|
||||
* [Kata Containers-developed components](#kata-containers-developed-components)
|
||||
* [Agent](#agent)
|
||||
* [KSM throttler](#ksm-throttler)
|
||||
* [Runtime](#runtime)
|
||||
* [Trace forwarder](#trace-forwarder)
|
||||
* [Additional](#additional)
|
||||
* [Hypervisor](#hypervisor)
|
||||
* [Kernel](#kernel)
|
||||
* [CI](#ci)
|
||||
* [Community](#community)
|
||||
* [Getting help](#getting-help)
|
||||
* [Raising issues](#raising-issues)
|
||||
* [Kata Containers 1.x versions](#kata-containers-1x-versions)
|
||||
* [Developers](#developers)
|
||||
* [Components](#components)
|
||||
* [Kata Containers 1.x components](#kata-containers-1x-components)
|
||||
* [Common repositories](#common-repositories)
|
||||
* [Packaging and releases](#packaging-and-releases)
|
||||
* [Documentation](#documentation)
|
||||
* [Packaging](#packaging)
|
||||
* [Test code](#test-code)
|
||||
* [Utilities](#utilities)
|
||||
* [OS builder](#os-builder)
|
||||
* [Web content](#web-content)
|
||||
|
||||
---
|
||||
|
||||
Welcome to Kata Containers!
|
||||
|
||||
This repository is the home of the Kata Containers code for the 2.0 and newer
|
||||
releases.
|
||||
The purpose of this repository is to act as a "top level" site for the project. Specifically it is used:
|
||||
|
||||
If you want to learn about Kata Containers, visit the main
|
||||
[Kata Containers website](https://katacontainers.io).
|
||||
- To provide a list of the various *other* [Kata Containers repositories](#kata-containers-repositories),
|
||||
along with a brief explanation of their purpose.
|
||||
|
||||
For further details on the older (first generation) Kata Containers 1.x
|
||||
versions, see the
|
||||
[Kata Containers 1.x components](#kata-containers-1x-components)
|
||||
section.
|
||||
- To provide a general area for [Raising Issues](#raising-issues).
|
||||
|
||||
## Introduction
|
||||
## Raising issues
|
||||
|
||||
Kata Containers is an open source project and community working to build a
|
||||
standard implementation of lightweight Virtual Machines (VMs) that feel and
|
||||
perform like containers, but provide the workload isolation and security
|
||||
advantages of VMs.
|
||||
This repository is used for [raising
|
||||
issues](https://github.com/kata-containers/kata-containers/issues/new):
|
||||
|
||||
## Getting started
|
||||
- That might affect multiple code repositories.
|
||||
|
||||
See the [installation documentation](docs/install).
|
||||
|
||||
## Documentation
|
||||
|
||||
See the [official documentation](docs)
|
||||
(including [installation guides](docs/install),
|
||||
[the developer guide](docs/Developer-Guide.md),
|
||||
[design documents](docs/design) and more).
|
||||
|
||||
## Community
|
||||
|
||||
To learn more about the project, its community and governance, see the
|
||||
[community repository](https://github.com/kata-containers/community). This is
|
||||
the first place to go if you wish to contribute to the project.
|
||||
|
||||
## Getting help
|
||||
|
||||
See the [community](#community) section for ways to contact us.
|
||||
|
||||
### Raising issues
|
||||
|
||||
Please raise an issue
|
||||
[in this repository](https://github.com/kata-containers/kata-containers/issues).
|
||||
- Where the raiser is unsure which repositories are affected.
|
||||
|
||||
> **Note:**
|
||||
> If you are reporting a security issue, please follow the [vulnerability reporting process](https://github.com/kata-containers/community#vulnerability-handling)
|
||||
>
|
||||
> - If an issue affects only a single component, it should be raised in that
|
||||
> components repository.
|
||||
|
||||
#### Kata Containers 1.x versions
|
||||
## Kata Containers repositories
|
||||
|
||||
For older Kata Containers 1.x releases, please raise an issue in the
|
||||
[Kata Containers 1.x component repository](#kata-containers-1x-components)
|
||||
that seems most appropriate.
|
||||
### CI
|
||||
|
||||
If in doubt, raise an issue
|
||||
[in the Kata Containers 1.x runtime repository](https://github.com/kata-containers/runtime/issues).
|
||||
The [CI](https://github.com/kata-containers/ci) repository stores the Continuous
|
||||
Integration (CI) system configuration information.
|
||||
|
||||
## Developers
|
||||
### Community
|
||||
|
||||
### Components
|
||||
The [Community](https://github.com/kata-containers/community) repository is
|
||||
the first place to go if you want to use or contribute to the project.
|
||||
|
||||
| Component | Type | Description |
|
||||
|-|-|-|
|
||||
| [agent-ctl](tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
|
||||
| [agent](src/agent) | core | Management process running inside the virtual machine / POD that sets up the container environment. |
|
||||
| [documentation](docs) | documentation | Documentation common to all components (such as design and install documentation). |
|
||||
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images for the hypervisor. |
|
||||
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
|
||||
| [runtime](src/runtime) | core | Main component run by a container manager and providing a containerd shimv2 runtime implementation. |
|
||||
| [trace-forwarder](src/trace-forwarder) | utility | Agent tracing helper. |
|
||||
### Code Repositories
|
||||
|
||||
#### Kata Containers 1.x components
|
||||
#### Kata Containers-developed components
|
||||
|
||||
For the first generation of Kata Containers (1.x versions), each component was
|
||||
kept in a separate repository.
|
||||
##### Agent
|
||||
|
||||
For information on the Kata Containers 1.x releases, see the
|
||||
[Kata Containers 1.x releases page](https://github.com/kata-containers/runtime/releases).
|
||||
The [`kata-agent`](src/agent/README.md) runs inside the
|
||||
virtual machine and sets up the container environment.
|
||||
|
||||
For further information on particular Kata Containers 1.x components, see the
|
||||
individual component repositories:
|
||||
##### KSM throttler
|
||||
|
||||
| Component | Type | Description |
|
||||
|-|-|-|
|
||||
| [agent](https://github.com/kata-containers/agent) | core | See [components](#components). |
|
||||
| [documentation](https://github.com/kata-containers/documentation) | documentation | |
|
||||
| [KSM throttler](https://github.com/kata-containers/ksm-throttler) | optional core | Daemon that monitors containers and deduplicates memory to maximize container density on the host. |
|
||||
| [osbuilder](https://github.com/kata-containers/osbuilder) | infrastructure | See [components](#components). |
|
||||
| [packaging](https://github.com/kata-containers/packaging) | infrastructure | See [components](#components). |
|
||||
| [proxy](https://github.com/kata-containers/proxy) | core | Multiplexes communications between the shims, agent and runtime. |
|
||||
| [runtime](https://github.com/kata-containers/runtime) | core | See [components](#components). |
|
||||
| [shim](https://github.com/kata-containers/shim) | core | Handles standard I/O and signals on behalf of the container process. |
|
||||
The [`kata-ksm-throttler`](https://github.com/kata-containers/ksm-throttler)
|
||||
is an optional utility that monitors containers and deduplicates memory to
|
||||
maximize container density on a host.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - There are more components for the original Kata Containers 1.x implementation.
|
||||
> - The current implementation simplifies the design significantly:
|
||||
> compare the [current](docs/design/architecture.md) and
|
||||
> [previous generation](https://github.com/kata-containers/documentation/blob/master/design/architecture.md)
|
||||
> designs.
|
||||
##### Runtime
|
||||
|
||||
### Common repositories
|
||||
The [`kata-runtime`](src/runtime/README.md) is usually
|
||||
invoked by a container manager and provides high-level verbs to manage
|
||||
containers.
|
||||
|
||||
The following repositories are used by both the current and first generation Kata Containers implementations:
|
||||
##### Trace forwarder
|
||||
|
||||
| Component | Description | Current | First generation | Notes |
|
||||
|-|-|-|-|-|
|
||||
| CI | Continuous Integration configuration files and scripts. | [Kata 2.x](https://github.com/kata-containers/ci/tree/2.0-dev) | [Kata 1.x](https://github.com/kata-containers/ci/tree/master) | |
|
||||
| kernel | The Linux kernel used by the hypervisor to boot the guest image. | [Kata 2.x][kernel] | [Kata 1.x][kernel] | Patches are stored in the packaging component. |
|
||||
| tests | Test code. | [Kata 2.x](https://github.com/kata-containers/tests/tree/2.0-dev) | [Kata 1.x](https://github.com/kata-containers/tests/tree/master) | Excludes unit tests which live with the main code. |
|
||||
| www.katacontainers.io | Contains the source for the [main web site](https://www.katacontainers.io). | [Kata 2.x][github-katacontainers.io] | [Kata 1.x][github-katacontainers.io] | | |
|
||||
The [`kata-trace-forwarder`](src/trace-forwarder) is a component only used
|
||||
when tracing the [agent](#agent) process.
|
||||
|
||||
### Packaging and releases
|
||||
#### Additional
|
||||
|
||||
Kata Containers is now
|
||||
[available natively for most distributions](docs/install/README.md#packaged-installation-methods).
|
||||
However, packaging scripts and metadata are still used to generate snap and GitHub releases. See
|
||||
the [components](#components) section for further details.
|
||||
##### Hypervisor
|
||||
|
||||
---
|
||||
The [`qemu`](https://github.com/kata-containers/qemu) hypervisor is used to
|
||||
create virtual machines for hosting the containers.
|
||||
|
||||
[kernel]: https://www.kernel.org
|
||||
[github-katacontainers.io]: https://github.com/kata-containers/www.katacontainers.io
|
||||
##### Kernel
|
||||
|
||||
The hypervisor uses a [Linux\* kernel](https://github.com/kata-containers/linux) to boot the guest image.
|
||||
|
||||
### Documentation
|
||||
|
||||
The [docs](docs/README.md) directory holds documentation common to all code components.
|
||||
|
||||
### Packaging
|
||||
|
||||
We use the [packaging](tools/packaging/README.md) to create packages for the [system
|
||||
components](#kata-containers-developed-components) including
|
||||
[rootfs](#os-builder) and [kernel](#kernel) images.
|
||||
|
||||
### Test code
|
||||
|
||||
The [tests](https://github.com/kata-containers/tests) repository hosts all
|
||||
test code except the unit testing code (which is kept in the same repository
|
||||
as the component it tests).
|
||||
|
||||
### Utilities
|
||||
|
||||
#### OS builder
|
||||
|
||||
The [osbuilder](tools/osbuilder/README.md) tool can create
|
||||
a rootfs and a "mini O/S" image. This image is used by the hypervisor to setup
|
||||
the environment before switching to the workload.
|
||||
|
||||
#### `kata-agent-ctl`
|
||||
|
||||
[`kata-agent-ctl`](tools/agent-ctl) is a low-level test tool for
|
||||
interacting with the agent.
|
||||
|
||||
### Web content
|
||||
|
||||
The
|
||||
[www.katacontainers.io](https://github.com/kata-containers/www.katacontainers.io)
|
||||
repository contains all sources for the https://www.katacontainers.io site.
|
||||
|
||||
## Credits
|
||||
|
||||
Kata Containers uses [packagecloud](https://packagecloud.io) for package
|
||||
hosting.
|
||||
|
||||
@@ -41,14 +41,11 @@
|
||||
* [Connect to debug console](#connect-to-debug-console)
|
||||
* [Traditional debug console setup](#traditional-debug-console-setup)
|
||||
* [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
|
||||
* [Create a debug systemd service](#create-a-debug-systemd-service)
|
||||
* [Build the debug image](#build-the-debug-image)
|
||||
* [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
|
||||
* [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
|
||||
* [Enabling debug console for QEMU](#enabling-debug-console-for-qemu)
|
||||
* [Enabling debug console for cloud-hypervisor / firecracker](#enabling-debug-console-for-cloud-hypervisor--firecracker)
|
||||
* [Create a container](#create-a-container)
|
||||
* [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
|
||||
* [Obtain details of the image](#obtain-details-of-the-image)
|
||||
* [Capturing kernel boot logs](#capturing-kernel-boot-logs)
|
||||
|
||||
# Warning
|
||||
@@ -78,11 +75,6 @@ You need to install the following to build Kata Containers components:
|
||||
To view the versions of go known to work, see the `golang` entry in the
|
||||
[versions database](../versions.yaml).
|
||||
|
||||
- [rust](https://www.rust-lang.org/tools/install)
|
||||
|
||||
To view the versions of rust known to work, see the `rust` entry in the
|
||||
[versions database](../versions.yaml).
|
||||
|
||||
- `make`.
|
||||
- `gcc` (required for building the shim and runtime).
|
||||
|
||||
@@ -104,7 +96,7 @@ The build will create the following:
|
||||
You can check if your system is capable of creating a Kata Container by running the following:
|
||||
|
||||
```
|
||||
$ sudo kata-runtime check
|
||||
$ sudo kata-runtime kata-check
|
||||
```
|
||||
|
||||
If your system is *not* able to run Kata Containers, the previous command will error out and explain why.
|
||||
@@ -255,15 +247,6 @@ $ sudo systemctl restart systemd-journald
|
||||
>
|
||||
> - You should only do this step if you are testing with the latest version of the agent.
|
||||
|
||||
The rust-agent is built with a statically linked `musl`. To configure this:
|
||||
|
||||
```
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
```
|
||||
|
||||
To build the agent:
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/agent && make
|
||||
@@ -305,9 +288,9 @@ You MUST choose one of `alpine`, `centos`, `clearlinux`, `debian`, `euleros`, `f
|
||||
> - You should only do this step if you are testing with the latest version of the agent.
|
||||
|
||||
```
|
||||
$ sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/bin ../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent
|
||||
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
$ sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
$ sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/bin ../../agent/kata-agent
|
||||
$ sudo install -o root -g root -m 0440 ../../agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
$ sudo install -o root -g root -m 0440 ../../agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
|
||||
```
|
||||
|
||||
### Build a rootfs image
|
||||
@@ -354,12 +337,9 @@ You MUST choose one of `alpine`, `centos`, `clearlinux`, `euleros`, and `fedora`
|
||||
>
|
||||
> - Check the [compatibility matrix](../tools/osbuilder/README.md#platform-distro-compatibility-matrix) before creating rootfs.
|
||||
|
||||
Optionally, add your custom agent binary to the rootfs with the following, `LIBC` default is `musl`, if `ARCH` is `ppc64le`, should set the `LIBC=gnu` and `ARCH=powerpc64le`:
|
||||
Optionally, add your custom agent binary to the rootfs with the following:
|
||||
```
|
||||
$ export ARCH=$(shell uname -m)
|
||||
$ [ ${ARCH} == "ppc64le" ] && export LIBC=gnu || export LIBC=musl
|
||||
$ [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
|
||||
$ sudo install -o root -g root -m 0550 -T ../../../src/agent/target/$(ARCH)-unknown-linux-$(LIBC)/release/kata-agent ${ROOTFS_DIR}/sbin/init
|
||||
$ sudo install -o root -g root -m 0550 -T ../../agent/kata-agent ${ROOTFS_DIR}/sbin/init
|
||||
```
|
||||
|
||||
### Build an initrd image
|
||||
@@ -546,6 +526,35 @@ $ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/too
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true EXTRA_PKGS="bash coreutils" ./rootfs.sh centos'
|
||||
```
|
||||
|
||||
#### Create a debug systemd service
|
||||
|
||||
Create the service file that starts the shell in the rootfs directory:
|
||||
|
||||
```
|
||||
$ cat <<EOT | sudo tee ${ROOTFS_DIR}/lib/systemd/system/kata-debug.service
|
||||
[Unit]
|
||||
Description=Kata Containers debug console
|
||||
|
||||
[Service]
|
||||
Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
StandardInput=tty
|
||||
StandardOutput=tty
|
||||
# Must be disabled to allow the job to access the real console
|
||||
PrivateDevices=no
|
||||
Type=simple
|
||||
ExecStart=/bin/bash
|
||||
Restart=always
|
||||
EOT
|
||||
```
|
||||
|
||||
**Note**: You might need to adjust the `ExecStart=` path.
|
||||
|
||||
Add a dependency to start the debug console:
|
||||
|
||||
```
|
||||
$ sudo sed -i '$a Requires=kata-debug.service' ${ROOTFS_DIR}/lib/systemd/system/kata-containers.target
|
||||
```
|
||||
|
||||
#### Build the debug image
|
||||
|
||||
Follow the instructions in the [Build a rootfs image](#build-a-rootfs-image)
|
||||
@@ -586,55 +595,10 @@ $ sudo crictl run -r kata container.yaml pod.yaml
|
||||
|
||||
#### Connect to the virtual machine using the debug console
|
||||
|
||||
The steps required to enable debug console for QEMU slightly differ with
|
||||
those for firecracker / cloud-hypervisor.
|
||||
|
||||
##### Enabling debug console for QEMU
|
||||
|
||||
Add `agent.debug_console` to the guest kernel command line to allow the agent process to start a debug console.
|
||||
|
||||
```
|
||||
$ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_console"/g' "${kata_configuration_file}"
|
||||
```
|
||||
|
||||
Here `kata_configuration_file` could point to `/etc/kata-containers/configuration.toml`
|
||||
or `/usr/share/defaults/kata-containers/configuration.toml`
|
||||
or `/opt/kata/share/defaults/kata-containers/configuration-{hypervisor}.toml`, if
|
||||
you installed Kata Containers using `kata-deploy`.
|
||||
|
||||
##### Enabling debug console for cloud-hypervisor / firecracker
|
||||
|
||||
Slightly different configuration is required in case of firecracker and cloud hypervisor.
|
||||
Firecracker and cloud-hypervisor don't have a UNIX socket connected to `/dev/console`.
|
||||
Hence, the kernel command line option `agent.debug_console` will not work for them.
|
||||
These hypervisors support `hybrid vsocks`, which can be used for communication
|
||||
between the host and the guest. The kernel command line option `agent.debug_console_vport`
|
||||
was added to allow developers to specify on which `vsock` port the debugging console should be connected.
|
||||
|
||||
|
||||
Add the parameter `agent.debug_console_vport=1026` to the kernel command line
|
||||
as shown below:
|
||||
```
|
||||
sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_console_vport=1026"/g' "${kata_configuration_file}"
|
||||
```
|
||||
|
||||
> **Note** Ports 1024 and 1025 are reserved for communication with the agent
|
||||
> and gathering of agent logs respectively.
|
||||
|
||||
Next, connect to the debug console. The VSOCKS paths vary slightly between
|
||||
cloud-hypervisor and firecracker.
|
||||
In case of cloud-hypervisor, connect to the `vsock` as shown:
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/vm/{sandbox_id}/root/ && socat stdin unix-connect:clh.sock'
|
||||
CONNECT 1026
|
||||
```
|
||||
|
||||
**Note**: You need to type `CONNECT 1026` and press `RETURN` key after entering the `socat` command.
|
||||
|
||||
For firecracker, connect to the `hvsock` as shown:
|
||||
```
|
||||
$ sudo su -c 'cd /var/run/vc/firecracker/{sandbox_id}/root/ && socat stdin unix-connect:kata.hvsock'
|
||||
CONNECT 1026
|
||||
$ id=$(sudo crictl pods --no-trunc -q)
|
||||
$ console="/var/run/vc/vm/${id}/console.sock"
|
||||
$ sudo socat "stdin,raw,echo=0,escape=0x11" "unix-connect:${console}"
|
||||
```
|
||||
|
||||
**Note**: You need to press the `RETURN` key to see the shell prompt.
|
||||
|
||||
@@ -25,7 +25,7 @@ All documents must:
|
||||
- Have a `.md` file extension.
|
||||
- Include a TOC (table of contents) at the top of the document with links to
|
||||
all heading sections. We recommend using the
|
||||
[`check-markdown`](https://github.com/kata-containers/tests/tree/master/cmd/check-markdown)
|
||||
[`kata-check-markdown`](https://github.com/kata-containers/tests/tree/master/cmd/check-markdown)
|
||||
tool to generate the TOC.
|
||||
- Be linked to from another document in the same repository.
|
||||
|
||||
|
||||
@@ -40,7 +40,6 @@ See the [howto documentation](how-to).
|
||||
* [Intel QAT with Kata](./use-cases/using-Intel-QAT-and-kata.md)
|
||||
* [VPP with Kata](./use-cases/using-vpp-and-kata.md)
|
||||
* [SPDK vhost-user with Kata](./use-cases/using-SPDK-vhostuser-and-kata.md)
|
||||
* [Intel SGX with Kata](./use-cases/using-Intel-SGX-and-kata.md)
|
||||
|
||||
## Developer Guide
|
||||
|
||||
|
||||
@@ -1,140 +1,185 @@
|
||||
* [Introduction](#introduction)
|
||||
* [Maintenance warning](#maintenance-warning)
|
||||
* [Determine current version](#determine-current-version)
|
||||
* [Determine latest version](#determine-latest-version)
|
||||
* [Configuration changes](#configuration-changes)
|
||||
* [Unsupported scenarios](#unsupported-scenarios)
|
||||
* [Maintenance Warning](#maintenance-warning)
|
||||
* [Upgrade from Clear Containers](#upgrade-from-clear-containers)
|
||||
* [Stop all running Clear Container instances](#stop-all-running-clear-container-instances)
|
||||
* [Configuration migration](#configuration-migration)
|
||||
* [Remove Clear Containers packages](#remove-clear-containers-packages)
|
||||
* [Fedora](#fedora)
|
||||
* [Ubuntu](#ubuntu)
|
||||
* [Disable old container manager configuration](#disable-old-container-manager-configuration)
|
||||
* [Install Kata Containers](#install-kata-containers)
|
||||
* [Create a Kata Container](#create-a-kata-container)
|
||||
* [Upgrade from runV](#upgrade-from-runv)
|
||||
* [Upgrade Kata Containers](#upgrade-kata-containers)
|
||||
* [Upgrade native distribution packaged version](#upgrade-native-distribution-packaged-version)
|
||||
* [Static installation](#static-installation)
|
||||
* [Determine if you are using a static installation](#determine-if-you-are-using-a-static-installation)
|
||||
* [Remove a static installation](#remove-a-static-installation)
|
||||
* [Upgrade a static installation](#upgrade-a-static-installation)
|
||||
* [Custom assets](#custom-assets)
|
||||
* [Appendices](#appendices)
|
||||
* [Assets](#assets)
|
||||
* [Guest kernel](#guest-kernel)
|
||||
* [Image](#image)
|
||||
* [Determining asset versions](#determining-asset-versions)
|
||||
|
||||
# Introduction
|
||||
|
||||
This document outlines the options for upgrading from a
|
||||
[Kata Containers 1.x release](https://github.com/kata-containers/runtime/releases) to a
|
||||
[Kata Containers 2.x release](https://github.com/kata-containers/kata-containers/releases).
|
||||
This document explains how to upgrade from
|
||||
[Clear Containers](https://github.com/clearcontainers) and [runV](https://github.com/hyperhq/runv) to
|
||||
[Kata Containers](https://github.com/kata-containers) and how to upgrade an existing
|
||||
Kata Containers system to the latest version.
|
||||
|
||||
# Maintenance warning
|
||||
# Unsupported scenarios
|
||||
|
||||
Kata Containers 2.x is the new focus for the Kata Containers development
|
||||
community.
|
||||
Upgrading a Clear Containers system on the following distributions is **not**
|
||||
supported since the installation process for these distributions makes use of
|
||||
unpackaged components:
|
||||
|
||||
Although Kata Containers 1.x releases will continue to be published for a
|
||||
period of time, once a stable release for Kata Containers 2.x is published,
|
||||
Kata Containers 1.x stable users should consider switching to the Kata 2.x
|
||||
release.
|
||||
- [CentOS](https://github.com/clearcontainers/runtime/blob/master/docs/centos-installation-guide.md)
|
||||
- [BCLinux](https://github.com/clearcontainers/runtime/blob/master/docs/bclinux-installation-guide.md)
|
||||
- [RHEL](https://github.com/clearcontainers/runtime/blob/master/docs/rhel-installation-guide.md)
|
||||
- [SLES](https://github.com/clearcontainers/runtime/blob/master/docs/sles-installation-guide.md)
|
||||
|
||||
See the [stable branch strategy documentation](Stable-Branch-Strategy.md) for
|
||||
further details.
|
||||
Additionally, upgrading
|
||||
[Clear Linux](https://github.com/clearcontainers/runtime/blob/master/docs/clearlinux-installation-guide.md)
|
||||
is not supported as Kata Containers packages do not yet exist.
|
||||
|
||||
# Determine current version
|
||||
# Maintenance Warning
|
||||
|
||||
To display the current Kata Containers version, run one of the following:
|
||||
The Clear Containers codebase is no longer being developed. Only new releases
|
||||
will be considered for significant bug fixes.
|
||||
|
||||
```bash
|
||||
$ kata-runtime --version
|
||||
$ containerd-shim-kata-v2 --version
|
||||
The main development focus is now on Kata Containers. All Clear Containers
|
||||
users are encouraged to switch to Kata Containers.
|
||||
|
||||
# Upgrade from Clear Containers
|
||||
|
||||
Since Kata Containers can co-exist on the same system as Clear Containers, if
|
||||
you already have Clear Containers installed, the upgrade process is simply to
|
||||
install Kata Containers. However, since Clear Containers is
|
||||
[no longer being actively developed](#maintenance-warning),
|
||||
you are encouraged to remove Clear Containers from your systems.
|
||||
|
||||
## Stop all running Clear Container instances
|
||||
|
||||
Assuming a Docker\* system, to stop all currently running Clear Containers:
|
||||
|
||||
```
|
||||
$ for container in $(sudo docker ps -q); do sudo docker stop $container; done
|
||||
```
|
||||
|
||||
# Determine latest version
|
||||
## Configuration migration
|
||||
|
||||
Kata Containers 2.x releases are published on the
|
||||
[Kata Containers GitHub releases page](https://github.com/kata-containers/kata-containers/releases).
|
||||
The automatic migration of
|
||||
[Clear Containers configuration](https://github.com/clearcontainers/runtime#configuration) to
|
||||
[Kata Containers configuration](../src/runtime/README.md#configuration) is
|
||||
not supported.
|
||||
|
||||
Alternatively, if you are using Kata Containers version 1.12.0 or newer, you
|
||||
can check for newer releases using the command line:
|
||||
If you have made changes to your Clear Containers configuration, you should
|
||||
review those changes and decide whether to manually apply those changes to the
|
||||
Kata Containers configuration.
|
||||
|
||||
```bash
|
||||
$ kata-runtime check --check-version-only
|
||||
> **Note**: This step must be completed before continuing to
|
||||
> [remove the Clear Containers packages](#remove-clear-containers-packages) since doing so will
|
||||
> *delete the default Clear Containers configuration file from your system*.
|
||||
|
||||
## Remove Clear Containers packages
|
||||
|
||||
> **Warning**: If you have modified your
|
||||
> [Clear Containers configuration](https://github.com/clearcontainers/runtime#configuration),
|
||||
> you might want to make a safe copy of the configuration file before removing the
|
||||
> packages since doing so will *delete the default configuration file*
|
||||
|
||||
### Fedora
|
||||
|
||||
```
|
||||
$ sudo -E dnf remove cc-runtime\* cc-proxy\* cc-shim\* linux-container clear-containers-image qemu-lite cc-ksm-throttler
|
||||
$ sudo rm /etc/yum.repos.d/home:clearcontainers:clear-containers-3.repo
|
||||
```
|
||||
|
||||
There are various other related options. Run `kata-runtime check --help`
|
||||
for further details.
|
||||
### Ubuntu
|
||||
|
||||
# Configuration changes
|
||||
```
|
||||
$ sudo apt-get purge cc-runtime\* cc-proxy\* cc-shim\* linux-container clear-containers-image qemu-lite cc-ksm-throttler
|
||||
$ sudo rm /etc/apt/sources.list.d/clear-containers.list
|
||||
```
|
||||
|
||||
The [Kata Containers 2.x configuration file](/src/runtime/README.md#configuration)
|
||||
is compatible with the
|
||||
[Kata Containers 1.x configuration file](https://github.com/kata-containers/runtime/blob/master/README.md#configuration).
|
||||
## Disable old container manager configuration
|
||||
|
||||
However, if you have created a local configuration file
|
||||
(`/etc/kata-containers/configuration.toml`), this will mask the newer Kata
|
||||
Containers 2.x configuration file.
|
||||
Assuming a Docker installation, remove the docker configuration for Clear
|
||||
Containers:
|
||||
|
||||
Since Kata Containers 2.x introduces a number of new options and changes
|
||||
some default values, we recommend that you disable the local configuration
|
||||
file (by moving or renaming it) until you have reviewed the changes to the
|
||||
official configuration file and applied them to your local file if required.
|
||||
```
|
||||
$ sudo rm /etc/systemd/system/docker.service.d/clear-containers.conf
|
||||
```
|
||||
|
||||
## Install Kata Containers
|
||||
|
||||
Follow one of the [installation guides](install).
|
||||
|
||||
## Create a Kata Container
|
||||
|
||||
```
|
||||
$ sudo docker run -ti busybox sh
|
||||
```
|
||||
|
||||
# Upgrade from runV
|
||||
|
||||
runV and Kata Containers can run together on the same system without affecting each other, as long as they are
|
||||
not configured to use the same container root storage. Currently, runV defaults to `/run/runv` and Kata Containers
|
||||
defaults to `/var/run/kata-containers`.
|
||||
|
||||
Now, to upgrade from runV you need to perform a fresh install of Kata Containers by following one of
|
||||
the [installation guides](install).
|
||||
|
||||
# Upgrade Kata Containers
|
||||
|
||||
## Upgrade native distribution packaged version
|
||||
|
||||
As shown in the
|
||||
[installation instructions](install),
|
||||
Kata Containers provide binaries for popular distributions in their native
|
||||
packaging formats. This allows Kata Containers to be upgraded using the
|
||||
standard package management tools for your distribution.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> Users should prefer the distribution packaged version of Kata Containers
|
||||
> unless they understand the implications of a manual installation.
|
||||
# Appendices
|
||||
|
||||
## Static installation
|
||||
## Assets
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> Unless you are an advanced user, if you are using a static installation of
|
||||
> Kata Containers, we recommend you remove it and install a
|
||||
> [native distribution packaged version](#upgrade-native-distribution-packaged-version)
|
||||
> instead.
|
||||
Kata Containers requires additional resources to create a virtual machine
|
||||
container. These resources are called
|
||||
[Kata Containers assets](./design/architecture.md#assets),
|
||||
which comprise a guest kernel and a root filesystem or initrd image. This
|
||||
section describes when these components are updated.
|
||||
|
||||
### Determine if you are using a static installation
|
||||
Since the official assets are packaged, they are automatically upgraded when
|
||||
new package versions are published.
|
||||
|
||||
If the following command displays the output "static", you are using a static
|
||||
version of Kata Containers:
|
||||
> **Warning**: Note that if you use custom assets (by modifying the
|
||||
> [Kata Runtime configuration file](../src/runtime/README.md#configuration)),
|
||||
> it is your responsibility to ensure they are updated as necessary.
|
||||
|
||||
### Guest kernel
|
||||
|
||||
The `kata-linux-container` package contains a Linux\* kernel based on the
|
||||
latest vanilla version of the
|
||||
[long-term kernel](https://www.kernel.org/)
|
||||
plus a small number of
|
||||
[patches](../tools/packaging/kernel).
|
||||
|
||||
The `Longterm` branch is only updated with
|
||||
[important bug fixes](https://www.kernel.org/category/releases.html)
|
||||
meaning this package is only updated when necessary.
|
||||
|
||||
The guest kernel package is updated when a new long-term kernel is released
|
||||
and when any patch updates are required.
|
||||
|
||||
### Image
|
||||
|
||||
The `kata-containers-image` package is updated only when critical updates are
|
||||
available for the packages used to create it, such as:
|
||||
|
||||
- systemd
|
||||
- [Kata Containers Agent](../src/agent)
|
||||
|
||||
### Determining asset versions
|
||||
|
||||
To see which versions of the assets are being used:
|
||||
|
||||
```bash
|
||||
$ ls /opt/kata/bin/kata-runtime &>/dev/null && echo static
|
||||
```
|
||||
|
||||
### Remove a static installation
|
||||
|
||||
Static installations are installed in `/opt/kata/`, so to uninstall simply
|
||||
remove this directory.
|
||||
|
||||
### Upgrade a static installation
|
||||
|
||||
If you understand the implications of using a static installation, to upgrade
|
||||
first
|
||||
[remove the existing static installation](#remove-a-static-installation), then
|
||||
[install the latest release](#determine-latest-version).
|
||||
|
||||
See the
|
||||
[manual installation documentation](install/README.md#manual-installation)
|
||||
for details on how to automatically install and configure a static release
|
||||
with containerd.
|
||||
|
||||
# Custom assets
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> This section only applies to advanced users who have built their own guest
|
||||
> kernel or image.
|
||||
|
||||
If you are using custom
|
||||
[guest assets](design/architecture.md#guest-assets),
|
||||
you must upgrade them to work with Kata Containers 2.x since Kata
|
||||
Containers 1.x assets will **not** work.
|
||||
|
||||
See the following for further details:
|
||||
|
||||
- [Guest kernel documentation](/tools/packaging/kernel)
|
||||
- [Guest image and initrd documentation](/tools/osbuilder)
|
||||
|
||||
The official assets are packaged meaning they are automatically included in
|
||||
new releases.
|
||||
$ kata-runtime kata-env
|
||||
```
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
- [Runtime](#runtime)
|
||||
- [Configuration](#configuration)
|
||||
- [Networking](#networking)
|
||||
- [CNM](#cnm)
|
||||
- [Network Hotplug](#network-hotplug)
|
||||
- [Storage](#storage)
|
||||
- [Kubernetes support](#kubernetes-support)
|
||||
@@ -58,7 +59,7 @@ to go through the VSOCK interface exported by QEMU.
|
||||
|
||||
The container workload, that is, the actual OCI bundle rootfs, is exported from the
|
||||
host to the virtual machine. In the case where a block-based graph driver is
|
||||
configured, `virtio-scsi` will be used. In all other cases a `virtio-fs` VIRTIO mount point
|
||||
configured, `virtio-scsi` will be used. In all other cases a 9pfs VIRTIO mount point
|
||||
will be used. `kata-agent` uses this mount point as the root filesystem for the
|
||||
container processes.
|
||||
|
||||
@@ -156,31 +157,66 @@ In order to do so, container engines will usually add one end of a virtual
|
||||
ethernet (`veth`) pair into the container networking namespace. The other end of
|
||||
the `veth` pair is added to the host networking namespace.
|
||||
|
||||
This is a very namespace-centric approach as many hypervisors/VMMs cannot handle `veth`
|
||||
interfaces. Typically, `TAP` interfaces are created for VM connectivity.
|
||||
This is a very namespace-centric approach as many hypervisors (in particular QEMU)
|
||||
cannot handle `veth` interfaces. Typically, `TAP` interfaces are created for VM
|
||||
connectivity.
|
||||
|
||||
To overcome incompatibility between typical container engines expectations
|
||||
and virtual machines, Kata Containers networking transparently connects `veth`
|
||||
interfaces with `TAP` ones using Traffic Control:
|
||||
interfaces with `TAP` ones using MACVTAP:
|
||||
|
||||

|
||||
|
||||
With a TC filter in place, a redirection is created between the container network and the
|
||||
virtual machine. As an example, the CNI may create a device, `eth0`, in the container's network
|
||||
namespace, which is a VETH device. Kata Containers will create a tap device for the VM, `tap0_kata`,
|
||||
and setup a TC redirection filter to mirror traffic from `eth0`'s ingress to `tap0_kata`'s egress,
|
||||
and a second to mirror traffic from `tap0_kata`'s ingress to `eth0`'s egress.
|
||||
|
||||
Kata Containers maintains support for MACVTAP, which was an earlier implementation used in Kata. TC-filter
|
||||
is the default because it allows for simpler configuration, better CNI plugin compatibility, and performance
|
||||
on par with MACVTAP.
|
||||
|
||||
Kata Containers has deprecated support for bridge due to lacking performance relative to TC-filter and MACVTAP.
|
||||
|
||||
Kata Containers supports both
|
||||
[CNM](https://github.com/docker/libnetwork/blob/master/docs/design.md#the-container-network-model)
|
||||
and [CNI](https://github.com/containernetworking/cni) for networking management.
|
||||
|
||||
### CNM
|
||||
|
||||

|
||||
|
||||
__CNM lifecycle__
|
||||
|
||||
1. `RequestPool`
|
||||
|
||||
2. `CreateNetwork`
|
||||
|
||||
3. `RequestAddress`
|
||||
|
||||
4. `CreateEndPoint`
|
||||
|
||||
5. `CreateContainer`
|
||||
|
||||
6. Create `config.json`
|
||||
|
||||
7. Create PID and network namespace
|
||||
|
||||
8. `ProcessExternalKey`
|
||||
|
||||
9. `JoinEndPoint`
|
||||
|
||||
10. `LaunchContainer`
|
||||
|
||||
11. Launch
|
||||
|
||||
12. Run container
|
||||
|
||||

|
||||
|
||||
__Runtime network setup with CNM__
|
||||
|
||||
1. Read `config.json`
|
||||
|
||||
2. Create the network namespace
|
||||
|
||||
3. Call the `prestart` hook (from inside the netns)
|
||||
|
||||
4. Scan network interfaces inside netns and get the name of the interface
|
||||
created by prestart hook
|
||||
|
||||
5. Create bridge, TAP, and link all together with network interface previously
|
||||
created
|
||||
|
||||
### Network Hotplug
|
||||
|
||||
Kata Containers has developed a set of network sub-commands and APIs to add, list and
|
||||
|
||||
@@ -3,6 +3,7 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|
||||
- Sandbox based top API
|
||||
- Storage and network hotplug API
|
||||
- Plugin frameworks for external proprietary Kata runtime extensions
|
||||
- Built-in shim and proxy types and capabilities
|
||||
|
||||
## Sandbox Based API
|
||||
### Sandbox Management API
|
||||
@@ -22,17 +23,17 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|
||||
|`sandbox.Stats()`| Get the stats of a running sandbox, return a `SandboxStats` structure.|
|
||||
|`sandbox.Status()`| Get the status of the sandbox and containers, return a `SandboxStatus` structure.|
|
||||
|`sandbox.Stop(force)`| Stop a sandbox and Destroy the containers in the sandbox. When force is true, ignore guest related stop failures.|
|
||||
|`sandbox.CreateContainer(contConfig)`| Create new container in the sandbox with the `ContainerConfig` parameter. It will add new container config to `sandbox.config.Containers`.|
|
||||
|`sandbox.DeleteContainer(containerID)`| Delete a container from the sandbox by `containerID`, return a `Container` structure.|
|
||||
|`sandbox.CreateContainer(contConfig)`| Create new container in the sandbox with the `ContainerConfig` param. It will add new container config to `sandbox.config.Containers`.|
|
||||
|`sandbox.DeleteContainer(containerID)`| Delete a container from the sandbox by containerID, return a `Container` structure.|
|
||||
|`sandbox.EnterContainer(containerID, cmd)`| Run a new process in a container, executing customer's `types.Cmd` command.|
|
||||
|`sandbox.KillContainer(containerID, signal, all)`| Signal a container in the sandbox by the `containerID`.|
|
||||
|`sandbox.PauseContainer(containerID)`| Pause a running container in the sandbox by the `containerID`.|
|
||||
|`sandbox.KillContainer(containerID, signal, all)`| Signal a container in the sandbox by the containerID.|
|
||||
|`sandbox.PauseContainer(containerID)`| Pause a running container in the sandbox by the containerID.|
|
||||
|`sandbox.ProcessListContainer(containerID, options)`| List every process running inside a specific container in the sandbox, return a `ProcessList` structure.|
|
||||
|`sandbox.ResumeContainer(containerID)`| Resume a paused container in the sandbox by the `containerID`.|
|
||||
|`sandbox.StartContainer(containerID)`| Start a container in the sandbox by the `containerID`.|
|
||||
|`sandbox.ResumeContainer(containerID)`| Resume a paused container in the sandbox by the containerID.|
|
||||
|`sandbox.StartContainer(containerID)`| Start a container in the sandbox by the containerID.|
|
||||
|`sandbox.StatsContainer(containerID)`| Get the stats of a running container, return a `ContainerStats` structure.|
|
||||
|`sandbox.StatusContainer(containerID)`| Get the status of a container in the sandbox, return a `ContainerStatus` structure.|
|
||||
|`sandbox.StopContainer(containerID, force)`| Stop a container in the sandbox by the `containerID`.|
|
||||
|`sandbox.StopContainer(containerID, force)`| Stop a container in the sandbox by the containerID.|
|
||||
|`sandbox.UpdateContainer(containerID, resources)`| Update a running container in the sandbox.|
|
||||
|`sandbox.WaitProcess(containerID, processID)`| Wait on a process to terminate.|
|
||||
### Sandbox Hotplug API
|
||||
@@ -56,7 +57,7 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`sandbox.GetOOMEvent()`| Monitor the OOM events that occur in the sandbox.|
|
||||
|`sandbox.UpdateRuntimeMetrics()`| Update the `shim/hypervisor` metrics of the running sandbox.|
|
||||
|`sandbox.UpdateRuntimeMetrics()`| Update the shim/hypervisor's metrics of the running sandbox.|
|
||||
|`sandbox.GetAgentMetrics()`| Get metrics of the agent and the guest in the running sandbox.|
|
||||
|
||||
## Plugin framework for external proprietary Kata runtime extensions
|
||||
@@ -98,3 +99,32 @@ Built-in implementations include:
|
||||
|
||||
### Sandbox Connection Plugin Workflow
|
||||

|
||||
|
||||
## Built-in Shim and Proxy Types and Capabilities
|
||||
### Built-in shim/proxy sandbox configurations
|
||||
- Supported shim configurations:
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`noopshim`|Do not start any shim process.|
|
||||
|`ccshim`| Start the cc-shim binary.|
|
||||
|`katashim`| Start the `kata-shim` binary.|
|
||||
|`katashimbuiltin`|No standalone shim process but shim functionality APIs are exported.|
|
||||
- Supported proxy configurations:
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`noopProxy`| a dummy proxy implementation of the proxy interface, only used for testing purposes.|
|
||||
|`noProxy`|generic implementation for any case where no actual proxy is needed.|
|
||||
|`ccProxy`|run `ccProxy` to proxy between runtime and agent.|
|
||||
|`kataProxy`|run `kata-proxy` to translate Yamux connections between runtime and Kata agent. |
|
||||
|`kataProxyBuiltin`| no standalone proxy process and connect to Kata agent with internal Yamux translation.|
|
||||
|
||||
### Built-in Shim Capability
|
||||
Built-in shim capability is implemented by removing standalone shim process, and
|
||||
supporting the shim related APIs.
|
||||
|
||||
### Built-in Proxy Capability
|
||||
Built-in proxy capability is achieved by removing standalone proxy process, and
|
||||
connecting to Kata agent with a custom gRPC dialer that is internal Yamux translation.
|
||||
The behavior is enabled when proxy is configured as `kataProxyBuiltin`.
|
||||
|
||||
@@ -22,10 +22,10 @@ the multiple hypervisors and virtual machine monitors that Kata supports.
|
||||
## Mapping container concepts to virtual machine technologies
|
||||
|
||||
A typical deployment of Kata Containers will be in Kubernetes by way of a Container Runtime Interface (CRI) implementation. On every node,
|
||||
Kubelet will interact with a CRI implementer (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
|
||||
Kubelet will interact with a CRI implementor (such as containerd or CRI-O), which will in turn interface with Kata Containers (an OCI based runtime).
|
||||
|
||||
The CRI API, as defined at the [Kubernetes CRI-API repo](https://github.com/kubernetes/cri-api/), implies a few constructs being supported by the
|
||||
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementer, Kata must provide the following constructs:
|
||||
CRI implementation, and ultimately in Kata Containers. In order to support the full [API](https://github.com/kubernetes/cri-api/blob/a6f63f369f6d50e9d0886f2eda63d585fbd1ab6a/pkg/apis/runtime/v1alpha2/api.proto#L34-L110) with the CRI-implementor, Kata must provide the following constructs:
|
||||
|
||||

|
||||
|
||||
@@ -41,9 +41,14 @@ Each hypervisor or VMM varies on how or if it handles each of these.
|
||||
|
||||
## Kata Containers Hypervisor and VMM support
|
||||
|
||||
Kata Containers [supports multiple hypervisors](../hypervisors.md).
|
||||
Kata Containers is designed to support multiple virtual machine monitors (VMMs) and hypervisors.
|
||||
Kata Containers supports:
|
||||
- [ACRN hypervisor](https://projectacrn.org/)
|
||||
- [Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor)/[KVM](https://www.linux-kvm.org/page/Main_Page)
|
||||
- [Firecracker](https://github.com/firecracker-microvm/firecracker)/KVM
|
||||
- [QEMU](http://www.qemu-project.org/)/KVM
|
||||
|
||||
Details of each solution and a summary are provided below.
|
||||
Which configuration to use will depend on the end user's requirements. Details of each solution and a summary are provided below.
|
||||
|
||||
### QEMU/KVM
|
||||
|
||||
@@ -57,7 +62,7 @@ be changed by editing the runtime [`configuration`](./architecture.md/#configura
|
||||
Devices and features used:
|
||||
- virtio VSOCK or virtio serial
|
||||
- virtio block or virtio SCSI
|
||||
- [virtio net](https://www.redhat.com/en/virtio-networking-series)
|
||||
- virtio net
|
||||
- virtio fs or virtio 9p (recommend: virtio fs)
|
||||
- VFIO
|
||||
- hotplug
|
||||
@@ -100,34 +105,25 @@ Devices used:
|
||||
|
||||
### Cloud Hypervisor/KVM
|
||||
|
||||
[Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor), based
|
||||
on [rust-vmm](https://github.com/rust-vmm), is designed to have a
|
||||
lighter footprint and smaller attack surface for running modern cloud
|
||||
workloads. Kata Containers with Cloud
|
||||
Hypervisor provides mostly complete compatibility with Kubernetes
|
||||
comparable to the QEMU configuration. As of the 1.12 and 2.0.0 release
|
||||
of Kata Containers, the Cloud Hypervisor configuration supports both CPU
|
||||
and memory resize, device hotplug (disk and VFIO), file-system sharing through virtio-fs,
|
||||
block-based volumes, booting from VM images backed by pmem device, and
|
||||
fine-grained seccomp filters for each VMM threads (e.g. all virtio
|
||||
device worker threads). Please check [this GitHub Project](https://github.com/orgs/kata-containers/projects/21)
|
||||
for details of ongoing integration efforts.
|
||||
Cloud Hypervisor, based on [rust-VMM](https://github.com/rust-vmm), is designed to have a lighter footprint and attack surface. For Kata Containers,
|
||||
relative to Firecracker, the Cloud Hypervisor configuration provides better compatibility at the expense of exposing additional devices: file system
|
||||
sharing and direct device assignment. As of the 1.10 release of Kata Containers, Cloud Hypervisor does not support device hotplug, and as a result
|
||||
does not support updating container resources after boot, or utilizing block based volumes. While Cloud Hypervisor does support VFIO, Kata is still adding
|
||||
this support. As of 1.10, Kata does not support block based volumes or direct device assignment. See [Cloud Hypervisor device support documentation](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/docs/device_model.md)
|
||||
for more details on Cloud Hypervisor.
|
||||
|
||||
Devices and features used:
|
||||
- virtio VSOCK or virtio serial
|
||||
Devices used:
|
||||
- virtio VSOCK
|
||||
- virtio block
|
||||
- virtio net
|
||||
- virtio fs
|
||||
- virtio pmem
|
||||
- VFIO
|
||||
- hotplug
|
||||
- seccomp filters
|
||||
- [HTTP OpenAPI](https://github.com/cloud-hypervisor/cloud-hypervisor/blob/master/vmm/src/api/openapi/cloud-hypervisor.yaml)
|
||||
|
||||
### Summary
|
||||
|
||||
| Solution | release introduced | brief summary |
|
||||
|-|-|-|
|
||||
| Cloud Hypervisor | 1.10 | upstream Cloud Hypervisor with rich feature support, e.g. hotplug, VFIO and FS sharing|
|
||||
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
|
||||
| QEMU | 1.0 | upstream QEMU, with support for hotplug and filesystem sharing |
|
||||
| NEMU | 1.4 | Deprecated, removed as of 1.10 release. Slimmed down fork of QEMU, with experimental support of virtio-fs |
|
||||
| Firecracker | 1.5 | upstream Firecracker, rust-VMM based, no VFIO, no FS sharing, no memory/CPU hotplug |
|
||||
| QEMU-virtio-fs | 1.7 | upstream QEMU with support for virtio-fs. Will be removed once virtio-fs lands in upstream QEMU |
|
||||
| Cloud Hypervisor | 1.10 | rust-VMM based, includes VFIO and FS sharing through virtio-fs, no hotplug |
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
* [Advanced Topics](#advanced-topics)
|
||||
|
||||
## Kubernetes Integration
|
||||
- [Run Kata containers with `crictl`](run-kata-with-crictl.md)
|
||||
- [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
|
||||
- [How to use Kata Containers and Containerd](containerd-kata.md)
|
||||
- [How to use Kata Containers and CRI (containerd plugin) with Kubernetes](how-to-use-k8s-with-cri-containerd-and-kata.md)
|
||||
@@ -14,17 +13,8 @@
|
||||
- [How to import Kata Containers logs into Fluentd](how-to-import-kata-logs-with-fluentd.md)
|
||||
|
||||
## Hypervisors Integration
|
||||
|
||||
Currently supported hypervisors with Kata Containers include:
|
||||
- `qemu`
|
||||
- `cloud-hypervisor`
|
||||
- `firecracker`
|
||||
- `ACRN`
|
||||
|
||||
While `qemu` and `cloud-hypervisor` work out of the box with installation of Kata,
|
||||
some additional configuration is needed in case of `firecracker` and `ACRN`.
|
||||
Refer to the following guides for additional configuration steps:
|
||||
- [Kata Containers with Firecracker](https://github.com/kata-containers/documentation/wiki/Initial-release-of-Kata-Containers-with-Firecracker-support)
|
||||
- [Kata Containers with NEMU](how-to-use-kata-containers-with-nemu.md)
|
||||
- [Kata Containers with ACRN Hypervisor](how-to-use-kata-containers-with-acrn.md)
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "busybox-container",
|
||||
"namespace": "test.kata"
|
||||
},
|
||||
"image": {
|
||||
"image": "docker.io/library/busybox:latest"
|
||||
},
|
||||
"command": [
|
||||
"sleep",
|
||||
"9999"
|
||||
],
|
||||
"args": [],
|
||||
"working_dir": "/",
|
||||
"log_path": "",
|
||||
"stdin": false,
|
||||
"stdin_once": false,
|
||||
"tty": false
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "busybox-pod",
|
||||
"uid": "busybox-pod",
|
||||
"namespace": "test.kata"
|
||||
},
|
||||
"hostname": "busybox_host",
|
||||
"log_directory": "",
|
||||
"dns_config": {
|
||||
},
|
||||
"port_mappings": [],
|
||||
"resources": {
|
||||
},
|
||||
"labels": {
|
||||
},
|
||||
"annotations": {
|
||||
},
|
||||
"linux": {
|
||||
}
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "redis-client",
|
||||
"namespace": "test.kata"
|
||||
},
|
||||
"image": {
|
||||
"image": "docker.io/library/redis:6.0.8-alpine"
|
||||
},
|
||||
"command": [
|
||||
"tail", "-f", "/dev/null"
|
||||
],
|
||||
"envs": [
|
||||
{
|
||||
"key": "PATH",
|
||||
"value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
},
|
||||
{
|
||||
"key": "TERM",
|
||||
"value": "xterm"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"tier": "backend"
|
||||
},
|
||||
"annotations": {
|
||||
"pod": "redis-client-pod"
|
||||
},
|
||||
"log_path": "",
|
||||
"stdin": false,
|
||||
"stdin_once": false,
|
||||
"tty": false,
|
||||
"linux": {
|
||||
"resources": {
|
||||
"memory_limit_in_bytes": 524288000
|
||||
},
|
||||
"security_context": {
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "redis-client-pod",
|
||||
"uid": "test-redis-client-pod",
|
||||
"namespace": "test.kata"
|
||||
},
|
||||
"hostname": "redis-client",
|
||||
"log_directory": "",
|
||||
"dns_config": {
|
||||
"searches": [
|
||||
"8.8.8.8"
|
||||
]
|
||||
},
|
||||
"port_mappings": [],
|
||||
"resources": {
|
||||
"cpu": {
|
||||
"limits": 1,
|
||||
"requests": 1
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"tier": "backend"
|
||||
},
|
||||
"annotations": {
|
||||
},
|
||||
"linux": {
|
||||
}
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "redis-server",
|
||||
"namespace": "test.kata"
|
||||
},
|
||||
"image": {
|
||||
"image": "docker.io/library/redis:6.0.8-alpine"
|
||||
},
|
||||
"envs": [
|
||||
{
|
||||
"key": "PATH",
|
||||
"value": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
},
|
||||
{
|
||||
"key": "TERM",
|
||||
"value": "xterm"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"tier": "backend"
|
||||
},
|
||||
"annotations": {
|
||||
"pod": "redis-server-pod"
|
||||
},
|
||||
"log_path": "",
|
||||
"stdin": false,
|
||||
"stdin_once": false,
|
||||
"tty": false,
|
||||
"linux": {
|
||||
"resources": {
|
||||
"memory_limit_in_bytes": 524288000
|
||||
},
|
||||
"security_context": {
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "redis-server-pod",
|
||||
"uid": "test-redis-server-pod",
|
||||
"namespace": "test.kata"
|
||||
},
|
||||
"hostname": "redis-server",
|
||||
"log_directory": "",
|
||||
"dns_config": {
|
||||
"searches": [
|
||||
"8.8.8.8"
|
||||
]
|
||||
},
|
||||
"port_mappings": [],
|
||||
"resources": {
|
||||
"cpu": {
|
||||
"limits": 1,
|
||||
"requests": 1
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"tier": "backend"
|
||||
},
|
||||
"annotations": {
|
||||
},
|
||||
"linux": {
|
||||
}
|
||||
}
|
||||
@@ -56,9 +56,8 @@ There are some limitations with this approach:
|
||||
|
||||
As was mentioned above, not all containers need the same modules, therefore using
|
||||
the configuration file for specifying the list of kernel modules per [POD][3] can
|
||||
be a pain.
|
||||
Unlike the configuration file, [annotations](how-to-set-sandbox-config-kata.md)
|
||||
provide a way to specify custom configurations per POD.
|
||||
be a pain. Unlike the configuration file, annotations provide a way to specify
|
||||
custom configurations per POD.
|
||||
|
||||
The list of kernel modules and parameters can be set using the annotation
|
||||
`io.katacontainers.config.agent.kernel_modules` as a semicolon separated
|
||||
@@ -102,7 +101,7 @@ spec:
|
||||
tty: true
|
||||
```
|
||||
|
||||
> **Note**: To pass annotations to Kata containers, [CRI-O must be configured correctly](how-to-set-sandbox-config-kata.md#cri-o-configuration)
|
||||
> **Note**: To pass annotations to Kata Containers, [CRI must be configured correctly](how-to-set-sandbox-config-kata.md#cri-configuration)
|
||||
|
||||
[1]: ../../src/runtime
|
||||
[2]: ../../src/agent
|
||||
|
||||
@@ -3,11 +3,6 @@
|
||||
Kata Containers gives users freedom to customize at per-pod level, by setting
|
||||
a wide range of Kata specific annotations in the pod specification.
|
||||
|
||||
Some annotations may be [restricted](#restricted-annotations) by the
|
||||
configuration file for security reasons, notably annotations that could lead the
|
||||
runtime to execute programs on the host. Such annotations are marked with _(R)_ in
|
||||
the tables below.
|
||||
|
||||
# Kata Configuration Annotations
|
||||
There are several kinds of Kata configurations and they are listed below.
|
||||
|
||||
@@ -31,7 +26,6 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| Key | Value Type | Comments |
|
||||
|-------| ----- | ----- |
|
||||
| `io.katacontainers.config.agent.enable_tracing` | `boolean` | enable tracing for the agent |
|
||||
| `io.katacontainers.config.agent.container_pipe_size` | uint32 | specify the size of the std(in/out) pipes created for containers |
|
||||
| `io.katacontainers.config.agent.kernel_modules` | string | the list of kernel modules and their parameters that will be loaded in the guest kernel. Semicolon separated list of kernel modules and their parameters. These modules will be loaded in the guest kernel using `modprobe`(8). E.g., `e1000e InterruptThrottleRate=3000,3000,3000 EEE=1; i915 enable_ppgtt=0` |
|
||||
| `io.katacontainers.config.agent.trace_mode` | string | the trace mode for the agent |
|
||||
| `io.katacontainers.config.agent.trace_type` | string | the trace type for the agent |
|
||||
@@ -44,24 +38,17 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_noflush` | `boolean` | Denotes whether flush requests for the device are ignored |
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_set` | `boolean` | cache-related options will be set to block devices or not |
|
||||
| `io.katacontainers.config.hypervisor.block_device_driver` | string | the driver to be used for block device, valid values are `virtio-blk`, `virtio-scsi`, `nvdimm`|
|
||||
| `io.katacontainers.config.hypervisor.cpu_features` | `string` | Comma-separated list of CPU features to pass to the CPU (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.ctlpath` (R) | `string` | Path to the `acrnctl` binary for the ACRN hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` |
|
||||
| `io.katacontainers.config.hypervisor.default_vcpus` | uint32| the default vCPUs assigned for a VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.disable_block_device_use` | `boolean` | disallow a block device from being used |
|
||||
| `io.katacontainers.config.hypervisor.disable_image_nvdimm` | `boolean` | specify if a `nvdimm` device should be used as rootfs for the guest (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.disable_vhost_net` | `boolean` | specify if `vhost-net` is not available on the host |
|
||||
| `io.katacontainers.config.hypervisor.enable_hugepages` | `boolean` | if the memory should be `pre-allocated` from huge pages |
|
||||
| `io.katacontainers.config.hypervisor.enable_iommu_platform` | `boolean` | enable `iommu` on CCW devices (QEMU s390x) |
|
||||
| `io.katacontainers.config.hypervisor.enable_iommu` | `boolean` | enable `iommu` on Q35 (QEMU x86_64) |
|
||||
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
|
||||
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.enable_swap` | `boolean` | enable swap of VM memory |
|
||||
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.entropy_source` | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
|
||||
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
|
||||
| `io.katacontainers.config.hypervisor.file_mem_backend` | string | file based memory backend root directory |
|
||||
| `io.katacontainers.config.hypervisor.firmware_hash` | string | container firmware SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.firmware` | string | the guest firmware that will run the container VM |
|
||||
| `io.katacontainers.config.hypervisor.guest_hook_path` | string | the path within the VM that will be used for drop in hooks |
|
||||
@@ -72,7 +59,7 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.initrd_hash` | string | container guest initrd SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.initrd` | string | the guest initrd image that will run in the container VM |
|
||||
| `io.katacontainers.config.hypervisor.jailer_hash` | string | container jailer SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.jailer_path` (R) | string | the jailer that will constrain the container VM |
|
||||
| `io.katacontainers.config.hypervisor.jailer_path` | string | the jailer that will constrain the container VM |
|
||||
| `io.katacontainers.config.hypervisor.kernel_hash` | string | container kernel image SHA-512 hash value |
|
||||
| `io.katacontainers.config.hypervisor.kernel_params` | string | additional guest kernel parameters |
|
||||
| `io.katacontainers.config.hypervisor.kernel` | string | the kernel used to boot the container VM |
|
||||
@@ -82,16 +69,14 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.memory_slots` | uint32| the memory slots assigned to the VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.msize_9p` | uint32 | the `msize` for 9p shares |
|
||||
| `io.katacontainers.config.hypervisor.path` | string | the hypervisor that will run the container VM |
|
||||
| `io.katacontainers.config.hypervisor.pcie_root_port` | specify the number of PCIe Root Port devices. The PCIe Root Port device is used to hot-plug a PCIe device (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.shared_fs` | string | the shared file system type, either `virtio-9p` or `virtio-fs` |
|
||||
| `io.katacontainers.config.hypervisor.use_vsock` | `boolean` | specify use of `vsock` for agent communication |
|
||||
| `io.katacontainers.config.hypervisor.vhost_user_store_path` (R) | `string` | specify the directory path where vhost-user devices related folders, sockets and device nodes should be (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache_size` | uint32 | virtio-fs DAX cache size in `MiB` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
|
||||
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to `virtiofs` daemon |
|
||||
|
||||
# CRI-O Configuration
|
||||
# CRI Configuration
|
||||
|
||||
In case of CRI-O, all annotations specified in the pod spec are passed down to Kata.
|
||||
|
||||
@@ -116,7 +101,7 @@ $ cat /etc/containerd/config
|
||||
|
||||
```
|
||||
|
||||
Additional documentation on the above configuration can be found in the
|
||||
Additional documentation on the above configuration can be found in the
|
||||
[containerd docs](https://github.com/containerd/cri/blob/8d5a8355d07783ba2f8f451209f6bdcc7c412346/docs/config.md).
|
||||
|
||||
# Example - Using annotations
|
||||
@@ -174,31 +159,3 @@ spec:
|
||||
stdin: true
|
||||
tty: true
|
||||
```
|
||||
|
||||
# Restricted annotations
|
||||
|
||||
Some annotations are _restricted_, meaning that the configuration file specifies
|
||||
the acceptable values. Currently, only hypervisor annotations are restricted,
|
||||
for security reason, with the intent to control which binaries the Kata
|
||||
Containers runtime will launch on your behalf.
|
||||
|
||||
The configuration file validates the annotation _name_ as well as the annotation
|
||||
_value_.
|
||||
|
||||
The acceptable annotation names are defined by the `enable_annotations` entry in
|
||||
the configuration file.
|
||||
|
||||
For restricted annotations, an additional configuration entry provides a list of
|
||||
acceptable values. Since most restricted annotations are intended to control
|
||||
which binaries the runtime can execute, the valid value is generally provided by
|
||||
a shell pattern, as defined by `glob(3)`. The table below provides the name of
|
||||
the configuration entry:
|
||||
|
||||
| Key | Config file entry | Comments |
|
||||
|-------| ----- | ----- |
|
||||
| `ctlpath` | `valid_ctlpaths` | Valid paths for `acrnctl` binary |
|
||||
| `file_mem_backend` | `valid_file_mem_backends` | Valid locations for the file-based memory backend root directory |
|
||||
| `jailer_path` | `valid_jailer_paths`| Valid paths for the jailer constraining the container VM (Firecracker) |
|
||||
| `path` | `valid_hypervisor_paths` | Valid hypervisors to run the container VM |
|
||||
| `vhost_user_store_path` | `valid_vhost_user_store_paths` | Valid paths for vhost-user related files|
|
||||
| `virtio_fs_daemon` | `valid_virtio_fs_daemon_paths` | Valid paths for the `virtiofsd` daemon |
|
||||
|
||||
115
docs/how-to/how-to-use-kata-containers-with-nemu.md
Normal file
115
docs/how-to/how-to-use-kata-containers-with-nemu.md
Normal file
@@ -0,0 +1,115 @@
|
||||
|
||||
# Kata Containers with NEMU
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [Pre-requisites](#pre-requisites)
|
||||
* [NEMU](#nemu)
|
||||
* [Download and build](#download-and-build)
|
||||
* [x86_64](#x86_64)
|
||||
* [aarch64](#aarch64)
|
||||
* [Configure Kata Containers](#configure-kata-containers)
|
||||
|
||||
Kata Containers relies by default on the QEMU hypervisor in order to spawn the virtual machines running containers. [NEMU](https://github.com/intel/nemu) is a fork of QEMU that:
|
||||
- Reduces the number of lines of code.
|
||||
- Removes all legacy devices.
|
||||
- Reduces the emulation as far as possible.
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes how to run Kata Containers with NEMU, first by explaining how to download, build and install it. Then it walks through the steps needed to update your Kata Containers configuration in order to run with NEMU.
|
||||
|
||||
## Pre-requisites
|
||||
This document requires Kata Containers to be [installed](../install/README.md) on your system.
|
||||
|
||||
Also, it's worth noting that NEMU only supports the `x86_64` and `aarch64` architectures.
|
||||
|
||||
## NEMU
|
||||
|
||||
### Download and build
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/intel/nemu.git
|
||||
$ cd nemu
|
||||
$ git fetch origin
|
||||
$ git checkout origin/experiment/automatic-removal
|
||||
```
|
||||
#### x86_64
|
||||
```
|
||||
$ SRCDIR=$PWD ./tools/build_x86_64_virt.sh
|
||||
```
|
||||
#### aarch64
|
||||
```
|
||||
$ SRCDIR=$PWD ./tools/build_aarch64.sh
|
||||
```
|
||||
|
||||
> **Note:** The branch `experiment/automatic-removal` is a branch published by Jenkins after it has applied the automatic removal script to the `topic/virt-x86` branch. The purpose of this code removal being to reduce the source tree by removing files not being used by NEMU.
|
||||
|
||||
After those commands have successfully returned, you will find the NEMU binary at `$HOME/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt` (__x86__), or `$HOME/build-aarch64/aarch64-softmmu/qemu-system-aarch64` (__ARM__).
|
||||
|
||||
You also need the `OVMF` firmware in order to boot the virtual machine's kernel. It can currently be found at this [location](https://github.com/intel/ovmf-virt/releases).
|
||||
```bash
|
||||
$ sudo mkdir -p /usr/share/nemu
|
||||
$ OVMF_URL=$(curl -sL https://api.github.com/repos/intel/ovmf-virt/releases/latest | jq -S '.assets[0].browser_download_url')
|
||||
$ curl -o OVMF.fd -L $(sed -e 's/^"//' -e 's/"$//' <<<"$OVMF_URL")
|
||||
$ sudo install -o root -g root -m 0640 OVMF.fd /usr/share/nemu/
|
||||
```
|
||||
> **Note:** The OVMF firmware will be located at this temporary location until the changes can be pushed upstream.
|
||||
|
||||
|
||||
## Configure Kata Containers
|
||||
All you need from this section is to modify the configuration file `/usr/share/defaults/kata-containers/configuration.toml` to specify the options related to the hypervisor.
|
||||
|
||||
|
||||
```diff
|
||||
[hypervisor.qemu]
|
||||
-path = "/usr/bin/qemu-lite-system-x86_64"
|
||||
+path = "/home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt"
|
||||
kernel = "/usr/share/kata-containers/vmlinuz.container"
|
||||
initrd = "/usr/share/kata-containers/kata-containers-initrd.img"
|
||||
image = "/usr/share/kata-containers/kata-containers.img"
|
||||
-machine_type = "pc"
|
||||
+machine_type = "virt"
|
||||
|
||||
# Optional space-separated list of options to pass to the guest kernel.
|
||||
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
|
||||
@@ -31,7 +31,7 @@
|
||||
|
||||
# Path to the firmware.
|
||||
# If you want that qemu uses the default firmware leave this option empty
|
||||
-firmware = ""
|
||||
+firmware = "/usr/share/nemu/OVMF.fd"
|
||||
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
```
|
||||
|
||||
As you can see from this snippet above, all you need to change is:
|
||||
- The path to the hypervisor binary, `/home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt` in this example.
|
||||
- The machine type from `pc` to `virt`.
|
||||
- The path to the firmware binary, `/usr/share/nemu/OVMF.fd` in this example.
|
||||
|
||||
Once you have saved those modifications, you can start a new container:
|
||||
```bash
|
||||
$ docker run --runtime=kata-runtime -it busybox
|
||||
```
|
||||
And you will be able to verify this new container is running with the NEMU hypervisor by looking for the hypervisor path and the machine type from the `qemu` process running on your system:
|
||||
```bash
|
||||
$ ps -aux | grep qemu
|
||||
root ... /home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt
|
||||
... -machine virt,accel=kvm,kernel_irqchip,nvdimm ...
|
||||
```
|
||||
|
||||
Also relying on `kata-runtime kata-env` is a reliable way to validate you are using the expected hypervisor:
|
||||
```bash
|
||||
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/'
|
||||
[Hypervisor]
|
||||
MachineType = "virt"
|
||||
Version = "NEMU (like QEMU) version 3.0.0 (v3.0.0-179-gaf9a791)\nCopyright (c) 2003-2017 Fabrice Bellard and the QEMU Project developers"
|
||||
Path = "/home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt"
|
||||
BlockDeviceDriver = "virtio-scsi"
|
||||
EntropySource = "/dev/urandom"
|
||||
Msize9p = 8192
|
||||
MemorySlots = 10
|
||||
Debug = true
|
||||
UseVSock = false
|
||||
```
|
||||
@@ -1,150 +0,0 @@
|
||||
# Working with `crictl`
|
||||
|
||||
* [What's `cri-tools`](#whats-cri-tools)
|
||||
* [Use `crictl` run Pods in Kata containers](#use-crictl-run-pods-in-kata-containers)
|
||||
* [Run `busybox` Pod](#run-busybox-pod)
|
||||
* [Run pod sandbox with config file](#run-pod-sandbox-with-config-file)
|
||||
* [Create container in the pod sandbox with config file](#create-container-in-the-pod-sandbox-with-config-file)
|
||||
* [Start container](#start-container)
|
||||
* [Run `redis` Pod](#run-redis-pod)
|
||||
* [Create `redis-server` Pod](#create-redis-server-pod)
|
||||
* [Create `redis-client` Pod](#create-redis-client-pod)
|
||||
* [Check `redis` server is working](#check-redis-server-is-working)
|
||||
|
||||
## What's `cri-tools`
|
||||
|
||||
[`cri-tools`](https://github.com/kubernetes-sigs/cri-tools) provides debugging and validation tools for Kubelet Container Runtime Interface (CRI).
|
||||
|
||||
`cri-tools` includes two tools: `crictl` and `critest`. `crictl` is the CLI for Kubelet CRI, in this document, we will show how to use `crictl` to run Pods in Kata containers.
|
||||
|
||||
> **Note:** `cri-tools` is only intended for debugging and validation purposes; do not use it to run production workloads.
|
||||
|
||||
> **Note:** For how to install and configure `cri-tools` with CRI runtimes like `containerd` or CRI-O, please also refer to other [howtos](./README.md).
|
||||
|
||||
## Use `crictl` run Pods in Kata containers
|
||||
|
||||
Sample config files in this document can be found [here](./data/crictl/).
|
||||
|
||||
### Run `busybox` Pod
|
||||
|
||||
#### Run pod sandbox with config file
|
||||
|
||||
```bash
|
||||
$ sudo crictl runp -r kata sandbox_config.json
|
||||
16a62b035940f9c7d79fd53e93902d15ad21f7f9b3735f1ac9f51d16539b836b
|
||||
|
||||
$ sudo crictl pods
|
||||
POD ID CREATED STATE NAME NAMESPACE ATTEMPT
|
||||
16a62b035940f 21 seconds ago Ready busybox-pod 0
|
||||
```
|
||||
|
||||
#### Create container in the pod sandbox with config file
|
||||
|
||||
```bash
|
||||
$ sudo crictl create 16a62b035940f container_config.json sandbox_config.json
|
||||
e6ca0e0f7f532686236b8b1f549e4878e4fe32ea6b599a5d684faf168b429202
|
||||
```
|
||||
|
||||
List containers and check the container is in `Created` state:
|
||||
|
||||
```bash
|
||||
$ sudo crictl ps -a
|
||||
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
|
||||
e6ca0e0f7f532 docker.io/library/busybox:latest 19 seconds ago Created busybox-container 0 16a62b035940f
|
||||
```
|
||||
|
||||
#### Start container
|
||||
|
||||
```bash
|
||||
$ sudo crictl start e6ca0e0f7f532
|
||||
e6ca0e0f7f532
|
||||
```
|
||||
|
||||
List containers and we can see that the container state has changed from `Created` to `Running`:
|
||||
|
||||
```bash
|
||||
$ sudo crictl ps
|
||||
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
|
||||
e6ca0e0f7f532 docker.io/library/busybox:latest About a minute ago Running busybox-container 0 16a62b035940f
|
||||
```
|
||||
|
||||
And last we can `exec` into `busybox` container:
|
||||
|
||||
```bash
|
||||
$ sudo crictl exec -it e6ca0e0f7f532 sh
|
||||
```
|
||||
|
||||
And run commands in it:
|
||||
|
||||
```
|
||||
/ # hostname
|
||||
busybox_host
|
||||
/ # id
|
||||
uid=0(root) gid=0(root)
|
||||
```
|
||||
|
||||
### Run `redis` Pod
|
||||
|
||||
In this example, we will create two Pods: one is for `redis` server, and another one is `redis` client.
|
||||
|
||||
#### Create `redis-server` Pod
|
||||
|
||||
It's also possible to start a container within a single command:
|
||||
|
||||
```bash
|
||||
$ sudo crictl run -r kata redis_server_container_config.json redis_server_sandbox_config.json
|
||||
bb36e05c599125842c5193909c4de186b1cee3818f5d17b951b6a0422681ce4b
|
||||
```
|
||||
|
||||
#### Create `redis-client` Pod
|
||||
|
||||
```bash
|
||||
$ sudo crictl run -r kata redis_client_container_config.json redis_client_sandbox_config.json
|
||||
e344346c5414e3f51f97f20b2262e0b7afe457750e94dc0edb109b94622fc693
|
||||
```
|
||||
|
||||
After the new container started, we can check the running Pods and containers.
|
||||
|
||||
```bash
|
||||
$ sudo crictl pods
|
||||
POD ID CREATED STATE NAME NAMESPACE ATTEMPT
|
||||
469d08a7950e3 30 seconds ago Ready redis-client-pod 0
|
||||
02c12fdb08219 About a minute ago Ready redis-server-pod 0
|
||||
|
||||
$ sudo crictl ps
|
||||
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
|
||||
e344346c5414e docker.io/library/redis:6.0.8-alpine 35 seconds ago Running redis-client 0 469d08a7950e3
|
||||
bb36e05c59912 docker.io/library/redis:6.0.8-alpine About a minute ago Running redis-server 0 02c12fdb08219
|
||||
```
|
||||
|
||||
#### Check `redis` server is working
|
||||
|
||||
To connect to the `redis-server`. First we need to get the `redis-server`'s IP address.
|
||||
|
||||
```bash
|
||||
|
||||
$ server=$(sudo crictl inspectp 02c12fdb08219 | jq .status.network.ip | tr -d '"' )
|
||||
$ echo $server
|
||||
172.19.0.118
|
||||
```
|
||||
|
||||
Launch `redis-cli` in the new Pod and connect to the server running at `172.19.0.118`.
|
||||
|
||||
```bash
|
||||
$ sudo crictl exec -it e344346c5414e redis-cli -h $server
|
||||
172.19.0.118:6379> get test-key
|
||||
(nil)
|
||||
172.19.0.118:6379> set test-key test-value
|
||||
OK
|
||||
172.19.0.118:6379> get test-key
|
||||
"test-value"
|
||||
```
|
||||
|
||||
Then back to `redis-server`, check if the `test-key` is set in server.
|
||||
|
||||
```bash
|
||||
$ sudo crictl exec -it bb36e05c59912 redis-cli get test-key
|
||||
"test-value"
|
||||
```
|
||||
|
||||
The returned `test-value` is the value that was just set by `redis-cli` in the `redis-client` Pod.
|
||||
@@ -46,7 +46,6 @@ overridden by `/etc/kata-containers/configuration.toml` if provided) such that:
|
||||
- `enable_template = true`
|
||||
- `initrd =` is set
|
||||
- `image =` option is commented out or removed
|
||||
- `shared_fs` should not be `virtio-fs`
|
||||
|
||||
Then you can create a VM templating for later usage by calling
|
||||
```
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
# Hypervisors
|
||||
|
||||
* [Hypervisors](#hypervisors)
|
||||
* [Introduction](#introduction)
|
||||
* [Types](#types)
|
||||
* [Determine currently configured hypervisor](#determine-currently-configured-hypervisor)
|
||||
* [Choose a Hypervisor](#choose-a-hypervisor)
|
||||
|
||||
## Introduction
|
||||
|
||||
Kata Containers supports multiple hypervisors. This document provides a very
|
||||
high level overview of the available hypervisors, giving suggestions as to
|
||||
which hypervisors you may wish to investigate further.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> This document is not prescriptive or authoritative:
|
||||
>
|
||||
> - It is up to you to decide which hypervisors may be most appropriate for
|
||||
> your use-case.
|
||||
> - Refer to the official documentation for each hypervisor for further details.
|
||||
|
||||
## Types
|
||||
|
||||
Since each hypervisor offers different features and options, Kata Containers
|
||||
provides a separate
|
||||
[configuration file](/src/runtime/README.md#configuration)
|
||||
for each. The configuration files contain comments explaining which options
|
||||
are available, their default values and how each setting can be used.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> The simplest way to switch between hypervisors is to create a symbolic link
|
||||
> to the appropriate hypervisor-specific configuration file.
|
||||
|
||||
| Hypervisor | Written in | Architectures | Type | Configuration file |
|
||||
|-|-|-|-|-|
|
||||
[ACRN] | C | `x86_64` | Type 1 (bare metal) | `configuration-acrn.toml` |
|
||||
[Cloud Hypervisor] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) | `configuration-clh.toml` |
|
||||
[Firecracker] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) | `configuration-fc.toml` |
|
||||
[QEMU] | C | all | Type 2 ([KVM]) | `configuration-qemu.toml` |
|
||||
|
||||
## Determine currently configured hypervisor
|
||||
|
||||
```bash
|
||||
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/' | grep Path
|
||||
```
|
||||
|
||||
## Choose a Hypervisor
|
||||
|
||||
The table below provides a brief summary of some of the differences between
|
||||
the hypervisors:
|
||||
|
||||
|
||||
| Hypervisor | Summary | Features | Limitations | Container Creation speed | Memory density | Use cases | Comment |
|
||||
|-|-|-|-|-|-|-|-|
|
||||
[ACRN] | Safety critical and real-time workloads | | | excellent | excellent | Embedded and IOT systems | For advanced users |
|
||||
[Cloud Hypervisor] | Low latency, small memory footprint, small attack surface | Minimal | | excellent | excellent | High performance modern cloud workloads | |
|
||||
[Firecracker] | Very slimline | Extremely minimal | Doesn't support all device types | excellent | excellent | Serverless / FaaS | |
|
||||
[QEMU] | Lots of features | Lots | | good | good | Good option for most users | All users |
|
||||
|
||||
For further details, see the [Virtualization in Kata Containers](design/virtualization.md) document and the official documentation for each hypervisor.
|
||||
|
||||
[ACRN]: https://projectacrn.org
|
||||
[Cloud Hypervisor]: https://github.com/cloud-hypervisor/cloud-hypervisor
|
||||
[Firecracker]: https://github.com/firecracker-microvm/firecracker
|
||||
[KVM]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
|
||||
[QEMU]: http://www.qemu-project.org
|
||||
@@ -1,82 +1,98 @@
|
||||
# Kata Containers installation user guides
|
||||
|
||||
* [Kata Containers installation user guides](#kata-containers-installation-user-guides)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Legacy installation](#legacy-installation)
|
||||
* [Packaged installation methods](#packaged-installation-methods)
|
||||
* [Official packages](#official-packages)
|
||||
* [Snap Installation](#snap-installation)
|
||||
* [Automatic Installation](#automatic-installation)
|
||||
* [Manual Installation](#manual-installation)
|
||||
* [Build from source installation](#build-from-source-installation)
|
||||
* [Installing on a Cloud Service Platform](#installing-on-a-cloud-service-platform)
|
||||
* [Further information](#further-information)
|
||||
- [Kata Containers installation user guides](#kata-containers-installation-user-guides)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Packaged installation methods](#packaged-installation-methods)
|
||||
- [Official packages](#official-packages)
|
||||
- [Automatic Installation](#automatic-installation)
|
||||
- [Snap Installation](#snap-installation)
|
||||
- [Scripted Installation](#scripted-installation)
|
||||
- [Manual Installation](#manual-installation)
|
||||
- [Build from source installation](#build-from-source-installation)
|
||||
- [Installing on a Cloud Service Platform](#installing-on-a-cloud-service-platform)
|
||||
- [Further information](#further-information)
|
||||
|
||||
The following is an overview of the different installation methods available. All of these methods equally result
|
||||
in a system configured to run Kata Containers.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Kata Containers requires nested virtualization or bare metal.
|
||||
See the
|
||||
[hardware requirements](/src/runtime/README.md#hardware-requirements)
|
||||
[hardware requirements](../../src/runtime/README.md#hardware-requirements)
|
||||
to see if your system is capable of running Kata Containers.
|
||||
|
||||
## Legacy installation
|
||||
|
||||
If you wish to install a legacy 1.x version of Kata Containers, see
|
||||
[the Kata Containers 1.x installation documentation](https://github.com/kata-containers/documentation/tree/master/install/).
|
||||
|
||||
## Packaged installation methods
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - Packaged installation methods use your distribution's native package format (such as RPM or DEB).
|
||||
> - You are strongly encouraged to choose an installation method that provides
|
||||
> automatic updates, to ensure you benefit from security updates and bug fixes.
|
||||
|
||||
| Installation method | Description | Automatic updates | Use case |
|
||||
|------------------------------------------------------|---------------------------------------------------------------------|-------------------|----------------------------------------------------------|
|
||||
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions official repositories | yes | Recommended for most users. |
|
||||
| [Using snap](#snap-installation) | Easy to install | yes | Good alternative to official distro packages. |
|
||||
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. |
|
||||
| [Manual](#manual-installation) | Follow a guide step-by-step to install a working system | **No!** | For those who want the latest release with more control. |
|
||||
| [Build from source](#build-from-source-installation) | Build the software components manually | **No!** | Power users and developers only. |
|
||||
| Installation method | Description | Distributions supported |
|
||||
|------------------------------------------------------|-----------------------------------------------------------------------------------------|--------------------------------------|
|
||||
| [Automatic](#automatic-installation) |Run a single command to install a full system | |
|
||||
| [Using snap](#snap-installation) |Easy to install and automatic updates |any distro that supports snapd |
|
||||
| [Using official distro packages](#official-packages) |Kata packages provided by Linux distributions official repositories | |
|
||||
| [Scripted](#scripted-installation) |Generates an installation script which will result in a working system when executed | |
|
||||
| [Manual](#manual-installation) |Allows the user to read a brief document and execute the specified commands step-by-step | |
|
||||
|
||||
### Official packages
|
||||
|
||||
Kata packages are provided by official distribution repositories for:
|
||||
|
||||
| Distribution (link to installation guide) | Minimum versions |
|
||||
|----------------------------------------------------------|--------------------------------------------------------------------------------|
|
||||
| [CentOS](centos-installation-guide.md) | 8 |
|
||||
| [Fedora](fedora-installation-guide.md) | 32, Rawhide |
|
||||
| [openSUSE](opensuse-installation-guide.md) | [Leap 15.1](opensuse-leap-15.1-installation-guide.md)<br>Leap 15.2, Tumbleweed |
|
||||
| [SUSE Linux Enterprise (SLE)](sle-installation-guide.md) | SLE 15 SP1, 15 SP2 |
|
||||
| Distribution (link to packages) | Versions | Contacts |
|
||||
| -------------------------------------------------------- | ------------------------------------------------------------------------------ | -------- |
|
||||
| [CentOS](centos-installation-guide.md) | 8 | |
|
||||
| [Fedora](fedora-installation-guide.md) | 32, Rawhide | |
|
||||
| [SUSE Linux Enterprise (SLE)](sle-installation-guide.md) | SLE 15 SP1, 15 SP2 | |
|
||||
| [openSUSE](opensuse-installation-guide.md) | [Leap 15.1](opensuse-leap-15.1-installation-guide.md)<br>Leap 15.2, Tumbleweed | |
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> All users are encouraged to use the official distribution versions of Kata
|
||||
> Containers unless they understand the implications of alternative methods.
|
||||
|
||||
### Automatic Installation
|
||||
|
||||
[Use `kata-manager`](installing-with-kata-manager.md) to automatically install Kata packages.
|
||||
|
||||
### Snap Installation
|
||||
|
||||
> **Note:** The snap installation is available for all distributions which support `snapd`.
|
||||
|
||||
[](https://snapcraft.io/kata-containers)
|
||||
|
||||
[Use snap](snap-installation-guide.md) to install Kata Containers from https://snapcraft.io.
|
||||
|
||||
### Automatic Installation
|
||||
|
||||
[Use `kata-manager`](/utils/README.md) to automatically install a working Kata Containers system.
|
||||
### Scripted Installation
|
||||
[Use `kata-doc-to-script`](installing-with-kata-doc-to-script.md) to generate installation scripts that can be reviewed before they are executed.
|
||||
|
||||
### Manual Installation
|
||||
Manual installation instructions are available for [these distributions](#packaged-installation-methods) and document how to:
|
||||
1. Add the Kata Containers repository to your distro package manager, and import the packages signing key.
|
||||
2. Install the Kata Containers packages.
|
||||
3. Install a supported container manager.
|
||||
4. Configure the container manager to use Kata Containers as the default OCI runtime. Or, for Kata Containers 1.5.0 or above, configure the
|
||||
`io.containerd.kata.v2` to be the runtime shim (see [containerd runtime v2 (shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2)
|
||||
and [How to use Kata Containers and CRI (containerd plugin) with Kubernetes](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md)).
|
||||
|
||||
Follow the [containerd installation guide](container-manager/containerd/containerd-install.md).
|
||||
> **Notes on upgrading**:
|
||||
> - If you are installing Kata Containers on a system that already has Clear Containers or `runv` installed,
|
||||
> first read [the upgrading document](../Upgrading.md).
|
||||
|
||||
> **Notes on releases**:
|
||||
> - [This download server](http://download.opensuse.org/repositories/home:/katacontainers:/releases:/)
|
||||
> hosts the Kata Containers packages built by OBS for all the supported architectures.
|
||||
> Packages are available for the latest and stable releases (more info [here](../Stable-Branch-Strategy.md)).
|
||||
>
|
||||
> - The following guides apply to the latest Kata Containers release
|
||||
> (a.k.a. `master` release).
|
||||
>
|
||||
> - When choosing a stable release, replace all `master` occurrences in the URLs
|
||||
> with a `stable-x.y` version available on the [download server](http://download.opensuse.org/repositories/home:/katacontainers:/releases:/).
|
||||
|
||||
> **Notes on packages source verification**:
|
||||
> - The Kata packages hosted on the download server are signed with GPG to ensure integrity and authenticity.
|
||||
>
|
||||
> - The public key used to sign packages is available [at this link](https://raw.githubusercontent.com/kata-containers/tests/master/data/rpm-signkey.pub); the fingerprint is `9FDC0CB6 3708CF80 3696E2DC D0B37B82 6063F3ED`.
|
||||
>
|
||||
> - Only trust the signing key and fingerprint listed in the previous bullet point. Do not disable GPG checks,
|
||||
> otherwise packages source and authenticity is not guaranteed.
|
||||
|
||||
## Build from source installation
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - Power users who decide to build from sources should be aware of the
|
||||
@@ -88,7 +104,6 @@ who are comfortable building software from source to use the latest component
|
||||
versions. This is not recommended for normal users.
|
||||
|
||||
## Installing on a Cloud Service Platform
|
||||
|
||||
* [Amazon Web Services (AWS)](aws-installation-guide.md)
|
||||
* [Google Compute Engine (GCE)](gce-installation-guide.md)
|
||||
* [Microsoft Azure](azure-installation-guide.md)
|
||||
@@ -96,7 +111,6 @@ versions. This is not recommended for normal users.
|
||||
* [VEXXHOST OpenStack Cloud](vexxhost-installation-guide.md)
|
||||
|
||||
## Further information
|
||||
|
||||
* The [upgrading document](../Upgrading.md).
|
||||
* The [developer guide](../Developer-Guide.md).
|
||||
* The [runtime documentation](../../src/runtime/README.md).
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
# Install Kata Containers with containerd
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - If Kata Containers and / or containerd are packaged by your distribution,
|
||||
> we recommend you install these versions to ensure they are updated when
|
||||
> new releases are available.
|
||||
|
||||
> **Warning:**
|
||||
>
|
||||
> - These instructions install the **newest** versions of Kata Containers and
|
||||
> containerd from binary release packages. These versions may not have been
|
||||
> tested with your distribution version.
|
||||
>
|
||||
> - Since your package manager is not being used, it is **your**
|
||||
> responsibility to ensure these packages are kept up-to-date when new
|
||||
> versions are released.
|
||||
>
|
||||
> - If you decide to proceed and install a Kata Containers release, you can
|
||||
> still check for the latest version of Kata Containers by running
|
||||
> `kata-runtime check --only-list-releases`.
|
||||
>
|
||||
> - These instructions will not work for Fedora 31 and higher since those
|
||||
> distribution versions only support cgroups version 2 by default. However,
|
||||
> Kata Containers currently requires cgroups version 1 (on the host side). See
|
||||
> https://github.com/kata-containers/kata-containers/issues/927 for further
|
||||
> details.
|
||||
|
||||
## Install Kata Containers
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> If your distribution packages Kata Containers, we recommend you install that
|
||||
> version. If it does not, or you wish to perform a manual installation,
|
||||
> continue with the steps below.
|
||||
|
||||
- Download a release from:
|
||||
|
||||
- https://github.com/kata-containers/kata-containers/releases
|
||||
|
||||
Note that Kata Containers uses [semantic versioning](https://semver.org) so
|
||||
you should install a version that does *not* include a dash ("-"), since this
|
||||
indicates a pre-release version.
|
||||
|
||||
- Unpack the downloaded archive.
|
||||
|
||||
Kata Containers packages use a `/opt/kata/` prefix so either add that to
|
||||
your `PATH`, or create symbolic links for the following commands. The
|
||||
advantage of using symbolic links is that the `systemd(1)` configuration file
|
||||
for containerd will not need to be modified to allow the daemon to find this
|
||||
binary (see the [section on installing containerd](#install-containerd) below).
|
||||
|
||||
| Command | Description |
|
||||
|-|-|
|
||||
| `/opt/kata/bin/containerd-shim-kata-v2` | The main Kata 2.x binary |
|
||||
| `/opt/kata/bin/kata-collect-data.sh` | Data collection script used for [raising issues](https://github.com/kata-containers/kata-containers/issues) |
|
||||
| `/opt/kata/bin/kata-runtime` | Utility command |
|
||||
|
||||
- Check installation by showing version details:
|
||||
|
||||
```bash
|
||||
$ kata-runtime --version
|
||||
```
|
||||
|
||||
## Install containerd
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> If your distribution packages containerd, we recommend you install that
|
||||
> version. If it does not, or you wish to perform a manual installation,
|
||||
> continue with the steps below.
|
||||
|
||||
- Download a release from:
|
||||
|
||||
- https://github.com/containerd/containerd/releases
|
||||
|
||||
- Unpack the downloaded archive.
|
||||
|
||||
- Configure containerd
|
||||
|
||||
- Download the standard `systemd(1)` service file and install to
|
||||
`/etc/systemd/system/`:
|
||||
|
||||
- https://raw.githubusercontent.com/containerd/containerd/master/containerd.service
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - You will need to reload the systemd configuration after installing this
|
||||
> file.
|
||||
>
|
||||
> - If you have not created a symbolic link for
|
||||
> `/opt/kata/bin/containerd-shim-kata-v2`, you will need to modify this
|
||||
> file to ensure the containerd daemon's `PATH` contains `/opt/kata/`.
|
||||
> See the `Environment=` command in `systemd.exec(5)` for further
|
||||
> details.
|
||||
|
||||
- Add the Kata Containers configuration to the containerd configuration file:
|
||||
|
||||
```toml
|
||||
[plugins]
|
||||
[plugins.cri]
|
||||
[plugins.cri.containerd]
|
||||
default_runtime_name = "kata"
|
||||
|
||||
[plugins.cri.containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> The containerd daemon needs to be able to find the
|
||||
> `containerd-shim-kata-v2` binary to allow Kata Containers to be created.
|
||||
|
||||
- Start the containerd service.
|
||||
|
||||
## Test the installation
|
||||
|
||||
You are now ready to run Kata Containers. You can perform a simple test by
|
||||
running the following commands:
|
||||
|
||||
```bash
|
||||
$ image="docker.io/library/busybox:latest"
|
||||
$ sudo ctr image pull "$image"
|
||||
$ sudo ctr run --runtime "io.containerd.kata.v2" --rm -t "$image" test-kata uname -r
|
||||
```
|
||||
|
||||
The last command above shows details of the kernel version running inside the
|
||||
container, which will likely be different to the host kernel version.
|
||||
@@ -6,7 +6,7 @@
|
||||
* [Install Kata](#install-kata)
|
||||
* [Create a Kata-enabled Image](#create-a-kata-enabled-image)
|
||||
|
||||
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime check` checks for nested virtualization, but does not fail if support is not found.
|
||||
Kata Containers on Google Compute Engine (GCE) makes use of [nested virtualization](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances). Most of the installation procedure is identical to that for Kata on your preferred distribution, but enabling nested virtualization currently requires extra steps on GCE. This guide walks you through creating an image and instance with nested virtualization enabled. Note that `kata-runtime kata-check` checks for nested virtualization, but does not fail if support is not found.
|
||||
|
||||
As a prerequisite, this guide assumes an installed and configured instance of the [Google Cloud SDK](https://cloud.google.com/sdk/downloads). For a zero-configuration option, all of the commands below have been tested under [Google Cloud Shell](https://cloud.google.com/shell/) (as of Jun 2018). Verify your `gcloud` installation and configuration:
|
||||
|
||||
|
||||
47
docs/install/installing-with-kata-doc-to-script.md
Normal file
47
docs/install/installing-with-kata-doc-to-script.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Installing with `kata-doc-to-script`
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [Packages Installation](#packages-installation)
|
||||
* [Docker Installation and Setup](#docker-installation-and-setup)
|
||||
|
||||
## Introduction
|
||||
Use [these installation instructions](README.md#packaged-installation-methods) together with
|
||||
[`kata-doc-to-script`](https://github.com/kata-containers/tests/blob/master/.ci/kata-doc-to-script.sh)
|
||||
to generate installation bash scripts.
|
||||
|
||||
> Note:
|
||||
> - Only the Docker container manager installation can be scripted. For other setups you must
|
||||
> install and configure the container manager manually.
|
||||
|
||||
## Packages Installation
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ curl -fsSL -O https://raw.githubusercontent.com/kata-containers/documentation/master/install/${ID}-installation-guide.md
|
||||
$ bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/.ci/kata-doc-to-script.sh) ${ID}-installation-guide.md ${ID}-install.sh"
|
||||
```
|
||||
|
||||
For example, if your distribution is CentOS, the previous example will generate a runnable shell script called `centos-install.sh`.
|
||||
To proceed with the installation, run:
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ bash "./${ID}-install.sh"
|
||||
```
|
||||
|
||||
## Docker Installation and Setup
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ curl -fsSL -O https://raw.githubusercontent.com/kata-containers/documentation/master/install/docker/${ID}-docker-install.md
|
||||
$ bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/.ci/kata-doc-to-script.sh) ${ID}-docker-install.md ${ID}-docker-install.sh"
|
||||
```
|
||||
|
||||
For example, if your distribution is CentOS, this will generate a runnable shell script called `centos-docker-install.sh`.
|
||||
|
||||
To proceed with the Docker installation, run:
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ bash "./${ID}-docker-install.sh"
|
||||
```
|
||||
47
docs/install/installing-with-kata-manager.md
Normal file
47
docs/install/installing-with-kata-manager.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Installing with `kata-manager`
|
||||
|
||||
* [Introduction](#introduction)
|
||||
* [Full Installation](#full-installation)
|
||||
* [Install the Kata packages only](#install-the-kata-packages-only)
|
||||
* [Further Information](#further-information)
|
||||
|
||||
## Introduction
|
||||
`kata-manager` automates the Kata Containers installation procedure documented for [these Linux distributions](README.md#packaged-installation-methods).
|
||||
|
||||
> **Note**:
|
||||
> - `kata-manager` requires `curl` and `sudo` installed on your system.
|
||||
>
|
||||
> - Full installation mode is only available for Docker container manager. For other setups, you
|
||||
> can still use `kata-manager` to [install Kata package](#install-the-kata-packages-only), and then setup your container manager manually.
|
||||
>
|
||||
> - You can run `kata-manager` in dry run mode by passing the `-n` flag. Dry run mode allows you to review the
|
||||
> commands that `kata-manager` would run, without doing any change to your system.
|
||||
|
||||
|
||||
## Full Installation
|
||||
This command does the following:
|
||||
1. Installs Kata Containers packages
|
||||
2. Installs Docker
|
||||
3. Configures Docker to use the Kata OCI runtime by default
|
||||
|
||||
```bash
|
||||
$ bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/cmd/kata-manager/kata-manager.sh) install-docker-system"
|
||||
```
|
||||
|
||||
<!--
|
||||
You can ignore the content of this comment.
|
||||
(test code run by test-install-docs.sh to validate code blocks this document)
|
||||
|
||||
```bash
|
||||
$ bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/cmd/kata-manager/kata-manager.sh) remove-packages"
|
||||
```
|
||||
-->
|
||||
## Install the Kata packages only
|
||||
Use the following command to only install Kata Containers packages.
|
||||
|
||||
```bash
|
||||
$ bash -c "$(curl -fsSL https://raw.githubusercontent.com/kata-containers/tests/master/cmd/kata-manager/kata-manager.sh) install-packages"
|
||||
```
|
||||
|
||||
## Further Information
|
||||
For more information on what `kata-manager` can do, refer to the [`kata-manager` page](https://github.com/kata-containers/tests/blob/master/cmd/kata-manager).
|
||||
@@ -54,7 +54,7 @@ to enable nested virtualization can be found on the
|
||||
[KVM Nested Guests page](https://www.linux-kvm.org/page/Nested_Guests)
|
||||
|
||||
Alternatively, and for other architectures, the Kata Containers built in
|
||||
[`check`](../../src/runtime/README.md#hardware-requirements)
|
||||
[`kata-check`](../../src/runtime/README.md#hardware-requirements)
|
||||
command can be used *inside Minikube* once Kata has been installed, to check for compatibility.
|
||||
|
||||
## Setting up Minikube
|
||||
|
||||
@@ -1,123 +1,13 @@
|
||||
# Kata Containers snap package
|
||||
|
||||
* [Install Kata Containers](#install-kata-containers)
|
||||
* [Configure Kata Containers](#configure-kata-containers)
|
||||
* [Integration with non-compatible shim v2 Container Engines](#integration-with-non-compatible-shim-v2-container-engines)
|
||||
* [Integration with Docker](#integration-with-docker)
|
||||
* [Integration with Podman](#integration-with-podman)
|
||||
* [Integration with shim v2 Container Engines](#integration-with-shim-v2-container-engines)
|
||||
* [Remove Kata Containers snap package](#remove-kata-containers-snap-package)
|
||||
|
||||
|
||||
## Install Kata Containers
|
||||
# Install Kata Containers from `snapcraft.io`
|
||||
|
||||
Kata Containers can be installed in any Linux distribution that supports
|
||||
[snapd](https://docs.snapcraft.io/installing-snapd).
|
||||
|
||||
> NOTE: From Kata Containers 2.x, only the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2)
|
||||
> is supported, note that some container engines (`docker`, `podman`, etc) may not
|
||||
> be able to run Kata Containers 2.x.
|
||||
Run the following command to install Kata Containers:
|
||||
|
||||
Kata Containers 1.x is released through the *stable* channel while Kata Containers
|
||||
2.x is available in the *candidate* channel.
|
||||
```bash
|
||||
$ sudo snap install kata-containers --classic
|
||||
```
|
||||
|
||||
Run the following command to install **Kata Containers 1.x**:
|
||||
|
||||
```sh
|
||||
$ sudo snap install kata-containers --classic
|
||||
```
|
||||
|
||||
Run the following command to install **Kata Containers 2.x**:
|
||||
|
||||
```sh
|
||||
$ sudo snap install kata-containers --candidate --classic
|
||||
```
|
||||
|
||||
## Configure Kata Containers
|
||||
|
||||
By default, the Kata Containers snap image is mounted at `/snap/kata-containers` as a
|
||||
read-only file system, so the default configuration file cannot be edited.
|
||||
Fortunately, Kata Containers supports loading a configuration file from a
|
||||
path other than the default.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/kata-containers
|
||||
$ sudo cp /snap/kata-containers/current/usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers/
|
||||
$ $EDITOR /etc/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Integration with non-compatible shim v2 Container Engines
|
||||
|
||||
At the time of writing this document, `docker` and `podman` **do not support Kata
|
||||
Containers 2.x, therefore Kata Containers 1.x must be used instead.**
|
||||
|
||||
The path to the runtime provided by the Kata Containers 1.x snap package is
|
||||
`/snap/bin/kata-containers.runtime`, it should be used to run Kata Containers 1.x.
|
||||
|
||||
### Integration with Docker
|
||||
|
||||
`/etc/docker/daemon.json` is the configuration file for `docker`, use the
|
||||
following configuration to add a new runtime (`kata`) to `docker`.
|
||||
|
||||
```json
|
||||
{
|
||||
"runtimes": {
|
||||
"kata": {
|
||||
"path": "/snap/bin/kata-containers.runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Once the above configuration has been applied, use the
|
||||
following commands to restart `docker` and run Kata Containers 1.x.
|
||||
|
||||
```sh
|
||||
$ sudo systemctl restart docker
|
||||
$ docker run -ti --runtime kata busybox sh
|
||||
```
|
||||
|
||||
### Integration with Podman
|
||||
|
||||
`/usr/share/containers/containers.conf` is the configuration file for `podman`,
|
||||
add the following configuration in the `[engine.runtimes]` section.
|
||||
|
||||
```toml
|
||||
kata = [
|
||||
"/snap/bin/kata-containers.runtime"
|
||||
]
|
||||
```
|
||||
|
||||
Once the above configuration has been applied, use the following command to run
|
||||
Kata Containers 1.x with `podman`
|
||||
|
||||
```sh
|
||||
$ sudo podman run -ti --runtime kata docker.io/library/busybox sh
|
||||
```
|
||||
|
||||
## Integration with shim v2 Container Engines
|
||||
|
||||
The Container engine daemon (`cri-o`, `containerd`, etc) needs to be able to find the
|
||||
`containerd-shim-kata-v2` binary to allow Kata Containers to be created.
|
||||
Run the following command to create a symbolic link to the shim v2 binary.
|
||||
|
||||
```sh
|
||||
$ sudo ln -sf /snap/kata-containers/current/usr/bin/containerd-shim-kata-v2 /usr/local/bin/containerd-shim-kata-v2
|
||||
```
|
||||
|
||||
Once the symbolic link has been created and the engine daemon configured, `io.containerd.kata.v2`
|
||||
can be used as runtime.
|
||||
|
||||
Read the following documents to know how to run Kata Containers 2.x with `containerd`.
|
||||
|
||||
* [How to use Kata Containers and Containerd](https://github.com/kata-containers/kata-containers/blob/2.0-dev/docs/how-to/containerd-kata.md)
|
||||
* [Install Kata Containers with containerd](https://github.com/kata-containers/kata-containers/blob/2.0-dev/docs/install/container-manager/containerd/containerd-install.md)
|
||||
|
||||
|
||||
## Remove Kata Containers snap package
|
||||
|
||||
Run the following command to remove the Kata Containers snap:
|
||||
|
||||
```sh
|
||||
$ sudo snap remove kata-containers
|
||||
```
|
||||
For further information on integrating and configuring the `snap` Kata Containers install,
|
||||
refer to the [Kata Containers packaging `snap` documentation](https://github.com/kata-containers/packaging/blob/master/snap/README.md#configure-kata-containers).
|
||||
|
||||
@@ -1,112 +0,0 @@
|
||||
# Kata Containers with SGX
|
||||
|
||||
- [Check if SGX is enabled](#check-if-sgx-is-enabled)
|
||||
- [Install Host kernel with SGX support](#install-host-kernel-with-sgx-support)
|
||||
- [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
|
||||
- [Run Kata Containers with SGX enabled](#run-kata-containers-with-sgx-enabled)
|
||||
|
||||
Intel® Software Guard Extensions (SGX) is a set of instructions that increases the security
|
||||
of applications code and data, giving them more protections from disclosure or modification.
|
||||
|
||||
> **Note:** At the time of writing this document, SGX patches have not landed on the Linux kernel
|
||||
> project, so specific versions for guest and host kernels must be installed to enable SGX.
|
||||
|
||||
## Check if SGX is enabled
|
||||
|
||||
Run the following command to check if your host supports SGX.
|
||||
|
||||
```sh
|
||||
$ grep -o sgx /proc/cpuinfo
|
||||
```
|
||||
|
||||
Continue to the following section if the output of the above command is empty,
|
||||
otherwise continue to section [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
|
||||
|
||||
## Install Host kernel with SGX support
|
||||
|
||||
The following commands were tested on Fedora 32, they might work on other distros too.
|
||||
|
||||
```sh
|
||||
$ git clone --depth=1 https://github.com/intel/kvm-sgx
|
||||
$ pushd kvm-sgx
|
||||
$ cp /boot/config-$(uname -r) .config
|
||||
$ yes "" | make oldconfig
|
||||
$ # In the following step, enable: INTEL_SGX and INTEL_SGX_VIRTUALIZATION
|
||||
$ make menuconfig
|
||||
$ make -j$(($(nproc)-1)) bzImage
|
||||
$ make -j$(($(nproc)-1)) modules
|
||||
$ sudo make modules_install
|
||||
$ sudo make install
|
||||
$ popd
|
||||
$ sudo reboot
|
||||
```
|
||||
|
||||
> **Notes:**
|
||||
> * Run: `mokutil --sb-state` to check whether secure boot is enabled, if so, you will need to sign the kernel.
|
||||
> * You'll lose SGX support when a new distro kernel is installed and the system rebooted.
|
||||
|
||||
Once you have restarted your system with the new brand Linux Kernel with SGX support, run
|
||||
the following command to make sure it's enabled. If the output is empty, go to the BIOS
|
||||
setup and enable SGX manually.
|
||||
|
||||
```sh
|
||||
$ grep -o sgx /proc/cpuinfo
|
||||
```
|
||||
|
||||
## Install Guest kernel with SGX support
|
||||
|
||||
Install the guest kernel in the Kata Containers directory, this way it can be used to run
|
||||
Kata Containers.
|
||||
|
||||
```sh
|
||||
$ curl -LOk https://github.com/devimc/kvm-sgx/releases/download/v0.0.1/kata-virtiofs-sgx.tar.gz
|
||||
$ sudo tar -xf kata-virtiofs-sgx.tar.gz -C /usr/share/kata-containers/
|
||||
$ sudo sed -i 's|kernel =|kernel = "/usr/share/kata-containers/vmlinux-virtiofs-sgx.container"|g' \
|
||||
/usr/share/defaults/kata-containers/configuration.toml
|
||||
```
|
||||
|
||||
## Run Kata Containers with SGX enabled
|
||||
|
||||
Before running a Kata Container make sure that your version of `crio` or `containerd`
|
||||
supports annotations.
|
||||
For `containerd` check in `/etc/containerd/config.toml` that the list of `pod_annotations` passed
|
||||
to the `sandbox` are: `["io.katacontainers.*", "sgx.intel.com/epc"]`.
|
||||
|
||||
> `sgx.yaml`
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: sgx
|
||||
annotations:
|
||||
sgx.intel.com/epc: "32Mi"
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
runtimeClassName: kata
|
||||
containers:
|
||||
- name: c1
|
||||
image: busybox
|
||||
command:
|
||||
- sh
|
||||
stdin: true
|
||||
tty: true
|
||||
volumeMounts:
|
||||
- mountPath: /dev/sgx/
|
||||
name: test-volume
|
||||
volumes:
|
||||
- name: test-volume
|
||||
hostPath:
|
||||
path: /dev/sgx/
|
||||
type: Directory
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl apply -f sgx.yaml
|
||||
$ kubectl exec -ti sgx ls /dev/sgx/
|
||||
enclave provision
|
||||
```
|
||||
|
||||
The output of the latest command shouldn't be empty, otherwise check
|
||||
your system environment to make sure SGX is fully supported.
|
||||
|
||||
[1]: github.com/cloud-hypervisor/cloud-hypervisor/
|
||||
@@ -93,7 +93,9 @@ impl HashSerializer {
|
||||
// Take care to only add the first instance of a key. This matters for loggers (but not
|
||||
// Records) since a child loggers have parents and the loggers are serialised child first
|
||||
// meaning the *newest* fields are serialised first.
|
||||
self.fields.entry(key).or_insert(value);
|
||||
if !self.fields.contains_key(&key) {
|
||||
self.fields.insert(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_field(&mut self, key: &str) {
|
||||
@@ -180,6 +182,12 @@ impl<D> RuntimeLevelFilter<D> {
|
||||
level: Mutex::new(level),
|
||||
}
|
||||
}
|
||||
|
||||
fn set_level(&self, level: slog::Level) {
|
||||
let mut log_level = self.level.lock().unwrap();
|
||||
|
||||
*log_level = level;
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Drain for RuntimeLevelFilter<D>
|
||||
|
||||
@@ -21,10 +21,20 @@ parts:
|
||||
version="9999"
|
||||
kata_url="https://github.com/kata-containers/kata-containers"
|
||||
|
||||
if echo "${GITHUB_REF}" | grep -q -E "^refs/tags"; then
|
||||
version=$(echo ${GITHUB_REF} | cut -d/ -f3)
|
||||
git checkout ${version}
|
||||
fi
|
||||
image_info="${SNAPCRAFT_IMAGE_INFO:-}"
|
||||
snap_env="$(echo "${image_info}" | egrep -o "build_url.*" | egrep -o "snap.*build" | cut -d/ -f2)"
|
||||
|
||||
case "${snap_env}" in
|
||||
stable)
|
||||
# Get the latest stable version
|
||||
version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
|
||||
git checkout ${version}
|
||||
;;
|
||||
|
||||
*-dev)
|
||||
version="${snap_env}"
|
||||
;;
|
||||
esac
|
||||
|
||||
snapcraftctl set-grade "stable"
|
||||
snapcraftctl set-version "${version}"
|
||||
@@ -57,10 +67,15 @@ parts:
|
||||
*) echo "unsupported architecture: $(uname -m)"; exit 1;;
|
||||
esac
|
||||
|
||||
yq_version=3.4.1
|
||||
# Workaround to get latest release from github (to not use github token).
|
||||
# Get the redirection to latest release on github.
|
||||
yq_latest_url=$(curl -Ls -o /dev/null -w %{url_effective} "https://${yq_pkg}/releases/latest")
|
||||
# The redirected url should include the latest release version
|
||||
# https://github.com/mikefarah/yq/releases/tag/<VERSION-HERE>
|
||||
yq_version=$(basename "${yq_latest_url}")
|
||||
yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
|
||||
curl -o "${yq_path}" -LSsf "${yq_url}"
|
||||
chmod +x "${yq_path}"
|
||||
curl -o "${yq_path}" -LSsf ${yq_url}
|
||||
chmod +x ${yq_path}
|
||||
|
||||
kata_dir=gopath/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
version="$(${yq_path} r ${kata_dir}/versions.yaml languages.golang.meta.newest-version)"
|
||||
@@ -209,7 +224,7 @@ parts:
|
||||
after: [godeps, runtime]
|
||||
build-packages:
|
||||
- gcc
|
||||
- python3
|
||||
- python
|
||||
- zlib1g-dev
|
||||
- libcap-ng-dev
|
||||
- libglib2.0-dev
|
||||
@@ -269,7 +284,7 @@ parts:
|
||||
done
|
||||
|
||||
# Only x86_64 supports libpmem
|
||||
[ "$(uname -m)" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev libseccomp-dev
|
||||
[ "$(uname -m)" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev
|
||||
|
||||
configure_hypervisor=${kata_dir}/tools/packaging/scripts/configure-hypervisor.sh
|
||||
chmod +x ${configure_hypervisor}
|
||||
@@ -305,8 +320,4 @@ parts:
|
||||
|
||||
apps:
|
||||
runtime:
|
||||
command: usr/bin/kata-runtime
|
||||
shim:
|
||||
command: usr/bin/containerd-shim-kata-v2
|
||||
collect-data:
|
||||
command: usr/bin/kata-collect-data.sh
|
||||
|
||||
592
src/agent/Cargo.lock
generated
592
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -11,7 +11,7 @@ rustjail = { path = "rustjail" }
|
||||
protocols = { path = "protocols" }
|
||||
netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] }
|
||||
lazy_static = "1.3.0"
|
||||
ttrpc = "0.3.0"
|
||||
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
|
||||
protobuf = "=2.14.0"
|
||||
libc = "0.2.58"
|
||||
nix = "0.17.0"
|
||||
@@ -21,24 +21,18 @@ signal-hook = "0.1.9"
|
||||
scan_fmt = "0.2.3"
|
||||
scopeguard = "1.0.0"
|
||||
regex = "1"
|
||||
|
||||
# slog:
|
||||
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
|
||||
# - The 'max_*' features allow changing the log level at runtime
|
||||
# (by stopping the compiler from removing log calls).
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
|
||||
slog-scope = "4.1.2"
|
||||
|
||||
# Redirect ttrpc log calls
|
||||
slog-stdlog = "4.0.0"
|
||||
log = "0.4.11"
|
||||
|
||||
# for testing
|
||||
tempfile = "3.1.0"
|
||||
prometheus = { version = "0.9.0", features = ["process"] }
|
||||
procfs = "0.7.9"
|
||||
anyhow = "1.0.32"
|
||||
cgroups = { package = "cgroups-rs", version = "0.2.0" }
|
||||
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
@@ -47,6 +41,3 @@ members = [
|
||||
"protocols",
|
||||
"rustjail",
|
||||
]
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
||||
@@ -25,6 +25,11 @@ export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
|
||||
|
||||
BUILD_TYPE = release
|
||||
|
||||
# set proto file to generate
|
||||
ifdef proto
|
||||
PROTO_FILE=${proto}
|
||||
endif
|
||||
|
||||
ARCH = $(shell uname -m)
|
||||
LIBC ?= musl
|
||||
ifneq ($(LIBC),musl)
|
||||
@@ -41,13 +46,6 @@ ifeq ($(ARCH), ppc64le)
|
||||
$(warning "WARNING: powerpc64le-unknown-linux-musl target is unavailable")
|
||||
endif
|
||||
|
||||
|
||||
EXTRA_RUSTFLAGS :=
|
||||
ifeq ($(ARCH), aarch64)
|
||||
override EXTRA_RUSTFLAGS = -C link-arg=-lgcc
|
||||
$(warning "WARNING: aarch64-musl needs extra symbols from libgcc")
|
||||
endif
|
||||
|
||||
TRIPLE = $(ARCH)-unknown-linux-$(LIBC)
|
||||
|
||||
TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
|
||||
@@ -108,20 +106,14 @@ default: $(TARGET) show-header
|
||||
$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
|
||||
$(TARGET_PATH): $(SOURCES) | show-summary
|
||||
@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
@cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
optimize: $(SOURCES) | show-summary show-header
|
||||
@RUSTFLAGS="-C link-arg=-s $(EXTRA_RUSTFLAGS) --deny-warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
@RUSTFLAGS='-C link-arg=-s' cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
show-header:
|
||||
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
|
||||
|
||||
clippy: $(GENERATED_CODE)
|
||||
cargo clippy --all-targets --all-features --release \
|
||||
-- \
|
||||
-Aclippy::redundant_allocation \
|
||||
-D warnings
|
||||
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
|
||||
|
||||
@@ -170,4 +162,4 @@ help: show-summary
|
||||
optimize
|
||||
|
||||
generate-protocols:
|
||||
protocols/hack/update-generated-proto.sh all
|
||||
protocols/hack/update-generated-proto.sh "${PROTO_FILE}"
|
||||
|
||||
@@ -39,22 +39,11 @@ After that, we drafted the initial code here, and any contributions are welcome.
|
||||
## Getting Started
|
||||
|
||||
### Build from Source
|
||||
The rust-agent needs to be built statically and linked with `musl`
|
||||
|
||||
> **Note:** skip this step for ppc64le, the build scripts explicitly use gnu for ppc64le.
|
||||
|
||||
The rust-agent need to be built with rust newer than 1.37, and static linked with `musl`.
|
||||
```bash
|
||||
$ arch=$(uname -m)
|
||||
$ rustup target add "${arch}-unknown-linux-musl"
|
||||
$ sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
```
|
||||
|
||||
Download the source files in the Kata containers repository and build the agent:
|
||||
```bash
|
||||
$ GOPATH="${GOPATH:-$HOME/go}"
|
||||
$ dir="$GOPATH/src/github.com/kata-containers"
|
||||
$ git -C ${dir} clone --depth 1 https://github.com/kata-containers/kata-containers
|
||||
$ make -C ${dir}/kata-containers/src/agent
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
cargo build --target x86_64-unknown-linux-musl --release
|
||||
```
|
||||
|
||||
## Run Kata CI with rust-agent
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
2.0.0
|
||||
1
src/agent/VERSION
Symbolic link
1
src/agent/VERSION
Symbolic link
@@ -0,0 +1 @@
|
||||
../../VERSION
|
||||
@@ -20,5 +20,3 @@ LimitNOFILE=infinity
|
||||
# the runtime handles shutting down the VM.
|
||||
ExecStop=/bin/sync ; /usr/bin/systemctl --force poweroff
|
||||
FailureAction=poweroff
|
||||
# Discourage OOM-killer from touching the agent
|
||||
OOMScoreAdjust=-997
|
||||
|
||||
@@ -142,7 +142,7 @@ pub struct User {
|
||||
pub gid: u32,
|
||||
#[serde(
|
||||
default,
|
||||
rename = "additionalGids",
|
||||
rename = "addtionalGids",
|
||||
skip_serializing_if = "Vec::is_empty"
|
||||
)]
|
||||
pub additional_gids: Vec<u32>,
|
||||
@@ -302,7 +302,6 @@ pub struct LinuxBlockIODevice {
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxWeightDevice {
|
||||
#[serde(flatten)]
|
||||
pub blk: LinuxBlockIODevice,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub weight: Option<u16>,
|
||||
@@ -316,7 +315,6 @@ pub struct LinuxWeightDevice {
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
|
||||
pub struct LinuxThrottleDevice {
|
||||
#[serde(flatten)]
|
||||
pub blk: LinuxBlockIODevice,
|
||||
#[serde(default)]
|
||||
pub rate: u64,
|
||||
@@ -377,7 +375,7 @@ pub struct LinuxMemory {
|
||||
#[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelTCP")]
|
||||
pub kernel_tcp: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub swappiness: Option<i64>,
|
||||
pub swapiness: Option<i64>,
|
||||
#[serde(
|
||||
default,
|
||||
skip_serializing_if = "Option::is_none",
|
||||
@@ -835,7 +833,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize_spec() {
|
||||
fn test_deserialize_sepc() {
|
||||
let data = r#"{
|
||||
"ociVersion": "1.0.1",
|
||||
"process": {
|
||||
@@ -1120,28 +1118,36 @@ mod tests {
|
||||
"leafWeight": 10,
|
||||
"weightDevice": [
|
||||
{
|
||||
"major": 8,
|
||||
"minor": 0,
|
||||
"blk": {
|
||||
"major": 8,
|
||||
"minor": 0
|
||||
},
|
||||
"weight": 500,
|
||||
"leafWeight": 300
|
||||
},
|
||||
{
|
||||
"major": 8,
|
||||
"minor": 16,
|
||||
"blk":{
|
||||
"major": 8,
|
||||
"minor": 16
|
||||
},
|
||||
"weight": 500
|
||||
}
|
||||
],
|
||||
"throttleReadBpsDevice": [
|
||||
{
|
||||
"major": 8,
|
||||
"minor": 0,
|
||||
"blk":{
|
||||
"major": 8,
|
||||
"minor": 0
|
||||
},
|
||||
"rate": 600
|
||||
}
|
||||
],
|
||||
"throttleWriteIOPSDevice": [
|
||||
{
|
||||
"major": 8,
|
||||
"minor": 16,
|
||||
"blk":{
|
||||
"major": 8,
|
||||
"minor": 16
|
||||
},
|
||||
"rate": 300
|
||||
}
|
||||
]
|
||||
@@ -1217,7 +1223,8 @@ mod tests {
|
||||
uid: 1,
|
||||
gid: 1,
|
||||
// incompatible with oci
|
||||
additional_gids: vec![5, 6],
|
||||
// additional_gids: vec![5, 6],
|
||||
additional_gids: vec![],
|
||||
username: "".to_string(),
|
||||
},
|
||||
args: vec!["sh".to_string()],
|
||||
@@ -1430,7 +1437,8 @@ mod tests {
|
||||
swap: Some(536870912),
|
||||
kernel: Some(-1),
|
||||
kernel_tcp: Some(-1),
|
||||
swappiness: Some(0),
|
||||
// incompatible with oci
|
||||
swapiness: None,
|
||||
disable_oom_killer: Some(false),
|
||||
}),
|
||||
cpu: Some(crate::LinuxCPU {
|
||||
@@ -1583,6 +1591,25 @@ mod tests {
|
||||
vm: None,
|
||||
};
|
||||
|
||||
// warning : incompatible with oci : https://github.com/opencontainers/runtime-spec/blob/master/config.md
|
||||
// 1. User use addtionalGids while oci use additionalGids
|
||||
// 2. LinuxMemory use swapiness while oci use swappiness
|
||||
// 3. LinuxWeightDevice with blk
|
||||
// {
|
||||
// "blk": {
|
||||
// "major": 8,
|
||||
// "minor": 0
|
||||
// },
|
||||
// "weight": 500,
|
||||
// "leafWeight": 300
|
||||
// }
|
||||
// oci without blk
|
||||
// {
|
||||
// "major": 8,
|
||||
// "minor": 0,
|
||||
// "weight": 500,
|
||||
// "leafWeight": 300
|
||||
// }
|
||||
let current: crate::Spec = serde_json::from_str(data).unwrap();
|
||||
assert_eq!(expected, current);
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
//
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json;
|
||||
|
||||
use std::error;
|
||||
use std::fmt::{Display, Formatter, Result as FmtResult};
|
||||
|
||||
@@ -5,9 +5,6 @@ authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
ttrpc = "0.3.0"
|
||||
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
|
||||
protobuf = "=2.14.0"
|
||||
futures = "0.1.27"
|
||||
|
||||
[build-dependencies]
|
||||
ttrpc-codegen = "0.1.2"
|
||||
|
||||
@@ -1,55 +0,0 @@
|
||||
// Copyright (c) 2020 Ant Group
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Write};
|
||||
|
||||
fn main() {
|
||||
let protos = vec![
|
||||
"protos/types.proto",
|
||||
"protos/agent.proto",
|
||||
"protos/health.proto",
|
||||
"protos/google/protobuf/empty.proto",
|
||||
"protos/oci.proto",
|
||||
];
|
||||
|
||||
// Tell Cargo that if the .proto files changed, to rerun this build script.
|
||||
protos
|
||||
.iter()
|
||||
.for_each(|p| println!("cargo:rerun-if-changed={}", &p));
|
||||
|
||||
ttrpc_codegen::Codegen::new()
|
||||
.out_dir("src")
|
||||
.inputs(&protos)
|
||||
.include("protos")
|
||||
.rust_protobuf()
|
||||
.run()
|
||||
.expect("Gen codes failed.");
|
||||
|
||||
// There is a message named 'Box' in oci.proto
|
||||
// so there is a struct named 'Box', we should replace Box<Self> to ::std::boxed::Box<Self>
|
||||
// to avoid the conflict.
|
||||
replace_text_in_file(
|
||||
"src/oci.rs",
|
||||
"self: Box<Self>",
|
||||
"self: ::std::boxed::Box<Self>",
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> {
|
||||
let mut src = File::open(file_name)?;
|
||||
let mut contents = String::new();
|
||||
src.read_to_string(&mut contents).unwrap();
|
||||
drop(src);
|
||||
|
||||
let new_contents = contents.replace(from, to);
|
||||
|
||||
let mut dst = File::create(&file_name)?;
|
||||
dst.write_all(new_contents.as_bytes())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
# //
|
||||
# // Copyright (c) 2020 Ant Group
|
||||
# // Copyright 2020 Ant Financial
|
||||
# //
|
||||
# // SPDX-License-Identifier: Apache-2.0
|
||||
# //
|
||||
@@ -47,11 +47,11 @@ show_usage() {
|
||||
}
|
||||
|
||||
generate_go_sources() {
|
||||
local cmd="protoc -I$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
local cmd="protoc -I$GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
--gogottrpc_out=plugins=ttrpc+fieldpath,\
|
||||
import_path=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
|
||||
\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
@@ -64,12 +64,31 @@ $GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/proto
|
||||
[ $? -eq 0 ] || die "Failed to generate golang file from $1"
|
||||
}
|
||||
|
||||
generate_rust_sources() {
|
||||
local cmd="protoc --rust_out=./protocols/src/ \
|
||||
--ttrpc_out=./protocols/src/,plugins=ttrpc:./protocols/src/ \
|
||||
--plugin=protoc-gen-ttrpc=`which ttrpc_rust_plugin` \
|
||||
-I $GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos/$1"
|
||||
|
||||
echo $cmd
|
||||
$cmd
|
||||
[ $? -eq 0 ] || die "Failed to generate rust file from $1"
|
||||
|
||||
if [ "$1" = "oci.proto" ]; then
|
||||
# Need change Box<Self> to ::std::boxed::Box<Self> because there is another struct Box
|
||||
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/g' ./protocols/src/oci.rs > ./protocols/src/new_oci.rs
|
||||
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {/g' ./protocols/src/oci.rs > ./protocols/src/new_oci.rs
|
||||
mv ./protocols/src/new_oci.rs ./protocols/src/oci.rs
|
||||
fi;
|
||||
}
|
||||
|
||||
if [ "$(basename $(pwd))" != "agent" ]; then
|
||||
die "Please go to directory of protocols before execute this shell"
|
||||
fi
|
||||
|
||||
# Protocol buffer files required to generate golang/rust bindings.
|
||||
proto_files_list=(agent.proto health.proto oci.proto types.proto)
|
||||
proto_files_list=(agent.proto health.proto oci.proto github.com/kata-containers/agent/pkg/types/types.proto)
|
||||
|
||||
if [ "$1" = "" ]; then
|
||||
show_usage "${proto_files_list[@]}"
|
||||
@@ -80,6 +99,12 @@ fi;
|
||||
which protoc
|
||||
[ $? -eq 0 ] || die "Please install protoc from github.com/protocolbuffers/protobuf"
|
||||
|
||||
which protoc-gen-rust
|
||||
[ $? -eq 0 ] || die "Please install protobuf-codegen from github.com/pingcap/grpc-rs"
|
||||
|
||||
which ttrpc_rust_plugin
|
||||
[ $? -eq 0 ] || die "Please install ttrpc_rust_plugin from https://github.com/containerd/ttrpc-rust"
|
||||
|
||||
which protoc-gen-gogottrpc
|
||||
[ $? -eq 0 ] || die "Please install protoc-gen-gogottrpc from https://github.com/containerd/ttrpc"
|
||||
|
||||
@@ -93,6 +118,10 @@ if [ "$target" = "all" ]; then
|
||||
echo -e "\n [golang] compiling ${f} ..."
|
||||
generate_go_sources $f
|
||||
echo -e " [golang] ${f} compiled\n"
|
||||
|
||||
echo -e "\n [rust] compiling ${f} ..."
|
||||
generate_rust_sources $f
|
||||
echo -e " [rust] ${f} compiled\n"
|
||||
done
|
||||
else
|
||||
# compile individual proto file
|
||||
@@ -101,6 +130,10 @@ else
|
||||
echo -e "\n [golang] compiling ${target} ..."
|
||||
generate_go_sources $target
|
||||
echo -e " [golang] ${target} compiled\n"
|
||||
|
||||
echo -e "\n [rust] compiling ${target} ..."
|
||||
generate_rust_sources $target
|
||||
echo -e " [rust] ${target} compiled\n"
|
||||
fi
|
||||
done
|
||||
fi;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright 2017 HyperHQ Inc.
|
||||
// Copyright (c) 2019-2020 Ant Group
|
||||
// Copyright 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
@@ -11,8 +11,8 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
|
||||
|
||||
package grpc;
|
||||
|
||||
import "oci.proto";
|
||||
import "types.proto";
|
||||
import "github.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto";
|
||||
import "github.com/kata-containers/kata-containers/src/agent/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto";
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright 2018 Intel Corporation.
|
||||
// Copyright (c) 2019-2020 Ant Group
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright (c) 2017 HyperHQ Inc.
|
||||
// Copyright (c) 2019-2020 Ant Group
|
||||
// Copyright 2017 HyperHQ Inc.
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
@@ -11,7 +11,7 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
|
||||
|
||||
package grpc;
|
||||
|
||||
import "gogo/protobuf/gogoproto/gogo.proto";
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
option (gogoproto.equal_all) = true;
|
||||
option (gogoproto.populate_all) = true;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
// Copyright (c) 2019-2020 Ant Group
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
@@ -11,7 +11,8 @@ option go_package = "github.com/kata-containers/kata-containers/src/runtime/virt
|
||||
|
||||
package grpc;
|
||||
|
||||
import "gogo/protobuf/gogoproto/gogo.proto";
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
option (gogoproto.equal_all) = true;
|
||||
option (gogoproto.populate_all) = true;
|
||||
|
||||
14494
src/agent/protocols/src/agent.rs
Normal file
14494
src/agent/protocols/src/agent.rs
Normal file
File diff suppressed because it is too large
Load Diff
808
src/agent/protocols/src/agent_ttrpc.rs
Normal file
808
src/agent/protocols/src/agent_ttrpc.rs
Normal file
@@ -0,0 +1,808 @@
|
||||
// Copyright (c) 2020 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// This file is generated by ttrpc-compiler 0.3.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/Manishearth/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clipto_camel_casepy)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
use protobuf::{CodedInputStream, CodedOutputStream, Message};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AgentServiceClient {
|
||||
client: ::ttrpc::Client,
|
||||
}
|
||||
|
||||
impl AgentServiceClient {
|
||||
pub fn new(client: ::ttrpc::Client) -> Self {
|
||||
AgentServiceClient {
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_container(&self, req: &super::agent::CreateContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CreateContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn start_container(&self, req: &super::agent::StartContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StartContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn remove_container(&self, req: &super::agent::RemoveContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "RemoveContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn exec_process(&self, req: &super::agent::ExecProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ExecProcess", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn signal_process(&self, req: &super::agent::SignalProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "SignalProcess", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn wait_process(&self, req: &super::agent::WaitProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::WaitProcessResponse> {
|
||||
let mut cres = super::agent::WaitProcessResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "WaitProcess", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn list_processes(&self, req: &super::agent::ListProcessesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ListProcessesResponse> {
|
||||
let mut cres = super::agent::ListProcessesResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListProcesses", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn update_container(&self, req: &super::agent::UpdateContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn stats_container(&self, req: &super::agent::StatsContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::StatsContainerResponse> {
|
||||
let mut cres = super::agent::StatsContainerResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StatsContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn pause_container(&self, req: &super::agent::PauseContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "PauseContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn resume_container(&self, req: &super::agent::ResumeContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ResumeContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn write_stdin(&self, req: &super::agent::WriteStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::WriteStreamResponse> {
|
||||
let mut cres = super::agent::WriteStreamResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "WriteStdin", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn read_stdout(&self, req: &super::agent::ReadStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
let mut cres = super::agent::ReadStreamResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReadStdout", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn read_stderr(&self, req: &super::agent::ReadStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
let mut cres = super::agent::ReadStreamResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReadStderr", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn close_stdin(&self, req: &super::agent::CloseStdinRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CloseStdin", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn tty_win_resize(&self, req: &super::agent::TtyWinResizeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "TtyWinResize", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn update_interface(&self, req: &super::agent::UpdateInterfaceRequest, timeout_nano: i64) -> ::ttrpc::Result<super::types::Interface> {
|
||||
let mut cres = super::types::Interface::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateInterface", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn update_routes(&self, req: &super::agent::UpdateRoutesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
let mut cres = super::agent::Routes::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateRoutes", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn list_interfaces(&self, req: &super::agent::ListInterfacesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Interfaces> {
|
||||
let mut cres = super::agent::Interfaces::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListInterfaces", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn list_routes(&self, req: &super::agent::ListRoutesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
let mut cres = super::agent::Routes::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListRoutes", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn add_arp_neighbors(&self, req: &super::agent::AddARPNeighborsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "AddARPNeighbors", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn start_tracing(&self, req: &super::agent::StartTracingRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StartTracing", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn stop_tracing(&self, req: &super::agent::StopTracingRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StopTracing", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn get_metrics(&self, req: &super::agent::GetMetricsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Metrics> {
|
||||
let mut cres = super::agent::Metrics::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetMetrics", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn create_sandbox(&self, req: &super::agent::CreateSandboxRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CreateSandbox", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn destroy_sandbox(&self, req: &super::agent::DestroySandboxRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "DestroySandbox", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn online_cpu_mem(&self, req: &super::agent::OnlineCPUMemRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "OnlineCPUMem", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn reseed_random_dev(&self, req: &super::agent::ReseedRandomDevRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReseedRandomDev", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn get_guest_details(&self, req: &super::agent::GuestDetailsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
|
||||
let mut cres = super::agent::GuestDetailsResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetGuestDetails", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn mem_hotplug_by_probe(&self, req: &super::agent::MemHotplugByProbeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "MemHotplugByProbe", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn set_guest_date_time(&self, req: &super::agent::SetGuestDateTimeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "SetGuestDateTime", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn copy_file(&self, req: &super::agent::CopyFileRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CopyFile", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn get_oom_event(&self, req: &super::agent::GetOOMEventRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::OOMEvent> {
|
||||
let mut cres = super::agent::OOMEvent::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetOOMEvent", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
}
|
||||
|
||||
struct CreateContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CreateContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CreateContainerRequest, create_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StartContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StartContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StartContainerRequest, start_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct RemoveContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for RemoveContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, RemoveContainerRequest, remove_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ExecProcessMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ExecProcessMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ExecProcessRequest, exec_process);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct SignalProcessMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for SignalProcessMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, SignalProcessRequest, signal_process);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct WaitProcessMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for WaitProcessMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, WaitProcessRequest, wait_process);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ListProcessesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ListProcessesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ListProcessesRequest, list_processes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for UpdateContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, UpdateContainerRequest, update_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StatsContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StatsContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StatsContainerRequest, stats_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct PauseContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for PauseContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, PauseContainerRequest, pause_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ResumeContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ResumeContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ResumeContainerRequest, resume_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct WriteStdinMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for WriteStdinMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, WriteStreamRequest, write_stdin);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ReadStdoutMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ReadStdoutMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ReadStreamRequest, read_stdout);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ReadStderrMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ReadStderrMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ReadStreamRequest, read_stderr);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct CloseStdinMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CloseStdinMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CloseStdinRequest, close_stdin);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct TtyWinResizeMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for TtyWinResizeMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, TtyWinResizeRequest, tty_win_resize);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateInterfaceMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for UpdateInterfaceMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, UpdateInterfaceRequest, update_interface);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateRoutesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for UpdateRoutesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, UpdateRoutesRequest, update_routes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ListInterfacesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ListInterfacesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ListInterfacesRequest, list_interfaces);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ListRoutesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ListRoutesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ListRoutesRequest, list_routes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct AddArpNeighborsMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for AddArpNeighborsMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, AddARPNeighborsRequest, add_arp_neighbors);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StartTracingMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StartTracingMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StartTracingRequest, start_tracing);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StopTracingMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StopTracingMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StopTracingRequest, stop_tracing);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct GetMetricsMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for GetMetricsMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, GetMetricsRequest, get_metrics);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct CreateSandboxMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CreateSandboxMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CreateSandboxRequest, create_sandbox);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct DestroySandboxMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for DestroySandboxMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, DestroySandboxRequest, destroy_sandbox);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct OnlineCpuMemMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for OnlineCpuMemMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, OnlineCPUMemRequest, online_cpu_mem);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ReseedRandomDevMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ReseedRandomDevMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ReseedRandomDevRequest, reseed_random_dev);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct GetGuestDetailsMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for GetGuestDetailsMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, GuestDetailsRequest, get_guest_details);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct MemHotplugByProbeMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for MemHotplugByProbeMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, MemHotplugByProbeRequest, mem_hotplug_by_probe);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct SetGuestDateTimeMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for SetGuestDateTimeMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, SetGuestDateTimeRequest, set_guest_date_time);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct CopyFileMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CopyFileMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CopyFileRequest, copy_file);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct GetOomEventMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for GetOomEventMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, GetOOMEventRequest, get_oom_event);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AgentService {
|
||||
fn create_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CreateContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CreateContainer is not supported".to_string())))
|
||||
}
|
||||
fn start_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StartContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StartContainer is not supported".to_string())))
|
||||
}
|
||||
fn remove_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::RemoveContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/RemoveContainer is not supported".to_string())))
|
||||
}
|
||||
fn exec_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ExecProcessRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ExecProcess is not supported".to_string())))
|
||||
}
|
||||
fn signal_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::SignalProcessRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/SignalProcess is not supported".to_string())))
|
||||
}
|
||||
fn wait_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::WaitProcessRequest) -> ::ttrpc::Result<super::agent::WaitProcessResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/WaitProcess is not supported".to_string())))
|
||||
}
|
||||
fn list_processes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListProcessesRequest) -> ::ttrpc::Result<super::agent::ListProcessesResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListProcesses is not supported".to_string())))
|
||||
}
|
||||
fn update_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateContainer is not supported".to_string())))
|
||||
}
|
||||
fn stats_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StatsContainerRequest) -> ::ttrpc::Result<super::agent::StatsContainerResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StatsContainer is not supported".to_string())))
|
||||
}
|
||||
fn pause_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::PauseContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/PauseContainer is not supported".to_string())))
|
||||
}
|
||||
fn resume_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ResumeContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ResumeContainer is not supported".to_string())))
|
||||
}
|
||||
fn write_stdin(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::WriteStreamRequest) -> ::ttrpc::Result<super::agent::WriteStreamResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/WriteStdin is not supported".to_string())))
|
||||
}
|
||||
fn read_stdout(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReadStreamRequest) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReadStdout is not supported".to_string())))
|
||||
}
|
||||
fn read_stderr(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReadStreamRequest) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReadStderr is not supported".to_string())))
|
||||
}
|
||||
fn close_stdin(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CloseStdinRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CloseStdin is not supported".to_string())))
|
||||
}
|
||||
fn tty_win_resize(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::TtyWinResizeRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/TtyWinResize is not supported".to_string())))
|
||||
}
|
||||
fn update_interface(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateInterfaceRequest) -> ::ttrpc::Result<super::types::Interface> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateInterface is not supported".to_string())))
|
||||
}
|
||||
fn update_routes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateRoutesRequest) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateRoutes is not supported".to_string())))
|
||||
}
|
||||
fn list_interfaces(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListInterfacesRequest) -> ::ttrpc::Result<super::agent::Interfaces> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListInterfaces is not supported".to_string())))
|
||||
}
|
||||
fn list_routes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListRoutesRequest) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListRoutes is not supported".to_string())))
|
||||
}
|
||||
fn add_arp_neighbors(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::AddARPNeighborsRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/AddARPNeighbors is not supported".to_string())))
|
||||
}
|
||||
fn start_tracing(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StartTracingRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StartTracing is not supported".to_string())))
|
||||
}
|
||||
fn stop_tracing(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StopTracingRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StopTracing is not supported".to_string())))
|
||||
}
|
||||
fn get_metrics(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GetMetricsRequest) -> ::ttrpc::Result<super::agent::Metrics> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetMetrics is not supported".to_string())))
|
||||
}
|
||||
fn create_sandbox(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CreateSandboxRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CreateSandbox is not supported".to_string())))
|
||||
}
|
||||
fn destroy_sandbox(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::DestroySandboxRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/DestroySandbox is not supported".to_string())))
|
||||
}
|
||||
fn online_cpu_mem(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::OnlineCPUMemRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/OnlineCPUMem is not supported".to_string())))
|
||||
}
|
||||
fn reseed_random_dev(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReseedRandomDevRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReseedRandomDev is not supported".to_string())))
|
||||
}
|
||||
fn get_guest_details(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GuestDetailsRequest) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetGuestDetails is not supported".to_string())))
|
||||
}
|
||||
fn mem_hotplug_by_probe(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::MemHotplugByProbeRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/MemHotplugByProbe is not supported".to_string())))
|
||||
}
|
||||
fn set_guest_date_time(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::SetGuestDateTimeRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/SetGuestDateTime is not supported".to_string())))
|
||||
}
|
||||
fn copy_file(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CopyFileRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CopyFile is not supported".to_string())))
|
||||
}
|
||||
fn get_oom_event(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GetOOMEventRequest) -> ::ttrpc::Result<super::agent::OOMEvent> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetOOMEvent is not supported".to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_agent_service(service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>) -> HashMap <String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
|
||||
let mut methods = HashMap::new();
|
||||
|
||||
methods.insert("/grpc.AgentService/CreateContainer".to_string(),
|
||||
std::boxed::Box::new(CreateContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StartContainer".to_string(),
|
||||
std::boxed::Box::new(StartContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/RemoveContainer".to_string(),
|
||||
std::boxed::Box::new(RemoveContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ExecProcess".to_string(),
|
||||
std::boxed::Box::new(ExecProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/SignalProcess".to_string(),
|
||||
std::boxed::Box::new(SignalProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/WaitProcess".to_string(),
|
||||
std::boxed::Box::new(WaitProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ListProcesses".to_string(),
|
||||
std::boxed::Box::new(ListProcessesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/UpdateContainer".to_string(),
|
||||
std::boxed::Box::new(UpdateContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StatsContainer".to_string(),
|
||||
std::boxed::Box::new(StatsContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/PauseContainer".to_string(),
|
||||
std::boxed::Box::new(PauseContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ResumeContainer".to_string(),
|
||||
std::boxed::Box::new(ResumeContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/WriteStdin".to_string(),
|
||||
std::boxed::Box::new(WriteStdinMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ReadStdout".to_string(),
|
||||
std::boxed::Box::new(ReadStdoutMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ReadStderr".to_string(),
|
||||
std::boxed::Box::new(ReadStderrMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/CloseStdin".to_string(),
|
||||
std::boxed::Box::new(CloseStdinMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/TtyWinResize".to_string(),
|
||||
std::boxed::Box::new(TtyWinResizeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/UpdateInterface".to_string(),
|
||||
std::boxed::Box::new(UpdateInterfaceMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/UpdateRoutes".to_string(),
|
||||
std::boxed::Box::new(UpdateRoutesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ListInterfaces".to_string(),
|
||||
std::boxed::Box::new(ListInterfacesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ListRoutes".to_string(),
|
||||
std::boxed::Box::new(ListRoutesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/AddARPNeighbors".to_string(),
|
||||
std::boxed::Box::new(AddArpNeighborsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StartTracing".to_string(),
|
||||
std::boxed::Box::new(StartTracingMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StopTracing".to_string(),
|
||||
std::boxed::Box::new(StopTracingMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/GetMetrics".to_string(),
|
||||
std::boxed::Box::new(GetMetricsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/CreateSandbox".to_string(),
|
||||
std::boxed::Box::new(CreateSandboxMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/DestroySandbox".to_string(),
|
||||
std::boxed::Box::new(DestroySandboxMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/OnlineCPUMem".to_string(),
|
||||
std::boxed::Box::new(OnlineCpuMemMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ReseedRandomDev".to_string(),
|
||||
std::boxed::Box::new(ReseedRandomDevMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/GetGuestDetails".to_string(),
|
||||
std::boxed::Box::new(GetGuestDetailsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/MemHotplugByProbe".to_string(),
|
||||
std::boxed::Box::new(MemHotplugByProbeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/SetGuestDateTime".to_string(),
|
||||
std::boxed::Box::new(SetGuestDateTimeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/CopyFile".to_string(),
|
||||
std::boxed::Box::new(CopyFileMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/GetOOMEvent".to_string(),
|
||||
std::boxed::Box::new(GetOomEventMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods
|
||||
}
|
||||
242
src/agent/protocols/src/empty.rs
Normal file
242
src/agent/protocols/src/empty.rs
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// This file is generated by rust-protobuf 2.14.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/rust-lang/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
//! Generated file from `google/protobuf/empty.proto`
|
||||
|
||||
use protobuf::Message as Message_imported_for_functions;
|
||||
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
|
||||
|
||||
/// Generated files are compatible only with the same version
|
||||
/// of protobuf runtime.
|
||||
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct Empty {
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a Empty {
|
||||
fn default() -> &'a Empty {
|
||||
<Empty as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl Empty {
|
||||
pub fn new() -> Empty {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for Empty {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> Empty {
|
||||
Empty::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let fields = ::std::vec::Vec::new();
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<Empty>(
|
||||
"Empty",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static Empty {
|
||||
static mut instance: ::protobuf::lazy::Lazy<Empty> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(Empty::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for Empty {
|
||||
fn clear(&mut self) {
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for Empty {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for Empty {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
static file_descriptor_proto_data: &'static [u8] = b"\
|
||||
\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05Empt\
|
||||
yBT\n\x13com.google.protobufB\nEmptyProtoP\x01Z\x05types\xf8\x01\x01\xa2\
|
||||
\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesJ\xa9\x14\n\x06\x12\
|
||||
\x04\x1e\03\x10\n\xcc\x0c\n\x01\x0c\x12\x03\x1e\0\x122\xc1\x0c\x20Protoc\
|
||||
ol\x20Buffers\x20-\x20Google's\x20data\x20interchange\x20format\n\x20Cop\
|
||||
yright\x202008\x20Google\x20Inc.\x20\x20All\x20rights\x20reserved.\n\x20\
|
||||
https://developers.google.com/protocol-buffers/\n\n\x20Redistribution\
|
||||
\x20and\x20use\x20in\x20source\x20and\x20binary\x20forms,\x20with\x20or\
|
||||
\x20without\n\x20modification,\x20are\x20permitted\x20provided\x20that\
|
||||
\x20the\x20following\x20conditions\x20are\n\x20met:\n\n\x20\x20\x20\x20\
|
||||
\x20*\x20Redistributions\x20of\x20source\x20code\x20must\x20retain\x20th\
|
||||
e\x20above\x20copyright\n\x20notice,\x20this\x20list\x20of\x20conditions\
|
||||
\x20and\x20the\x20following\x20disclaimer.\n\x20\x20\x20\x20\x20*\x20Red\
|
||||
istributions\x20in\x20binary\x20form\x20must\x20reproduce\x20the\x20abov\
|
||||
e\n\x20copyright\x20notice,\x20this\x20list\x20of\x20conditions\x20and\
|
||||
\x20the\x20following\x20disclaimer\n\x20in\x20the\x20documentation\x20an\
|
||||
d/or\x20other\x20materials\x20provided\x20with\x20the\n\x20distribution.\
|
||||
\n\x20\x20\x20\x20\x20*\x20Neither\x20the\x20name\x20of\x20Google\x20Inc\
|
||||
.\x20nor\x20the\x20names\x20of\x20its\n\x20contributors\x20may\x20be\x20\
|
||||
used\x20to\x20endorse\x20or\x20promote\x20products\x20derived\x20from\n\
|
||||
\x20this\x20software\x20without\x20specific\x20prior\x20written\x20permi\
|
||||
ssion.\n\n\x20THIS\x20SOFTWARE\x20IS\x20PROVIDED\x20BY\x20THE\x20COPYRIG\
|
||||
HT\x20HOLDERS\x20AND\x20CONTRIBUTORS\n\x20\"AS\x20IS\"\x20AND\x20ANY\x20\
|
||||
EXPRESS\x20OR\x20IMPLIED\x20WARRANTIES,\x20INCLUDING,\x20BUT\x20NOT\n\
|
||||
\x20LIMITED\x20TO,\x20THE\x20IMPLIED\x20WARRANTIES\x20OF\x20MERCHANTABIL\
|
||||
ITY\x20AND\x20FITNESS\x20FOR\n\x20A\x20PARTICULAR\x20PURPOSE\x20ARE\x20D\
|
||||
ISCLAIMED.\x20IN\x20NO\x20EVENT\x20SHALL\x20THE\x20COPYRIGHT\n\x20OWNER\
|
||||
\x20OR\x20CONTRIBUTORS\x20BE\x20LIABLE\x20FOR\x20ANY\x20DIRECT,\x20INDIR\
|
||||
ECT,\x20INCIDENTAL,\n\x20SPECIAL,\x20EXEMPLARY,\x20OR\x20CONSEQUENTIAL\
|
||||
\x20DAMAGES\x20(INCLUDING,\x20BUT\x20NOT\n\x20LIMITED\x20TO,\x20PROCUREM\
|
||||
ENT\x20OF\x20SUBSTITUTE\x20GOODS\x20OR\x20SERVICES;\x20LOSS\x20OF\x20USE\
|
||||
,\n\x20DATA,\x20OR\x20PROFITS;\x20OR\x20BUSINESS\x20INTERRUPTION)\x20HOW\
|
||||
EVER\x20CAUSED\x20AND\x20ON\x20ANY\n\x20THEORY\x20OF\x20LIABILITY,\x20WH\
|
||||
ETHER\x20IN\x20CONTRACT,\x20STRICT\x20LIABILITY,\x20OR\x20TORT\n\x20(INC\
|
||||
LUDING\x20NEGLIGENCE\x20OR\x20OTHERWISE)\x20ARISING\x20IN\x20ANY\x20WAY\
|
||||
\x20OUT\x20OF\x20THE\x20USE\n\x20OF\x20THIS\x20SOFTWARE,\x20EVEN\x20IF\
|
||||
\x20ADVISED\x20OF\x20THE\x20POSSIBILITY\x20OF\x20SUCH\x20DAMAGE.\n\n\x08\
|
||||
\n\x01\x02\x12\x03\x20\x08\x17\n\x08\n\x01\x08\x12\x03\"\0;\n\x0b\n\x04\
|
||||
\x08\xe7\x07\0\x12\x03\"\0;\n\x0c\n\x05\x08\xe7\x07\0\x02\x12\x03\"\x07\
|
||||
\x17\n\r\n\x06\x08\xe7\x07\0\x02\0\x12\x03\"\x07\x17\n\x0e\n\x07\x08\xe7\
|
||||
\x07\0\x02\0\x01\x12\x03\"\x07\x17\n\x0c\n\x05\x08\xe7\x07\0\x07\x12\x03\
|
||||
\"\x1a:\n\x08\n\x01\x08\x12\x03#\0\x1c\n\x0b\n\x04\x08\xe7\x07\x01\x12\
|
||||
\x03#\0\x1c\n\x0c\n\x05\x08\xe7\x07\x01\x02\x12\x03#\x07\x11\n\r\n\x06\
|
||||
\x08\xe7\x07\x01\x02\0\x12\x03#\x07\x11\n\x0e\n\x07\x08\xe7\x07\x01\x02\
|
||||
\0\x01\x12\x03#\x07\x11\n\x0c\n\x05\x08\xe7\x07\x01\x07\x12\x03#\x14\x1b\
|
||||
\n\x08\n\x01\x08\x12\x03$\0,\n\x0b\n\x04\x08\xe7\x07\x02\x12\x03$\0,\n\
|
||||
\x0c\n\x05\x08\xe7\x07\x02\x02\x12\x03$\x07\x13\n\r\n\x06\x08\xe7\x07\
|
||||
\x02\x02\0\x12\x03$\x07\x13\n\x0e\n\x07\x08\xe7\x07\x02\x02\0\x01\x12\
|
||||
\x03$\x07\x13\n\x0c\n\x05\x08\xe7\x07\x02\x07\x12\x03$\x16+\n\x08\n\x01\
|
||||
\x08\x12\x03%\0+\n\x0b\n\x04\x08\xe7\x07\x03\x12\x03%\0+\n\x0c\n\x05\x08\
|
||||
\xe7\x07\x03\x02\x12\x03%\x07\x1b\n\r\n\x06\x08\xe7\x07\x03\x02\0\x12\
|
||||
\x03%\x07\x1b\n\x0e\n\x07\x08\xe7\x07\x03\x02\0\x01\x12\x03%\x07\x1b\n\
|
||||
\x0c\n\x05\x08\xe7\x07\x03\x07\x12\x03%\x1e*\n\x08\n\x01\x08\x12\x03&\0\
|
||||
\"\n\x0b\n\x04\x08\xe7\x07\x04\x12\x03&\0\"\n\x0c\n\x05\x08\xe7\x07\x04\
|
||||
\x02\x12\x03&\x07\x1a\n\r\n\x06\x08\xe7\x07\x04\x02\0\x12\x03&\x07\x1a\n\
|
||||
\x0e\n\x07\x08\xe7\x07\x04\x02\0\x01\x12\x03&\x07\x1a\n\x0c\n\x05\x08\
|
||||
\xe7\x07\x04\x03\x12\x03&\x1d!\n\x08\n\x01\x08\x12\x03'\0!\n\x0b\n\x04\
|
||||
\x08\xe7\x07\x05\x12\x03'\0!\n\x0c\n\x05\x08\xe7\x07\x05\x02\x12\x03'\
|
||||
\x07\x18\n\r\n\x06\x08\xe7\x07\x05\x02\0\x12\x03'\x07\x18\n\x0e\n\x07\
|
||||
\x08\xe7\x07\x05\x02\0\x01\x12\x03'\x07\x18\n\x0c\n\x05\x08\xe7\x07\x05\
|
||||
\x07\x12\x03'\x1b\x20\n\x08\n\x01\x08\x12\x03(\0\x1f\n\x0b\n\x04\x08\xe7\
|
||||
\x07\x06\x12\x03(\0\x1f\n\x0c\n\x05\x08\xe7\x07\x06\x02\x12\x03(\x07\x17\
|
||||
\n\r\n\x06\x08\xe7\x07\x06\x02\0\x12\x03(\x07\x17\n\x0e\n\x07\x08\xe7\
|
||||
\x07\x06\x02\0\x01\x12\x03(\x07\x17\n\x0c\n\x05\x08\xe7\x07\x06\x03\x12\
|
||||
\x03(\x1a\x1e\n\xfb\x02\n\x02\x04\0\x12\x033\0\x10\x1a\xef\x02\x20A\x20g\
|
||||
eneric\x20empty\x20message\x20that\x20you\x20can\x20re-use\x20to\x20avoi\
|
||||
d\x20defining\x20duplicated\n\x20empty\x20messages\x20in\x20your\x20APIs\
|
||||
.\x20A\x20typical\x20example\x20is\x20to\x20use\x20it\x20as\x20the\x20re\
|
||||
quest\n\x20or\x20the\x20response\x20type\x20of\x20an\x20API\x20method.\
|
||||
\x20For\x20instance:\n\n\x20\x20\x20\x20\x20service\x20Foo\x20{\n\x20\
|
||||
\x20\x20\x20\x20\x20\x20rpc\x20Bar(google.protobuf.Empty)\x20returns\x20\
|
||||
(google.protobuf.Empty);\n\x20\x20\x20\x20\x20}\n\n\x20The\x20JSON\x20re\
|
||||
presentation\x20for\x20`Empty`\x20is\x20empty\x20JSON\x20object\x20`{}`.\
|
||||
\n\n\n\n\x03\x04\0\x01\x12\x033\x08\rb\x06proto3\
|
||||
";
|
||||
|
||||
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;
|
||||
|
||||
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
|
||||
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
|
||||
}
|
||||
|
||||
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
|
||||
unsafe {
|
||||
file_descriptor_proto_lazy.get(|| {
|
||||
parse_descriptor_proto()
|
||||
})
|
||||
}
|
||||
}
|
||||
672
src/agent/protocols/src/health.rs
Normal file
672
src/agent/protocols/src/health.rs
Normal file
@@ -0,0 +1,672 @@
|
||||
// This file is generated by rust-protobuf 2.14.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/rust-lang/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
//! Generated file from `github.com/kata-containers/kata-containers/src/agent/protocols/protos/health.proto`
|
||||
|
||||
use protobuf::Message as Message_imported_for_functions;
|
||||
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
|
||||
|
||||
/// Generated files are compatible only with the same version
|
||||
/// of protobuf runtime.
|
||||
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct CheckRequest {
|
||||
// message fields
|
||||
pub service: ::std::string::String,
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a CheckRequest {
|
||||
fn default() -> &'a CheckRequest {
|
||||
<CheckRequest as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl CheckRequest {
|
||||
pub fn new() -> CheckRequest {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
|
||||
// string service = 1;
|
||||
|
||||
|
||||
pub fn get_service(&self) -> &str {
|
||||
&self.service
|
||||
}
|
||||
pub fn clear_service(&mut self) {
|
||||
self.service.clear();
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_service(&mut self, v: ::std::string::String) {
|
||||
self.service = v;
|
||||
}
|
||||
|
||||
// Mutable pointer to the field.
|
||||
// If field is not initialized, it is initialized with default value first.
|
||||
pub fn mut_service(&mut self) -> &mut ::std::string::String {
|
||||
&mut self.service
|
||||
}
|
||||
|
||||
// Take field
|
||||
pub fn take_service(&mut self) -> ::std::string::String {
|
||||
::std::mem::replace(&mut self.service, ::std::string::String::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for CheckRequest {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
1 => {
|
||||
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.service)?;
|
||||
},
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
if !self.service.is_empty() {
|
||||
my_size += ::protobuf::rt::string_size(1, &self.service);
|
||||
}
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
if !self.service.is_empty() {
|
||||
os.write_string(1, &self.service)?;
|
||||
}
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> CheckRequest {
|
||||
CheckRequest::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let mut fields = ::std::vec::Vec::new();
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
|
||||
"service",
|
||||
|m: &CheckRequest| { &m.service },
|
||||
|m: &mut CheckRequest| { &mut m.service },
|
||||
));
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<CheckRequest>(
|
||||
"CheckRequest",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static CheckRequest {
|
||||
static mut instance: ::protobuf::lazy::Lazy<CheckRequest> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(CheckRequest::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for CheckRequest {
|
||||
fn clear(&mut self) {
|
||||
self.service.clear();
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for CheckRequest {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for CheckRequest {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct HealthCheckResponse {
|
||||
// message fields
|
||||
pub status: HealthCheckResponse_ServingStatus,
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a HealthCheckResponse {
|
||||
fn default() -> &'a HealthCheckResponse {
|
||||
<HealthCheckResponse as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl HealthCheckResponse {
|
||||
pub fn new() -> HealthCheckResponse {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
|
||||
// .grpc.HealthCheckResponse.ServingStatus status = 1;
|
||||
|
||||
|
||||
pub fn get_status(&self) -> HealthCheckResponse_ServingStatus {
|
||||
self.status
|
||||
}
|
||||
pub fn clear_status(&mut self) {
|
||||
self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_status(&mut self, v: HealthCheckResponse_ServingStatus) {
|
||||
self.status = v;
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for HealthCheckResponse {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
1 => {
|
||||
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.status, 1, &mut self.unknown_fields)?
|
||||
},
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
|
||||
my_size += ::protobuf::rt::enum_size(1, self.status);
|
||||
}
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
|
||||
os.write_enum(1, self.status.value())?;
|
||||
}
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> HealthCheckResponse {
|
||||
HealthCheckResponse::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let mut fields = ::std::vec::Vec::new();
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<HealthCheckResponse_ServingStatus>>(
|
||||
"status",
|
||||
|m: &HealthCheckResponse| { &m.status },
|
||||
|m: &mut HealthCheckResponse| { &mut m.status },
|
||||
));
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<HealthCheckResponse>(
|
||||
"HealthCheckResponse",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static HealthCheckResponse {
|
||||
static mut instance: ::protobuf::lazy::Lazy<HealthCheckResponse> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(HealthCheckResponse::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for HealthCheckResponse {
|
||||
fn clear(&mut self) {
|
||||
self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for HealthCheckResponse {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
|
||||
pub enum HealthCheckResponse_ServingStatus {
|
||||
UNKNOWN = 0,
|
||||
SERVING = 1,
|
||||
NOT_SERVING = 2,
|
||||
}
|
||||
|
||||
impl ::protobuf::ProtobufEnum for HealthCheckResponse_ServingStatus {
|
||||
fn value(&self) -> i32 {
|
||||
*self as i32
|
||||
}
|
||||
|
||||
fn from_i32(value: i32) -> ::std::option::Option<HealthCheckResponse_ServingStatus> {
|
||||
match value {
|
||||
0 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::UNKNOWN),
|
||||
1 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::SERVING),
|
||||
2 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::NOT_SERVING),
|
||||
_ => ::std::option::Option::None
|
||||
}
|
||||
}
|
||||
|
||||
fn values() -> &'static [Self] {
|
||||
static values: &'static [HealthCheckResponse_ServingStatus] = &[
|
||||
HealthCheckResponse_ServingStatus::UNKNOWN,
|
||||
HealthCheckResponse_ServingStatus::SERVING,
|
||||
HealthCheckResponse_ServingStatus::NOT_SERVING,
|
||||
];
|
||||
values
|
||||
}
|
||||
|
||||
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
::protobuf::reflect::EnumDescriptor::new_pb_name::<HealthCheckResponse_ServingStatus>("HealthCheckResponse.ServingStatus", file_descriptor_proto())
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::marker::Copy for HealthCheckResponse_ServingStatus {
|
||||
}
|
||||
|
||||
impl ::std::default::Default for HealthCheckResponse_ServingStatus {
|
||||
fn default() -> Self {
|
||||
HealthCheckResponse_ServingStatus::UNKNOWN
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse_ServingStatus {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Enum(self.descriptor())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct VersionCheckResponse {
|
||||
// message fields
|
||||
pub grpc_version: ::std::string::String,
|
||||
pub agent_version: ::std::string::String,
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a VersionCheckResponse {
|
||||
fn default() -> &'a VersionCheckResponse {
|
||||
<VersionCheckResponse as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl VersionCheckResponse {
|
||||
pub fn new() -> VersionCheckResponse {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
|
||||
// string grpc_version = 1;
|
||||
|
||||
|
||||
pub fn get_grpc_version(&self) -> &str {
|
||||
&self.grpc_version
|
||||
}
|
||||
pub fn clear_grpc_version(&mut self) {
|
||||
self.grpc_version.clear();
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_grpc_version(&mut self, v: ::std::string::String) {
|
||||
self.grpc_version = v;
|
||||
}
|
||||
|
||||
// Mutable pointer to the field.
|
||||
// If field is not initialized, it is initialized with default value first.
|
||||
pub fn mut_grpc_version(&mut self) -> &mut ::std::string::String {
|
||||
&mut self.grpc_version
|
||||
}
|
||||
|
||||
// Take field
|
||||
pub fn take_grpc_version(&mut self) -> ::std::string::String {
|
||||
::std::mem::replace(&mut self.grpc_version, ::std::string::String::new())
|
||||
}
|
||||
|
||||
// string agent_version = 2;
|
||||
|
||||
|
||||
pub fn get_agent_version(&self) -> &str {
|
||||
&self.agent_version
|
||||
}
|
||||
pub fn clear_agent_version(&mut self) {
|
||||
self.agent_version.clear();
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_agent_version(&mut self, v: ::std::string::String) {
|
||||
self.agent_version = v;
|
||||
}
|
||||
|
||||
// Mutable pointer to the field.
|
||||
// If field is not initialized, it is initialized with default value first.
|
||||
pub fn mut_agent_version(&mut self) -> &mut ::std::string::String {
|
||||
&mut self.agent_version
|
||||
}
|
||||
|
||||
// Take field
|
||||
pub fn take_agent_version(&mut self) -> ::std::string::String {
|
||||
::std::mem::replace(&mut self.agent_version, ::std::string::String::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for VersionCheckResponse {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
1 => {
|
||||
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.grpc_version)?;
|
||||
},
|
||||
2 => {
|
||||
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.agent_version)?;
|
||||
},
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
if !self.grpc_version.is_empty() {
|
||||
my_size += ::protobuf::rt::string_size(1, &self.grpc_version);
|
||||
}
|
||||
if !self.agent_version.is_empty() {
|
||||
my_size += ::protobuf::rt::string_size(2, &self.agent_version);
|
||||
}
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
if !self.grpc_version.is_empty() {
|
||||
os.write_string(1, &self.grpc_version)?;
|
||||
}
|
||||
if !self.agent_version.is_empty() {
|
||||
os.write_string(2, &self.agent_version)?;
|
||||
}
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> VersionCheckResponse {
|
||||
VersionCheckResponse::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let mut fields = ::std::vec::Vec::new();
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
|
||||
"grpc_version",
|
||||
|m: &VersionCheckResponse| { &m.grpc_version },
|
||||
|m: &mut VersionCheckResponse| { &mut m.grpc_version },
|
||||
));
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
|
||||
"agent_version",
|
||||
|m: &VersionCheckResponse| { &m.agent_version },
|
||||
|m: &mut VersionCheckResponse| { &mut m.agent_version },
|
||||
));
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<VersionCheckResponse>(
|
||||
"VersionCheckResponse",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static VersionCheckResponse {
|
||||
static mut instance: ::protobuf::lazy::Lazy<VersionCheckResponse> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(VersionCheckResponse::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for VersionCheckResponse {
|
||||
fn clear(&mut self) {
|
||||
self.grpc_version.clear();
|
||||
self.agent_version.clear();
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for VersionCheckResponse {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for VersionCheckResponse {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
static file_descriptor_proto_data: &'static [u8] = b"\
|
||||
\nRgithub.com/kata-containers/kata-containers/src/agent/protocols/protos\
|
||||
/health.proto\x12\x04grpc\x1a-github.com/gogo/protobuf/gogoproto/gogo.pr\
|
||||
oto\"(\n\x0cCheckRequest\x12\x18\n\x07service\x18\x01\x20\x01(\tR\x07ser\
|
||||
vice\"\x92\x01\n\x13HealthCheckResponse\x12?\n\x06status\x18\x01\x20\x01\
|
||||
(\x0e2'.grpc.HealthCheckResponse.ServingStatusR\x06status\":\n\rServingS\
|
||||
tatus\x12\x0b\n\x07UNKNOWN\x10\0\x12\x0b\n\x07SERVING\x10\x01\x12\x0f\n\
|
||||
\x0bNOT_SERVING\x10\x02\"^\n\x14VersionCheckResponse\x12!\n\x0cgrpc_vers\
|
||||
ion\x18\x01\x20\x01(\tR\x0bgrpcVersion\x12#\n\ragent_version\x18\x02\x20\
|
||||
\x01(\tR\x0cagentVersion2{\n\x06Health\x126\n\x05Check\x12\x12.grpc.Chec\
|
||||
kRequest\x1a\x19.grpc.HealthCheckResponse\x129\n\x07Version\x12\x12.grpc\
|
||||
.CheckRequest\x1a\x1a.grpc.VersionCheckResponseBpZ^github.com/kata-conta\
|
||||
iners/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grp\
|
||||
c\xb8\xe2\x1e\x01\xc0\xe2\x1e\x01\xa8\xe2\x1e\x01\xf8\xe1\x1e\x01J\x90\
|
||||
\x07\n\x06\x12\x04\x07\0)\x01\nq\n\x01\x0c\x12\x03\x07\0\x122g\n\x20Copy\
|
||||
right\x202017\x20HyperHQ\x20Inc.\n\x20Copyright\x20(c)\x202019\x20Ant\
|
||||
\x20Financial\n\n\x20SPDX-License-Identifier:\x20Apache-2.0\n\n\n\x08\n\
|
||||
\x01\x08\x12\x03\t\0u\n\t\n\x02\x08\x0b\x12\x03\t\0u\n\x08\n\x01\x02\x12\
|
||||
\x03\x0b\0\r\n\t\n\x02\x03\0\x12\x03\r\07\n\x08\n\x01\x08\x12\x03\x0f\0$\
|
||||
\n\x0b\n\x04\x08\xa5\xec\x03\x12\x03\x0f\0$\n\x08\n\x01\x08\x12\x03\x10\
|
||||
\0'\n\x0b\n\x04\x08\x9f\xec\x03\x12\x03\x10\0'\n\x08\n\x01\x08\x12\x03\
|
||||
\x11\0&\n\x0b\n\x04\x08\xa7\xec\x03\x12\x03\x11\0&\n\x08\n\x01\x08\x12\
|
||||
\x03\x12\0'\n\x0b\n\x04\x08\xa8\xec\x03\x12\x03\x12\0'\n\n\n\x02\x04\0\
|
||||
\x12\x04\x14\0\x16\x01\n\n\n\x03\x04\0\x01\x12\x03\x14\x08\x14\n\x0b\n\
|
||||
\x04\x04\0\x02\0\x12\x03\x15\x08\x1b\n\r\n\x05\x04\0\x02\0\x04\x12\x04\
|
||||
\x15\x08\x14\x16\n\x0c\n\x05\x04\0\x02\0\x05\x12\x03\x15\x08\x0e\n\x0c\n\
|
||||
\x05\x04\0\x02\0\x01\x12\x03\x15\x0f\x16\n\x0c\n\x05\x04\0\x02\0\x03\x12\
|
||||
\x03\x15\x19\x1a\n\n\n\x02\x04\x01\x12\x04\x18\0\x1f\x01\n\n\n\x03\x04\
|
||||
\x01\x01\x12\x03\x18\x08\x1b\n\x0c\n\x04\x04\x01\x04\0\x12\x04\x19\x08\
|
||||
\x1d\t\n\x0c\n\x05\x04\x01\x04\0\x01\x12\x03\x19\r\x1a\n\r\n\x06\x04\x01\
|
||||
\x04\0\x02\0\x12\x03\x1a\x10\x1c\n\x0e\n\x07\x04\x01\x04\0\x02\0\x01\x12\
|
||||
\x03\x1a\x10\x17\n\x0e\n\x07\x04\x01\x04\0\x02\0\x02\x12\x03\x1a\x1a\x1b\
|
||||
\n\r\n\x06\x04\x01\x04\0\x02\x01\x12\x03\x1b\x10\x1c\n\x0e\n\x07\x04\x01\
|
||||
\x04\0\x02\x01\x01\x12\x03\x1b\x10\x17\n\x0e\n\x07\x04\x01\x04\0\x02\x01\
|
||||
\x02\x12\x03\x1b\x1a\x1b\n\r\n\x06\x04\x01\x04\0\x02\x02\x12\x03\x1c\x10\
|
||||
\x20\n\x0e\n\x07\x04\x01\x04\0\x02\x02\x01\x12\x03\x1c\x10\x1b\n\x0e\n\
|
||||
\x07\x04\x01\x04\0\x02\x02\x02\x12\x03\x1c\x1e\x1f\n\x0b\n\x04\x04\x01\
|
||||
\x02\0\x12\x03\x1e\x08!\n\r\n\x05\x04\x01\x02\0\x04\x12\x04\x1e\x08\x1d\
|
||||
\t\n\x0c\n\x05\x04\x01\x02\0\x06\x12\x03\x1e\x08\x15\n\x0c\n\x05\x04\x01\
|
||||
\x02\0\x01\x12\x03\x1e\x16\x1c\n\x0c\n\x05\x04\x01\x02\0\x03\x12\x03\x1e\
|
||||
\x1f\x20\n\n\n\x02\x04\x02\x12\x04!\0$\x01\n\n\n\x03\x04\x02\x01\x12\x03\
|
||||
!\x08\x1c\n\x0b\n\x04\x04\x02\x02\0\x12\x03\"\x08\x20\n\r\n\x05\x04\x02\
|
||||
\x02\0\x04\x12\x04\"\x08!\x1e\n\x0c\n\x05\x04\x02\x02\0\x05\x12\x03\"\
|
||||
\x08\x0e\n\x0c\n\x05\x04\x02\x02\0\x01\x12\x03\"\x0f\x1b\n\x0c\n\x05\x04\
|
||||
\x02\x02\0\x03\x12\x03\"\x1e\x1f\n\x0b\n\x04\x04\x02\x02\x01\x12\x03#\
|
||||
\x08!\n\r\n\x05\x04\x02\x02\x01\x04\x12\x04#\x08\"\x20\n\x0c\n\x05\x04\
|
||||
\x02\x02\x01\x05\x12\x03#\x08\x0e\n\x0c\n\x05\x04\x02\x02\x01\x01\x12\
|
||||
\x03#\x0f\x1c\n\x0c\n\x05\x04\x02\x02\x01\x03\x12\x03#\x1f\x20\n\n\n\x02\
|
||||
\x06\0\x12\x04&\0)\x01\n\n\n\x03\x06\0\x01\x12\x03&\x08\x0e\n\x0b\n\x04\
|
||||
\x06\0\x02\0\x12\x03'\x08>\n\x0c\n\x05\x06\0\x02\0\x01\x12\x03'\x0c\x11\
|
||||
\n\x0c\n\x05\x06\0\x02\0\x02\x12\x03'\x12\x1e\n\x0c\n\x05\x06\0\x02\0\
|
||||
\x03\x12\x03')<\n\x0b\n\x04\x06\0\x02\x01\x12\x03(\x08A\n\x0c\n\x05\x06\
|
||||
\0\x02\x01\x01\x12\x03(\x0c\x13\n\x0c\n\x05\x06\0\x02\x01\x02\x12\x03(\
|
||||
\x14\x20\n\x0c\n\x05\x06\0\x02\x01\x03\x12\x03(+?b\x06proto3\
|
||||
";
|
||||
|
||||
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;
|
||||
|
||||
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
|
||||
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
|
||||
}
|
||||
|
||||
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
|
||||
unsafe {
|
||||
file_descriptor_proto_lazy.get(|| {
|
||||
parse_descriptor_proto()
|
||||
})
|
||||
}
|
||||
}
|
||||
90
src/agent/protocols/src/health_ttrpc.rs
Normal file
90
src/agent/protocols/src/health_ttrpc.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
// This file is generated by ttrpc-compiler 0.3.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/Manishearth/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clipto_camel_casepy)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
use protobuf::{CodedInputStream, CodedOutputStream, Message};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct HealthClient {
|
||||
client: ::ttrpc::Client,
|
||||
}
|
||||
|
||||
impl HealthClient {
|
||||
pub fn new(client: ::ttrpc::Client) -> Self {
|
||||
HealthClient {
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check(&self, req: &super::health::CheckRequest, timeout_nano: i64) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
|
||||
let mut cres = super::health::HealthCheckResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.Health", "Check", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn version(&self, req: &super::health::CheckRequest, timeout_nano: i64) -> ::ttrpc::Result<super::health::VersionCheckResponse> {
|
||||
let mut cres = super::health::VersionCheckResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.Health", "Version", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
}
|
||||
|
||||
struct CheckMethod {
|
||||
service: Arc<std::boxed::Box<dyn Health + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CheckMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, health, CheckRequest, check);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct VersionMethod {
|
||||
service: Arc<std::boxed::Box<dyn Health + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for VersionMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, health, CheckRequest, version);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait Health {
|
||||
fn check(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::health::CheckRequest) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.Health/Check is not supported".to_string())))
|
||||
}
|
||||
fn version(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::health::CheckRequest) -> ::ttrpc::Result<super::health::VersionCheckResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.Health/Version is not supported".to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_health(service: Arc<std::boxed::Box<dyn Health + Send + Sync>>) -> HashMap <String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
|
||||
let mut methods = HashMap::new();
|
||||
|
||||
methods.insert("/grpc.Health/Check".to_string(),
|
||||
std::boxed::Box::new(CheckMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.Health/Version".to_string(),
|
||||
std::boxed::Box::new(VersionMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods
|
||||
}
|
||||
@@ -3,7 +3,6 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
#![allow(bare_trait_objects)]
|
||||
#![allow(clippy::redundant_field_names)]
|
||||
|
||||
pub mod agent;
|
||||
pub mod agent_ttrpc;
|
||||
@@ -12,3 +11,11 @@ pub mod health;
|
||||
pub mod health_ttrpc;
|
||||
pub mod oci;
|
||||
pub mod types;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn it_works() {
|
||||
assert_eq!(2 + 2, 4);
|
||||
}
|
||||
}
|
||||
|
||||
10293
src/agent/protocols/src/oci.rs
Normal file
10293
src/agent/protocols/src/oci.rs
Normal file
File diff suppressed because it is too large
Load Diff
1556
src/agent/protocols/src/types.rs
Normal file
1556
src/agent/protocols/src/types.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -24,9 +24,8 @@ regex = "1.1"
|
||||
path-absolutize = "1.2.0"
|
||||
dirs = "3.0.1"
|
||||
anyhow = "1.0.32"
|
||||
cgroups = { package = "cgroups-rs", version = "0.2.0" }
|
||||
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
|
||||
tempfile = "3.1.0"
|
||||
epoll = "4.3.1"
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.5.0"
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
// looks like we can use caps to manipulate capabilities
|
||||
// conveniently, use caps to do it directly.. maybe
|
||||
|
||||
use lazy_static;
|
||||
|
||||
use crate::log_child;
|
||||
use crate::sync::write_count;
|
||||
use anyhow::{anyhow, Result};
|
||||
@@ -124,12 +126,13 @@ pub fn drop_privileges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
|
||||
let _ = caps::set(
|
||||
if let Err(_) = caps::set(
|
||||
None,
|
||||
CapSet::Ambient,
|
||||
to_capshashset(cfd_log, caps.ambient.as_ref()),
|
||||
)
|
||||
.map_err(|_| log_child!(cfd_log, "failed to set ambient capability"));
|
||||
) {
|
||||
log_child!(cfd_log, "failed to set ambient capability");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use cgroups::blkio::{BlkIoController, BlkIoData, IoService};
|
||||
use cgroups::blkio::{BlkIo, BlkIoController, BlkIoData, IoService};
|
||||
use cgroups::cpu::CpuController;
|
||||
use cgroups::cpuacct::CpuAcctController;
|
||||
use cgroups::cpuset::CpuSetController;
|
||||
@@ -15,17 +15,18 @@ use cgroups::memory::MemController;
|
||||
use cgroups::pid::PidController;
|
||||
use cgroups::{
|
||||
BlkIoDeviceResource, BlkIoDeviceThrottleResource, Cgroup, CgroupPid, Controller,
|
||||
DeviceResource, HugePageResource, MaxValue, NetworkPriority,
|
||||
DeviceResource, DeviceResources, HugePageResource, MaxValue, NetworkPriority,
|
||||
};
|
||||
|
||||
use crate::cgroups::Manager as CgroupManager;
|
||||
use crate::container::DEFAULT_DEVICES;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use anyhow::{anyhow, Context, Error, Result};
|
||||
use lazy_static;
|
||||
use libc::{self, pid_t};
|
||||
use nix::errno::Errno;
|
||||
use oci::{
|
||||
LinuxBlockIO, LinuxCPU, LinuxDevice, LinuxDeviceCgroup, LinuxHugepageLimit, LinuxMemory,
|
||||
LinuxNetwork, LinuxPids, LinuxResources,
|
||||
LinuxNetwork, LinuxPids, LinuxResources, LinuxThrottleDevice, LinuxWeightDevice,
|
||||
};
|
||||
|
||||
use protobuf::{CachedSize, RepeatedField, SingularPtrField, UnknownFields};
|
||||
@@ -33,6 +34,7 @@ use protocols::agent::{
|
||||
BlkioStats, BlkioStatsEntry, CgroupStats, CpuStats, CpuUsage, HugetlbStats, MemoryData,
|
||||
MemoryStats, PidsStats, ThrottlingData,
|
||||
};
|
||||
use regex::Regex;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
@@ -44,23 +46,34 @@ macro_rules! sl {
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! get_controller_or_return_singular_none {
|
||||
($cg:ident) => {
|
||||
match $cg.controller_of() {
|
||||
Some(c) => c,
|
||||
None => return SingularPtrField::none(),
|
||||
}
|
||||
};
|
||||
pub fn load_or_create<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Cgroup<'a> {
|
||||
let valid_path = path.trim_start_matches("/").to_string();
|
||||
let cg = load(h.clone(), &valid_path);
|
||||
if cg.is_none() {
|
||||
info!(sl!(), "create new cgroup: {}", &valid_path);
|
||||
cgroups::Cgroup::new(h, valid_path.as_str())
|
||||
} else {
|
||||
cg.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup<'a>> {
|
||||
let valid_path = path.trim_start_matches("/").to_string();
|
||||
let cg = cgroups::Cgroup::load(h, valid_path.as_str());
|
||||
let cpu_controller: &CpuController = cg.controller_of().unwrap();
|
||||
if cpu_controller.exists() {
|
||||
Some(cg)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct Manager {
|
||||
pub paths: HashMap<String, String>,
|
||||
pub mounts: HashMap<String, String>,
|
||||
// pub rels: HashMap<String, String>,
|
||||
pub cpath: String,
|
||||
#[serde(skip)]
|
||||
cgroup: cgroups::Cgroup,
|
||||
relative_paths: HashMap<String, String>,
|
||||
}
|
||||
|
||||
// set_resource is used to set reources by cgroup controller.
|
||||
@@ -75,11 +88,17 @@ macro_rules! set_resource {
|
||||
|
||||
impl CgroupManager for Manager {
|
||||
fn apply(&self, pid: pid_t) -> Result<()> {
|
||||
self.cgroup.add_task(CgroupPid::from(pid as u64))?;
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load_or_create(h, &self.cpath);
|
||||
cg.add_task(CgroupPid::from(pid as u64));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set(&self, r: &LinuxResources, update: bool) -> Result<()> {
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load_or_create(h, &self.cpath);
|
||||
info!(
|
||||
sl!(),
|
||||
"cgroup manager set resources for container. Resources input {:?}", r
|
||||
@@ -89,49 +108,53 @@ impl CgroupManager for Manager {
|
||||
|
||||
// set cpuset and cpu reources
|
||||
if let Some(cpu) = &r.cpu {
|
||||
set_cpu_resources(&self.cgroup, cpu)?;
|
||||
set_cpu_resources(&cg, cpu)?;
|
||||
}
|
||||
|
||||
// set memory resources
|
||||
if let Some(memory) = &r.memory {
|
||||
set_memory_resources(&self.cgroup, memory, update)?;
|
||||
set_memory_resources(&cg, memory, update)?;
|
||||
}
|
||||
|
||||
// set pids resources
|
||||
if let Some(pids_resources) = &r.pids {
|
||||
set_pids_resources(&self.cgroup, pids_resources)?;
|
||||
set_pids_resources(&cg, pids_resources)?;
|
||||
}
|
||||
|
||||
// set block_io resources
|
||||
if let Some(blkio) = &r.block_io {
|
||||
set_block_io_resources(&self.cgroup, blkio, res)?;
|
||||
set_block_io_resources(&cg, blkio, res)?;
|
||||
}
|
||||
|
||||
// set hugepages resources
|
||||
if !r.hugepage_limits.is_empty() {
|
||||
set_hugepages_resources(&self.cgroup, &r.hugepage_limits, res)?;
|
||||
if r.hugepage_limits.len() > 0 {
|
||||
set_hugepages_resources(&cg, &r.hugepage_limits, res)?;
|
||||
}
|
||||
|
||||
// set network resources
|
||||
if let Some(network) = &r.network {
|
||||
set_network_resources(&self.cgroup, network, res)?;
|
||||
set_network_resources(&cg, network, res)?;
|
||||
}
|
||||
|
||||
// set devices resources
|
||||
set_devices_resources(&self.cgroup, &r.devices, res)?;
|
||||
set_devices_resources(&cg, &r.devices, res)?;
|
||||
info!(sl!(), "resources after processed {:?}", res);
|
||||
|
||||
// apply resources
|
||||
self.cgroup.apply(res)?;
|
||||
cg.apply(res)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_stats(&self) -> Result<CgroupStats> {
|
||||
// CpuStats
|
||||
let cpu_usage = get_cpuacct_stats(&self.cgroup);
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load_or_create(h, &self.cpath);
|
||||
|
||||
let throttling_data = get_cpu_stats(&self.cgroup);
|
||||
// CpuStats
|
||||
let cpu_usage = get_cpuacct_stats(&cg);
|
||||
|
||||
let throttling_data = get_cpu_stats(&cg);
|
||||
|
||||
let cpu_stats = SingularPtrField::some(CpuStats {
|
||||
cpu_usage,
|
||||
@@ -141,17 +164,17 @@ impl CgroupManager for Manager {
|
||||
});
|
||||
|
||||
// Memorystats
|
||||
let memory_stats = get_memory_stats(&self.cgroup);
|
||||
let memory_stats = get_memory_stats(&cg);
|
||||
|
||||
// PidsStats
|
||||
let pids_stats = get_pids_stats(&self.cgroup);
|
||||
let pids_stats = get_pids_stats(&cg);
|
||||
|
||||
// BlkioStats
|
||||
// note that virtiofs has no blkio stats
|
||||
let blkio_stats = get_blkio_stats(&self.cgroup);
|
||||
let blkio_stats = get_blkio_stats(&cg);
|
||||
|
||||
// HugetlbStats
|
||||
let hugetlb_stats = get_hugetlb_stats(&self.cgroup);
|
||||
let hugetlb_stats = get_hugetlb_stats(&cg);
|
||||
|
||||
Ok(CgroupStats {
|
||||
cpu_stats,
|
||||
@@ -165,13 +188,16 @@ impl CgroupManager for Manager {
|
||||
}
|
||||
|
||||
fn freeze(&self, state: FreezerState) -> Result<()> {
|
||||
let freezer_controller: &FreezerController = self.cgroup.controller_of().unwrap();
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load_or_create(h, &self.cpath);
|
||||
let freezer_controller: &FreezerController = cg.controller_of().unwrap();
|
||||
match state {
|
||||
FreezerState::Thawed => {
|
||||
freezer_controller.thaw()?;
|
||||
freezer_controller.thaw();
|
||||
}
|
||||
FreezerState::Frozen => {
|
||||
freezer_controller.freeze()?;
|
||||
freezer_controller.freeze();
|
||||
}
|
||||
_ => {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
@@ -182,12 +208,20 @@ impl CgroupManager for Manager {
|
||||
}
|
||||
|
||||
fn destroy(&mut self) -> Result<()> {
|
||||
let _ = self.cgroup.delete();
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load(h, &self.cpath);
|
||||
if cg.is_some() {
|
||||
cg.unwrap().delete();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_pids(&self) -> Result<Vec<pid_t>> {
|
||||
let mem_controller: &MemController = self.cgroup.controller_of().unwrap();
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load_or_create(h, &self.cpath);
|
||||
let mem_controller: &MemController = cg.controller_of().unwrap();
|
||||
let pids = mem_controller.tasks();
|
||||
let result = pids.iter().map(|x| x.pid as i32).collect::<Vec<i32>>();
|
||||
|
||||
@@ -196,7 +230,7 @@ impl CgroupManager for Manager {
|
||||
}
|
||||
|
||||
fn set_network_resources(
|
||||
_cg: &cgroups::Cgroup,
|
||||
cg: &cgroups::Cgroup,
|
||||
network: &LinuxNetwork,
|
||||
res: &mut cgroups::Resources,
|
||||
) -> Result<()> {
|
||||
@@ -206,7 +240,7 @@ fn set_network_resources(
|
||||
// description can be found at https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/net_cls.html
|
||||
let class_id = network.class_id.unwrap_or(0) as u64;
|
||||
if class_id != 0 {
|
||||
res.network.class_id = Some(class_id);
|
||||
res.network.class_id = class_id;
|
||||
}
|
||||
|
||||
// set network priorities
|
||||
@@ -219,47 +253,47 @@ fn set_network_resources(
|
||||
});
|
||||
}
|
||||
|
||||
res.network.update_values = true;
|
||||
res.network.priorities = priorities;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_devices_resources(
|
||||
_cg: &cgroups::Cgroup,
|
||||
device_resources: &[LinuxDeviceCgroup],
|
||||
cg: &cgroups::Cgroup,
|
||||
device_resources: &Vec<LinuxDeviceCgroup>,
|
||||
res: &mut cgroups::Resources,
|
||||
) -> Result<()> {
|
||||
info!(sl!(), "cgroup manager set devices");
|
||||
let mut devices = vec![];
|
||||
|
||||
for d in device_resources.iter() {
|
||||
if let Some(dev) = linux_device_group_to_cgroup_device(&d) {
|
||||
devices.push(dev);
|
||||
}
|
||||
let dev = linux_device_group_to_cgroup_device(&d);
|
||||
devices.push(dev);
|
||||
}
|
||||
|
||||
for d in DEFAULT_DEVICES.iter() {
|
||||
if let Some(dev) = linux_device_to_cgroup_device(&d) {
|
||||
devices.push(dev);
|
||||
}
|
||||
let dev = linux_device_to_cgroup_device(&d);
|
||||
devices.push(dev);
|
||||
}
|
||||
|
||||
for d in DEFAULT_ALLOWED_DEVICES.iter() {
|
||||
if let Some(dev) = linux_device_group_to_cgroup_device(&d) {
|
||||
devices.push(dev);
|
||||
}
|
||||
let dev = linux_device_group_to_cgroup_device(&d);
|
||||
devices.push(dev);
|
||||
}
|
||||
|
||||
res.devices.update_values = true;
|
||||
res.devices.devices = devices;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_hugepages_resources(
|
||||
_cg: &cgroups::Cgroup,
|
||||
hugepage_limits: &[LinuxHugepageLimit],
|
||||
cg: &cgroups::Cgroup,
|
||||
hugepage_limits: &Vec<LinuxHugepageLimit>,
|
||||
res: &mut cgroups::Resources,
|
||||
) -> Result<()> {
|
||||
info!(sl!(), "cgroup manager set hugepage");
|
||||
res.hugepages.update_values = true;
|
||||
let mut limits = vec![];
|
||||
|
||||
for l in hugepage_limits.iter() {
|
||||
@@ -280,6 +314,7 @@ fn set_block_io_resources(
|
||||
res: &mut cgroups::Resources,
|
||||
) -> Result<()> {
|
||||
info!(sl!(), "cgroup manager set block io");
|
||||
res.blkio.update_values = true;
|
||||
|
||||
if cg.v2() {
|
||||
res.blkio.weight = convert_blk_io_to_v2_value(blkio.weight);
|
||||
@@ -328,13 +363,11 @@ fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCPU) -> Result<()> {
|
||||
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
|
||||
|
||||
if !cpu.cpus.is_empty() {
|
||||
if let Err(e) = cpuset_controller.set_cpus(&cpu.cpus) {
|
||||
warn!(sl!(), "write cpuset failed: {:?}", e);
|
||||
}
|
||||
cpuset_controller.set_cpus(&cpu.cpus);
|
||||
}
|
||||
|
||||
if !cpu.mems.is_empty() {
|
||||
cpuset_controller.set_mems(&cpu.mems)?;
|
||||
cpuset_controller.set_mems(&cpu.mems);
|
||||
}
|
||||
|
||||
let cpu_controller: &CpuController = cg.controller_of().unwrap();
|
||||
@@ -346,12 +379,11 @@ fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCPU) -> Result<()> {
|
||||
shares
|
||||
};
|
||||
if shares != 0 {
|
||||
cpu_controller.set_shares(shares)?;
|
||||
cpu_controller.set_shares(shares);
|
||||
}
|
||||
}
|
||||
|
||||
set_resource!(cpu_controller, set_cfs_quota, cpu, quota);
|
||||
set_resource!(cpu_controller, set_cfs_period, cpu, period);
|
||||
cpu_controller.set_cfs_quota_and_period(cpu.quota, cpu.period);
|
||||
|
||||
set_resource!(cpu_controller, set_rt_runtime, cpu, realtime_runtime);
|
||||
set_resource!(cpu_controller, set_rt_period_us, cpu, realtime_period);
|
||||
@@ -386,13 +418,13 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(swappiness) = memory.swappiness {
|
||||
if swappiness >= 0 && swappiness <= 100 {
|
||||
mem_controller.set_swappiness(swappiness as u64)?;
|
||||
if let Some(swapiness) = memory.swapiness {
|
||||
if swapiness >= 0 && swapiness <= 100 {
|
||||
mem_controller.set_swappiness(swapiness as u64)?;
|
||||
} else {
|
||||
return Err(anyhow!(
|
||||
"invalid value:{}. valid memory swappiness range is 0-100",
|
||||
swappiness
|
||||
swapiness
|
||||
));
|
||||
}
|
||||
}
|
||||
@@ -418,7 +450,7 @@ fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
|
||||
}
|
||||
|
||||
fn build_blk_io_device_throttle_resource(
|
||||
input: &[oci::LinuxThrottleDevice],
|
||||
input: &Vec<oci::LinuxThrottleDevice>,
|
||||
) -> Vec<BlkIoDeviceThrottleResource> {
|
||||
let mut blk_io_device_throttle_resources = vec![];
|
||||
for d in input.iter() {
|
||||
@@ -433,32 +465,26 @@ fn build_blk_io_device_throttle_resource(
|
||||
blk_io_device_throttle_resources
|
||||
}
|
||||
|
||||
fn linux_device_to_cgroup_device(d: &LinuxDevice) -> Option<DeviceResource> {
|
||||
let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
|
||||
Some(t) => t,
|
||||
None => return None,
|
||||
};
|
||||
fn linux_device_to_cgroup_device(d: &LinuxDevice) -> DeviceResource {
|
||||
let dev_type = DeviceType::from_char(d.r#type.chars().next()).unwrap();
|
||||
|
||||
let permissions = vec![
|
||||
let mut permissions = vec![
|
||||
DevicePermissions::Read,
|
||||
DevicePermissions::Write,
|
||||
DevicePermissions::MkNod,
|
||||
];
|
||||
|
||||
Some(DeviceResource {
|
||||
DeviceResource {
|
||||
allow: true,
|
||||
devtype: dev_type,
|
||||
major: d.major,
|
||||
minor: d.minor,
|
||||
access: permissions,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn linux_device_group_to_cgroup_device(d: &LinuxDeviceCgroup) -> Option<DeviceResource> {
|
||||
let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
|
||||
Some(t) => t,
|
||||
None => return None,
|
||||
};
|
||||
fn linux_device_group_to_cgroup_device(d: &LinuxDeviceCgroup) -> DeviceResource {
|
||||
let dev_type = DeviceType::from_char(d.r#type.chars().next()).unwrap();
|
||||
|
||||
let mut permissions: Vec<DevicePermissions> = vec![];
|
||||
for p in d.access.chars().collect::<Vec<char>>() {
|
||||
@@ -470,13 +496,13 @@ fn linux_device_group_to_cgroup_device(d: &LinuxDeviceCgroup) -> Option<DeviceRe
|
||||
}
|
||||
}
|
||||
|
||||
Some(DeviceResource {
|
||||
DeviceResource {
|
||||
allow: d.allow,
|
||||
devtype: dev_type,
|
||||
major: d.major.unwrap_or(0),
|
||||
minor: d.minor.unwrap_or(0),
|
||||
access: permissions,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// split space separated values into an vector of u64
|
||||
@@ -492,7 +518,7 @@ fn lines_to_map(content: &str) -> HashMap<String, u64> {
|
||||
.lines()
|
||||
.map(|x| x.split_whitespace().collect::<Vec<&str>>())
|
||||
.filter(|x| x.len() == 2 && x[1].parse::<u64>().is_ok())
|
||||
.fold(HashMap::new(), |mut hm, x| {
|
||||
.fold(HashMap::new(), |mut hm, mut x| {
|
||||
hm.insert(x[0].to_string(), x[1].parse::<u64>().unwrap());
|
||||
hm
|
||||
})
|
||||
@@ -570,8 +596,10 @@ lazy_static! {
|
||||
}
|
||||
|
||||
fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
|
||||
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
|
||||
let cpu_controller: &CpuController = cg.controller_of().unwrap();
|
||||
|
||||
let stat = cpu_controller.cpu().stat;
|
||||
|
||||
let h = lines_to_map(&stat);
|
||||
|
||||
SingularPtrField::some(ThrottlingData {
|
||||
@@ -584,18 +612,27 @@ fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
|
||||
}
|
||||
|
||||
fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
|
||||
if let Some(cpuacct_controller) = cg.controller_of::<CpuAcctController>() {
|
||||
let cpuacct = cpuacct_controller.cpuacct();
|
||||
let cpuacct_controller: Option<&CpuAcctController> = cg.controller_of();
|
||||
if cpuacct_controller.is_none() {
|
||||
if cg.v2() {
|
||||
return SingularPtrField::some(CpuUsage {
|
||||
total_usage: 0,
|
||||
percpu_usage: vec![],
|
||||
usage_in_kernelmode: 0,
|
||||
usage_in_usermode: 0,
|
||||
unknown_fields: UnknownFields::default(),
|
||||
cached_size: CachedSize::default(),
|
||||
});
|
||||
}
|
||||
|
||||
let h = lines_to_map(&cpuacct.stat);
|
||||
let usage_in_usermode =
|
||||
(((*h.get("user").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
|
||||
let usage_in_kernelmode =
|
||||
(((*h.get("system").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
|
||||
|
||||
let total_usage = cpuacct.usage;
|
||||
|
||||
let percpu_usage = line_to_vec(&cpuacct.usage_percpu);
|
||||
// try to get from cpu controller
|
||||
let cpu_controller: &CpuController = cg.controller_of().unwrap();
|
||||
let stat = cpu_controller.cpu().stat;
|
||||
let h = lines_to_map(&stat);
|
||||
let usage_in_usermode = *h.get("user_usec").unwrap();
|
||||
let usage_in_kernelmode = *h.get("system_usec").unwrap();
|
||||
let total_usage = *h.get("usage_usec").unwrap();
|
||||
let percpu_usage = vec![];
|
||||
|
||||
return SingularPtrField::some(CpuUsage {
|
||||
total_usage,
|
||||
@@ -607,25 +644,18 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
|
||||
});
|
||||
}
|
||||
|
||||
if cg.v2() {
|
||||
return SingularPtrField::some(CpuUsage {
|
||||
total_usage: 0,
|
||||
percpu_usage: vec![],
|
||||
usage_in_kernelmode: 0,
|
||||
usage_in_usermode: 0,
|
||||
unknown_fields: UnknownFields::default(),
|
||||
cached_size: CachedSize::default(),
|
||||
});
|
||||
}
|
||||
let cpuacct_controller = cpuacct_controller.unwrap();
|
||||
let cpuacct = cpuacct_controller.cpuacct();
|
||||
|
||||
// try to get from cpu controller
|
||||
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
|
||||
let stat = cpu_controller.cpu().stat;
|
||||
let h = lines_to_map(&stat);
|
||||
let usage_in_usermode = *h.get("user_usec").unwrap();
|
||||
let usage_in_kernelmode = *h.get("system_usec").unwrap();
|
||||
let total_usage = *h.get("usage_usec").unwrap();
|
||||
let percpu_usage = vec![];
|
||||
let h = lines_to_map(&cpuacct.stat);
|
||||
let usage_in_usermode =
|
||||
(((*h.get("user").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
|
||||
let usage_in_kernelmode =
|
||||
(((*h.get("system").unwrap() * NANO_PER_SECOND) as f64) / *CLOCK_TICKS) as u64;
|
||||
|
||||
let total_usage = cpuacct.usage;
|
||||
|
||||
let percpu_usage = line_to_vec(&cpuacct.usage_percpu);
|
||||
|
||||
SingularPtrField::some(CpuUsage {
|
||||
total_usage,
|
||||
@@ -638,7 +668,7 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
|
||||
}
|
||||
|
||||
fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
|
||||
let memory_controller: &MemController = get_controller_or_return_singular_none!(cg);
|
||||
let memory_controller: &MemController = cg.controller_of().unwrap();
|
||||
|
||||
// cache from memory stat
|
||||
let memory = memory_controller.memory_stat();
|
||||
@@ -646,7 +676,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
|
||||
|
||||
// use_hierarchy
|
||||
let value = memory.use_hierarchy;
|
||||
let use_hierarchy = value == 1;
|
||||
let use_hierarchy = if value == 1 { true } else { false };
|
||||
|
||||
// gte memory datas
|
||||
let usage = SingularPtrField::some(MemoryData {
|
||||
@@ -695,17 +725,18 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
|
||||
}
|
||||
|
||||
fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
|
||||
let pid_controller: &PidController = get_controller_or_return_singular_none!(cg);
|
||||
let pid_controller: &PidController = cg.controller_of().unwrap();
|
||||
|
||||
let current = pid_controller.get_pid_current().unwrap_or(0);
|
||||
let max = pid_controller.get_pid_max();
|
||||
|
||||
let limit = match max {
|
||||
Err(_) => 0,
|
||||
Ok(max) => match max {
|
||||
let limit = if max.is_err() {
|
||||
0
|
||||
} else {
|
||||
match max.unwrap() {
|
||||
MaxValue::Value(v) => v,
|
||||
MaxValue::Max => 0,
|
||||
},
|
||||
}
|
||||
} as u64;
|
||||
|
||||
SingularPtrField::some(PidsStats {
|
||||
@@ -748,9 +779,9 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8
|
||||
Total 0
|
||||
*/
|
||||
|
||||
fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStatsEntry> {
|
||||
fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioStatsEntry> {
|
||||
let mut m = RepeatedField::new();
|
||||
if blkiodata.is_empty() {
|
||||
if blkiodata.len() == 0 {
|
||||
return m;
|
||||
}
|
||||
|
||||
@@ -770,10 +801,10 @@ fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStats
|
||||
m
|
||||
}
|
||||
|
||||
fn get_blkio_stat_ioservice(services: &[IoService]) -> RepeatedField<BlkioStatsEntry> {
|
||||
fn get_blkio_stat_ioservice(services: &Vec<IoService>) -> RepeatedField<BlkioStatsEntry> {
|
||||
let mut m = RepeatedField::new();
|
||||
|
||||
if services.is_empty() {
|
||||
if services.len() == 0 {
|
||||
return m;
|
||||
}
|
||||
|
||||
@@ -794,14 +825,14 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
|
||||
major: major as u64,
|
||||
minor: minor as u64,
|
||||
op: op.to_string(),
|
||||
value,
|
||||
value: value,
|
||||
unknown_fields: UnknownFields::default(),
|
||||
cached_size: CachedSize::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
|
||||
let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
|
||||
let blkio_controller: &BlkIoController = cg.controller_of().unwrap();
|
||||
let blkio = blkio_controller.blkio();
|
||||
|
||||
let mut resp = BlkioStats::new();
|
||||
@@ -829,13 +860,13 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
|
||||
return get_blkio_stats_v2(&cg);
|
||||
}
|
||||
|
||||
let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
|
||||
let blkio_controller: &BlkIoController = cg.controller_of().unwrap();
|
||||
let blkio = blkio_controller.blkio();
|
||||
|
||||
let mut m = BlkioStats::new();
|
||||
let io_serviced_recursive = blkio.io_serviced_recursive;
|
||||
|
||||
if io_serviced_recursive.is_empty() {
|
||||
if io_serviced_recursive.len() == 0 {
|
||||
// fall back to generic stats
|
||||
// blkio.throttle.io_service_bytes,
|
||||
// maybe io_service_bytes_recursive?
|
||||
@@ -890,8 +921,8 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap<String, HugetlbStats> {
|
||||
h
|
||||
}
|
||||
|
||||
pub const PATHS: &str = "/proc/self/cgroup";
|
||||
pub const MOUNTS: &str = "/proc/self/mountinfo";
|
||||
pub const PATHS: &'static str = "/proc/self/cgroup";
|
||||
pub const MOUNTS: &'static str = "/proc/self/mountinfo";
|
||||
|
||||
pub fn get_paths() -> Result<HashMap<String, String>> {
|
||||
let mut m = HashMap::new();
|
||||
@@ -946,19 +977,9 @@ pub fn get_mounts() -> Result<HashMap<String, String>> {
|
||||
Ok(m)
|
||||
}
|
||||
|
||||
fn new_cgroup(
|
||||
h: Box<dyn cgroups::Hierarchy>,
|
||||
path: &str,
|
||||
relative_paths: HashMap<String, String>,
|
||||
) -> Cgroup {
|
||||
let valid_path = path.trim_start_matches('/').to_string();
|
||||
cgroups::Cgroup::new_with_relative_paths(h, valid_path.as_str(), relative_paths)
|
||||
}
|
||||
|
||||
impl Manager {
|
||||
pub fn new(cpath: &str) -> Result<Self> {
|
||||
let mut m = HashMap::new();
|
||||
let mut relative_paths = HashMap::new();
|
||||
|
||||
let paths = get_paths()?;
|
||||
let mounts = get_mounts()?;
|
||||
@@ -977,7 +998,6 @@ impl Manager {
|
||||
};
|
||||
|
||||
m.insert(key.to_string(), p);
|
||||
relative_paths.insert(key.to_string(), value.to_string());
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
@@ -985,27 +1005,29 @@ impl Manager {
|
||||
mounts,
|
||||
// rels: paths,
|
||||
cpath: cpath.to_string(),
|
||||
cgroup: new_cgroup(cgroups::hierarchies::auto(), cpath, relative_paths.clone()),
|
||||
relative_paths,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
|
||||
if guest_cpuset == "" {
|
||||
pub fn update_cpuset_path(&self, cpuset_cpus: &str) -> Result<()> {
|
||||
if cpuset_cpus == "" {
|
||||
return Ok(());
|
||||
}
|
||||
info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);
|
||||
info!(sl!(), "update_cpuset_path to: {}", cpuset_cpus);
|
||||
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let root_cg = h.root_control_group();
|
||||
let h = Box::new(&*h);
|
||||
let root_cg = load_or_create(h, "");
|
||||
|
||||
let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
|
||||
let path = root_cpuset_controller.path();
|
||||
let root_path = Path::new(path);
|
||||
info!(sl!(), "root cpuset path: {:?}", &path);
|
||||
|
||||
let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
|
||||
let path = container_cpuset_controller.path();
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
let cg = load_or_create(h, &self.cpath);
|
||||
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
|
||||
let path = cpuset_controller.path();
|
||||
let container_path = Path::new(path);
|
||||
info!(sl!(), "container cpuset path: {:?}", &path);
|
||||
|
||||
@@ -1014,40 +1036,30 @@ impl Manager {
|
||||
if ancestor == root_path {
|
||||
break;
|
||||
}
|
||||
paths.push(ancestor);
|
||||
if ancestor != container_path {
|
||||
paths.push(ancestor);
|
||||
}
|
||||
}
|
||||
info!(sl!(), "parent paths to update cpuset: {:?}", &paths);
|
||||
info!(sl!(), "paths to update cpuset: {:?}", &paths);
|
||||
|
||||
let mut i = paths.len();
|
||||
loop {
|
||||
if i == 0 {
|
||||
break;
|
||||
}
|
||||
i -= 1;
|
||||
i = i - 1;
|
||||
let h = cgroups::hierarchies::auto();
|
||||
let h = Box::new(&*h);
|
||||
|
||||
// remove cgroup root from path
|
||||
let r_path = &paths[i]
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.trim_start_matches(root_path.to_str().unwrap());
|
||||
info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
|
||||
let cg = new_cgroup(
|
||||
cgroups::hierarchies::auto(),
|
||||
&r_path,
|
||||
self.relative_paths.clone(),
|
||||
);
|
||||
info!(sl!(), "updating cpuset for path {:?}", &r_path);
|
||||
let cg = load_or_create(h, &r_path);
|
||||
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
|
||||
cpuset_controller.set_cpus(guest_cpuset)?;
|
||||
}
|
||||
|
||||
if !container_cpuset.is_empty() {
|
||||
info!(
|
||||
sl!(),
|
||||
"updating cpuset for container path: {:?} cpuset: {}",
|
||||
&container_path,
|
||||
container_cpuset
|
||||
);
|
||||
container_cpuset_controller.set_cpus(container_cpuset)?;
|
||||
cpuset_controller.set_cpus(cpuset_cpus);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
// Copyright (c) 2020 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use protobuf::{CachedSize, SingularPtrField, UnknownFields};
|
||||
|
||||
use crate::cgroups::Manager as CgroupManager;
|
||||
use crate::protocols::agent::{BlkioStats, CgroupStats, CpuStats, MemoryStats, PidsStats};
|
||||
use anyhow::Result;
|
||||
use cgroups::freezer::FreezerState;
|
||||
use libc::{self, pid_t};
|
||||
use oci::LinuxResources;
|
||||
use std::collections::HashMap;
|
||||
use std::string::String;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct Manager {
|
||||
pub paths: HashMap<String, String>,
|
||||
pub mounts: HashMap<String, String>,
|
||||
pub cpath: String,
|
||||
}
|
||||
|
||||
impl CgroupManager for Manager {
|
||||
fn apply(&self, _: pid_t) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set(&self, _: &LinuxResources, _: bool) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_stats(&self) -> Result<CgroupStats> {
|
||||
Ok(CgroupStats {
|
||||
cpu_stats: SingularPtrField::some(CpuStats::default()),
|
||||
memory_stats: SingularPtrField::some(MemoryStats::new()),
|
||||
pids_stats: SingularPtrField::some(PidsStats::new()),
|
||||
blkio_stats: SingularPtrField::some(BlkioStats::new()),
|
||||
hugetlb_stats: HashMap::new(),
|
||||
unknown_fields: UnknownFields::default(),
|
||||
cached_size: CachedSize::default(),
|
||||
})
|
||||
}
|
||||
|
||||
fn freeze(&self, _: FreezerState) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn destroy(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_pids(&self) -> Result<Vec<pid_t>> {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Manager {
|
||||
pub fn new(cpath: &str) -> Result<Self> {
|
||||
Ok(Self {
|
||||
paths: HashMap::new(),
|
||||
mounts: HashMap::new(),
|
||||
cpath: cpath.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_cg_path(&self, _: &str) -> Option<String> {
|
||||
Some("".to_string())
|
||||
}
|
||||
}
|
||||
@@ -3,14 +3,15 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// use crate::configs::{FreezerState, Config};
|
||||
use anyhow::{anyhow, Result};
|
||||
use oci::LinuxResources;
|
||||
use protocols::agent::CgroupStats;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use cgroups::freezer::FreezerState;
|
||||
|
||||
pub mod fs;
|
||||
pub mod mock;
|
||||
pub mod notifier;
|
||||
pub mod systemd;
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ fn get_value_from_cgroup(path: &PathBuf, key: &str) -> Result<i64> {
|
||||
);
|
||||
|
||||
for line in content.lines() {
|
||||
let arr: Vec<&str> = line.split(' ').collect();
|
||||
let arr: Vec<&str> = line.split(" ").collect();
|
||||
if arr.len() == 2 && arr[0] == key {
|
||||
let r = arr[1].parse::<i64>()?;
|
||||
return Ok(r);
|
||||
|
||||
@@ -366,3 +366,128 @@ impl IfPrioMap {
|
||||
format!("{} {}", self.interface, self.priority)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
impl Config {
|
||||
fn new(opts: &CreateOpts) -> Result<Self> {
|
||||
if opts.spec.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("invalid createopts!".into()));
|
||||
}
|
||||
|
||||
let root = unistd::getcwd().chain_err(|| "cannot getwd")?;
|
||||
let root = root.as_path().canonicalize().chain_err(||
|
||||
"cannot resolve root into absolute path")?;
|
||||
let mut root = root.into();
|
||||
let cwd = root.clone();
|
||||
|
||||
let spec = opts.spec.as_ref().unwrap();
|
||||
if spec.root.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no root".into()));
|
||||
}
|
||||
|
||||
let rootfs = PathBuf::from(&spec.root.as_ref().unwrap().path);
|
||||
if rootfs.is_relative() {
|
||||
root = format!("{}/{}", root, rootfs.into());
|
||||
}
|
||||
|
||||
// handle annotations
|
||||
let mut label = spec.annotations
|
||||
.iter()
|
||||
.map(|(key, value)| format!("{}={}", key, value)).collect();
|
||||
label.push(format!("bundle={}", cwd));
|
||||
|
||||
let mut config = Config {
|
||||
rootfs: root,
|
||||
no_pivot_root: opts.no_pivot_root,
|
||||
readonlyfs: spec.root.as_ref().unwrap().readonly,
|
||||
hostname: spec.hostname.clone(),
|
||||
labels: label,
|
||||
no_new_keyring: opts.no_new_keyring,
|
||||
rootless_euid: opts.rootless_euid,
|
||||
rootless_cgroups: opts.rootless_cgroups,
|
||||
};
|
||||
|
||||
config.mounts = Vec::new();
|
||||
for m in &spec.mounts {
|
||||
config.mounts.push(Mount::new(&cwd, &m)?);
|
||||
}
|
||||
|
||||
config.devices = create_devices(&spec)?;
|
||||
config.cgroups = Cgroups::new(&opts)?;
|
||||
|
||||
if spec.linux.as_ref().is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no linux configuration".into()));
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
let propagation = MOUNTPROPAGATIONMAPPING.get(linux.rootfs_propagation);
|
||||
if propagation.is_none() {
|
||||
Err(ErrorKind::ErrorCode("rootfs propagation not support".into()));
|
||||
}
|
||||
|
||||
config.root_propagation = propagation.unwrap();
|
||||
if config.no_pivot_root && (config.root_propagation & MSFlags::MSPRIVATE != 0) {
|
||||
return Err(ErrorKind::ErrorCode("[r]private is not safe without pivot root".into()));
|
||||
}
|
||||
|
||||
// handle namespaces
|
||||
let m: HashMap<String, String> = HashMap::new();
|
||||
for ns in &linux.namespaces {
|
||||
if NAMESPACEMAPPING.get(&ns.r#type.as_str()).is_none() {
|
||||
return Err(ErrorKind::ErrorCode("namespace don't exist".into()));
|
||||
}
|
||||
|
||||
if m.get(&ns.r#type).is_some() {
|
||||
return Err(ErrorKind::ErrorCode(format!("duplicate ns {}", ns.r#type)));
|
||||
}
|
||||
|
||||
m.insert(ns.r#type, ns.path);
|
||||
}
|
||||
|
||||
if m.contains_key(oci::NETWORKNAMESPACE) {
|
||||
let path = m.get(oci::NETWORKNAMESPACE).unwrap();
|
||||
if path == "" {
|
||||
config.networks = vec![Network {
|
||||
r#type: "loopback",
|
||||
}];
|
||||
}
|
||||
}
|
||||
|
||||
if m.contains_key(oci::USERNAMESPACE) {
|
||||
setup_user_namespace(&spec, &mut config)?;
|
||||
}
|
||||
|
||||
config.namespaces = m.iter().map(|(key, value)| Namespace {
|
||||
r#type: key,
|
||||
path: value,
|
||||
}).collect();
|
||||
config.mask_paths = linux.mask_paths;
|
||||
config.readonly_path = linux.readonly_path;
|
||||
config.mount_label = linux.mount_label;
|
||||
config.sysctl = linux.sysctl;
|
||||
config.seccomp = None;
|
||||
config.intelrdt = None;
|
||||
|
||||
if spec.process.is_some() {
|
||||
let process = spec.process.as_ref().unwrap();
|
||||
config.oom_score_adj = process.oom_score_adj;
|
||||
config.process_label = process.selinux_label.clone();
|
||||
if process.capabilities.as_ref().is_some() {
|
||||
let cap = process.capabilities.as_ref().unwrap();
|
||||
config.capabilities = Some(Capabilities {
|
||||
..cap
|
||||
})
|
||||
}
|
||||
}
|
||||
config.hooks = None;
|
||||
config.version = spec.version;
|
||||
Ok(config)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Mount {
|
||||
fn new(cwd: &str, m: &oci::Mount) -> Result<Self> {
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -15,6 +15,7 @@
|
||||
#[macro_use]
|
||||
#[cfg(test)]
|
||||
extern crate serial_test;
|
||||
#[macro_use]
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
@@ -36,12 +37,18 @@ extern crate oci;
|
||||
extern crate path_absolutize;
|
||||
extern crate regex;
|
||||
|
||||
// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
() => {
|
||||
slog_scope::logger().new(o!("subsystem" => "rustjail"))
|
||||
};
|
||||
}
|
||||
|
||||
pub mod capabilities;
|
||||
pub mod cgroups;
|
||||
pub mod container;
|
||||
pub mod mount;
|
||||
pub mod process;
|
||||
pub mod reaper;
|
||||
pub mod specconv;
|
||||
pub mod sync;
|
||||
pub mod validator;
|
||||
@@ -70,6 +77,7 @@ use protocols::oci::{
|
||||
Root as grpcRoot, Spec as grpcSpec,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
pub fn process_grpc_to_oci(p: &grpcProcess) -> ociProcess {
|
||||
let console_size = if p.ConsoleSize.is_some() {
|
||||
@@ -91,12 +99,7 @@ pub fn process_grpc_to_oci(p: &grpcProcess) -> ociProcess {
|
||||
username: u.Username.clone(),
|
||||
}
|
||||
} else {
|
||||
ociUser {
|
||||
uid: 0,
|
||||
gid: 0,
|
||||
additional_gids: vec![],
|
||||
username: String::from(""),
|
||||
}
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
};
|
||||
|
||||
let capabilities = if p.Capabilities.is_some() {
|
||||
@@ -141,6 +144,11 @@ pub fn process_grpc_to_oci(p: &grpcProcess) -> ociProcess {
|
||||
}
|
||||
}
|
||||
|
||||
fn process_oci_to_grpc(_p: ociProcess) -> grpcProcess {
|
||||
// dont implement it for now
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
fn root_grpc_to_oci(root: &grpcRoot) -> ociRoot {
|
||||
ociRoot {
|
||||
path: root.Path.clone(),
|
||||
@@ -148,6 +156,10 @@ fn root_grpc_to_oci(root: &grpcRoot) -> ociRoot {
|
||||
}
|
||||
}
|
||||
|
||||
fn root_oci_to_grpc(_root: &ociRoot) -> grpcRoot {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
fn mount_grpc_to_oci(m: &grpcMount) -> ociMount {
|
||||
ociMount {
|
||||
destination: m.destination.clone(),
|
||||
@@ -157,6 +169,10 @@ fn mount_grpc_to_oci(m: &grpcMount) -> ociMount {
|
||||
}
|
||||
}
|
||||
|
||||
fn mount_oci_to_grpc(_m: &ociMount) -> grpcMount {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
use oci::Hook as ociHook;
|
||||
use protocols::oci::Hook as grpcHook;
|
||||
|
||||
@@ -187,6 +203,10 @@ fn hooks_grpc_to_oci(h: &grpcHooks) -> ociHooks {
|
||||
}
|
||||
}
|
||||
|
||||
fn hooks_oci_to_grpc(_h: &ociHooks) -> grpcHooks {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
use oci::{
|
||||
LinuxDevice as ociLinuxDevice, LinuxIDMapping as ociLinuxIDMapping,
|
||||
LinuxIntelRdt as ociLinuxIntelRdt, LinuxNamespace as ociLinuxNamespace,
|
||||
@@ -310,7 +330,7 @@ pub fn resources_grpc_to_oci(res: &grpcLinuxResources) -> ociLinuxResources {
|
||||
swap: Some(mem.Swap),
|
||||
kernel: Some(mem.Kernel),
|
||||
kernel_tcp: Some(mem.KernelTCP),
|
||||
swappiness: Some(mem.Swappiness as i64),
|
||||
swapiness: Some(mem.Swappiness as i64),
|
||||
disable_oom_killer: Some(mem.DisableOOMKiller),
|
||||
})
|
||||
} else {
|
||||
@@ -553,8 +573,17 @@ pub fn grpc_to_oci(grpc: &grpcSpec) -> ociSpec {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn oci_to_grpc(_oci: &ociSpec) -> grpcSpec {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn it_works() {
|
||||
assert_eq!(2 + 2, 4);
|
||||
}
|
||||
|
||||
#[allow(unused_macros)]
|
||||
#[macro_export]
|
||||
macro_rules! skip_if_not_root {
|
||||
|
||||
@@ -3,13 +3,11 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use anyhow::{anyhow, bail, Context, Error, Result};
|
||||
use libc::uid_t;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
#[cfg(not(test))]
|
||||
use nix::mount;
|
||||
use nix::mount::{MntFlags, MsFlags};
|
||||
use nix::mount::{self, MntFlags, MsFlags};
|
||||
use nix::sys::stat::{self, Mode, SFlag};
|
||||
use nix::unistd::{self, Gid, Uid};
|
||||
use nix::NixPath;
|
||||
@@ -22,11 +20,13 @@ use std::os::unix::io::RawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use path_absolutize::*;
|
||||
use scan_fmt;
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
|
||||
use crate::container::DEFAULT_DEVICES;
|
||||
use crate::sync::write_count;
|
||||
use lazy_static;
|
||||
use std::string::ToString;
|
||||
|
||||
use crate::log_child;
|
||||
@@ -48,7 +48,7 @@ pub struct Info {
|
||||
vfs_opts: String,
|
||||
}
|
||||
|
||||
const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
|
||||
const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
|
||||
const PROC_PATH: &str = "/proc";
|
||||
|
||||
// since libc didn't defined this const for musl, thus redefined it here.
|
||||
@@ -111,13 +111,7 @@ lazy_static! {
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
#[allow(unused_variables)]
|
||||
pub fn mount<
|
||||
P1: ?Sized + NixPath,
|
||||
P2: ?Sized + NixPath,
|
||||
P3: ?Sized + NixPath,
|
||||
P4: ?Sized + NixPath,
|
||||
>(
|
||||
fn mount<P1: ?Sized + NixPath, P2: ?Sized + NixPath, P3: ?Sized + NixPath, P4: ?Sized + NixPath>(
|
||||
source: Option<&P1>,
|
||||
target: &P2,
|
||||
fstype: Option<&P3>,
|
||||
@@ -131,8 +125,7 @@ pub fn mount<
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
#[allow(unused_variables)]
|
||||
pub fn umount2<P: ?Sized + NixPath>(
|
||||
fn umount2<P: ?Sized + NixPath>(
|
||||
target: &P,
|
||||
flags: MntFlags,
|
||||
) -> std::result::Result<(), nix::Error> {
|
||||
@@ -156,7 +149,7 @@ pub fn init_rootfs(
|
||||
let linux = &spec
|
||||
.linux
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Could not get linux configuration from spec"))?;
|
||||
.ok_or::<Error>(anyhow!("Could not get linux configuration from spec"))?;
|
||||
|
||||
let mut flags = MsFlags::MS_REC;
|
||||
match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
|
||||
@@ -167,14 +160,14 @@ pub fn init_rootfs(
|
||||
let root = spec
|
||||
.root
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
|
||||
.ok_or(anyhow!("Could not get rootfs path from spec"))
|
||||
.and_then(|r| {
|
||||
fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
|
||||
})?;
|
||||
|
||||
let rootfs = (*root)
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("Could not convert rootfs path to string"))?;
|
||||
.ok_or(anyhow!("Could not convert rootfs path to string"))?;
|
||||
|
||||
mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
|
||||
|
||||
@@ -188,10 +181,9 @@ pub fn init_rootfs(
|
||||
None::<&str>,
|
||||
)?;
|
||||
|
||||
let mut bind_mount_dev = false;
|
||||
for m in &spec.mounts {
|
||||
let (mut flags, data) = parse_mount(&m);
|
||||
if !m.destination.starts_with('/') || m.destination.contains("..") {
|
||||
if !m.destination.starts_with("/") || m.destination.contains("..") {
|
||||
return Err(anyhow!(
|
||||
"the mount destination {} is invalid",
|
||||
m.destination
|
||||
@@ -202,9 +194,6 @@ pub fn init_rootfs(
|
||||
mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
|
||||
} else {
|
||||
if m.destination == "/dev" {
|
||||
if m.r#type == "bind" {
|
||||
bind_mount_dev = true;
|
||||
}
|
||||
flags &= !MsFlags::MS_RDONLY;
|
||||
}
|
||||
|
||||
@@ -212,21 +201,6 @@ pub fn init_rootfs(
|
||||
check_proc_mount(m)?;
|
||||
}
|
||||
|
||||
// If the destination already exists and is not a directory, we bail
|
||||
// out This is to avoid mounting through a symlink or similar -- which
|
||||
// has been a "fun" attack scenario in the past.
|
||||
if m.r#type == "proc" || m.r#type == "sysfs" {
|
||||
if let Ok(meta) = fs::symlink_metadata(&m.destination) {
|
||||
if !meta.is_dir() {
|
||||
return Err(anyhow!(
|
||||
"Mount point {} must be ordinary directory: got {:?}",
|
||||
m.destination,
|
||||
meta.file_type()
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mount_from(cfd_log, &m, &rootfs, flags, &data, "")?;
|
||||
// bind mount won't change mount options, we need remount to make mount options
|
||||
// effective.
|
||||
@@ -246,14 +220,9 @@ pub fn init_rootfs(
|
||||
let olddir = unistd::getcwd()?;
|
||||
unistd::chdir(rootfs)?;
|
||||
|
||||
// in case the /dev directory was binded mount from guest,
|
||||
// then there's no need to create devices nodes and symlinks
|
||||
// in /dev.
|
||||
if !bind_mount_dev {
|
||||
default_symlinks()?;
|
||||
create_devices(&linux.devices, bind_device)?;
|
||||
ensure_ptmx()?;
|
||||
}
|
||||
default_symlinks()?;
|
||||
create_devices(&linux.devices, bind_device)?;
|
||||
ensure_ptmx()?;
|
||||
|
||||
unistd::chdir(&olddir)?;
|
||||
|
||||
@@ -285,9 +254,9 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
|
||||
// only allow a mount on-top of proc if it's source is "proc"
|
||||
unsafe {
|
||||
let mut stats = MaybeUninit::<libc::statfs>::uninit();
|
||||
if m.source
|
||||
if let Ok(_) = m
|
||||
.source
|
||||
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
|
||||
.is_ok()
|
||||
{
|
||||
if stats.assume_init().f_type == PROC_SUPER_MAGIC {
|
||||
return Ok(());
|
||||
@@ -310,7 +279,7 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
|
||||
@@ -419,17 +388,20 @@ fn mount_cgroups(
|
||||
|
||||
if key != base {
|
||||
let src = format!("{}/{}", m.destination.as_str(), key);
|
||||
unix::fs::symlink(destination.as_str(), &src[1..]).map_err(|e| {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"symlink: {} {} err: {}",
|
||||
key,
|
||||
destination.as_str(),
|
||||
e.to_string()
|
||||
);
|
||||
match unix::fs::symlink(destination.as_str(), &src[1..]) {
|
||||
Err(e) => {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"symlink: {} {} err: {}",
|
||||
key,
|
||||
destination.as_str(),
|
||||
e.to_string()
|
||||
);
|
||||
|
||||
e
|
||||
})?;
|
||||
return Err(e.into());
|
||||
}
|
||||
Ok(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -449,7 +421,6 @@ fn mount_cgroups(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
fn pivot_root<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
|
||||
new_root: &P1,
|
||||
put_old: &P2,
|
||||
@@ -582,7 +553,6 @@ fn parse_mount_table() -> Result<Vec<Info>> {
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
#[allow(unused_variables)]
|
||||
fn chroot<P: ?Sized + NixPath>(path: &P) -> Result<(), nix::Error> {
|
||||
#[cfg(not(test))]
|
||||
return unistd::chroot(path);
|
||||
@@ -598,14 +568,15 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
|
||||
let abs_root_buf = root_path.absolutize()?;
|
||||
let abs_root = abs_root_buf
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to parse {} to absolute path", rootfs))?;
|
||||
.ok_or::<Error>(anyhow!("failed to parse {} to absolute path", rootfs))?;
|
||||
|
||||
for info in mount_infos.iter() {
|
||||
let mount_point = Path::new(&info.mount_point);
|
||||
let abs_mount_buf = mount_point.absolutize()?;
|
||||
let abs_mount_point = abs_mount_buf
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to parse {} to absolute path", info.mount_point))?;
|
||||
let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(anyhow!(
|
||||
"failed to parse {} to absolute path",
|
||||
info.mount_point
|
||||
))?;
|
||||
let abs_mount_point_string = String::from(abs_mount_point);
|
||||
|
||||
// Umount every syfs and proc file systems, except those under the container rootfs
|
||||
@@ -623,23 +594,24 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
|
||||
MsFlags::MS_SLAVE | MsFlags::MS_REC,
|
||||
None::<&str>,
|
||||
)?;
|
||||
umount2(abs_mount_point, MntFlags::MNT_DETACH).or_else(|e| {
|
||||
if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
|
||||
return Err(anyhow!(e));
|
||||
match umount2(abs_mount_point, MntFlags::MNT_DETACH) {
|
||||
Ok(_) => (),
|
||||
Err(e) => {
|
||||
if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
|
||||
return Err(anyhow!(e));
|
||||
}
|
||||
|
||||
// If we have not privileges for umounting (e.g. rootless), then
|
||||
// cover the path.
|
||||
mount(
|
||||
Some("tmpfs"),
|
||||
abs_mount_point,
|
||||
Some("tmpfs"),
|
||||
MsFlags::empty(),
|
||||
None::<&str>,
|
||||
)?;
|
||||
}
|
||||
|
||||
// If we have not privileges for umounting (e.g. rootless), then
|
||||
// cover the path.
|
||||
mount(
|
||||
Some("tmpfs"),
|
||||
abs_mount_point,
|
||||
Some("tmpfs"),
|
||||
MsFlags::empty(),
|
||||
None::<&str>,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
mount(
|
||||
@@ -696,14 +668,18 @@ fn mount_from(
|
||||
Path::new(&dest)
|
||||
};
|
||||
|
||||
let _ = fs::create_dir_all(&dir).map_err(|e| {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"creat dir {}: {}",
|
||||
dir.to_str().unwrap(),
|
||||
e.to_string()
|
||||
)
|
||||
});
|
||||
// let _ = fs::create_dir_all(&dir);
|
||||
match fs::create_dir_all(&dir) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"creat dir {}: {}",
|
||||
dir.to_str().unwrap(),
|
||||
e.to_string()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// make sure file exists so we can bind over it
|
||||
if src.is_file() {
|
||||
@@ -720,26 +696,31 @@ fn mount_from(
|
||||
}
|
||||
};
|
||||
|
||||
let _ = stat::stat(dest.as_str()).map_err(|e| {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"dest stat error. {}: {:?}",
|
||||
dest.as_str(),
|
||||
e.as_errno()
|
||||
)
|
||||
});
|
||||
match stat::stat(dest.as_str()) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"dest stat error. {}: {}",
|
||||
dest.as_str(),
|
||||
e.as_errno().unwrap().desc()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
mount(
|
||||
match mount(
|
||||
Some(src.as_str()),
|
||||
dest.as_str(),
|
||||
Some(m.r#type.as_str()),
|
||||
flags,
|
||||
Some(d.as_str()),
|
||||
)
|
||||
.map_err(|e| {
|
||||
log_child!(cfd_log, "mount error: {:?}", e.as_errno());
|
||||
e
|
||||
})?;
|
||||
) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
log_child!(cfd_log, "mount error: {}", e.as_errno().unwrap().desc());
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
|
||||
if flags.contains(MsFlags::MS_BIND)
|
||||
&& flags.intersects(
|
||||
@@ -751,22 +732,29 @@ fn mount_from(
|
||||
| MsFlags::MS_SLAVE),
|
||||
)
|
||||
{
|
||||
mount(
|
||||
match mount(
|
||||
Some(dest.as_str()),
|
||||
dest.as_str(),
|
||||
None::<&str>,
|
||||
flags | MsFlags::MS_REMOUNT,
|
||||
None::<&str>,
|
||||
)
|
||||
.map_err(|e| {
|
||||
log_child!(cfd_log, "remout {}: {:?}", dest.as_str(), e.as_errno());
|
||||
e
|
||||
})?;
|
||||
) {
|
||||
Err(e) => {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"remout {}: {}",
|
||||
dest.as_str(),
|
||||
e.as_errno().unwrap().desc()
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
Ok(_) => {}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
static SYMLINKS: &[(&str, &str)] = &[
|
||||
static SYMLINKS: &'static [(&'static str, &'static str)] = &[
|
||||
("/proc/self/fd", "dev/fd"),
|
||||
("/proc/self/fd/0", "dev/stdin"),
|
||||
("/proc/self/fd/1", "dev/stdout"),
|
||||
@@ -899,10 +887,12 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
fn mask_path(path: &str) -> Result<()> {
|
||||
if !path.starts_with('/') || path.contains("..") {
|
||||
if !path.starts_with("/") || path.contains("..") {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
}
|
||||
|
||||
//info!("{}", path);
|
||||
|
||||
match mount(
|
||||
Some("/dev/null"),
|
||||
path,
|
||||
@@ -918,6 +908,7 @@ fn mask_path(path: &str) -> Result<()> {
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
//info!("{}: {}", path, e.as_errno().unwrap().desc());
|
||||
return Err(e.into());
|
||||
}
|
||||
|
||||
@@ -928,10 +919,12 @@ fn mask_path(path: &str) -> Result<()> {
|
||||
}
|
||||
|
||||
fn readonly_path(path: &str) -> Result<()> {
|
||||
if !path.starts_with('/') || path.contains("..") {
|
||||
if !path.starts_with("/") || path.contains("..") {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
}
|
||||
|
||||
//info!("{}", path);
|
||||
|
||||
match mount(
|
||||
Some(&path[1..]),
|
||||
path,
|
||||
@@ -949,6 +942,7 @@ fn readonly_path(path: &str) -> Result<()> {
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
//info!("{}: {}", path, e.as_errno().unwrap().desc());
|
||||
return Err(e.into());
|
||||
}
|
||||
|
||||
@@ -1010,8 +1004,8 @@ mod tests {
|
||||
// there is no spec.mounts, but should pass
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
let _ = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let _ = fs::create_dir(rootfs.path().join("dev"));
|
||||
let ret = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// Adding bad mount point to spec.mounts
|
||||
spec.mounts.push(oci::Mount {
|
||||
@@ -1029,8 +1023,8 @@ mod tests {
|
||||
ret
|
||||
);
|
||||
spec.mounts.pop();
|
||||
let _ = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let _ = fs::create_dir(rootfs.path().join("dev"));
|
||||
let ret = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// mounting a cgroup
|
||||
spec.mounts.push(oci::Mount {
|
||||
@@ -1043,8 +1037,8 @@ mod tests {
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
spec.mounts.pop();
|
||||
let _ = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let _ = fs::create_dir(rootfs.path().join("dev"));
|
||||
let ret = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// mounting /dev
|
||||
spec.mounts.push(oci::Mount {
|
||||
@@ -1185,8 +1179,8 @@ mod tests {
|
||||
let tempdir = tempdir().unwrap();
|
||||
|
||||
let olddir = unistd::getcwd().unwrap();
|
||||
defer!(let _ = unistd::chdir(&olddir););
|
||||
let _ = unistd::chdir(tempdir.path());
|
||||
defer!(unistd::chdir(&olddir););
|
||||
unistd::chdir(tempdir.path());
|
||||
|
||||
let dev = oci::LinuxDevice {
|
||||
path: "/fifo".to_string(),
|
||||
|
||||
@@ -3,18 +3,24 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// use std::process::{Stdio, Command, ExitStatus};
|
||||
use libc::pid_t;
|
||||
use std::fs::File;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::sync::mpsc::Sender;
|
||||
|
||||
// use crate::configs::{Capabilities, Rlimit};
|
||||
// use crate::cgroups::Manager as CgroupManager;
|
||||
// use crate::intelrdt::Manager as RdtManager;
|
||||
|
||||
use nix::fcntl::{fcntl, FcntlArg, OFlag};
|
||||
use nix::sys::signal::{self, Signal};
|
||||
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
|
||||
use nix::sys::wait::{self, WaitStatus};
|
||||
use nix::unistd::{self, Pid};
|
||||
use nix::Result;
|
||||
|
||||
use crate::reaper::Epoller;
|
||||
use nix::Error;
|
||||
use oci::Process as OCIProcess;
|
||||
use slog::Logger;
|
||||
|
||||
@@ -27,6 +33,8 @@ pub struct Process {
|
||||
pub exit_pipe_r: Option<RawFd>,
|
||||
pub exit_pipe_w: Option<RawFd>,
|
||||
pub extra_files: Vec<File>,
|
||||
// pub caps: Capabilities,
|
||||
// pub rlimits: Vec<Rlimit>,
|
||||
pub term_master: Option<RawFd>,
|
||||
pub tty: bool,
|
||||
pub parent_stdin: Option<RawFd>,
|
||||
@@ -41,7 +49,6 @@ pub struct Process {
|
||||
pub exit_watchers: Vec<Sender<i32>>,
|
||||
pub oci: OCIProcess,
|
||||
pub logger: Logger,
|
||||
pub epoller: Option<Epoller>,
|
||||
}
|
||||
|
||||
pub trait ProcessOperations {
|
||||
@@ -93,7 +100,6 @@ impl Process {
|
||||
exit_watchers: Vec::new(),
|
||||
oci: ocip.clone(),
|
||||
logger: logger.clone(),
|
||||
epoller: None,
|
||||
};
|
||||
|
||||
info!(logger, "before create console socket!");
|
||||
@@ -115,29 +121,6 @@ impl Process {
|
||||
}
|
||||
Ok(p)
|
||||
}
|
||||
|
||||
pub fn close_epoller(&mut self) {
|
||||
if let Some(epoller) = self.epoller.take() {
|
||||
epoller.close();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_epoller(&mut self) -> anyhow::Result<()> {
|
||||
match self.term_master {
|
||||
Some(term_master) => {
|
||||
// add epoller to process
|
||||
let epoller = Epoller::new(&self.logger, term_master)?;
|
||||
self.epoller = Some(epoller)
|
||||
}
|
||||
None => {
|
||||
info!(
|
||||
self.logger,
|
||||
"try to add epoller to a process without a term master fd"
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn create_extended_pipe(flags: OFlag, pipe_size: i32) -> Result<(RawFd, RawFd)> {
|
||||
@@ -168,11 +151,11 @@ mod tests {
|
||||
#[test]
|
||||
fn test_create_extended_pipe() {
|
||||
// Test the default
|
||||
let (_r, _w) = create_extended_pipe(OFlag::O_CLOEXEC, 0).unwrap();
|
||||
let (r, w) = create_extended_pipe(OFlag::O_CLOEXEC, 0).unwrap();
|
||||
|
||||
// Test setting to the max size
|
||||
let max_size = get_pipe_max_size();
|
||||
let (_, w) = create_extended_pipe(OFlag::O_CLOEXEC, max_size).unwrap();
|
||||
let (r, w) = create_extended_pipe(OFlag::O_CLOEXEC, max_size).unwrap();
|
||||
let actual_size = get_pipe_size(w);
|
||||
assert_eq!(max_size, actual_size);
|
||||
}
|
||||
|
||||
@@ -1,150 +0,0 @@
|
||||
// Copyright (c) 2020 Ant Group
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use nix::fcntl::OFlag;
|
||||
use slog::Logger;
|
||||
|
||||
use nix::unistd;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
const MAX_EVENTS: usize = 2;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Epoller {
|
||||
logger: Logger,
|
||||
epoll_fd: RawFd,
|
||||
// rfd and wfd are a pipe's files two ends, this pipe is
|
||||
// used to sync between the readStdio and the process exits.
|
||||
// once the process exits, it will close one end to notify
|
||||
// the readStdio that the process has exited and it should not
|
||||
// wait on the process's terminal which has been inherited
|
||||
// by it's children and hasn't exited.
|
||||
rfd: RawFd,
|
||||
wfd: RawFd,
|
||||
}
|
||||
|
||||
impl Epoller {
|
||||
pub fn new(logger: &Logger, fd: RawFd) -> Result<Epoller> {
|
||||
let epoll_fd = epoll::create(true)?;
|
||||
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
|
||||
|
||||
let mut epoller = Self {
|
||||
logger: logger.clone(),
|
||||
epoll_fd,
|
||||
rfd,
|
||||
wfd,
|
||||
};
|
||||
|
||||
epoller.add(rfd)?;
|
||||
epoller.add(fd)?;
|
||||
|
||||
Ok(epoller)
|
||||
}
|
||||
|
||||
pub fn close_wfd(&self) {
|
||||
let _ = unistd::close(self.wfd);
|
||||
}
|
||||
|
||||
pub fn close(&self) {
|
||||
let _ = unistd::close(self.rfd);
|
||||
let _ = unistd::close(self.wfd);
|
||||
let _ = unistd::close(self.epoll_fd);
|
||||
}
|
||||
|
||||
fn add(&mut self, fd: RawFd) -> Result<()> {
|
||||
info!(self.logger, "Epoller add fd {}", fd);
|
||||
// add creates an epoll which is used to monitor the process's pty's master and
|
||||
// one end of its exit notify pipe. Those files will be registered with level-triggered
|
||||
// notification.
|
||||
epoll::ctl(
|
||||
self.epoll_fd,
|
||||
epoll::ControlOptions::EPOLL_CTL_ADD,
|
||||
fd,
|
||||
epoll::Event::new(
|
||||
epoll::Events::EPOLLHUP
|
||||
| epoll::Events::EPOLLIN
|
||||
| epoll::Events::EPOLLERR
|
||||
| epoll::Events::EPOLLRDHUP,
|
||||
fd as u64,
|
||||
),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// There will be three cases on the epoller once it poll:
|
||||
// a: only pty's master get an event(other than self.rfd);
|
||||
// b: only the pipe get an event(self.rfd);
|
||||
// c: both of pty and pipe have event occur;
|
||||
// for case a, it means there is output in process's terminal and what needed to do is
|
||||
// just read the terminal and send them out; for case b, it means the process has exited
|
||||
// and there is no data in the terminal, thus just return the "EOF" to end the io;
|
||||
// for case c, it means the process has exited but there is some data in the terminal which
|
||||
// hasn't been send out, thus it should send those data out first and then send "EOF" last to
|
||||
// end the io.
|
||||
pub fn poll(&self) -> Result<RawFd> {
|
||||
let mut rfd = self.rfd;
|
||||
let mut epoll_events = vec![epoll::Event::new(epoll::Events::empty(), 0); MAX_EVENTS];
|
||||
|
||||
loop {
|
||||
let event_count = match epoll::wait(self.epoll_fd, -1, epoll_events.as_mut_slice()) {
|
||||
Ok(ec) => ec,
|
||||
Err(e) => {
|
||||
info!(self.logger, "loop wait err {:?}", e);
|
||||
// EINTR: The call was interrupted by a signal handler before either
|
||||
// any of the requested events occurred or the timeout expired
|
||||
if e.kind() == std::io::ErrorKind::Interrupted {
|
||||
continue;
|
||||
}
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
for event in epoll_events.iter().take(event_count) {
|
||||
let fd = event.data as i32;
|
||||
// fd has been assigned with one end of process's exited pipe by default, and
|
||||
// here to check is there any event occur on process's terminal, if "yes", it
|
||||
// should be dealt first, otherwise, it means the process has exited and there
|
||||
// is nothing left in the process's terminal needed to be read.
|
||||
if fd != rfd {
|
||||
rfd = fd;
|
||||
break;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
Ok(rfd)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Epoller;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::unistd;
|
||||
use std::thread;
|
||||
|
||||
#[test]
|
||||
fn test_epoller_poll() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC).unwrap();
|
||||
let epoller = Epoller::new(&logger, rfd).unwrap();
|
||||
|
||||
let child = thread::spawn(move || {
|
||||
let _ = unistd::write(wfd, "temporary file's content".as_bytes());
|
||||
});
|
||||
|
||||
// wait write to finish
|
||||
let _ = child.join();
|
||||
|
||||
let fd = epoller.poll().unwrap();
|
||||
assert_eq!(fd, rfd, "Should get rfd");
|
||||
|
||||
epoller.close();
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,8 @@
|
||||
//
|
||||
|
||||
use oci::Spec;
|
||||
// use crate::configs::namespaces;
|
||||
// use crate::configs::device::Device;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct CreateOpts {
|
||||
@@ -15,3 +17,143 @@ pub struct CreateOpts {
|
||||
pub rootless_euid: bool,
|
||||
pub rootless_cgroup: bool,
|
||||
}
|
||||
/*
|
||||
const WILDCARD: i32 = -1;
|
||||
|
||||
lazy_static! {
|
||||
static ref NAEMSPACEMAPPING: HashMap<&'static str, &'static str> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert(oci::PIDNAMESPACE, namespaces::NEWPID);
|
||||
m.insert(oci::NETWORKNAMESPACE, namespaces::NEWNET);
|
||||
m.insert(oci::UTSNAMESPACE, namespaces::NEWUTS);
|
||||
m.insert(oci::MOUNTNAMESPACE, namespaces::NEWNS);
|
||||
m.insert(oci::IPCNAMESPACE, namespaces::NEWIPC);
|
||||
m.insert(oci::USERNAMESPACE, namespaces::NEWUSER);
|
||||
m.insert(oci::CGROUPNAMESPACE, namespaces::NEWCGROUP);
|
||||
m
|
||||
};
|
||||
|
||||
static ref MOUNTPROPAGATIONMAPPING: HashMap<&'static str, MsFlags> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert("rprivate", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
|
||||
m.insert("private", MsFlags::MS_PRIVATE);
|
||||
m.insert("rslave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
|
||||
m.insert("slave", MsFlags::MS_SLAVE);
|
||||
m.insert("rshared", MsFlags::MS_SHARED | MsFlags::MS_REC);
|
||||
m.insert("shared", MsFlags::MS_SHARED);
|
||||
m.insert("runbindable", MsFlags::MS_UNBINDABLE | MsFlags::MS_REC);
|
||||
m.insert("unbindable", MsFlags::MS_UNBINDABLE);
|
||||
m
|
||||
};
|
||||
|
||||
static ref ALLOWED_DEVICES: Vec<Device> = {
|
||||
let mut m = Vec::new();
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
major: WILDCARD,
|
||||
minor: WILDCARD,
|
||||
permissions: "m",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'b',
|
||||
major: WILDCARD,
|
||||
minor: WILDCARD,
|
||||
permissions: "m",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: "/dev/null".to_string(),
|
||||
major: 1,
|
||||
minor: 3,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/random"),
|
||||
major: 1,
|
||||
minor: 8,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/full"),
|
||||
major: 1,
|
||||
minor: 7,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/tty"),
|
||||
major: 5,
|
||||
minor: 0,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/zero"),
|
||||
major: 1,
|
||||
minor: 5,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/urandom"),
|
||||
major: 1,
|
||||
minor: 9,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/console"),
|
||||
major: 5,
|
||||
minor: 1,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from(""),
|
||||
major: 136,
|
||||
minor: WILDCARD,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from(""),
|
||||
major: 5,
|
||||
minor: 2,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from(""),
|
||||
major: 10,
|
||||
minor: 200,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
m
|
||||
};
|
||||
}
|
||||
*/
|
||||
|
||||
@@ -23,8 +23,7 @@ macro_rules! log_child {
|
||||
let lfd = $fd;
|
||||
let mut log_str = format_args!($($arg)+).to_string();
|
||||
log_str.push('\n');
|
||||
// Ignore error writing to the logger, not much we can do
|
||||
let _ = write_count(lfd, log_str.as_bytes(), log_str.len());
|
||||
write_count(lfd, log_str.as_bytes(), log_str.len());
|
||||
})
|
||||
}
|
||||
|
||||
@@ -72,15 +71,7 @@ fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
|
||||
}
|
||||
}
|
||||
|
||||
if len != count {
|
||||
Err(anyhow::anyhow!(
|
||||
"invalid read count expect {} get {}",
|
||||
count,
|
||||
len
|
||||
))
|
||||
} else {
|
||||
Ok(v[0..len].to_vec())
|
||||
}
|
||||
Ok(v[0..len].to_vec())
|
||||
}
|
||||
|
||||
pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
|
||||
@@ -96,14 +87,14 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
|
||||
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
|
||||
let msg: i32 = i32::from_be_bytes(buf_array);
|
||||
match msg {
|
||||
SYNC_SUCCESS => Ok(Vec::new()),
|
||||
SYNC_SUCCESS => return Ok(Vec::new()),
|
||||
SYNC_DATA => {
|
||||
let buf = read_count(fd, MSG_SIZE)?;
|
||||
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
|
||||
let msg_length: i32 = i32::from_be_bytes(buf_array);
|
||||
let data_buf = read_count(fd, msg_length as usize)?;
|
||||
|
||||
Ok(data_buf)
|
||||
return Ok(data_buf);
|
||||
}
|
||||
SYNC_FAILED => {
|
||||
let mut error_buf = vec![];
|
||||
@@ -127,9 +118,9 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
|
||||
}
|
||||
};
|
||||
|
||||
Err(anyhow!(error_str))
|
||||
return Err(anyhow!(error_str));
|
||||
}
|
||||
_ => Err(anyhow!("error in receive sync message")),
|
||||
_ => return Err(anyhow!("error in receive sync message")),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -151,15 +142,21 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
|
||||
},
|
||||
SYNC_DATA => {
|
||||
let length: i32 = data_str.len() as i32;
|
||||
write_count(fd, &length.to_be_bytes(), MSG_SIZE).or_else(|e| {
|
||||
unistd::close(fd)?;
|
||||
Err(anyhow!(e).context("error in send message to process"))
|
||||
})?;
|
||||
match write_count(fd, &length.to_be_bytes(), MSG_SIZE) {
|
||||
Ok(_count) => (),
|
||||
Err(e) => {
|
||||
unistd::close(fd)?;
|
||||
return Err(anyhow!(e).context("error in send message to process"));
|
||||
}
|
||||
}
|
||||
|
||||
write_count(fd, data_str.as_bytes(), data_str.len()).or_else(|e| {
|
||||
unistd::close(fd)?;
|
||||
Err(anyhow!(e).context("error in send message to process"))
|
||||
})?;
|
||||
match write_count(fd, data_str.as_bytes(), data_str.len()) {
|
||||
Ok(_count) => (),
|
||||
Err(e) => {
|
||||
unistd::close(fd)?;
|
||||
return Err(anyhow!(e).context("error in send message to process"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_ => (),
|
||||
|
||||
@@ -4,21 +4,15 @@
|
||||
//
|
||||
|
||||
use crate::container::Config;
|
||||
use anyhow::{anyhow, Context, Error, Result};
|
||||
use anyhow::{anyhow, Result};
|
||||
use lazy_static;
|
||||
use nix::errno::Errno;
|
||||
use oci::{Linux, LinuxIDMapping, LinuxNamespace, Spec};
|
||||
use oci::{LinuxIDMapping, LinuxNamespace, Spec};
|
||||
use protobuf::RepeatedField;
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Component, PathBuf};
|
||||
|
||||
fn einval() -> Error {
|
||||
anyhow!(nix::Error::from_errno(Errno::EINVAL))
|
||||
}
|
||||
|
||||
fn get_linux(oci: &Spec) -> Result<&Linux> {
|
||||
oci.linux.as_ref().ok_or_else(einval)
|
||||
}
|
||||
|
||||
fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
|
||||
fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
|
||||
for ns in nses {
|
||||
if ns.r#type.as_str() == key {
|
||||
return true;
|
||||
@@ -28,28 +22,30 @@ fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn get_namespace_path(nses: &[LinuxNamespace], key: &str) -> Result<String> {
|
||||
fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> {
|
||||
for ns in nses {
|
||||
if ns.r#type.as_str() == key {
|
||||
return Ok(ns.path.clone());
|
||||
}
|
||||
}
|
||||
|
||||
Err(einval())
|
||||
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)))
|
||||
}
|
||||
|
||||
fn rootfs(root: &str) -> Result<()> {
|
||||
let path = PathBuf::from(root);
|
||||
// not absolute path or not exists
|
||||
if !path.exists() || !path.is_absolute() {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
// symbolic link? ..?
|
||||
let mut stack: Vec<String> = Vec::new();
|
||||
for c in path.components() {
|
||||
if stack.is_empty() && (c == Component::RootDir || c == Component::ParentDir) {
|
||||
continue;
|
||||
if stack.is_empty() {
|
||||
if c == Component::RootDir || c == Component::ParentDir {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if c == Component::ParentDir {
|
||||
@@ -57,11 +53,7 @@ fn rootfs(root: &str) -> Result<()> {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(v) = c.as_os_str().to_str() {
|
||||
stack.push(v.to_string());
|
||||
} else {
|
||||
return Err(einval());
|
||||
}
|
||||
stack.push(c.as_os_str().to_str().unwrap().to_string());
|
||||
}
|
||||
|
||||
let mut cleaned = PathBuf::from("/");
|
||||
@@ -69,10 +61,10 @@ fn rootfs(root: &str) -> Result<()> {
|
||||
cleaned.push(e);
|
||||
}
|
||||
|
||||
let canon = path.canonicalize().context("canonicalize")?;
|
||||
let canon = path.canonicalize()?;
|
||||
if cleaned != canon {
|
||||
// There is symbolic in path
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -83,27 +75,29 @@ fn network(_oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
fn hostname(oci: &Spec) -> Result<()> {
|
||||
if oci.hostname.is_empty() || oci.hostname == "" {
|
||||
if oci.hostname.is_empty() || oci.hostname == "".to_string() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let linux = get_linux(oci)?;
|
||||
if oci.linux.is_none() {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if !contain_namespace(&linux.namespaces, "uts") {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn security(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !contain_namespace(&linux.namespaces, "mount") {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
// don't care about selinux at present
|
||||
@@ -111,19 +105,18 @@ fn security(oci: &Spec) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn idmapping(maps: &[LinuxIDMapping]) -> Result<()> {
|
||||
fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
|
||||
for map in maps {
|
||||
if map.size > 0 {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
Err(einval())
|
||||
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)))
|
||||
}
|
||||
|
||||
fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if contain_namespace(&linux.namespaces, "user") {
|
||||
let user_ns = PathBuf::from("/proc/self/ns/user");
|
||||
if !user_ns.exists() {
|
||||
@@ -131,12 +124,12 @@ fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
// check if idmappings is correct, at least I saw idmaps
|
||||
// with zero size was passed to agent
|
||||
idmapping(&linux.uid_mappings).context("idmapping uid")?;
|
||||
idmapping(&linux.gid_mappings).context("idmapping gid")?;
|
||||
idmapping(&linux.uid_mappings)?;
|
||||
idmapping(&linux.gid_mappings)?;
|
||||
} else {
|
||||
// no user namespace but idmap
|
||||
if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
|
||||
return Err(einval());
|
||||
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,8 +137,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
fn cgroupnamespace(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if contain_namespace(&linux.namespaces, "cgroup") {
|
||||
let path = PathBuf::from("/proc/self/ns/cgroup");
|
||||
if !path.exists() {
|
||||
@@ -174,42 +166,43 @@ fn check_host_ns(path: &str) -> Result<()> {
|
||||
let cpath = PathBuf::from(path);
|
||||
let hpath = PathBuf::from("/proc/self/ns/net");
|
||||
|
||||
let real_hpath = hpath
|
||||
.read_link()
|
||||
.context(format!("read link {:?}", hpath))?;
|
||||
let meta = cpath
|
||||
.symlink_metadata()
|
||||
.context(format!("symlink metadata {:?}", cpath))?;
|
||||
let real_hpath = hpath.read_link()?;
|
||||
let meta = cpath.symlink_metadata()?;
|
||||
let file_type = meta.file_type();
|
||||
|
||||
if !file_type.is_symlink() {
|
||||
return Ok(());
|
||||
}
|
||||
let real_cpath = cpath
|
||||
.read_link()
|
||||
.context(format!("read link {:?}", cpath))?;
|
||||
let real_cpath = cpath.read_link()?;
|
||||
if real_cpath == real_hpath {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn sysctl(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
for (key, _) in linux.sysctl.iter() {
|
||||
if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") {
|
||||
if contain_namespace(&linux.namespaces, "ipc") {
|
||||
continue;
|
||||
} else {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
if key.starts_with("net.") {
|
||||
// the network ns is shared with the guest, don't expect to find it in spec
|
||||
continue;
|
||||
if !contain_namespace(&linux.namespaces, "network") {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let net = get_namespace_path(&linux.namespaces, "network")?;
|
||||
if net.is_empty() || net == "".to_string() {
|
||||
continue;
|
||||
}
|
||||
|
||||
check_host_ns(net.as_str())?;
|
||||
}
|
||||
|
||||
if contain_namespace(&linux.namespaces, "uts") {
|
||||
@@ -218,31 +211,29 @@ fn sysctl(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
if key == "kernel.hostname" {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if !contain_namespace(&linux.namespaces, "user") {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
|
||||
// rootless containers requires at least one UID/GID mapping
|
||||
return Err(einval());
|
||||
if linux.gid_mappings.len() == 0 || linux.gid_mappings.len() == 0 {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
|
||||
fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool {
|
||||
for map in maps {
|
||||
if id >= map.container_id && id < map.container_id + map.size {
|
||||
return true;
|
||||
@@ -252,7 +243,7 @@ fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
|
||||
}
|
||||
|
||||
fn rootless_euid_mount(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
|
||||
for mnt in oci.mounts.iter() {
|
||||
for opt in mnt.options.iter() {
|
||||
@@ -260,20 +251,21 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
|
||||
let fields: Vec<&str> = opt.split('=').collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let id = fields[1]
|
||||
.trim()
|
||||
.parse::<u32>()
|
||||
.context(format!("parse field {}", &fields[1]))?;
|
||||
let id = fields[1].trim().parse::<u32>()?;
|
||||
|
||||
if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
|
||||
return Err(einval());
|
||||
if opt.starts_with("uid=") {
|
||||
if !has_idmapping(&linux.uid_mappings, id) {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
|
||||
return Err(einval());
|
||||
if opt.starts_with("gid=") {
|
||||
if !has_idmapping(&linux.gid_mappings, id) {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -282,306 +274,35 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
fn rootless_euid(oci: &Spec) -> Result<()> {
|
||||
rootless_euid_mapping(oci).context("rootless euid mapping")?;
|
||||
rootless_euid_mount(oci).context("rotless euid mount")?;
|
||||
rootless_euid_mapping(oci)?;
|
||||
rootless_euid_mount(oci)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn validate(conf: &Config) -> Result<()> {
|
||||
lazy_static::initialize(&SYSCTLS);
|
||||
let oci = conf.spec.as_ref().ok_or_else(einval)?;
|
||||
let oci = conf.spec.as_ref().unwrap();
|
||||
|
||||
if oci.linux.is_none() {
|
||||
return Err(einval());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let root = match oci.root.as_ref() {
|
||||
Some(v) => v.path.as_str(),
|
||||
None => return Err(einval()),
|
||||
};
|
||||
if oci.root.is_none() {
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
let root = oci.root.as_ref().unwrap().path.as_str();
|
||||
|
||||
rootfs(root).context("rootfs")?;
|
||||
network(oci).context("network")?;
|
||||
hostname(oci).context("hostname")?;
|
||||
security(oci).context("security")?;
|
||||
usernamespace(oci).context("usernamespace")?;
|
||||
cgroupnamespace(oci).context("cgroupnamespace")?;
|
||||
sysctl(&oci).context("sysctl")?;
|
||||
rootfs(root)?;
|
||||
network(oci)?;
|
||||
hostname(oci)?;
|
||||
security(oci)?;
|
||||
usernamespace(oci)?;
|
||||
cgroupnamespace(oci)?;
|
||||
sysctl(&oci)?;
|
||||
|
||||
if conf.rootless_euid {
|
||||
rootless_euid(oci).context("rootless euid")?;
|
||||
rootless_euid(oci)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use oci::Mount;
|
||||
|
||||
#[test]
|
||||
fn test_namespace() {
|
||||
let namespaces = [
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
];
|
||||
|
||||
assert_eq!(contain_namespace(&namespaces, "net"), true);
|
||||
assert_eq!(contain_namespace(&namespaces, "uts"), true);
|
||||
|
||||
assert_eq!(contain_namespace(&namespaces, ""), false);
|
||||
assert_eq!(contain_namespace(&namespaces, "Net"), false);
|
||||
assert_eq!(contain_namespace(&namespaces, "ipc"), false);
|
||||
|
||||
assert_eq!(
|
||||
get_namespace_path(&namespaces, "net").unwrap(),
|
||||
"/sys/cgroups/net"
|
||||
);
|
||||
assert_eq!(
|
||||
get_namespace_path(&namespaces, "uts").unwrap(),
|
||||
"/sys/cgroups/uts"
|
||||
);
|
||||
|
||||
get_namespace_path(&namespaces, "").unwrap_err();
|
||||
get_namespace_path(&namespaces, "Uts").unwrap_err();
|
||||
get_namespace_path(&namespaces, "ipc").unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rootfs() {
|
||||
rootfs("/_no_exit_fs_xxxxxxxxxxx").unwrap_err();
|
||||
rootfs("sys").unwrap_err();
|
||||
rootfs("/proc/self/root").unwrap_err();
|
||||
rootfs("/proc/self/root/sys").unwrap_err();
|
||||
|
||||
rootfs("/proc/self").unwrap_err();
|
||||
rootfs("/./proc/self").unwrap_err();
|
||||
rootfs("/proc/././self").unwrap_err();
|
||||
rootfs("/proc/.././self").unwrap_err();
|
||||
|
||||
rootfs("/proc/uptime").unwrap();
|
||||
rootfs("/../proc/uptime").unwrap();
|
||||
rootfs("/../../proc/uptime").unwrap();
|
||||
rootfs("/proc/../proc/uptime").unwrap();
|
||||
rootfs("/proc/../../proc/uptime").unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hostname() {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
hostname(&spec).unwrap();
|
||||
|
||||
spec.hostname = "a.test.com".to_owned();
|
||||
hostname(&spec).unwrap_err();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
];
|
||||
spec.linux = Some(linux);
|
||||
hostname(&spec).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security() {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
let linux = Linux::default();
|
||||
spec.linux = Some(linux);
|
||||
security(&spec).unwrap();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.masked_paths.push("/test".to_owned());
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
];
|
||||
spec.linux = Some(linux);
|
||||
security(&spec).unwrap_err();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.masked_paths.push("/test".to_owned());
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "mount".to_owned(),
|
||||
path: "/sys/cgroups/mount".to_owned(),
|
||||
},
|
||||
];
|
||||
spec.linux = Some(linux);
|
||||
security(&spec).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_usernamespace() {
|
||||
let mut spec = Spec::default();
|
||||
usernamespace(&spec).unwrap_err();
|
||||
|
||||
let linux = Linux::default();
|
||||
spec.linux = Some(linux);
|
||||
usernamespace(&spec).unwrap();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.uid_mappings = vec![LinuxIDMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 0,
|
||||
}];
|
||||
spec.linux = Some(linux);
|
||||
usernamespace(&spec).unwrap_err();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.uid_mappings = vec![LinuxIDMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 100,
|
||||
}];
|
||||
spec.linux = Some(linux);
|
||||
usernamespace(&spec).unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rootless_euid() {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
// Test case: without linux
|
||||
rootless_euid_mapping(&spec).unwrap_err();
|
||||
rootless_euid_mount(&spec).unwrap_err();
|
||||
|
||||
// Test case: without user namespace
|
||||
let linux = Linux::default();
|
||||
spec.linux = Some(linux);
|
||||
rootless_euid_mapping(&spec).unwrap_err();
|
||||
|
||||
// Test case: without user namespace
|
||||
let linux = spec.linux.as_mut().unwrap();
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
];
|
||||
rootless_euid_mapping(&spec).unwrap_err();
|
||||
|
||||
let linux = spec.linux.as_mut().unwrap();
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "user".to_owned(),
|
||||
path: "/sys/cgroups/user".to_owned(),
|
||||
},
|
||||
];
|
||||
linux.uid_mappings = vec![LinuxIDMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 1000,
|
||||
}];
|
||||
linux.gid_mappings = vec![LinuxIDMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 1000,
|
||||
}];
|
||||
rootless_euid_mapping(&spec).unwrap();
|
||||
|
||||
spec.mounts.push(Mount {
|
||||
destination: "/app".to_owned(),
|
||||
r#type: "tmpfs".to_owned(),
|
||||
source: "".to_owned(),
|
||||
options: vec!["uid=10000".to_owned()],
|
||||
});
|
||||
rootless_euid_mount(&spec).unwrap_err();
|
||||
|
||||
spec.mounts = vec![
|
||||
(Mount {
|
||||
destination: "/app".to_owned(),
|
||||
r#type: "tmpfs".to_owned(),
|
||||
source: "".to_owned(),
|
||||
options: vec!["uid=500".to_owned(), "gid=500".to_owned()],
|
||||
}),
|
||||
];
|
||||
rootless_euid(&spec).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_host_ns() {
|
||||
check_host_ns("/proc/self/ns/net").unwrap_err();
|
||||
check_host_ns("/proc/sys/net/ipv4/tcp_sack").unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sysctl() {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.namespaces = vec![LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
}];
|
||||
linux
|
||||
.sysctl
|
||||
.insert("kernel.domainname".to_owned(), "test.com".to_owned());
|
||||
spec.linux = Some(linux);
|
||||
sysctl(&spec).unwrap_err();
|
||||
|
||||
spec.linux
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.namespaces
|
||||
.push(LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
});
|
||||
sysctl(&spec).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_validate() {
|
||||
let spec = Spec::default();
|
||||
let mut config = Config {
|
||||
cgroup_name: "container1".to_owned(),
|
||||
use_systemd_cgroup: false,
|
||||
no_pivot_root: true,
|
||||
no_new_keyring: true,
|
||||
rootless_euid: false,
|
||||
rootless_cgroup: false,
|
||||
spec: Some(spec),
|
||||
};
|
||||
|
||||
validate(&config).unwrap_err();
|
||||
|
||||
let linux = Linux::default();
|
||||
config.spec.as_mut().unwrap().linux = Some(linux);
|
||||
validate(&config).unwrap_err();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,10 +21,7 @@ const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
|
||||
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
|
||||
const VSOCK_ADDR: &str = "vsock://-1";
|
||||
const VSOCK_PORT: u16 = 1024;
|
||||
|
||||
// Environment variables used for development and testing
|
||||
const SERVER_ADDR_ENV_VAR: &str = "KATA_AGENT_SERVER_ADDR";
|
||||
const LOG_LEVEL_ENV_VAR: &str = "KATA_AGENT_LOG_LEVEL";
|
||||
|
||||
// FIXME: unused
|
||||
const TRACE_MODE_FLAG: &str = "agent.trace";
|
||||
@@ -43,36 +40,6 @@ pub struct agentConfig {
|
||||
pub unified_cgroup_hierarchy: bool,
|
||||
}
|
||||
|
||||
// parse_cmdline_param parse commandline parameters.
|
||||
macro_rules! parse_cmdline_param {
|
||||
// commandline flags, without func to parse the option values
|
||||
($param:ident, $key:ident, $field:expr) => {
|
||||
if $param.eq(&$key) {
|
||||
$field = true;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// commandline options, with func to parse the option values
|
||||
($param:ident, $key:ident, $field:expr, $func:ident) => {
|
||||
if $param.starts_with(format!("{}=", $key).as_str()) {
|
||||
let val = $func($param)?;
|
||||
$field = val;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// commandline options, with func to parse the option values, and match func
|
||||
// to valid the values
|
||||
($param:ident, $key:ident, $field:expr, $func:ident, $guard:expr) => {
|
||||
if $param.starts_with(format!("{}=", $key).as_str()) {
|
||||
let val = $func($param)?;
|
||||
if $guard(val) {
|
||||
$field = val;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl agentConfig {
|
||||
pub fn new() -> agentConfig {
|
||||
agentConfig {
|
||||
@@ -93,67 +60,63 @@ impl agentConfig {
|
||||
let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
|
||||
for param in params.iter() {
|
||||
// parse cmdline flags
|
||||
parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, self.debug_console);
|
||||
parse_cmdline_param!(param, DEV_MODE_FLAG, self.dev_mode);
|
||||
if param.eq(&DEBUG_CONSOLE_FLAG) {
|
||||
self.debug_console = true;
|
||||
}
|
||||
|
||||
if param.eq(&DEV_MODE_FLAG) {
|
||||
self.dev_mode = true;
|
||||
}
|
||||
|
||||
// parse cmdline options
|
||||
parse_cmdline_param!(param, LOG_LEVEL_OPTION, self.log_level, get_log_level);
|
||||
if param.starts_with(format!("{}=", LOG_LEVEL_OPTION).as_str()) {
|
||||
let level = get_log_level(param)?;
|
||||
self.log_level = level;
|
||||
}
|
||||
|
||||
// ensure the timeout is a positive value
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
HOTPLUG_TIMOUT_OPTION,
|
||||
self.hotplug_timeout,
|
||||
get_hotplug_timeout,
|
||||
|hotplugTimeout: time::Duration| hotplugTimeout.as_secs() > 0
|
||||
);
|
||||
if param.starts_with(format!("{}=", HOTPLUG_TIMOUT_OPTION).as_str()) {
|
||||
let hotplugTimeout = get_hotplug_timeout(param)?;
|
||||
// ensure the timeout is a positive value
|
||||
if hotplugTimeout.as_secs() > 0 {
|
||||
self.hotplug_timeout = hotplugTimeout;
|
||||
}
|
||||
}
|
||||
|
||||
// vsock port should be positive values
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
DEBUG_CONSOLE_VPORT_OPTION,
|
||||
self.debug_console_vport,
|
||||
get_vsock_port,
|
||||
|port| port > 0
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
LOG_VPORT_OPTION,
|
||||
self.log_vport,
|
||||
get_vsock_port,
|
||||
|port| port > 0
|
||||
);
|
||||
if param.starts_with(format!("{}=", DEBUG_CONSOLE_VPORT_OPTION).as_str()) {
|
||||
let port = get_vsock_port(param)?;
|
||||
if port > 0 {
|
||||
self.debug_console_vport = port;
|
||||
}
|
||||
}
|
||||
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
CONTAINER_PIPE_SIZE_OPTION,
|
||||
self.container_pipe_size,
|
||||
get_container_pipe_size
|
||||
);
|
||||
parse_cmdline_param!(
|
||||
param,
|
||||
UNIFIED_CGROUP_HIERARCHY_OPTION,
|
||||
self.unified_cgroup_hierarchy,
|
||||
get_bool_value
|
||||
);
|
||||
if param.starts_with(format!("{}=", LOG_VPORT_OPTION).as_str()) {
|
||||
let port = get_vsock_port(param)?;
|
||||
if port > 0 {
|
||||
self.log_vport = port;
|
||||
}
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", CONTAINER_PIPE_SIZE_OPTION).as_str()) {
|
||||
let container_pipe_size = get_container_pipe_size(param)?;
|
||||
self.container_pipe_size = container_pipe_size
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", UNIFIED_CGROUP_HIERARCHY_OPTION).as_str()) {
|
||||
let b = get_bool_value(param, false);
|
||||
self.unified_cgroup_hierarchy = b;
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
|
||||
self.server_addr = addr;
|
||||
}
|
||||
|
||||
if let Ok(addr) = env::var(LOG_LEVEL_ENV_VAR) {
|
||||
if let Ok(level) = logrus_to_slog_level(&addr) {
|
||||
self.log_level = level;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn get_vsock_port(p: &str) -> Result<i32> {
|
||||
let fields: Vec<&str> = p.split('=').collect();
|
||||
let fields: Vec<&str> = p.split("=").collect();
|
||||
if fields.len() != 2 {
|
||||
return Err(anyhow!("invalid port parameter"));
|
||||
}
|
||||
@@ -189,7 +152,7 @@ fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
|
||||
}
|
||||
|
||||
fn get_log_level(param: &str) -> Result<slog::Level> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(anyhow!("invalid log level parameter"));
|
||||
@@ -203,7 +166,7 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
|
||||
}
|
||||
|
||||
fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(anyhow!("invalid hotplug timeout parameter"));
|
||||
@@ -222,24 +185,36 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
|
||||
Ok(time::Duration::from_secs(value.unwrap()))
|
||||
}
|
||||
|
||||
fn get_bool_value(param: &str) -> Result<bool> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
fn get_bool_value(param: &str, default: bool) -> bool {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Ok(false);
|
||||
return default;
|
||||
}
|
||||
|
||||
let v = fields[1];
|
||||
|
||||
// first try to parse as bool value
|
||||
v.parse::<bool>().or_else(|_err1| {
|
||||
// then try to parse as integer value
|
||||
v.parse::<u64>().or(Ok(0)).map(|v| !matches!(v, 0))
|
||||
})
|
||||
// bool
|
||||
let t: std::result::Result<bool, std::str::ParseBoolError> = v.parse();
|
||||
if t.is_ok() {
|
||||
return t.unwrap();
|
||||
}
|
||||
|
||||
// integer
|
||||
let i: std::result::Result<u64, std::num::ParseIntError> = v.parse();
|
||||
if i.is_err() {
|
||||
return default;
|
||||
}
|
||||
|
||||
// only `0` returns false, otherwise returns true
|
||||
match i.unwrap() {
|
||||
0 => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_container_pipe_size(param: &str) -> Result<i32> {
|
||||
let fields: Vec<&str> = param.split('=').collect();
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(anyhow!("invalid container pipe size parameter"));
|
||||
@@ -324,495 +299,297 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_cmdline() {
|
||||
const TEST_SERVER_ADDR: &str = "vsock://-1:1024";
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestData<'a> {
|
||||
contents: &'a str,
|
||||
env_vars: Vec<&'a str>,
|
||||
debug_console: bool,
|
||||
dev_mode: bool,
|
||||
log_level: slog::Level,
|
||||
hotplug_timeout: time::Duration,
|
||||
container_pipe_size: i32,
|
||||
server_addr: &'a str,
|
||||
unified_cgroup_hierarchy: bool,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
TestData {
|
||||
contents: "agent.debug_consolex agent.devmode",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.debug_console agent.devmodex",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.logx=debug",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.log=debug",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: slog::Level::Debug,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.log=debug",
|
||||
env_vars: vec!["KATA_AGENT_LOG_LEVEL=trace"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: slog::Level::Trace,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo bar",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo bar",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent bar",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo debug_console agent bar devmode",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.debug_console",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.debug_console ",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.debug_console foo",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.debug_console foo",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.debug_console bar",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.debug_console",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.debug_console ",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.devmode ",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode foo",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.devmode foo",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.devmode bar",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.devmode",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.devmode ",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100 agent.unified_cgroup_hierarchy=a",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: time::Duration::from_secs(100),
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0 agent.unified_cgroup_hierarchy=11",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: true,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152 agent.unified_cgroup_hierarchy=false",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: 2097152,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=100 agent.unified_cgroup_hierarchy=true",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: 100,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: true,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=0 agent.unified_cgroup_hierarchy=0",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pip_siz=100 agent.unified_cgroup_hierarchy=1",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: true,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: Vec::new(),
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_SERVER_ADDR=foo"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: "foo",
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_SERVER_ADDR=="],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: "=",
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_SERVER_ADDR==foo"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: "=foo",
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_SERVER_ADDR=foo=bar=baz="],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: "foo=bar=baz=",
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_SERVER_ADDR=unix:///tmp/foo.socket"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: "unix:///tmp/foo.socket",
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_SERVER_ADDR=unix://@/tmp/foo.socket"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: "unix://@/tmp/foo.socket",
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_LOG_LEVEL="],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_LOG_LEVEL=invalid"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_LOG_LEVEL=debug"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: slog::Level::Debug,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
env_vars: vec!["KATA_AGENT_LOG_LEVEL=debugger"],
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: TEST_SERVER_ADDR,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
];
|
||||
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
@@ -826,8 +603,7 @@ mod tests {
|
||||
let result = config.parse_cmdline(&filename.to_owned());
|
||||
assert!(result.is_err());
|
||||
|
||||
// Now, test various combinations of file contents and environment
|
||||
// variables.
|
||||
// Now, test various combinations of file contents
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
@@ -836,23 +612,10 @@ mod tests {
|
||||
let filename = file_path.to_str().expect("failed to create filename");
|
||||
|
||||
let mut file =
|
||||
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
|
||||
File::create(filename).expect(&format!("{}: failed to create file", msg));
|
||||
|
||||
file.write_all(d.contents.as_bytes())
|
||||
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
|
||||
|
||||
let mut vars_to_unset = Vec::new();
|
||||
|
||||
for v in &d.env_vars {
|
||||
let fields: Vec<&str> = v.split('=').collect();
|
||||
|
||||
let name = fields[0];
|
||||
let value = fields[1..].join("=");
|
||||
|
||||
env::set_var(name, value);
|
||||
|
||||
vars_to_unset.push(name);
|
||||
}
|
||||
.expect(&format!("{}: failed to write file contents", msg));
|
||||
|
||||
let mut config = agentConfig::new();
|
||||
assert_eq!(config.debug_console, false, "{}", msg);
|
||||
@@ -865,7 +628,6 @@ mod tests {
|
||||
msg
|
||||
);
|
||||
assert_eq!(config.container_pipe_size, 0, "{}", msg);
|
||||
assert_eq!(config.server_addr, TEST_SERVER_ADDR, "{}", msg);
|
||||
|
||||
let result = config.parse_cmdline(filename);
|
||||
assert!(result.is_ok(), "{}", msg);
|
||||
@@ -880,11 +642,6 @@ mod tests {
|
||||
assert_eq!(d.log_level, config.log_level, "{}", msg);
|
||||
assert_eq!(d.hotplug_timeout, config.hotplug_timeout, "{}", msg);
|
||||
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
|
||||
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
|
||||
|
||||
for v in vars_to_unset {
|
||||
env::remove_var(v);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -958,7 +715,7 @@ mod tests {
|
||||
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
assert_result!(d.result, result, msg);
|
||||
assert_result!(d.result, result, format!("{}", msg));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1052,7 +809,7 @@ mod tests {
|
||||
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
assert_result!(d.result, result, msg);
|
||||
assert_result!(d.result, result, format!("{}", msg));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1122,7 +879,7 @@ mod tests {
|
||||
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
assert_result!(d.result, result, msg);
|
||||
assert_result!(d.result, result, format!("{}", msg));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1196,7 +953,7 @@ mod tests {
|
||||
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
assert_result!(d.result, result, msg);
|
||||
assert_result!(d.result, result, format!("{}", msg));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,18 +28,11 @@ macro_rules! sl {
|
||||
|
||||
const VM_ROOTFS: &str = "/";
|
||||
|
||||
struct DevIndexEntry {
|
||||
idx: usize,
|
||||
residx: Vec<usize>,
|
||||
}
|
||||
|
||||
struct DevIndex(HashMap<String, DevIndexEntry>);
|
||||
|
||||
// DeviceHandler is the type of callback to be defined to handle every type of device driver.
|
||||
type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>, &DevIndex) -> Result<()>;
|
||||
type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>) -> Result<()>;
|
||||
|
||||
// DEVICEHANDLERLIST lists the supported drivers.
|
||||
#[rustfmt::skip]
|
||||
// DeviceHandlerList lists the supported drivers.
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
lazy_static! {
|
||||
static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
|
||||
let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new();
|
||||
@@ -65,7 +58,7 @@ pub fn online_device(path: &str) -> Result<()> {
|
||||
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
|
||||
// while deviceAddr is the address at which the device is attached on the bridge.
|
||||
fn get_pci_device_address(pci_id: &str) -> Result<String> {
|
||||
let tokens: Vec<&str> = pci_id.split('/').collect();
|
||||
let tokens: Vec<&str> = pci_id.split("/").collect();
|
||||
|
||||
if tokens.len() != 2 {
|
||||
return Err(anyhow!(
|
||||
@@ -137,14 +130,17 @@ fn get_device_name(sandbox: &Arc<Mutex<Sandbox>>, dev_addr: &str) -> Result<Stri
|
||||
|
||||
info!(sl!(), "Waiting on channel for device notification\n");
|
||||
let hotplug_timeout = AGENT_CONFIG.read().unwrap().hotplug_timeout;
|
||||
let dev_name = rx.recv_timeout(hotplug_timeout).map_err(|_| {
|
||||
GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
|
||||
anyhow!(
|
||||
"Timeout reached after {:?} waiting for device {}",
|
||||
hotplug_timeout,
|
||||
dev_addr
|
||||
)
|
||||
})?;
|
||||
let dev_name = match rx.recv_timeout(hotplug_timeout) {
|
||||
Ok(name) => name,
|
||||
Err(_) => {
|
||||
GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
|
||||
return Err(anyhow!(
|
||||
"Timeout reached after {:?} waiting for device {}",
|
||||
hotplug_timeout,
|
||||
dev_addr
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(format!("{}/{}", SYSTEM_DEV_PATH, &dev_name))
|
||||
}
|
||||
@@ -165,7 +161,7 @@ pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Resul
|
||||
|
||||
/// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
|
||||
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
|
||||
let tokens: Vec<&str> = scsi_addr.split(':').collect();
|
||||
let tokens: Vec<&str> = scsi_addr.split(":").collect();
|
||||
if tokens.len() != 2 {
|
||||
return Err(anyhow!(
|
||||
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
|
||||
@@ -198,7 +194,7 @@ fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
|
||||
// the same device in the list of devices provided through the OCI spec.
|
||||
// This is needed to update information about minor/major numbers that cannot
|
||||
// be predicted from the caller.
|
||||
fn update_spec_device_list(device: &Device, spec: &mut Spec, devidx: &DevIndex) -> Result<()> {
|
||||
fn update_spec_device_list(device: &Device, spec: &mut Spec) -> Result<()> {
|
||||
let major_id: c_uint;
|
||||
let minor_id: c_uint;
|
||||
|
||||
@@ -211,10 +207,10 @@ fn update_spec_device_list(device: &Device, spec: &mut Spec, devidx: &DevIndex)
|
||||
));
|
||||
}
|
||||
|
||||
let linux = spec
|
||||
.linux
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => return Err(anyhow!("Spec didn't container linux field")),
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
if !Path::new(&device.vm_path).exists() {
|
||||
return Err(anyhow!("vm_path:{} doesn't exist", device.vm_path));
|
||||
@@ -232,44 +228,44 @@ fn update_spec_device_list(device: &Device, spec: &mut Spec, devidx: &DevIndex)
|
||||
"got the device: dev_path: {}, major: {}, minor: {}\n", &device.vm_path, major_id, minor_id
|
||||
);
|
||||
|
||||
if let Some(idxdata) = devidx.0.get(device.container_path.as_str()) {
|
||||
let dev = &mut linux.devices[idxdata.idx];
|
||||
let host_major = dev.major;
|
||||
let host_minor = dev.minor;
|
||||
let devices = linux.devices.as_mut_slice();
|
||||
for dev in devices.iter_mut() {
|
||||
if dev.path == device.container_path {
|
||||
let host_major = dev.major;
|
||||
let host_minor = dev.minor;
|
||||
|
||||
dev.major = major_id as i64;
|
||||
dev.minor = minor_id as i64;
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"change the device from major: {} minor: {} to vm device major: {} minor: {}",
|
||||
host_major,
|
||||
host_minor,
|
||||
major_id,
|
||||
minor_id
|
||||
);
|
||||
|
||||
// Resources must be updated since they are used to identify
|
||||
// the device in the devices cgroup.
|
||||
for ridx in &idxdata.residx {
|
||||
// unwrap is safe, because residx would be empty if there
|
||||
// were no resources
|
||||
let res = &mut linux.resources.as_mut().unwrap().devices[*ridx];
|
||||
res.major = Some(major_id as i64);
|
||||
res.minor = Some(minor_id as i64);
|
||||
dev.major = major_id as i64;
|
||||
dev.minor = minor_id as i64;
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"set resources for device major: {} minor: {}\n", major_id, minor_id
|
||||
"change the device from major: {} minor: {} to vm device major: {} minor: {}",
|
||||
host_major,
|
||||
host_minor,
|
||||
major_id,
|
||||
minor_id
|
||||
);
|
||||
|
||||
// Resources must be updated since they are used to identify the
|
||||
// device in the devices cgroup.
|
||||
if let Some(res) = linux.resources.as_mut() {
|
||||
let ds = res.devices.as_mut_slice();
|
||||
for d in ds.iter_mut() {
|
||||
if d.major == Some(host_major) && d.minor == Some(host_minor) {
|
||||
d.major = Some(major_id as i64);
|
||||
d.minor = Some(minor_id as i64);
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"set resources for device major: {} minor: {}\n", major_id, minor_id
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"Should have found a matching device {} in the spec",
|
||||
device.vm_path
|
||||
))
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// device.Id should be the predicted device name (vda, vdb, ...)
|
||||
@@ -278,13 +274,12 @@ fn virtiommio_blk_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
devidx: &DevIndex,
|
||||
) -> Result<()> {
|
||||
if device.vm_path == "" {
|
||||
return Err(anyhow!("Invalid path for virtio mmio blk device"));
|
||||
}
|
||||
|
||||
update_spec_device_list(device, spec, devidx)
|
||||
update_spec_device_list(device, spec)
|
||||
}
|
||||
|
||||
// device.Id should be the PCI address in the format "bridgeAddr/deviceAddr".
|
||||
@@ -294,7 +289,6 @@ fn virtio_blk_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
devidx: &DevIndex,
|
||||
) -> Result<()> {
|
||||
let mut dev = device.clone();
|
||||
|
||||
@@ -304,7 +298,7 @@ fn virtio_blk_device_handler(
|
||||
dev.vm_path = get_pci_device_name(sandbox, &device.id)?;
|
||||
}
|
||||
|
||||
update_spec_device_list(&dev, spec, devidx)
|
||||
update_spec_device_list(&dev, spec)
|
||||
}
|
||||
|
||||
// device.Id should be the SCSI address of the disk in the format "scsiID:lunID"
|
||||
@@ -312,49 +306,22 @@ fn virtio_scsi_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
devidx: &DevIndex,
|
||||
) -> Result<()> {
|
||||
let mut dev = device.clone();
|
||||
dev.vm_path = get_scsi_device_name(sandbox, &device.id)?;
|
||||
update_spec_device_list(&dev, spec, devidx)
|
||||
update_spec_device_list(&dev, spec)
|
||||
}
|
||||
|
||||
fn virtio_nvdimm_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
devidx: &DevIndex,
|
||||
) -> Result<()> {
|
||||
if device.vm_path == "" {
|
||||
return Err(anyhow!("Invalid path for nvdimm device"));
|
||||
}
|
||||
|
||||
update_spec_device_list(device, spec, devidx)
|
||||
}
|
||||
|
||||
impl DevIndex {
|
||||
fn new(spec: &Spec) -> DevIndex {
|
||||
let mut map = HashMap::new();
|
||||
|
||||
if let Some(linux) = spec.linux.as_ref() {
|
||||
for (i, d) in linux.devices.iter().enumerate() {
|
||||
let mut residx = Vec::new();
|
||||
|
||||
if let Some(linuxres) = linux.resources.as_ref() {
|
||||
for (j, r) in linuxres.devices.iter().enumerate() {
|
||||
if r.r#type == d.r#type
|
||||
&& r.major == Some(d.major)
|
||||
&& r.minor == Some(d.minor)
|
||||
{
|
||||
residx.push(j);
|
||||
}
|
||||
}
|
||||
}
|
||||
map.insert(d.path.clone(), DevIndexEntry { idx: i, residx });
|
||||
}
|
||||
}
|
||||
DevIndex(map)
|
||||
}
|
||||
update_spec_device_list(device, spec)
|
||||
}
|
||||
|
||||
pub fn add_devices(
|
||||
@@ -362,21 +329,14 @@ pub fn add_devices(
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
let devidx = DevIndex::new(spec);
|
||||
|
||||
for device in devices.iter() {
|
||||
add_device(device, spec, sandbox, &devidx)?;
|
||||
add_device(device, spec, sandbox)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_device(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
devidx: &DevIndex,
|
||||
) -> Result<()> {
|
||||
fn add_device(device: &Device, spec: &mut Spec, sandbox: &Arc<Mutex<Sandbox>>) -> Result<()> {
|
||||
// log before validation to help with debugging gRPC protocol version differences.
|
||||
info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}",
|
||||
device.id, device.field_type, device.vm_path, device.container_path, device.options);
|
||||
@@ -395,7 +355,7 @@ fn add_device(
|
||||
|
||||
match DEVICEHANDLERLIST.get(device.field_type.as_str()) {
|
||||
None => Err(anyhow!("Unknown device type {}", device.field_type)),
|
||||
Some(dev_handler) => dev_handler(device, spec, sandbox, devidx),
|
||||
Some(dev_handler) => dev_handler(device, spec, sandbox),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -408,10 +368,10 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
|
||||
let major = stat::major(rdev) as i64;
|
||||
let minor = stat::minor(rdev) as i64;
|
||||
|
||||
let linux = spec
|
||||
.linux
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => return Err(anyhow!("Spec didn't container linux field")),
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
if linux.resources.is_none() {
|
||||
linux.resources = Some(LinuxResources::default());
|
||||
@@ -453,263 +413,4 @@ mod tests {
|
||||
assert_eq!(devices[0].major, Some(major));
|
||||
assert_eq!(devices[0].minor, Some(minor));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_spec_device_list() {
|
||||
let (major, minor) = (7, 2);
|
||||
let mut device = Device::default();
|
||||
let mut spec = Spec::default();
|
||||
|
||||
// container_path empty
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_err());
|
||||
|
||||
device.container_path = "/dev/null".to_string();
|
||||
|
||||
// linux is empty
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_err());
|
||||
|
||||
spec.linux = Some(Linux::default());
|
||||
|
||||
// linux.devices is empty
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_err());
|
||||
|
||||
spec.linux.as_mut().unwrap().devices = vec![oci::LinuxDevice {
|
||||
path: "/dev/null2".to_string(),
|
||||
major,
|
||||
minor,
|
||||
..oci::LinuxDevice::default()
|
||||
}];
|
||||
|
||||
// vm_path empty
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_err());
|
||||
|
||||
device.vm_path = "/dev/null".to_string();
|
||||
|
||||
// guest and host path are not the same
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_err(), "device={:?} spec={:?}", device, spec);
|
||||
|
||||
spec.linux.as_mut().unwrap().devices[0].path = device.container_path.clone();
|
||||
|
||||
// spec.linux.resources is empty
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_ok());
|
||||
|
||||
// update both devices and cgroup lists
|
||||
spec.linux.as_mut().unwrap().devices = vec![oci::LinuxDevice {
|
||||
path: device.container_path.clone(),
|
||||
major,
|
||||
minor,
|
||||
..oci::LinuxDevice::default()
|
||||
}];
|
||||
|
||||
spec.linux.as_mut().unwrap().resources = Some(oci::LinuxResources {
|
||||
devices: vec![oci::LinuxDeviceCgroup {
|
||||
major: Some(major),
|
||||
minor: Some(minor),
|
||||
..oci::LinuxDeviceCgroup::default()
|
||||
}],
|
||||
..oci::LinuxResources::default()
|
||||
});
|
||||
|
||||
let devidx = DevIndex::new(&spec);
|
||||
let res = update_spec_device_list(&device, &mut spec, &devidx);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_spec_device_list_guest_host_conflict() {
|
||||
let null_rdev = fs::metadata("/dev/null").unwrap().rdev();
|
||||
let zero_rdev = fs::metadata("/dev/zero").unwrap().rdev();
|
||||
let full_rdev = fs::metadata("/dev/full").unwrap().rdev();
|
||||
|
||||
let host_major_a = stat::major(null_rdev) as i64;
|
||||
let host_minor_a = stat::minor(null_rdev) as i64;
|
||||
let host_major_b = stat::major(zero_rdev) as i64;
|
||||
let host_minor_b = stat::minor(zero_rdev) as i64;
|
||||
|
||||
let mut spec = Spec {
|
||||
linux: Some(Linux {
|
||||
devices: vec![
|
||||
oci::LinuxDevice {
|
||||
path: "/dev/a".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: host_major_a,
|
||||
minor: host_minor_a,
|
||||
..oci::LinuxDevice::default()
|
||||
},
|
||||
oci::LinuxDevice {
|
||||
path: "/dev/b".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: host_major_b,
|
||||
minor: host_minor_b,
|
||||
..oci::LinuxDevice::default()
|
||||
},
|
||||
],
|
||||
resources: Some(LinuxResources {
|
||||
devices: vec![
|
||||
oci::LinuxDeviceCgroup {
|
||||
r#type: "c".to_string(),
|
||||
major: Some(host_major_a),
|
||||
minor: Some(host_minor_a),
|
||||
..oci::LinuxDeviceCgroup::default()
|
||||
},
|
||||
oci::LinuxDeviceCgroup {
|
||||
r#type: "c".to_string(),
|
||||
major: Some(host_major_b),
|
||||
minor: Some(host_minor_b),
|
||||
..oci::LinuxDeviceCgroup::default()
|
||||
},
|
||||
],
|
||||
..LinuxResources::default()
|
||||
}),
|
||||
..Linux::default()
|
||||
}),
|
||||
..Spec::default()
|
||||
};
|
||||
let devidx = DevIndex::new(&spec);
|
||||
|
||||
let dev_a = Device {
|
||||
container_path: "/dev/a".to_string(),
|
||||
vm_path: "/dev/zero".to_string(),
|
||||
..Device::default()
|
||||
};
|
||||
|
||||
let guest_major_a = stat::major(zero_rdev) as i64;
|
||||
let guest_minor_a = stat::minor(zero_rdev) as i64;
|
||||
|
||||
let dev_b = Device {
|
||||
container_path: "/dev/b".to_string(),
|
||||
vm_path: "/dev/full".to_string(),
|
||||
..Device::default()
|
||||
};
|
||||
|
||||
let guest_major_b = stat::major(full_rdev) as i64;
|
||||
let guest_minor_b = stat::minor(full_rdev) as i64;
|
||||
|
||||
let specdevices = &spec.linux.as_ref().unwrap().devices;
|
||||
assert_eq!(host_major_a, specdevices[0].major);
|
||||
assert_eq!(host_minor_a, specdevices[0].minor);
|
||||
assert_eq!(host_major_b, specdevices[1].major);
|
||||
assert_eq!(host_minor_b, specdevices[1].minor);
|
||||
|
||||
let specresources = spec.linux.as_ref().unwrap().resources.as_ref().unwrap();
|
||||
assert_eq!(Some(host_major_a), specresources.devices[0].major);
|
||||
assert_eq!(Some(host_minor_a), specresources.devices[0].minor);
|
||||
assert_eq!(Some(host_major_b), specresources.devices[1].major);
|
||||
assert_eq!(Some(host_minor_b), specresources.devices[1].minor);
|
||||
|
||||
let res = update_spec_device_list(&dev_a, &mut spec, &devidx);
|
||||
assert!(res.is_ok());
|
||||
|
||||
let specdevices = &spec.linux.as_ref().unwrap().devices;
|
||||
assert_eq!(guest_major_a, specdevices[0].major);
|
||||
assert_eq!(guest_minor_a, specdevices[0].minor);
|
||||
assert_eq!(host_major_b, specdevices[1].major);
|
||||
assert_eq!(host_minor_b, specdevices[1].minor);
|
||||
|
||||
let specresources = spec.linux.as_ref().unwrap().resources.as_ref().unwrap();
|
||||
assert_eq!(Some(guest_major_a), specresources.devices[0].major);
|
||||
assert_eq!(Some(guest_minor_a), specresources.devices[0].minor);
|
||||
assert_eq!(Some(host_major_b), specresources.devices[1].major);
|
||||
assert_eq!(Some(host_minor_b), specresources.devices[1].minor);
|
||||
|
||||
let res = update_spec_device_list(&dev_b, &mut spec, &devidx);
|
||||
assert!(res.is_ok());
|
||||
|
||||
let specdevices = &spec.linux.as_ref().unwrap().devices;
|
||||
assert_eq!(guest_major_a, specdevices[0].major);
|
||||
assert_eq!(guest_minor_a, specdevices[0].minor);
|
||||
assert_eq!(guest_major_b, specdevices[1].major);
|
||||
assert_eq!(guest_minor_b, specdevices[1].minor);
|
||||
|
||||
let specresources = spec.linux.as_ref().unwrap().resources.as_ref().unwrap();
|
||||
assert_eq!(Some(guest_major_a), specresources.devices[0].major);
|
||||
assert_eq!(Some(guest_minor_a), specresources.devices[0].minor);
|
||||
assert_eq!(Some(guest_major_b), specresources.devices[1].major);
|
||||
assert_eq!(Some(guest_minor_b), specresources.devices[1].minor);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_spec_device_list_char_block_conflict() {
|
||||
let null_rdev = fs::metadata("/dev/null").unwrap().rdev();
|
||||
|
||||
let guest_major = stat::major(null_rdev) as i64;
|
||||
let guest_minor = stat::minor(null_rdev) as i64;
|
||||
let host_major: i64 = 99;
|
||||
let host_minor: i64 = 99;
|
||||
|
||||
let mut spec = Spec {
|
||||
linux: Some(Linux {
|
||||
devices: vec![
|
||||
oci::LinuxDevice {
|
||||
path: "/dev/char".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: host_major,
|
||||
minor: host_minor,
|
||||
..oci::LinuxDevice::default()
|
||||
},
|
||||
oci::LinuxDevice {
|
||||
path: "/dev/block".to_string(),
|
||||
r#type: "b".to_string(),
|
||||
major: host_major,
|
||||
minor: host_minor,
|
||||
..oci::LinuxDevice::default()
|
||||
},
|
||||
],
|
||||
resources: Some(LinuxResources {
|
||||
devices: vec![
|
||||
LinuxDeviceCgroup {
|
||||
r#type: "c".to_string(),
|
||||
major: Some(host_major),
|
||||
minor: Some(host_minor),
|
||||
..LinuxDeviceCgroup::default()
|
||||
},
|
||||
LinuxDeviceCgroup {
|
||||
r#type: "b".to_string(),
|
||||
major: Some(host_major),
|
||||
minor: Some(host_minor),
|
||||
..LinuxDeviceCgroup::default()
|
||||
},
|
||||
],
|
||||
..LinuxResources::default()
|
||||
}),
|
||||
..Linux::default()
|
||||
}),
|
||||
..Spec::default()
|
||||
};
|
||||
let devidx = DevIndex::new(&spec);
|
||||
|
||||
let dev = Device {
|
||||
container_path: "/dev/char".to_string(),
|
||||
vm_path: "/dev/null".to_string(),
|
||||
..Device::default()
|
||||
};
|
||||
|
||||
let specresources = spec.linux.as_ref().unwrap().resources.as_ref().unwrap();
|
||||
assert_eq!(Some(host_major), specresources.devices[0].major);
|
||||
assert_eq!(Some(host_minor), specresources.devices[0].minor);
|
||||
assert_eq!(Some(host_major), specresources.devices[1].major);
|
||||
assert_eq!(Some(host_minor), specresources.devices[1].minor);
|
||||
|
||||
let res = update_spec_device_list(&dev, &mut spec, &devidx);
|
||||
assert!(res.is_ok());
|
||||
|
||||
// Only the char device, not the block device should be updated
|
||||
let specresources = spec.linux.as_ref().unwrap().resources.as_ref().unwrap();
|
||||
assert_eq!(Some(guest_major), specresources.devices[0].major);
|
||||
assert_eq!(Some(guest_minor), specresources.devices[0].minor);
|
||||
assert_eq!(Some(host_major), specresources.devices[1].major);
|
||||
assert_eq!(Some(host_minor), specresources.devices[1].minor);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,10 +5,8 @@
|
||||
|
||||
/// Linux ABI related constants.
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
use std::fs;
|
||||
|
||||
pub const SYSFS_DIR: &str = "/sys";
|
||||
|
||||
pub const SYSFS_PCI_BUS_PREFIX: &str = "/sys/bus/pci/devices";
|
||||
pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
|
||||
#[cfg(any(
|
||||
@@ -17,46 +15,9 @@ pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
|
||||
target_arch = "x86_64",
|
||||
target_arch = "x86"
|
||||
))]
|
||||
pub fn create_pci_root_bus_path() -> String {
|
||||
String::from("/devices/pci0000:00")
|
||||
}
|
||||
|
||||
pub const PCI_ROOT_BUS_PATH: &str = "/devices/pci0000:00";
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
pub fn create_pci_root_bus_path() -> String {
|
||||
let ret = String::from("/devices/platform/4010000000.pcie/pci0000:00");
|
||||
|
||||
let mut sysfs_dir = String::from(SYSFS_DIR);
|
||||
let mut start_root_bus_path = String::from("/devices/platform/");
|
||||
let end_root_bus_path = String::from("/pci0000:00");
|
||||
|
||||
sysfs_dir.push_str(&start_root_bus_path);
|
||||
let entries = match fs::read_dir(sysfs_dir) {
|
||||
Ok(e) => e,
|
||||
Err(_) => return ret,
|
||||
};
|
||||
for entry in entries {
|
||||
let pathname = match entry {
|
||||
Ok(p) => p.path(),
|
||||
Err(_) => return ret,
|
||||
};
|
||||
let dir_name = match pathname.file_name() {
|
||||
Some(p) => p.to_str(),
|
||||
None => return ret,
|
||||
};
|
||||
let dir_name = match dir_name {
|
||||
Some(p) => p,
|
||||
None => return ret,
|
||||
};
|
||||
let dir_name = String::from(dir_name);
|
||||
if dir_name.ends_with(".pcie") {
|
||||
start_root_bus_path.push_str(&dir_name);
|
||||
start_root_bus_path.push_str(&end_root_bus_path);
|
||||
return start_root_bus_path;
|
||||
}
|
||||
}
|
||||
|
||||
ret
|
||||
}
|
||||
pub const PCI_ROOT_BUS_PATH: &str = "/devices/platform/4010000000.pcie/pci0000:00";
|
||||
|
||||
pub const SYSFS_CPU_ONLINE_PATH: &str = "/sys/devices/system/cpu";
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ extern crate scopeguard;
|
||||
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
#[macro_use]
|
||||
extern crate netlink;
|
||||
|
||||
use crate::netlink::{RtnlHandle, NETLINK_ROUTE};
|
||||
@@ -128,6 +129,7 @@ fn main() -> Result<()> {
|
||||
|
||||
// support vsock log
|
||||
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
|
||||
let writer = unsafe { File::from_raw_fd(wfd) };
|
||||
|
||||
let agentConfig = AGENT_CONFIG.clone();
|
||||
|
||||
@@ -198,13 +200,6 @@ fn main() -> Result<()> {
|
||||
// which is required to satisfy the the lifetime constraints of the auto-generated gRPC code.
|
||||
let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "rpc")));
|
||||
|
||||
let mut _log_guard: Result<(), log::SetLoggerError> = Ok(());
|
||||
|
||||
if config.log_level == slog::Level::Trace {
|
||||
// Redirect ttrpc log calls to slog iff full debug requested
|
||||
_log_guard = Ok(slog_stdlog::init().map_err(|e| e)?);
|
||||
}
|
||||
|
||||
start_sandbox(&logger, &config, init_mode)?;
|
||||
|
||||
let _ = log_handle.join();
|
||||
@@ -253,8 +248,8 @@ fn start_sandbox(logger: &Logger, config: &agentConfig, init_mode: bool) -> Resu
|
||||
let (tx, rx) = mpsc::channel::<i32>();
|
||||
sandbox.lock().unwrap().sender = Some(tx);
|
||||
|
||||
// vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(sandbox, config.server_addr.as_str());
|
||||
//vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str());
|
||||
|
||||
let _ = server.start().unwrap();
|
||||
|
||||
@@ -279,6 +274,8 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
|
||||
|
||||
let signals = Signals::new(&[SIGCHLD])?;
|
||||
|
||||
let s = sandbox.clone();
|
||||
|
||||
thread::spawn(move || {
|
||||
'outer: for sig in signals.forever() {
|
||||
info!(logger, "received signal"; "signal" => sig);
|
||||
@@ -306,16 +303,15 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
|
||||
continue 'outer;
|
||||
}
|
||||
};
|
||||
info!(logger, "wait_status"; "wait_status result" => format!("{:?}", wait_status));
|
||||
|
||||
let pid = wait_status.pid();
|
||||
if let Some(pid) = pid {
|
||||
let raw_pid = pid.as_raw();
|
||||
if pid.is_some() {
|
||||
let raw_pid = pid.unwrap().as_raw();
|
||||
let child_pid = format!("{}", raw_pid);
|
||||
|
||||
let logger = logger.new(o!("child-pid" => child_pid));
|
||||
|
||||
let mut sandbox = sandbox.lock().unwrap();
|
||||
let mut sandbox = s.lock().unwrap();
|
||||
let process = sandbox.find_process(raw_pid);
|
||||
if process.is_none() {
|
||||
info!(logger, "child exited unexpectedly");
|
||||
@@ -343,13 +339,6 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
|
||||
|
||||
p.exit_code = ret;
|
||||
let _ = unistd::close(pipe_write);
|
||||
|
||||
if let Some(ref poller) = p.epoller {
|
||||
info!(logger, "close epoller");
|
||||
// close the socket file to notify readStdio to close terminal specifically
|
||||
// in case this process's terminal has been inherited by its children.
|
||||
poller.close_wfd()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -379,8 +368,7 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result
|
||||
|
||||
env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");
|
||||
|
||||
let contents =
|
||||
std::fs::read_to_string("/etc/hostname").unwrap_or_else(|_| String::from("localhost"));
|
||||
let contents = std::fs::read_to_string("/etc/hostname").unwrap_or(String::from("localhost"));
|
||||
let contents_array: Vec<&str> = contents.split(' ').collect();
|
||||
let hostname = contents_array[0].trim();
|
||||
|
||||
@@ -495,8 +483,8 @@ where
|
||||
|
||||
// write and return
|
||||
match writer.write_all(&buf[..buf_len]) {
|
||||
Ok(_) => Ok(buf_len as u64),
|
||||
Err(err) => Err(err),
|
||||
Ok(_) => return Ok(buf_len as u64),
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -526,12 +514,14 @@ fn run_debug_console_shell(logger: &Logger, shell: &str, socket_fd: RawFd) -> Re
|
||||
let args: Vec<&CStr> = vec![];
|
||||
|
||||
// run shell
|
||||
let _ = unistd::execvp(cmd.as_c_str(), args.as_slice()).map_err(|e| match e {
|
||||
nix::Error::Sys(errno) => {
|
||||
std::process::exit(errno as i32);
|
||||
if let Err(e) = unistd::execvp(cmd.as_c_str(), args.as_slice()) {
|
||||
match e {
|
||||
nix::Error::Sys(errno) => {
|
||||
std::process::exit(errno as i32);
|
||||
}
|
||||
_ => std::process::exit(-2),
|
||||
}
|
||||
_ => std::process::exit(-2),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ForkResult::Parent { child: child_pid }) => {
|
||||
@@ -648,6 +638,8 @@ fn run_debug_console_shell(logger: &Logger, shell: &str, socket_fd: RawFd) -> Re
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -8,6 +8,7 @@ extern crate procfs;
|
||||
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};
|
||||
|
||||
use anyhow::Result;
|
||||
use protocols;
|
||||
|
||||
const NAMESPACE_KATA_AGENT: &str = "kata_agent";
|
||||
const NAMESPACE_KATA_GUEST: &str = "kata_guest";
|
||||
@@ -84,15 +85,17 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
|
||||
let encoder = TextEncoder::new();
|
||||
encoder.encode(&metric_families, &mut buffer).unwrap();
|
||||
|
||||
Ok(String::from_utf8(buffer).unwrap())
|
||||
Ok(String::from_utf8(buffer.clone()).unwrap())
|
||||
}
|
||||
|
||||
fn update_agent_metrics() {
|
||||
let me = procfs::process::Process::myself();
|
||||
|
||||
if let Err(err) = me {
|
||||
error!(sl!(), "failed to create process instance: {:?}", err);
|
||||
return;
|
||||
match me {
|
||||
Err(err) => {
|
||||
error!(sl!(), "failed to create process instance: {:?}", err);
|
||||
return;
|
||||
}
|
||||
Ok(_) => {}
|
||||
}
|
||||
|
||||
let me = me.unwrap();
|
||||
|
||||
@@ -39,7 +39,7 @@ pub const DRIVERLOCALTYPE: &str = "local";
|
||||
|
||||
pub const TYPEROOTFS: &str = "rootfs";
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
lazy_static! {
|
||||
pub static ref FLAGS: HashMap<&'static str, (bool, MsFlags)> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -88,7 +88,7 @@ pub struct INIT_MOUNT {
|
||||
options: Vec<&'static str>,
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
lazy_static!{
|
||||
static ref CGROUPS: HashMap<&'static str, &'static str> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -109,7 +109,7 @@ lazy_static!{
|
||||
};
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
lazy_static! {
|
||||
pub static ref INIT_ROOTFS_MOUNTS: Vec<INIT_MOUNT> = vec![
|
||||
INIT_MOUNT{fstype: "proc", src: "proc", dest: "/proc", options: vec!["nosuid", "nodev", "noexec"]},
|
||||
@@ -125,8 +125,8 @@ lazy_static! {
|
||||
// type of storage driver.
|
||||
type StorageHandler = fn(&Logger, &Storage, Arc<Mutex<Sandbox>>) -> Result<String>;
|
||||
|
||||
// STORAGEHANDLERLIST lists the supported drivers.
|
||||
#[rustfmt::skip]
|
||||
// StorageHandlerList lists the supported drivers.
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
lazy_static! {
|
||||
pub static ref STORAGEHANDLERLIST: HashMap<&'static str, StorageHandler> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -173,9 +173,9 @@ impl<'a> BareMount<'a> {
|
||||
BareMount {
|
||||
source: s,
|
||||
destination: d,
|
||||
fs_type,
|
||||
flags,
|
||||
options,
|
||||
fs_type: fs_type,
|
||||
flags: flags,
|
||||
options: options,
|
||||
logger: logger.new(o!("subsystem" => "baremount")),
|
||||
}
|
||||
}
|
||||
@@ -190,11 +190,11 @@ impl<'a> BareMount<'a> {
|
||||
let cstr_dest: CString;
|
||||
let cstr_fs_type: CString;
|
||||
|
||||
if self.source.is_empty() {
|
||||
if self.source.len() == 0 {
|
||||
return Err(anyhow!("need mount source"));
|
||||
}
|
||||
|
||||
if self.destination.is_empty() {
|
||||
if self.destination.len() == 0 {
|
||||
return Err(anyhow!("need mount destination"));
|
||||
}
|
||||
|
||||
@@ -204,14 +204,14 @@ impl<'a> BareMount<'a> {
|
||||
cstr_dest = CString::new(self.destination)?;
|
||||
dest = cstr_dest.as_ptr();
|
||||
|
||||
if self.fs_type.is_empty() {
|
||||
if self.fs_type.len() == 0 {
|
||||
return Err(anyhow!("need mount FS type"));
|
||||
}
|
||||
|
||||
cstr_fs_type = CString::new(self.fs_type)?;
|
||||
fs_type = cstr_fs_type.as_ptr();
|
||||
|
||||
if !self.options.is_empty() {
|
||||
if self.options.len() > 0 {
|
||||
cstr_options = CString::new(self.options)?;
|
||||
options = cstr_options.as_ptr() as *const c_void;
|
||||
}
|
||||
@@ -243,14 +243,18 @@ fn ephemeral_storage_handler(
|
||||
storage: &Storage,
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
) -> Result<String> {
|
||||
let mut sb = sandbox.lock().unwrap();
|
||||
let s = sandbox.clone();
|
||||
let mut sb = s.lock().unwrap();
|
||||
let new_storage = sb.set_sandbox_storage(&storage.mount_point);
|
||||
|
||||
if !new_storage {
|
||||
return Ok("".to_string());
|
||||
}
|
||||
|
||||
fs::create_dir_all(Path::new(&storage.mount_point))?;
|
||||
if let Err(err) = fs::create_dir_all(Path::new(&storage.mount_point)) {
|
||||
return Err(err.into());
|
||||
}
|
||||
|
||||
common_storage_handler(logger, storage)?;
|
||||
|
||||
Ok("".to_string())
|
||||
@@ -261,7 +265,8 @@ fn local_storage_handler(
|
||||
storage: &Storage,
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
) -> Result<String> {
|
||||
let mut sb = sandbox.lock().unwrap();
|
||||
let s = sandbox.clone();
|
||||
let mut sb = s.lock().unwrap();
|
||||
let new_storage = sb.set_sandbox_storage(&storage.mount_point);
|
||||
|
||||
if !new_storage {
|
||||
@@ -277,7 +282,8 @@ fn local_storage_handler(
|
||||
|
||||
let opts = parse_options(opts_vec);
|
||||
let mode = opts.get("mode");
|
||||
if let Some(mode) = mode {
|
||||
if mode.is_some() {
|
||||
let mode = mode.unwrap();
|
||||
let mut permission = fs::metadata(&storage.mount_point)?.permissions();
|
||||
|
||||
let o_mode = u32::from_str_radix(mode, 8)?;
|
||||
@@ -407,17 +413,17 @@ fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (MsFlags, String) {
|
||||
let mut options: String = "".to_string();
|
||||
|
||||
for opt in options_vec {
|
||||
if !opt.is_empty() {
|
||||
if opt.len() != 0 {
|
||||
match FLAGS.get(opt) {
|
||||
Some(x) => {
|
||||
let (_, f) = *x;
|
||||
flags |= f;
|
||||
flags = flags | f;
|
||||
}
|
||||
None => {
|
||||
if !options.is_empty() {
|
||||
if options.len() > 0 {
|
||||
options.push_str(format!(",{}", opt).as_str());
|
||||
} else {
|
||||
options.push_str(opt.to_string().as_str());
|
||||
options.push_str(format!("{}", opt).as_str());
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -443,19 +449,23 @@ pub fn add_storages(
|
||||
"subsystem" => "storage",
|
||||
"storage-type" => handler_name.to_owned()));
|
||||
|
||||
let handler = STORAGEHANDLERLIST
|
||||
.get(&handler_name.as_str())
|
||||
.ok_or_else(|| {
|
||||
anyhow!(
|
||||
let handler = match STORAGEHANDLERLIST.get(&handler_name.as_str()) {
|
||||
None => {
|
||||
return Err(anyhow!(
|
||||
"Failed to find the storage handler {}",
|
||||
storage.driver.to_owned()
|
||||
)
|
||||
})?;
|
||||
));
|
||||
}
|
||||
Some(f) => f,
|
||||
};
|
||||
|
||||
// Todo need to rollback the mounted storage if err met.
|
||||
let mount_point = handler(&logger, &storage, sandbox.clone())?;
|
||||
let mount_point = match handler(&logger, &storage, sandbox.clone()) {
|
||||
// Todo need to rollback the mounted storage if err met.
|
||||
Err(e) => return Err(e),
|
||||
Ok(m) => m,
|
||||
};
|
||||
|
||||
if !mount_point.is_empty() {
|
||||
if mount_point.len() > 0 {
|
||||
mount_list.push(mount_point);
|
||||
}
|
||||
}
|
||||
@@ -472,18 +482,15 @@ fn mount_to_rootfs(logger: &Logger, m: &INIT_MOUNT) -> Result<()> {
|
||||
|
||||
fs::create_dir_all(Path::new(m.dest)).context("could not create directory")?;
|
||||
|
||||
bare_mount.mount().or_else(|e| {
|
||||
if let Err(err) = bare_mount.mount() {
|
||||
if m.src != "dev" {
|
||||
return Err(e);
|
||||
return Err(err.into());
|
||||
}
|
||||
|
||||
error!(
|
||||
logger,
|
||||
"Could not mount filesystem from {} to {}", m.src, m.dest
|
||||
);
|
||||
|
||||
Ok(())
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -503,7 +510,7 @@ pub fn get_mount_fs_type(mount_point: &str) -> Result<String> {
|
||||
get_mount_fs_type_from_file(PROC_MOUNTSTATS, mount_point)
|
||||
}
|
||||
|
||||
// get_mount_fs_type_from_file returns the FS type corresponding to the passed mount point and
|
||||
// get_mount_fs_type returns the FS type corresponding to the passed mount point and
|
||||
// any error ecountered.
|
||||
pub fn get_mount_fs_type_from_file(mount_file: &str, mount_point: &str) -> Result<String> {
|
||||
if mount_point == "" {
|
||||
@@ -567,10 +574,10 @@ pub fn get_cgroup_mounts(
|
||||
'outer: for (_, line) in reader.lines().enumerate() {
|
||||
let line = line?;
|
||||
|
||||
let fields: Vec<&str> = line.split('\t').collect();
|
||||
let fields: Vec<&str> = line.split("\t").collect();
|
||||
|
||||
// Ignore comment header
|
||||
if fields[0].starts_with('#') {
|
||||
if fields[0].starts_with("#") {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -636,25 +643,26 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
|
||||
|
||||
// Enable memory hierarchical account.
|
||||
// For more information see https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt
|
||||
online_device("/sys/fs/cgroup/memory/memory.use_hierarchy")?;
|
||||
online_device("/sys/fs/cgroup/memory//memory.use_hierarchy")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn remove_mounts(mounts: &[String]) -> Result<()> {
|
||||
pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
|
||||
for m in mounts.iter() {
|
||||
mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ensure_destination_exists will recursively create a given mountpoint. If directories
|
||||
// are created, their permissions are initialized to mountPerm(0755)
|
||||
// ensureDestinationExists will recursively create a given mountpoint. If directories
|
||||
// are created, their permissions are initialized to mountPerm
|
||||
fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
|
||||
let d = Path::new(destination);
|
||||
if !d.exists() {
|
||||
let dir = d
|
||||
.parent()
|
||||
.ok_or_else(|| anyhow!("mount destination {} doesn't exist", destination))?;
|
||||
let dir = match d.parent() {
|
||||
Some(d) => d,
|
||||
None => return Err(anyhow!("mount destination {} doesn't exist", destination)),
|
||||
};
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir).context(format!("create dir all failed on {:?}", dir))?;
|
||||
}
|
||||
@@ -672,7 +680,7 @@ fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
|
||||
fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
|
||||
let mut options = HashMap::new();
|
||||
for opt in option_list.iter() {
|
||||
let fields: Vec<&str> = opt.split('=').collect();
|
||||
let fields: Vec<&str> = opt.split("=").collect();
|
||||
if fields.len() != 2 {
|
||||
continue;
|
||||
}
|
||||
@@ -853,7 +861,7 @@ mod tests {
|
||||
|
||||
let msg = format!("{}: umount result: {:?}", msg, result);
|
||||
|
||||
assert!(ret == 0, msg);
|
||||
assert!(ret == 0, format!("{}", msg));
|
||||
};
|
||||
|
||||
continue;
|
||||
@@ -911,8 +919,7 @@ mod tests {
|
||||
.expect("failed to create mount destination filename");
|
||||
|
||||
for d in [test_dir_filename, mnt_src_filename, mnt_dest_filename].iter() {
|
||||
std::fs::create_dir_all(d)
|
||||
.unwrap_or_else(|_| panic!("failed to create directory {}", d));
|
||||
std::fs::create_dir_all(d).expect(&format!("failed to create directory {}", d));
|
||||
}
|
||||
|
||||
// Create an actual mount
|
||||
@@ -1053,13 +1060,13 @@ mod tests {
|
||||
|
||||
let filename = file_path
|
||||
.to_str()
|
||||
.unwrap_or_else(|| panic!("{}: failed to create filename", msg));
|
||||
.expect(&format!("{}: failed to create filename", msg));
|
||||
|
||||
let mut file =
|
||||
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
|
||||
File::create(filename).expect(&format!("{}: failed to create file", msg));
|
||||
|
||||
file.write_all(d.contents.as_bytes())
|
||||
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
|
||||
.expect(&format!("{}: failed to write file contents", msg));
|
||||
|
||||
let result = get_mount_fs_type_from_file(filename, d.mount_point);
|
||||
|
||||
@@ -1081,7 +1088,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_get_cgroup_v2_mounts() {
|
||||
let _ = tempdir().expect("failed to create tmpdir");
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
let drain = slog::Discard;
|
||||
let logger = slog::Logger::root(drain, o!());
|
||||
let result = get_cgroup_mounts(&logger, "", true);
|
||||
@@ -1215,10 +1222,10 @@ mod tests {
|
||||
.expect("failed to create cgroup file filename");
|
||||
|
||||
let mut file =
|
||||
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
|
||||
File::create(filename).expect(&format!("{}: failed to create file", msg));
|
||||
|
||||
file.write_all(d.contents.as_bytes())
|
||||
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
|
||||
.expect(&format!("{}: failed to write file contents", msg));
|
||||
|
||||
let result = get_cgroup_mounts(&logger, filename, false);
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
@@ -3,19 +3,20 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use nix::mount::MsFlags;
|
||||
use nix::sched::{unshare, CloneFlags};
|
||||
use nix::unistd::{getpid, gettid};
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::thread::{self};
|
||||
use std::thread;
|
||||
|
||||
use crate::mount::{BareMount, FLAGS};
|
||||
use slog::Logger;
|
||||
|
||||
//use container::Process;
|
||||
const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns";
|
||||
pub const NSTYPEIPC: &str = "ipc";
|
||||
pub const NSTYPEUTS: &str = "uts";
|
||||
@@ -51,12 +52,12 @@ impl Namespace {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_ipc(mut self) -> Self {
|
||||
pub fn as_ipc(mut self) -> Self {
|
||||
self.ns_type = NamespaceType::IPC;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn get_uts(mut self, hostname: &str) -> Self {
|
||||
pub fn as_uts(mut self, hostname: &str) -> Self {
|
||||
self.ns_type = NamespaceType::UTS;
|
||||
if hostname != "" {
|
||||
self.hostname = Some(String::from(hostname));
|
||||
@@ -64,7 +65,7 @@ impl Namespace {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn get_pid(mut self) -> Self {
|
||||
pub fn as_pid(mut self) -> Self {
|
||||
self.ns_type = NamespaceType::PID;
|
||||
self
|
||||
}
|
||||
@@ -74,37 +75,46 @@ impl Namespace {
|
||||
self
|
||||
}
|
||||
|
||||
// setup creates persistent namespace without switching to it.
|
||||
// setup_persistent_ns creates persistent namespace without switching to it.
|
||||
// Note, pid namespaces cannot be persisted.
|
||||
pub fn setup(mut self) -> Result<Self> {
|
||||
fs::create_dir_all(&self.persistent_ns_dir)?;
|
||||
pub fn setup(mut self) -> Result<Self, String> {
|
||||
if let Err(err) = fs::create_dir_all(&self.persistent_ns_dir) {
|
||||
return Err(err.to_string());
|
||||
}
|
||||
|
||||
let ns_path = PathBuf::from(&self.persistent_ns_dir);
|
||||
let ns_type = self.ns_type;
|
||||
if ns_type == NamespaceType::PID {
|
||||
return Err(anyhow!("Cannot persist namespace of PID type"));
|
||||
}
|
||||
let ns_type = self.ns_type.clone();
|
||||
let logger = self.logger.clone();
|
||||
|
||||
let new_ns_path = ns_path.join(&ns_type.get());
|
||||
|
||||
File::create(new_ns_path.as_path())?;
|
||||
if let Err(err) = File::create(new_ns_path.as_path()) {
|
||||
return Err(err.to_string());
|
||||
}
|
||||
|
||||
self.path = new_ns_path.clone().into_os_string().into_string().unwrap();
|
||||
let hostname = self.hostname.clone();
|
||||
|
||||
let new_thread = thread::spawn(move || -> Result<()> {
|
||||
let new_thread = thread::spawn(move || {
|
||||
let origin_ns_path = get_current_thread_ns_path(&ns_type.get());
|
||||
|
||||
File::open(Path::new(&origin_ns_path))?;
|
||||
let _origin_ns_fd = match File::open(Path::new(&origin_ns_path)) {
|
||||
Err(err) => return Err(err.to_string()),
|
||||
Ok(file) => file.as_raw_fd(),
|
||||
};
|
||||
|
||||
// Create a new netns on the current thread.
|
||||
let cf = ns_type.get_flags();
|
||||
let cf = ns_type.get_flags().clone();
|
||||
|
||||
unshare(cf)?;
|
||||
if let Err(err) = unshare(cf) {
|
||||
return Err(err.to_string());
|
||||
}
|
||||
|
||||
if ns_type == NamespaceType::UTS && hostname.is_some() {
|
||||
nix::unistd::sethostname(hostname.unwrap())?;
|
||||
match nix::unistd::sethostname(hostname.unwrap()) {
|
||||
Err(err) => return Err(err.to_string()),
|
||||
Ok(_) => (),
|
||||
}
|
||||
}
|
||||
// Bind mount the new namespace from the current thread onto the mount point to persist it.
|
||||
let source: &str = origin_ns_path.as_str();
|
||||
@@ -112,27 +122,32 @@ impl Namespace {
|
||||
|
||||
let mut flags = MsFlags::empty();
|
||||
|
||||
if let Some(x) = FLAGS.get("rbind") {
|
||||
let (_, f) = *x;
|
||||
flags |= f;
|
||||
match FLAGS.get("rbind") {
|
||||
Some(x) => {
|
||||
let (_, f) = *x;
|
||||
flags = flags | f;
|
||||
}
|
||||
None => (),
|
||||
};
|
||||
|
||||
let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
|
||||
bare_mount.mount().map_err(|e| {
|
||||
anyhow!(
|
||||
if let Err(err) = bare_mount.mount() {
|
||||
return Err(format!(
|
||||
"Failed to mount {} to {} with err:{:?}",
|
||||
source,
|
||||
destination,
|
||||
e
|
||||
)
|
||||
})?;
|
||||
source, destination, err
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
new_thread
|
||||
.join()
|
||||
.map_err(|e| anyhow!("Failed to join thread {:?}!", e))??;
|
||||
match new_thread.join() {
|
||||
Ok(t) => match t {
|
||||
Err(err) => return Err(err),
|
||||
Ok(()) => (),
|
||||
},
|
||||
Err(err) => return Err(format!("Failed to join thread {:?}!", err)),
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
@@ -193,34 +208,23 @@ mod tests {
|
||||
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
|
||||
|
||||
let ns_ipc = Namespace::new(&logger)
|
||||
.get_ipc()
|
||||
.as_ipc()
|
||||
.set_root_dir(tmpdir.path().to_str().unwrap())
|
||||
.setup();
|
||||
|
||||
assert!(ns_ipc.is_ok());
|
||||
assert!(remove_mounts(&[ns_ipc.unwrap().path]).is_ok());
|
||||
assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok());
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let tmpdir = Builder::new().prefix("uts").tempdir().unwrap();
|
||||
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
|
||||
|
||||
let ns_uts = Namespace::new(&logger)
|
||||
.get_uts("test_hostname")
|
||||
.as_uts("test_hostname")
|
||||
.set_root_dir(tmpdir.path().to_str().unwrap())
|
||||
.setup();
|
||||
|
||||
assert!(ns_uts.is_ok());
|
||||
assert!(remove_mounts(&[ns_uts.unwrap().path]).is_ok());
|
||||
|
||||
// Check it cannot persist pid namespaces.
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let tmpdir = Builder::new().prefix("pid").tempdir().unwrap();
|
||||
|
||||
let ns_pid = Namespace::new(&logger)
|
||||
.get_pid()
|
||||
.set_root_dir(tmpdir.path().to_str().unwrap())
|
||||
.setup();
|
||||
|
||||
assert!(ns_pid.is_err());
|
||||
assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -3,13 +3,15 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use nix::mount::{self, MsFlags};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use nix::mount::{self, MntFlags, MsFlags};
|
||||
use protocols::types::{Interface, Route};
|
||||
use slog::Logger;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
|
||||
use crate::Sandbox;
|
||||
|
||||
const KATA_GUEST_SANDBOX_DNS_FILE: &str = "/run/kata-containers/sandbox/resolv.conf";
|
||||
const GUEST_DNS_FILE: &str = "/etc/resolv.conf";
|
||||
|
||||
@@ -48,7 +50,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
|
||||
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
|
||||
let logger = logger.new(o!( "subsystem" => "network"));
|
||||
|
||||
if dns_list.is_empty() {
|
||||
if dns_list.len() == 0 {
|
||||
info!(
|
||||
logger,
|
||||
"Did not set sandbox DNS as DNS not received as part of request."
|
||||
@@ -117,12 +119,12 @@ mod tests {
|
||||
];
|
||||
|
||||
// write to /run/kata-containers/sandbox/resolv.conf
|
||||
let mut src_file = File::create(src_filename)
|
||||
.unwrap_or_else(|_| panic!("failed to create file {:?}", src_filename));
|
||||
let mut src_file =
|
||||
File::create(src_filename).expect(&format!("failed to create file {:?}", src_filename));
|
||||
let content = dns.join("\n");
|
||||
src_file
|
||||
.write_all(content.as_bytes())
|
||||
.expect("failed to write file contents");
|
||||
.expect(&format!("failed to write file contents"));
|
||||
|
||||
// call do_setup_guest_dns
|
||||
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
|
||||
|
||||
@@ -4,11 +4,11 @@
|
||||
//
|
||||
|
||||
use anyhow::Result;
|
||||
use libc;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::sys::stat::Mode;
|
||||
use std::fs;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd};
|
||||
|
||||
pub const RNGDEV: &str = "/dev/random";
|
||||
pub const RNDADDTOENTCNT: libc::c_int = 0x40045201;
|
||||
@@ -24,22 +24,18 @@ pub fn reseed_rng(data: &[u8]) -> Result<()> {
|
||||
let len = data.len() as libc::c_long;
|
||||
fs::write(RNGDEV, data)?;
|
||||
|
||||
let f = {
|
||||
let fd = fcntl::open(RNGDEV, OFlag::O_RDWR, Mode::from_bits_truncate(0o022))?;
|
||||
// Wrap fd with `File` to properly close descriptor on exit
|
||||
unsafe { fs::File::from_raw_fd(fd) }
|
||||
};
|
||||
let fd = fcntl::open(RNGDEV, OFlag::O_RDWR, Mode::from_bits_truncate(0o022))?;
|
||||
|
||||
let ret = unsafe {
|
||||
libc::ioctl(
|
||||
f.as_raw_fd(),
|
||||
fd,
|
||||
RNDADDTOENTCNT as IoctlRequestType,
|
||||
&len as *const libc::c_long,
|
||||
)
|
||||
};
|
||||
let _ = Errno::result(ret).map(drop)?;
|
||||
|
||||
let ret = unsafe { libc::ioctl(f.as_raw_fd(), RNDRESEEDRNG as IoctlRequestType, 0) };
|
||||
let ret = unsafe { libc::ioctl(fd, RNDRESEEDRNG as IoctlRequestType, 0) };
|
||||
let _ = Errno::result(ret).map(drop)?;
|
||||
|
||||
Ok(())
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,11 +3,14 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
//use crate::container::Container;
|
||||
use crate::linux_abi::*;
|
||||
use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS};
|
||||
use crate::namespace::Namespace;
|
||||
use crate::namespace::NSTYPEPID;
|
||||
use crate::network::Network;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use cgroups;
|
||||
use libc::pid_t;
|
||||
use netlink::{RtnlHandle, NETLINK_ROUTE};
|
||||
use oci::{Hook, Hooks};
|
||||
@@ -74,7 +77,7 @@ impl Sandbox {
|
||||
sender: None,
|
||||
rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
|
||||
hooks: None,
|
||||
event_rx,
|
||||
event_rx: event_rx,
|
||||
event_tx: tx,
|
||||
})
|
||||
}
|
||||
@@ -111,14 +114,14 @@ impl Sandbox {
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
|
||||
match self.storages.get_mut(path) {
|
||||
None => Err(anyhow!("Sandbox storage with path {} not found", path)),
|
||||
None => return Err(anyhow!("Sandbox storage with path {} not found", path)),
|
||||
Some(count) => {
|
||||
*count -= 1;
|
||||
if *count < 1 {
|
||||
self.storages.remove(path);
|
||||
return Ok(true);
|
||||
}
|
||||
Ok(false)
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -142,10 +145,16 @@ impl Sandbox {
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
|
||||
if self.unset_sandbox_storage(path)? {
|
||||
return self.remove_sandbox_storage(path);
|
||||
match self.unset_sandbox_storage(path) {
|
||||
Ok(res) => {
|
||||
if res {
|
||||
return self.remove_sandbox_storage(path);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -159,17 +168,23 @@ impl Sandbox {
|
||||
|
||||
pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
|
||||
// Set up shared IPC namespace
|
||||
self.shared_ipcns = Namespace::new(&self.logger)
|
||||
.get_ipc()
|
||||
.setup()
|
||||
.context("Failed to setup persistent IPC namespace")?;
|
||||
self.shared_ipcns = match Namespace::new(&self.logger).as_ipc().setup() {
|
||||
Ok(ns) => ns,
|
||||
Err(err) => {
|
||||
return Err(anyhow!(err).context("Failed to setup persistent IPC namespace"));
|
||||
}
|
||||
};
|
||||
|
||||
// // Set up shared UTS namespace
|
||||
self.shared_utsns = Namespace::new(&self.logger)
|
||||
.get_uts(self.hostname.as_str())
|
||||
self.shared_utsns = match Namespace::new(&self.logger)
|
||||
.as_uts(self.hostname.as_str())
|
||||
.setup()
|
||||
.context("Failed to setup persistent UTS namespace")?;
|
||||
|
||||
{
|
||||
Ok(ns) => ns,
|
||||
Err(err) => {
|
||||
return Err(anyhow!(err).context("Failed to setup persistent UTS namespace"));
|
||||
}
|
||||
};
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
@@ -183,7 +198,7 @@ impl Sandbox {
|
||||
// This means a separate pause process has not been created. We treat the
|
||||
// first container created as the infra container in that case
|
||||
// and use its pid namespace in case pid namespace needs to be shared.
|
||||
if self.sandbox_pidns.is_none() && self.containers.is_empty() {
|
||||
if self.sandbox_pidns.is_none() && self.containers.len() == 0 {
|
||||
let init_pid = c.init_process_pid;
|
||||
if init_pid == -1 {
|
||||
return Err(anyhow!(
|
||||
@@ -191,7 +206,7 @@ impl Sandbox {
|
||||
));
|
||||
}
|
||||
|
||||
let mut pid_ns = Namespace::new(&self.logger).get_pid();
|
||||
let mut pid_ns = Namespace::new(&self.logger).as_pid();
|
||||
pid_ns.path = format!("/proc/{}/ns/pid", init_pid);
|
||||
|
||||
self.sandbox_pidns = Some(pid_ns);
|
||||
@@ -215,7 +230,7 @@ impl Sandbox {
|
||||
}
|
||||
|
||||
pub fn destroy(&mut self) -> Result<()> {
|
||||
for ctr in self.containers.values_mut() {
|
||||
for (_, ctr) in &mut self.containers {
|
||||
ctr.destroy()?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -232,33 +247,14 @@ impl Sandbox {
|
||||
online_memory(&self.logger)?;
|
||||
}
|
||||
|
||||
if req.nb_cpus == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let guest_cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
|
||||
let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
|
||||
|
||||
for (_, ctr) in self.containers.iter() {
|
||||
let cpu = ctr
|
||||
.config
|
||||
.spec
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.linux
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.resources
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.cpu
|
||||
.as_ref();
|
||||
let container_cpust = if let Some(c) = cpu { &c.cpus } else { "" };
|
||||
|
||||
info!(self.logger, "updating {}", ctr.id.as_str());
|
||||
ctr.cgroup_manager
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.update_cpuset_path(guest_cpuset.as_str(), &container_cpust)?;
|
||||
.update_cpuset_path(cpuset.as_str())?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -322,9 +318,10 @@ impl Sandbox {
|
||||
thread::spawn(move || {
|
||||
for event in rx {
|
||||
info!(logger, "got an OOM event {:?}", event);
|
||||
let _ = tx
|
||||
.send(container_id.clone())
|
||||
.map_err(|e| error!(logger, "failed to send message: {:?}", e));
|
||||
match tx.send(container_id.clone()) {
|
||||
Err(err) => error!(logger, "failed to send message: {:?}", err),
|
||||
Ok(_) => {}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -350,7 +347,7 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
|
||||
}
|
||||
let c = c.unwrap();
|
||||
|
||||
if c.trim().contains('0') {
|
||||
if c.trim().contains("0") {
|
||||
let r = fs::write(file.as_str(), "1");
|
||||
if r.is_err() {
|
||||
continue;
|
||||
@@ -411,6 +408,7 @@ fn online_memory(logger: &Logger) -> Result<()> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
//use rustjail::Error;
|
||||
use super::Sandbox;
|
||||
use crate::{mount::BareMount, skip_if_not_root};
|
||||
use anyhow::Error;
|
||||
@@ -629,8 +627,8 @@ mod tests {
|
||||
|
||||
let linux = Linux::default();
|
||||
let mut spec = Spec::default();
|
||||
spec.root = Some(root);
|
||||
spec.linux = Some(linux);
|
||||
spec.root = Some(root).into();
|
||||
spec.linux = Some(linux).into();
|
||||
|
||||
CreateOpts {
|
||||
cgroup_name: "".to_string(),
|
||||
@@ -724,31 +722,4 @@ mod tests {
|
||||
assert!(s.hooks.as_ref().unwrap().poststart.is_empty());
|
||||
assert!(s.hooks.as_ref().unwrap().poststop.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_sandbox_is_running() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
s.running = true;
|
||||
assert!(s.is_running());
|
||||
s.running = false;
|
||||
assert!(!s.is_running());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sandbox_set_hostname() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
let hostname = "abc123";
|
||||
s.set_hostname(hostname.to_string());
|
||||
assert_eq!(s.hostname, hostname);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sandbox_set_destroy() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
let ret = s.destroy();
|
||||
assert!(ret.is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
#![allow(clippy::module_inception)]
|
||||
|
||||
#[cfg(test)]
|
||||
mod test_utils {
|
||||
|
||||
@@ -48,16 +48,13 @@ impl Uevent {
|
||||
|
||||
// Check whether this is a block device hot-add event.
|
||||
fn is_block_add_event(&self) -> bool {
|
||||
let pci_root_bus_path = create_pci_root_bus_path();
|
||||
self.action == U_EVENT_ACTION_ADD
|
||||
&& self.subsystem == "block"
|
||||
&& self.devpath.starts_with(&pci_root_bus_path)
|
||||
&& self.devpath.starts_with(PCI_ROOT_BUS_PATH)
|
||||
&& self.devname != ""
|
||||
}
|
||||
|
||||
fn handle_block_add_event(&self, sandbox: &Arc<Mutex<Sandbox>>) {
|
||||
let pci_root_bus_path = create_pci_root_bus_path();
|
||||
|
||||
// Keep the same lock order as device::get_device_name(), otherwise it may cause deadlock.
|
||||
let mut w = GLOBAL_DEVICE_WATCHER.lock().unwrap();
|
||||
let mut sb = sandbox.lock().unwrap();
|
||||
@@ -72,7 +69,7 @@ impl Uevent {
|
||||
let empties: Vec<_> = w
|
||||
.iter()
|
||||
.filter(|(dev_addr, _)| {
|
||||
let pci_p = format!("{}/{}", pci_root_bus_path, *dev_addr);
|
||||
let pci_p = format!("{}/{}", PCI_ROOT_BUS_PATH, *dev_addr);
|
||||
|
||||
// blk block device
|
||||
devpath.starts_with(pci_p.as_str()) ||
|
||||
@@ -102,14 +99,14 @@ impl Uevent {
|
||||
let online_path = format!("{}/{}/online", SYSFS_DIR, &self.devpath);
|
||||
// It's a memory hot-add event.
|
||||
if online_path.starts_with(SYSFS_MEMORY_ONLINE_PATH) {
|
||||
let _ = online_device(online_path.as_ref()).map_err(|e| {
|
||||
if let Err(e) = online_device(online_path.as_ref()) {
|
||||
error!(
|
||||
*logger,
|
||||
"failed to online device";
|
||||
"device" => &self.devpath,
|
||||
"error" => format!("{}", e),
|
||||
)
|
||||
});
|
||||
);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ NETMON_TARGET = $(PROJECT_TYPE)-netmon
|
||||
NETMON_TARGET_OUTPUT = $(CURDIR)/$(NETMON_TARGET)
|
||||
BINLIBEXECLIST += $(NETMON_TARGET)
|
||||
|
||||
DESTDIR ?= /
|
||||
DESTDIR := /
|
||||
|
||||
ifeq ($(PREFIX),)
|
||||
PREFIX := /usr
|
||||
@@ -83,6 +83,7 @@ QEMUBINDIR := $(PREFIXDEPS)/bin
|
||||
CLHBINDIR := $(PREFIXDEPS)/bin
|
||||
FCBINDIR := $(PREFIXDEPS)/bin
|
||||
ACRNBINDIR := $(PREFIXDEPS)/bin
|
||||
VIRTIOFSDBINDIR := $(PREFIXDEPS)/bin
|
||||
SYSCONFDIR := /etc
|
||||
LOCALSTATEDIR := /var
|
||||
|
||||
@@ -93,17 +94,7 @@ DEFAULTSDIR := $(SHAREDIR)/defaults
|
||||
COLLECT_SCRIPT = data/kata-collect-data.sh
|
||||
COLLECT_SCRIPT_SRC = $(COLLECT_SCRIPT).in
|
||||
|
||||
# @RUNTIME_NAME@ should be replaced with the target in generated files
|
||||
RUNTIME_NAME = $(TARGET)
|
||||
|
||||
GENERATED_FILES += $(COLLECT_SCRIPT)
|
||||
GENERATED_VARS = \
|
||||
VERSION \
|
||||
CONFIG_ACRN_IN \
|
||||
CONFIG_QEMU_IN \
|
||||
CONFIG_CLH_IN \
|
||||
CONFIG_FC_IN \
|
||||
$(USER_VARS)
|
||||
SCRIPTS += $(COLLECT_SCRIPT)
|
||||
SCRIPTS_DIR := $(BINDIR)
|
||||
|
||||
@@ -129,31 +120,25 @@ HYPERVISOR_FC = firecracker
|
||||
JAILER_FC = jailer
|
||||
HYPERVISOR_QEMU = qemu
|
||||
HYPERVISOR_CLH = cloud-hypervisor
|
||||
HYPERVISOR_QEMU_VIRTIOFS = qemu-virtiofs
|
||||
|
||||
# Determines which hypervisor is specified in $(CONFIG_FILE).
|
||||
DEFAULT_HYPERVISOR ?= $(HYPERVISOR_QEMU)
|
||||
|
||||
# List of hypervisors this build system can generate configuration for.
|
||||
HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVISOR_CLH)
|
||||
HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVISOR_QEMU_VIRTIOFS) $(HYPERVISOR_CLH)
|
||||
|
||||
QEMUPATH := $(QEMUBINDIR)/$(QEMUCMD)
|
||||
QEMUVALIDHYPERVISORPATHS := [\"$(QEMUPATH)\"]
|
||||
|
||||
QEMUVIRTIOFSPATH := $(QEMUBINDIR)/$(QEMUVIRTIOFSCMD)
|
||||
QEMUVALIDVIRTIOFSPATHS := [\"$(QEMUVIRTIOFSPATH)\"]
|
||||
|
||||
CLHPATH := $(CLHBINDIR)/$(CLHCMD)
|
||||
CLHVALIDHYPERVISORPATHS := [\"$(CLHPATH)\"]
|
||||
|
||||
FCPATH = $(FCBINDIR)/$(FCCMD)
|
||||
FCVALIDHYPERVISORPATHS := [\"$(FCPATH)\"]
|
||||
FCJAILERPATH = $(FCBINDIR)/$(FCJAILERCMD)
|
||||
FCVALIDJAILERPATHS = [\"$(FCJAILERPATH)\"]
|
||||
|
||||
ACRNPATH := $(ACRNBINDIR)/$(ACRNCMD)
|
||||
ACRNVALIDHYPERVISORPATHS := [\"$(ACRNPATH)\"]
|
||||
ACRNCTLPATH := $(ACRNBINDIR)/$(ACRNCTLCMD)
|
||||
ACRNVALIDCTLPATHS := [\"$(ACRNCTLPATH)\"]
|
||||
|
||||
SHIMCMD := $(BIN_PREFIX)-shim
|
||||
SHIMPATH := $(PKGLIBEXECDIR)/$(SHIMCMD)
|
||||
@@ -176,7 +161,6 @@ DEFMEMSZ := 2048
|
||||
DEFMEMSLOTS := 10
|
||||
#Default number of bridges
|
||||
DEFBRIDGES := 1
|
||||
DEFENABLEANNOTATIONS := []
|
||||
DEFDISABLEGUESTSECCOMP := true
|
||||
#Default experimental features enabled
|
||||
DEFAULTEXPFEATURES := []
|
||||
@@ -187,26 +171,21 @@ DEFENTROPYSOURCE := /dev/urandom
|
||||
DEFDISABLEBLOCK := false
|
||||
DEFSHAREDFS := virtio-9p
|
||||
DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs
|
||||
DEFVIRTIOFSDAEMON := $(LIBEXECDIR)/kata-qemu/virtiofsd
|
||||
DEFVALIDVIRTIOFSDAEMONPATHS := [\"$(DEFVIRTIOFSDAEMON)\"]
|
||||
DEFVIRTIOFSDAEMON := $(VIRTIOFSDBINDIR)/virtiofsd
|
||||
# Default DAX mapping cache size in MiB
|
||||
#if value is 0, DAX is not enabled
|
||||
DEFVIRTIOFSCACHESIZE := 0
|
||||
DEFVIRTIOFSCACHESIZE := 1024
|
||||
DEFVIRTIOFSCACHE ?= auto
|
||||
# Format example:
|
||||
# [\"-o\", \"arg1=xxx,arg2\", \"-o\", \"hello world\", \"--arg3=yyy\"]
|
||||
#
|
||||
# see `virtiofsd -h` for possible options.
|
||||
# Make sure you quote args.
|
||||
DEFVIRTIOFSEXTRAARGS ?= [\"--thread-pool-size=1\"]
|
||||
DEFVIRTIOFSEXTRAARGS ?= []
|
||||
DEFENABLEIOTHREADS := false
|
||||
DEFENABLEMEMPREALLOC := false
|
||||
DEFENABLEHUGEPAGES := false
|
||||
DEFENABLEVHOSTUSERSTORE := false
|
||||
DEFVHOSTUSERSTOREPATH := $(PKGRUNDIR)/vhost-user
|
||||
DEFVALIDVHOSTUSERSTOREPATHS := [\"$(DEFVHOSTUSERSTOREPATH)\"]
|
||||
DEFFILEMEMBACKEND := ""
|
||||
DEFVALIDFILEMEMBACKENDS := [\"$(DEFFILEMEMBACKEND)\"]
|
||||
DEFENABLESWAP := false
|
||||
DEFENABLEDEBUG := false
|
||||
DEFDISABLENESTINGCHECKS := false
|
||||
@@ -266,6 +245,28 @@ ifneq (,$(QEMUCMD))
|
||||
KERNELPATH = $(KERNELDIR)/$(KERNELNAME)
|
||||
endif
|
||||
|
||||
ifneq (,$(QEMUVIRTIOFSCMD))
|
||||
KNOWN_HYPERVISORS += $(HYPERVISOR_QEMU_VIRTIOFS)
|
||||
|
||||
CONFIG_FILE_QEMU_VIRTIOFS = configuration-qemu-virtiofs.toml
|
||||
CONFIG_QEMU_VIRTIOFS = $(CLI_DIR)/config/$(CONFIG_FILE_QEMU_VIRTIOFS)
|
||||
CONFIG_QEMU_VIRTIOFS_IN = $(CONFIG_QEMU_VIRTIOFS).in
|
||||
|
||||
CONFIG_PATH_QEMU_VIRTIOFS = $(abspath $(CONFDIR)/$(CONFIG_FILE_QEMU_VIRTIOFS))
|
||||
CONFIG_PATHS += $(CONFIG_PATH_QEMU_VIRTIOFS)
|
||||
|
||||
SYSCONFIG_QEMU_VIRTIOFS = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_QEMU_VIRTIOFS))
|
||||
SYSCONFIG_PATHS += $(SYSCONFIG_QEMU_VIRTIOFS)
|
||||
|
||||
CONFIGS += $(CONFIG_QEMU_VIRTIOFS)
|
||||
|
||||
# qemu-specific options (all should be suffixed by "_QEMU")
|
||||
DEFBLOCKSTORAGEDRIVER_QEMU_VIRTIOFS := virtio-fs
|
||||
DEFNETWORKMODEL_QEMU := tcfilter
|
||||
KERNELNAMEVIRTIOFS = $(call MAKE_KERNEL_VIRTIOFS_NAME,$(KERNELTYPE))
|
||||
KERNELVIRTIOFSPATH = $(KERNELDIR)/$(KERNELNAMEVIRTIOFS)
|
||||
endif
|
||||
|
||||
ifneq (,$(CLHCMD))
|
||||
KNOWN_HYPERVISORS += $(HYPERVISOR_CLH)
|
||||
|
||||
@@ -383,28 +384,16 @@ SHAREDIR := $(SHAREDIR)
|
||||
# list of variables the user may wish to override
|
||||
USER_VARS += ARCH
|
||||
USER_VARS += BINDIR
|
||||
USER_VARS += CONFIG_ACRN_IN
|
||||
USER_VARS += CONFIG_CLH_IN
|
||||
USER_VARS += CONFIG_FC_IN
|
||||
USER_VARS += CONFIG_PATH
|
||||
USER_VARS += CONFIG_QEMU_IN
|
||||
USER_VARS += DESTDIR
|
||||
USER_VARS += DEFAULT_HYPERVISOR
|
||||
USER_VARS += DEFENABLEMSWAP
|
||||
USER_VARS += ACRNCMD
|
||||
USER_VARS += ACRNCTLCMD
|
||||
USER_VARS += ACRNPATH
|
||||
USER_VARS += ACRNVALIDHYPERVISORPATHS
|
||||
USER_VARS += ACRNCTLPATH
|
||||
USER_VARS += ACRNVALIDCTLPATHS
|
||||
USER_VARS += CLHPATH
|
||||
USER_VARS += CLHVALIDHYPERVISORPATHS
|
||||
USER_VARS += FIRMWAREPATH_CLH
|
||||
USER_VARS += FCCMD
|
||||
USER_VARS += FCPATH
|
||||
USER_VARS += FCVALIDHYPERVISORPATHS
|
||||
USER_VARS += FCJAILERPATH
|
||||
USER_VARS += FCVALIDJAILERPATHS
|
||||
USER_VARS += SYSCONFIG
|
||||
USER_VARS += IMAGENAME
|
||||
USER_VARS += IMAGEPATH
|
||||
@@ -416,11 +405,6 @@ USER_VARS += KERNELTYPE
|
||||
USER_VARS += KERNELTYPE_FC
|
||||
USER_VARS += KERNELTYPE_ACRN
|
||||
USER_VARS += KERNELTYPE_CLH
|
||||
USER_VARS += KERNELPATH_ACRN
|
||||
USER_VARS += KERNELPATH
|
||||
USER_VARS += KERNELPATH_CLH
|
||||
USER_VARS += KERNELPATH_FC
|
||||
USER_VARS += KERNELVIRTIOFSPATH
|
||||
USER_VARS += FIRMWAREPATH
|
||||
USER_VARS += MACHINEACCELERATORS
|
||||
USER_VARS += CPUFEATURES
|
||||
@@ -433,22 +417,15 @@ USER_VARS += PKGLIBDIR
|
||||
USER_VARS += PKGLIBEXECDIR
|
||||
USER_VARS += PKGRUNDIR
|
||||
USER_VARS += PREFIX
|
||||
USER_VARS += PROJECT_BUG_URL
|
||||
USER_VARS += PROJECT_NAME
|
||||
USER_VARS += PROJECT_ORG
|
||||
USER_VARS += PROJECT_PREFIX
|
||||
USER_VARS += PROJECT_TAG
|
||||
USER_VARS += PROJECT_TYPE
|
||||
USER_VARS += PROJECT_URL
|
||||
USER_VARS += NETMONPATH
|
||||
USER_VARS += QEMUBINDIR
|
||||
USER_VARS += QEMUCMD
|
||||
USER_VARS += QEMUPATH
|
||||
USER_VARS += QEMUVALIDHYPERVISORPATHS
|
||||
USER_VARS += QEMUVIRTIOFSCMD
|
||||
USER_VARS += QEMUVIRTIOFSPATH
|
||||
USER_VARS += QEMUVALIDVIRTIOFSPATHS
|
||||
USER_VARS += RUNTIME_NAME
|
||||
USER_VARS += SHAREDIR
|
||||
USER_VARS += SHIMPATH
|
||||
USER_VARS += SYSCONFDIR
|
||||
@@ -459,7 +436,6 @@ USER_VARS += DEFMEMSZ
|
||||
USER_VARS += DEFMEMSLOTS
|
||||
USER_VARS += DEFBRIDGES
|
||||
USER_VARS += DEFNETWORKMODEL_ACRN
|
||||
USER_VARS += DEFNETWORKMODEL_CLH
|
||||
USER_VARS += DEFNETWORKMODEL_FC
|
||||
USER_VARS += DEFNETWORKMODEL_QEMU
|
||||
USER_VARS += DEFDISABLEGUESTSECCOMP
|
||||
@@ -468,22 +444,18 @@ USER_VARS += DEFDISABLEBLOCK
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_ACRN
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_FC
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU
|
||||
USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU_VIRTIOFS
|
||||
USER_VARS += DEFSHAREDFS
|
||||
USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS
|
||||
USER_VARS += DEFVIRTIOFSDAEMON
|
||||
USER_VARS += DEFVALIDVIRTIOFSDAEMONPATHS
|
||||
USER_VARS += DEFVIRTIOFSCACHESIZE
|
||||
USER_VARS += DEFVIRTIOFSCACHE
|
||||
USER_VARS += DEFVIRTIOFSEXTRAARGS
|
||||
USER_VARS += DEFENABLEANNOTATIONS
|
||||
USER_VARS += DEFENABLEIOTHREADS
|
||||
USER_VARS += DEFENABLEMEMPREALLOC
|
||||
USER_VARS += DEFENABLEHUGEPAGES
|
||||
USER_VARS += DEFENABLEVHOSTUSERSTORE
|
||||
USER_VARS += DEFVHOSTUSERSTOREPATH
|
||||
USER_VARS += DEFVALIDVHOSTUSERSTOREPATHS
|
||||
USER_VARS += DEFFILEMEMBACKEND
|
||||
USER_VARS += DEFVALIDFILEMEMBACKENDS
|
||||
USER_VARS += DEFENABLESWAP
|
||||
USER_VARS += DEFENABLEDEBUG
|
||||
USER_VARS += DEFDISABLENESTINGCHECKS
|
||||
@@ -604,9 +576,8 @@ $(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
|
||||
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && ln -fs $(GENERATED_CONFIG))
|
||||
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
|
||||
|
||||
$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
|
||||
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && go build \
|
||||
--ldflags "-X main.GitCommit=$(shell cat .git-commit)" -o $@ .)
|
||||
$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
|
||||
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
|
||||
|
||||
.PHONY: \
|
||||
check \
|
||||
@@ -626,7 +597,84 @@ GENERATED_FILES += $(CONFIGS)
|
||||
$(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit
|
||||
$(QUIET_GENERATE)$(SED) \
|
||||
-e "s|@COMMIT@|$(shell cat .git-commit)|g" \
|
||||
$(foreach v,$(GENERATED_VARS),-e "s|@$v@|$($v)|g") \
|
||||
-e "s|@VERSION@|$(VERSION)|g" \
|
||||
-e "s|@CONFIG_ACRN_IN@|$(CONFIG_ACRN_IN)|g" \
|
||||
-e "s|@CONFIG_QEMU_IN@|$(CONFIG_QEMU_IN)|g" \
|
||||
-e "s|@CONFIG_QEMU_VIRTIOFS_IN@|$(CONFIG_QEMU_VIRTIOFS_IN)|g" \
|
||||
-e "s|@CONFIG_CLH_IN@|$(CONFIG_CLH_IN)|g" \
|
||||
-e "s|@CONFIG_FC_IN@|$(CONFIG_FC_IN)|g" \
|
||||
-e "s|@CONFIG_PATH@|$(CONFIG_PATH)|g" \
|
||||
-e "s|@FCPATH@|$(FCPATH)|g" \
|
||||
-e "s|@FCJAILERPATH@|$(FCJAILERPATH)|g" \
|
||||
-e "s|@ACRNPATH@|$(ACRNPATH)|g" \
|
||||
-e "s|@ACRNCTLPATH@|$(ACRNCTLPATH)|g" \
|
||||
-e "s|@CLHPATH@|$(CLHPATH)|g" \
|
||||
-e "s|@SYSCONFIG@|$(SYSCONFIG)|g" \
|
||||
-e "s|@IMAGEPATH@|$(IMAGEPATH)|g" \
|
||||
-e "s|@KERNELPATH_ACRN@|$(KERNELPATH_ACRN)|g" \
|
||||
-e "s|@KERNELPATH_FC@|$(KERNELPATH_FC)|g" \
|
||||
-e "s|@KERNELPATH_CLH@|$(KERNELPATH_CLH)|g" \
|
||||
-e "s|@KERNELPATH@|$(KERNELPATH)|g" \
|
||||
-e "s|@KERNELVIRTIOFSPATH@|$(KERNELVIRTIOFSPATH)|g" \
|
||||
-e "s|@INITRDPATH@|$(INITRDPATH)|g" \
|
||||
-e "s|@FIRMWAREPATH@|$(FIRMWAREPATH)|g" \
|
||||
-e "s|@MACHINEACCELERATORS@|$(MACHINEACCELERATORS)|g" \
|
||||
-e "s|@CPUFEATURES@|$(CPUFEATURES)|g" \
|
||||
-e "s|@FIRMWAREPATH_CLH@|$(FIRMWAREPATH_CLH)|g" \
|
||||
-e "s|@DEFMACHINETYPE_CLH@|$(DEFMACHINETYPE_CLH)|g" \
|
||||
-e "s|@KERNELPARAMS@|$(KERNELPARAMS)|g" \
|
||||
-e "s|@LOCALSTATEDIR@|$(LOCALSTATEDIR)|g" \
|
||||
-e "s|@PKGLIBEXECDIR@|$(PKGLIBEXECDIR)|g" \
|
||||
-e "s|@PKGRUNDIR@|$(PKGRUNDIR)|g" \
|
||||
-e "s|@NETMONPATH@|$(NETMONPATH)|g" \
|
||||
-e "s|@PROJECT_BUG_URL@|$(PROJECT_BUG_URL)|g" \
|
||||
-e "s|@PROJECT_ORG@|$(PROJECT_ORG)|g" \
|
||||
-e "s|@PROJECT_URL@|$(PROJECT_URL)|g" \
|
||||
-e "s|@PROJECT_NAME@|$(PROJECT_NAME)|g" \
|
||||
-e "s|@PROJECT_TAG@|$(PROJECT_TAG)|g" \
|
||||
-e "s|@PROJECT_TYPE@|$(PROJECT_TYPE)|g" \
|
||||
-e "s|@QEMUPATH@|$(QEMUPATH)|g" \
|
||||
-e "s|@QEMUVIRTIOFSPATH@|$(QEMUVIRTIOFSPATH)|g" \
|
||||
-e "s|@RUNTIME_NAME@|$(TARGET)|g" \
|
||||
-e "s|@MACHINETYPE@|$(MACHINETYPE)|g" \
|
||||
-e "s|@SHIMPATH@|$(SHIMPATH)|g" \
|
||||
-e "s|@DEFVCPUS@|$(DEFVCPUS)|g" \
|
||||
-e "s|@DEFMAXVCPUS@|$(DEFMAXVCPUS)|g" \
|
||||
-e "s|@DEFMAXVCPUS_ACRN@|$(DEFMAXVCPUS_ACRN)|g" \
|
||||
-e "s|@DEFMEMSZ@|$(DEFMEMSZ)|g" \
|
||||
-e "s|@DEFMEMSLOTS@|$(DEFMEMSLOTS)|g" \
|
||||
-e "s|@DEFBRIDGES@|$(DEFBRIDGES)|g" \
|
||||
-e "s|@DEFNETWORKMODEL_ACRN@|$(DEFNETWORKMODEL_ACRN)|g" \
|
||||
-e "s|@DEFNETWORKMODEL_CLH@|$(DEFNETWORKMODEL_CLH)|g" \
|
||||
-e "s|@DEFNETWORKMODEL_FC@|$(DEFNETWORKMODEL_FC)|g" \
|
||||
-e "s|@DEFNETWORKMODEL_QEMU@|$(DEFNETWORKMODEL_QEMU)|g" \
|
||||
-e "s|@DEFDISABLEGUESTSECCOMP@|$(DEFDISABLEGUESTSECCOMP)|g" \
|
||||
-e "s|@DEFAULTEXPFEATURES@|$(DEFAULTEXPFEATURES)|g" \
|
||||
-e "s|@DEFDISABLEBLOCK@|$(DEFDISABLEBLOCK)|g" \
|
||||
-e "s|@DEFBLOCKSTORAGEDRIVER_ACRN@|$(DEFBLOCKSTORAGEDRIVER_ACRN)|g" \
|
||||
-e "s|@DEFBLOCKSTORAGEDRIVER_FC@|$(DEFBLOCKSTORAGEDRIVER_FC)|g" \
|
||||
-e "s|@DEFBLOCKSTORAGEDRIVER_QEMU@|$(DEFBLOCKSTORAGEDRIVER_QEMU)|g" \
|
||||
-e "s|@DEFBLOCKSTORAGEDRIVER_QEMU_VIRTIOFS@|$(DEFBLOCKSTORAGEDRIVER_QEMU_VIRTIOFS)|g" \
|
||||
-e "s|@DEFSHAREDFS@|$(DEFSHAREDFS)|g" \
|
||||
-e "s|@DEFSHAREDFS_QEMU_VIRTIOFS@|$(DEFSHAREDFS_QEMU_VIRTIOFS)|g" \
|
||||
-e "s|@DEFVIRTIOFSDAEMON@|$(DEFVIRTIOFSDAEMON)|g" \
|
||||
-e "s|@DEFVIRTIOFSCACHESIZE@|$(DEFVIRTIOFSCACHESIZE)|g" \
|
||||
-e "s|@DEFVIRTIOFSCACHE@|$(DEFVIRTIOFSCACHE)|g" \
|
||||
-e "s|@DEFVIRTIOFSEXTRAARGS@|$(DEFVIRTIOFSEXTRAARGS)|g" \
|
||||
-e "s|@DEFENABLEIOTHREADS@|$(DEFENABLEIOTHREADS)|g" \
|
||||
-e "s|@DEFENABLEMEMPREALLOC@|$(DEFENABLEMEMPREALLOC)|g" \
|
||||
-e "s|@DEFENABLEHUGEPAGES@|$(DEFENABLEHUGEPAGES)|g" \
|
||||
-e "s|@DEFENABLEVHOSTUSERSTORE@|$(DEFENABLEVHOSTUSERSTORE)|g" \
|
||||
-e "s|@DEFVHOSTUSERSTOREPATH@|$(DEFVHOSTUSERSTOREPATH)|g" \
|
||||
-e "s|@DEFENABLEMSWAP@|$(DEFENABLESWAP)|g" \
|
||||
-e "s|@DEFENABLEDEBUG@|$(DEFENABLEDEBUG)|g" \
|
||||
-e "s|@DEFDISABLENESTINGCHECKS@|$(DEFDISABLENESTINGCHECKS)|g" \
|
||||
-e "s|@DEFMSIZE9P@|$(DEFMSIZE9P)|g" \
|
||||
-e "s|@DEFHOTPLUGVFIOONROOTBUS@|$(DEFHOTPLUGVFIOONROOTBUS)|g" \
|
||||
-e "s|@DEFPCIEROOTPORT@|$(DEFPCIEROOTPORT)|g" \
|
||||
-e "s|@DEFENTROPYSOURCE@|$(DEFENTROPYSOURCE)|g" \
|
||||
-e "s|@DEFSANDBOXCGROUPONLY@|$(DEFSANDBOXCGROUPONLY)|g" \
|
||||
-e "s|@FEATURE_SELINUX@|$(FEATURE_SELINUX)|g" \
|
||||
$< > $@
|
||||
|
||||
generate-config: $(CONFIGS)
|
||||
|
||||
@@ -19,8 +19,6 @@ For details of the other Kata Containers repositories, see the
|
||||
* [Quick start for developers](#quick-start-for-developers)
|
||||
* [Architecture overview](#architecture-overview)
|
||||
* [Configuration](#configuration)
|
||||
* [Hypervisor specific configuration](#hypervisor-specific-configuration)
|
||||
* [Stateless systems](#stateless-systems)
|
||||
* [Logging](#logging)
|
||||
* [Kata OCI](#kata-oci)
|
||||
* [Kata containerd shimv2](#kata-containerd-shimv2)
|
||||
@@ -67,7 +65,7 @@ The runtime has a built-in command to determine if your host system is capable
|
||||
of running and creating a Kata Container:
|
||||
|
||||
```bash
|
||||
$ kata-runtime check
|
||||
$ kata-runtime kata-check
|
||||
```
|
||||
|
||||
> **Note:**
|
||||
@@ -108,15 +106,6 @@ The file contains comments explaining all options.
|
||||
> You may need to modify this file to optimise or tailor your system, or if you have
|
||||
> specific requirements.
|
||||
|
||||
### Hypervisor specific configuration
|
||||
|
||||
Kata Containers supports multiple hypervisors so your `configuration.toml`
|
||||
configuration file may be a symbolic link to a hypervisor-specific
|
||||
configuration file. See
|
||||
[the hypervisors document](../../docs/hypervisors.md) for further details.
|
||||
|
||||
### Stateless systems
|
||||
|
||||
Since the runtime supports a
|
||||
[stateless system](https://clearlinux.org/about),
|
||||
it checks for this configuration file in multiple locations, two of which are
|
||||
@@ -146,7 +135,7 @@ To see details of your systems runtime environment (including the location of
|
||||
the configuration file being used), run:
|
||||
|
||||
```bash
|
||||
$ kata-runtime env
|
||||
$ kata-runtime kata-env
|
||||
```
|
||||
|
||||
## Logging
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
2.0.0
|
||||
1
src/runtime/VERSION
Symbolic link
1
src/runtime/VERSION
Symbolic link
@@ -0,0 +1 @@
|
||||
../../VERSION
|
||||
@@ -36,6 +36,10 @@ var commit = "@COMMIT@"
|
||||
// version is the runtime version.
|
||||
var version = "@VERSION@"
|
||||
|
||||
// project-specific command names
|
||||
var envCmd = fmt.Sprintf("%s-env", projectPrefix)
|
||||
var checkCmd = fmt.Sprintf("%s-check", projectPrefix)
|
||||
|
||||
// project-specific option names
|
||||
var configFilePathOption = fmt.Sprintf("%s-config", projectPrefix)
|
||||
var showConfigPathsOption = fmt.Sprintf("%s-show-default-config-paths", projectPrefix)
|
||||
|
||||
@@ -16,22 +16,6 @@ ctlpath = "@ACRNCTLPATH@"
|
||||
kernel = "@KERNELPATH_ACRN@"
|
||||
image = "@IMAGEPATH@"
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
|
||||
enable_annotations = @DEFENABLEANNOTATIONS@
|
||||
|
||||
# List of valid annotations values for the hypervisor
|
||||
# Each member of the list is a path pattern as described by glob(3).
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @ACRNVALIDHYPERVISORPATHS@
|
||||
valid_hypervisor_paths = @ACRNVALIDHYPERVISORPATHS@
|
||||
|
||||
# List of valid annotations values for ctlpath
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @ACRNVALIDCTLPATHS@
|
||||
valid_ctlpaths = @ACRNVALIDCTLPATHS@
|
||||
|
||||
# Optional space-separated list of options to pass to the guest kernel.
|
||||
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
|
||||
# trouble running pre-2.15 glibc.
|
||||
@@ -235,4 +219,4 @@ experimental=@DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
# EnablePprof = true
|
||||
|
||||
@@ -15,17 +15,6 @@ path = "@CLHPATH@"
|
||||
kernel = "@KERNELPATH_CLH@"
|
||||
image = "@IMAGEPATH@"
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
|
||||
enable_annotations = @DEFENABLEANNOTATIONS@
|
||||
|
||||
# List of valid annotations values for the hypervisor
|
||||
# Each member of the list is a path pattern as described by glob(3).
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @CLHVALIDHYPERVISORPATHS@
|
||||
valid_hypervisor_paths = @CLHVALIDHYPERVISORPATHS@
|
||||
|
||||
# Optional space-separated list of options to pass to the guest kernel.
|
||||
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
|
||||
# trouble running pre-2.15 glibc.
|
||||
@@ -73,11 +62,6 @@ default_memory = @DEFMEMSZ@
|
||||
# Path to vhost-user-fs daemon.
|
||||
virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
|
||||
|
||||
# List of valid annotations values for the virtiofs daemon
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@
|
||||
valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@
|
||||
|
||||
# Default size of DAX cache in MiB
|
||||
virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
|
||||
|
||||
@@ -234,4 +218,4 @@ experimental=@DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
# EnablePprof = true
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user