Mirror of https://github.com/kata-containers/kata-containers.git, synced 2026-02-28 17:52:07 +00:00

Compare commits (140 commits):

8b9607a742, 1c62bd1240, 8d185e1972, 80e891664f, 3d33250eac, afe4df0449, f859f8af22, 657d75524d, 7d96f22b13, 2f67e831e3,
4f9b5fafcc, 974e0e3b31, 91e12404f3, 02aaab2213, 165988a394, 9d49a69fd0, cab530cb1f, 8d16767bec, 01b2bbc1c8, c60951f51e,
c750ce13af, 0704641c03, 04dcbd4d21, f1c633806d, ee202408f8, 1ced542251, 64a144f439, aad549fe34, 16e358b31b, f96a4684af,
a8137eef51, 364290e0a8, 351a01bd7e, ef11ce13ea, ea3f9b22a2, 86ad7e486c, 624ff41318, 6bb3f44100, 4d4aba2e64, 5f4f8ff337,
f0d6316004, 4e868ad981, a24ff2b51c, 1c70ef544f, e5df408f64, 985b9fa479, 6d5e47bab1, 514af3624b, a6e3fb6514, 55bdd1fcf4,
6586f3b725, f5adc4c114, a67bdc369a, 67be55834d, abfff68de6, 0466ee04da, 6b223194a9, fb01d51573, 144be14547, 017c7cf249,
52c6b0737c, e7bdeb49b9, c0ca9f9a90, 81f389903a, 179a98d678, e3efcfd40f, 5a92333f4b, ec0424e153, b26e94ffba, f6f4023508,
814e7d7285, 92d1197f10, a2484d0088, 9e2cbe8ea1, fc676f76de, ac9f838e33, 9ea851ee53, 2c1b957642, dfe5ef36b4, 8a374af6b7,
50aa89fa05, 57aa746d0d, ce2798b688, b7208b3c6c, 7e4dc08b0e, a649d33a45, c628ecf298, d87076eea5, 2dd859bfce, 4c9af982e6,
06f964843a, c27c3c40dd, 476467115f, 73645d1742, c7db337f10, 72af86f686, 95b2cad095, 506f4f2adc, a3e35e7e92, fdf69ab84c,
56b94e200c, 0533bee222, 2114576be5, bcd8fd538d, 6fe3f331c9, 3f3a2533a3, fc72d392b7, ef4ebfba48, 336b80626c, dd3c5fc617,
93bd2e4716, 7eb882a797, a60cf37879, ca6438728d, 32feb10331, 3c618a61d6, 7c888b34be, 234d53b6df, cf81d400d8, 79ed33adb5,
f1cea9a022, 4f802cc993, dda4279a2b, 5888971e18, ca28ca422c, 50ad323a21, f8314bedb0, 99d9a24a51, 0091b89184, 9da2707202,
2a0ff0bec3, fa581d334f, a3967e9a59, 272d39bc87, 7a86c2eedd, 5096bd6a11, 3fe59a99ff, 61fa4a3c75, 856af1a886, 74b587431f
.github/workflows/kata-deploy-test.yaml (vendored, 26 changed lines)

@@ -1,7 +1,12 @@
-on: issue_comment
+on:
+  issue_comment:
+    types: [created, edited]
+
 name: test-kata-deploy
+
 jobs:
   check_comments:
+    if: ${{ github.event.issue.pull_request }}
     runs-on: ubuntu-latest
     steps:
     - name: Check for Command
@@ -9,7 +14,7 @@ jobs:
       uses: kata-containers/slash-command-action@v1
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
-        command: "test-kata-deploy"
+        command: "test_kata_deploy"
         reaction: "true"
         reaction-type: "eyes"
         allow-edits: "false"
@@ -17,6 +22,7 @@ jobs:
    - name: verify command arg is kata-deploy
      run: |
        echo "The command was '${{ steps.command.outputs.command-name }}' with arguments '${{ steps.command.outputs.command-arguments }}'"
+
  create-and-test-container:
    needs: check_comments
    runs-on: ubuntu-latest
@@ -27,22 +33,26 @@ jobs:
          ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
          echo "reference for PR: " ${ref}
          echo "##[set-output name=pr-ref;]${ref}"
-     - uses: actions/checkout@v2-beta
+
+     - name: check out
+       uses: actions/checkout@v2
       with:
-        ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
+        ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
+
      - name: build-container-image
        id: build-container-image
        run: |
          PR_SHA=$(git log --format=format:%H -n1)
-         VERSION=$(curl https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/VERSION)
+         VERSION="2.0.0"
          ARTIFACT_URL="https://github.com/kata-containers/kata-containers/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
-         wget "${ARTIFACT_URL}" -O ./kata-deploy/kata-static.tar.xz
-         docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} ./kata-deploy
+         wget "${ARTIFACT_URL}" -O tools/packaging/kata-deploy/kata-static.tar.xz
+         docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
          docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
          docker push katadocker/kata-deploy-ci:$PR_SHA
          echo "##[set-output name=pr-sha;]${PR_SHA}"
+
      - name: test-kata-deploy-ci-in-aks
-       uses: ./kata-deploy/action
+       uses: ./tools/packaging/kata-deploy/action
       with:
         packaging-sha: ${{ steps.build-container-image.outputs.pr-sha }}
       env:
.github/workflows/main.yaml (vendored, 55 changed lines)

@@ -103,59 +103,6 @@ jobs:
           name: kata-artifacts
           path: kata-static-qemu.tar.gz

-  build-nemu:
-    runs-on: ubuntu-16.04
-    needs: get-artifact-list
-    env:
-      buildstr: "install_nemu"
-    steps:
-      - uses: actions/checkout@v1
-      - name: get-artifact-list
-        uses: actions/download-artifact@master
-        with:
-          name: artifact-list
-      - name: build-nemu
-        run: |
-          if grep -q $buildstr ./artifact-list/artifact-list.txt; then
-            $GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
-            echo "artifact-built=true" >> $GITHUB_ENV
-          else
-            echo "artifact-built=false" >> $GITHUB_ENV
-          fi
-      - name: store-artifacts
-        if: env.artifact-built == 'true'
-        uses: actions/upload-artifact@master
-        with:
-          name: kata-artifacts
-          path: kata-static-nemu.tar.gz
-
-  # Job for building the QEMU binaries with virtiofs support
-  build-qemu-virtiofsd:
-    runs-on: ubuntu-16.04
-    needs: get-artifact-list
-    env:
-      buildstr: "install_qemu_virtiofsd"
-    steps:
-      - uses: actions/checkout@v1
-      - name: get-artifact-list
-        uses: actions/download-artifact@master
-        with:
-          name: artifact-list
-      - name: build-qemu-virtiofsd
-        run: |
-          if grep -q $buildstr ./artifact-list/artifact-list.txt; then
-            $GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
-            echo "artifact-built=true" >> $GITHUB_ENV
-          else
-            echo "artifact-built=false" >> $GITHUB_ENV
-          fi
-      - name: store-artifacts
-        if: env.artifact-built == 'true'
-        uses: actions/upload-artifact@master
-        with:
-          name: kata-artifacts
-          path: kata-static-qemu-virtiofsd.tar.gz
-
   # Job for building the image
   build-image:
     runs-on: ubuntu-16.04
@@ -266,7 +213,7 @@ jobs:

   gather-artifacts:
     runs-on: ubuntu-16.04
-    needs: [build-experimental-kernel, build-kernel, build-qemu, build-qemu-virtiofsd, build-image, build-firecracker, build-kata-components, build-nemu, build-clh]
+    needs: [build-experimental-kernel, build-kernel, build-qemu, build-image, build-firecracker, build-kata-components, build-clh]
     steps:
       - uses: actions/checkout@v1
       - name: get-artifacts
.github/workflows/release.yaml (vendored, 28 changed lines)

@@ -104,32 +104,6 @@ jobs:
           name: kata-artifacts
           path: kata-static-qemu.tar.gz

-  build-qemu-virtiofsd:
-    runs-on: ubuntu-16.04
-    needs: get-artifact-list
-    env:
-      buildstr: "install_qemu_virtiofsd"
-    steps:
-      - uses: actions/checkout@v2
-      - name: get-artifact-list
-        uses: actions/download-artifact@v2
-        with:
-          name: artifact-list
-      - name: build-qemu-virtiofsd
-        run: |
-          if grep -q $buildstr artifact-list.txt; then
-            $GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
-            echo "artifact-built=true" >> $GITHUB_ENV
-          else
-            echo "artifact-built=false" >> $GITHUB_ENV
-          fi
-      - name: store-artifacts
-        if: env.artifact-built == 'true'
-        uses: actions/upload-artifact@v2
-        with:
-          name: kata-artifacts
-          path: kata-static-qemu-virtiofsd.tar.gz
-
   build-image:
     runs-on: ubuntu-16.04
     needs: get-artifact-list
@@ -237,7 +211,7 @@ jobs:

   gather-artifacts:
     runs-on: ubuntu-16.04
-    needs: [build-experimental-kernel, build-kernel, build-qemu, build-qemu-virtiofsd, build-image, build-firecracker, build-kata-components, build-clh]
+    needs: [build-experimental-kernel, build-kernel, build-qemu, build-image, build-firecracker, build-kata-components, build-clh]
     steps:
       - uses: actions/checkout@v2
       - name: get-artifacts
.github/workflows/snap-release.yaml (vendored, 4 changed lines)

@@ -21,8 +21,8 @@ jobs:
          kata_url="https://github.com/kata-containers/kata-containers"
          latest_version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r -u | head -1)
          current_version="$(echo ${GITHUB_REF} | cut -d/ -f3)"
-         # Check if the current tag is the latest tag
-         if echo -e "$latest_version\n$current_version" | sort -C -V; then
+         # Check semantic versioning format (x.y.z) and if the current tag is the latest tag
+         if echo "${current_version}" | grep -q "^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+$" && echo -e "$latest_version\n$current_version" | sort -C -V; then
            # Current version is the latest version, build it
            snapcraft -d snap --destructive-mode
          fi
.github/workflows/snap.yaml (vendored, 12 changed lines)

@@ -1,15 +1,5 @@
 name: snap CI
-on:
-  pull_request:
-    paths:
-      - "**/Makefile"
-      - "**/*.go"
-      - "**/*.mk"
-      - "**/*.rs"
-      - "**/*.sh"
-      - "**/*.toml"
-      - "**/*.yaml"
-      - "**/*.yml"
+on: ["pull_request"]
 jobs:
   test:
     runs-on: ubuntu-20.04
.github/workflows/static-checks.yaml (vendored, new file, 66 lines)

@@ -0,0 +1,66 @@
+on: ["pull_request"]
+name: Static checks
+jobs:
+  test:
+    strategy:
+      matrix:
+        go-version: [1.13.x, 1.14.x, 1.15.x]
+        os: [ubuntu-20.04]
+    runs-on: ${{ matrix.os }}
+    env:
+      TRAVIS: "true"
+      TRAVIS_BRANCH: ${{ github.base_ref }}
+      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
+      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
+      RUST_BACKTRACE: "1"
+      target_branch: ${TRAVIS_BRANCH}
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ matrix.go-version }}
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Setup GOPATH
+        run: |
+          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
+          echo "TRAVIS: ${TRAVIS}"
+      - name: Set env
+        run: |
+          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
+          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+      - name: Checkout code
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          path: ./src/github.com/${{ github.repository }}
+      - name: Setup travis references
+        run: |
+          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
+          target_branch=${TRAVIS_BRANCH}
+      - name: Setup
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Building rust
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
+          PATH=$PATH:"$HOME/.cargo/bin"
+          rustup target add x86_64-unknown-linux-musl
+          rustup component add rustfmt clippy
+      # Must build before static checks as we depend on some generated code in runtime and agent
+      - name: Build
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make
+      - name: Static Checks
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/static-checks.sh
+      - name: Run Compiler Checks
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make check
+      - name: Run Unit Tests
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make test
.gitignore (vendored, 1 changed line)

@@ -3,6 +3,7 @@
 **/*.rej
 **/target
 **/.vscode
+pkg/logging/Cargo.lock
 src/agent/src/version.rs
 src/agent/kata-agent.service
 src/agent/protocols/src/*.rs
.travis.yml (62 deleted lines)

@@ -1,62 +0,0 @@
-# Copyright (c) 2019 Ant Financial
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-dist: bionic
-os: linux
-
-# set cache directories manually, because
-# we are using a non-standard directory struct
-# cargo root is in srs/agent
-#
-# If needed, caches can be cleared
-# by ways documented in
-# https://docs.travis-ci.com/user/caching#clearing-caches
-language: rust
-rust:
-  - 1.44.1
-cache:
-  cargo: true
-  directories:
-    - src/agent/target
-
-before_install:
-  - git remote set-branches --add origin "${TRAVIS_BRANCH}"
-  - git fetch
-  - export RUST_BACKTRACE=1
-  - export target_branch=$TRAVIS_BRANCH
-  - "ci/setup.sh"
-
-# we use install to run check agent
-# so that it is easy to skip for non-amd64 platform
-install:
-  - export PATH=$PATH:"$HOME/.cargo/bin"
-  - export RUST_AGENT=yes
-  - rustup target add x86_64-unknown-linux-musl
-  - sudo ln -sf /usr/bin/g++ /bin/musl-g++
-  - rustup component add rustfmt
-  - make -C ${TRAVIS_BUILD_DIR}/src/agent
-  - make -C ${TRAVIS_BUILD_DIR}/src/agent check
-  - sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
-
-before_script:
-  - "ci/install_go.sh"
-  - make -C ${TRAVIS_BUILD_DIR}/src/runtime
-  - make -C ${TRAVIS_BUILD_DIR}/src/runtime test
-  - sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
-
-script:
-  - "ci/static-checks.sh"
-
-jobs:
-  include:
-    - name: x86_64 test
-      os: linux
-    - name: ppc64le test
-      os: linux-ppc64le
-      install: skip
-      script: skip
-  allow_failures:
-    - name: ppc64le test
-  fast_finish: true
@@ -5,7 +5,7 @@

 export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
 export tests_repo_dir="$GOPATH/src/$tests_repo"
-export branch="${branch:-2.0-dev}"
+export branch="${branch:-$TRAVIS_BRANCH}"

 clone_tests_repo()
 {
@@ -1,55 +1,54 @@
-* [Warning](#warning)
-* [Assumptions](#assumptions)
-* [Initial setup](#initial-setup)
-* [Requirements to build individual components](#requirements-to-build-individual-components)
-* [Build and install the Kata Containers runtime](#build-and-install-the-kata-containers-runtime)
-* [Check hardware requirements](#check-hardware-requirements)
-* [Configure to use initrd or rootfs image](#configure-to-use-initrd-or-rootfs-image)
-* [Enable full debug](#enable-full-debug)
-* [debug logs and shimv2](#debug-logs-and-shimv2)
-* [Enabling full `containerd` debug](#enabling-full-containerd-debug)
-* [Enabling just `containerd shim` debug](#enabling-just-containerd-shim-debug)
-* [Enabling `CRI-O` and `shimv2` debug](#enabling-cri-o-and-shimv2-debug)
-* [journald rate limiting](#journald-rate-limiting)
-* [`systemd-journald` suppressing messages](#systemd-journald-suppressing-messages)
-* [Disabling `systemd-journald` rate limiting](#disabling-systemd-journald-rate-limiting)
-* [Create and install rootfs and initrd image](#create-and-install-rootfs-and-initrd-image)
-* [Build a custom Kata agent - OPTIONAL](#build-a-custom-kata-agent---optional)
-* [Get the osbuilder](#get-the-osbuilder)
-* [Create a rootfs image](#create-a-rootfs-image)
-* [Create a local rootfs](#create-a-local-rootfs)
-* [Add a custom agent to the image - OPTIONAL](#add-a-custom-agent-to-the-image---optional)
-* [Build a rootfs image](#build-a-rootfs-image)
-* [Install the rootfs image](#install-the-rootfs-image)
-* [Create an initrd image - OPTIONAL](#create-an-initrd-image---optional)
-* [Create a local rootfs for initrd image](#create-a-local-rootfs-for-initrd-image)
-* [Build an initrd image](#build-an-initrd-image)
-* [Install the initrd image](#install-the-initrd-image)
-* [Install guest kernel images](#install-guest-kernel-images)
-* [Install a hypervisor](#install-a-hypervisor)
-* [Build a custom QEMU](#build-a-custom-qemu)
-* [Build a custom QEMU for aarch64/arm64 - REQUIRED](#build-a-custom-qemu-for-aarch64arm64---required)
-* [Run Kata Containers with Containerd](#run-kata-containers-with-containerd)
-* [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
-* [Troubleshoot Kata Containers](#troubleshoot-kata-containers)
-* [Appendices](#appendices)
-* [Checking Docker default runtime](#checking-docker-default-runtime)
-* [Set up a debug console](#set-up-a-debug-console)
-* [Simple debug console setup](#simple-debug-console-setup)
-* [Enable agent debug console](#enable-agent-debug-console)
-* [Start `kata-monitor`](#start-kata-monitor)
-* [Connect to debug console](#connect-to-debug-console)
-* [Traditional debug console setup](#traditional-debug-console-setup)
-* [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
-* [Build the debug image](#build-the-debug-image)
-* [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
-* [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
-* [Enabling debug console for QEMU](#enabling-debug-console-for-qemu)
-* [Enabling debug console for cloud-hypervisor / firecracker](#enabling-debug-console-for-cloud-hypervisor--firecracker)
-* [Create a container](#create-a-container)
-* [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
-* [Obtain details of the image](#obtain-details-of-the-image)
-* [Capturing kernel boot logs](#capturing-kernel-boot-logs)
+- [Warning](#warning)
+- [Assumptions](#assumptions)
+- [Initial setup](#initial-setup)
+- [Requirements to build individual components](#requirements-to-build-individual-components)
+- [Build and install the Kata Containers runtime](#build-and-install-the-kata-containers-runtime)
+- [Check hardware requirements](#check-hardware-requirements)
+- [Configure to use initrd or rootfs image](#configure-to-use-initrd-or-rootfs-image)
+- [Enable full debug](#enable-full-debug)
+- [debug logs and shimv2](#debug-logs-and-shimv2)
+- [Enabling full `containerd` debug](#enabling-full-containerd-debug)
+- [Enabling just `containerd shim` debug](#enabling-just-containerd-shim-debug)
+- [Enabling `CRI-O` and `shimv2` debug](#enabling-cri-o-and-shimv2-debug)
+- [journald rate limiting](#journald-rate-limiting)
+- [`systemd-journald` suppressing messages](#systemd-journald-suppressing-messages)
+- [Disabling `systemd-journald` rate limiting](#disabling-systemd-journald-rate-limiting)
+- [Create and install rootfs and initrd image](#create-and-install-rootfs-and-initrd-image)
+- [Build a custom Kata agent - OPTIONAL](#build-a-custom-kata-agent---optional)
+- [Get the osbuilder](#get-the-osbuilder)
+- [Create a rootfs image](#create-a-rootfs-image)
+- [Create a local rootfs](#create-a-local-rootfs)
+- [Add a custom agent to the image - OPTIONAL](#add-a-custom-agent-to-the-image---optional)
+- [Build a rootfs image](#build-a-rootfs-image)
+- [Install the rootfs image](#install-the-rootfs-image)
+- [Create an initrd image - OPTIONAL](#create-an-initrd-image---optional)
+- [Create a local rootfs for initrd image](#create-a-local-rootfs-for-initrd-image)
+- [Build an initrd image](#build-an-initrd-image)
+- [Install the initrd image](#install-the-initrd-image)
+- [Install guest kernel images](#install-guest-kernel-images)
+- [Install a hypervisor](#install-a-hypervisor)
+- [Build a custom QEMU](#build-a-custom-qemu)
+- [Build a custom QEMU for aarch64/arm64 - REQUIRED](#build-a-custom-qemu-for-aarch64arm64---required)
+- [Run Kata Containers with Containerd](#run-kata-containers-with-containerd)
+- [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
+- [Troubleshoot Kata Containers](#troubleshoot-kata-containers)
+- [Appendices](#appendices)
+- [Checking Docker default runtime](#checking-docker-default-runtime)
+- [Set up a debug console](#set-up-a-debug-console)
+- [Simple debug console setup](#simple-debug-console-setup)
+- [Enable agent debug console](#enable-agent-debug-console)
+- [Connect to debug console](#connect-to-debug-console)
+- [Traditional debug console setup](#traditional-debug-console-setup)
+- [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
+- [Build the debug image](#build-the-debug-image)
+- [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
+- [Create a container](#create-a-container)
+- [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
+- [Enabling debug console for QEMU](#enabling-debug-console-for-qemu)
+- [Enabling debug console for cloud-hypervisor / firecracker](#enabling-debug-console-for-cloud-hypervisor--firecracker)
+- [Connecting to the debug console](#connecting-to-the-debug-console)
+- [Obtain details of the image](#obtain-details-of-the-image)
+- [Capturing kernel boot logs](#capturing-kernel-boot-logs)

 # Warning

@@ -382,22 +381,19 @@ You can build and install the guest kernel image as shown [here](../tools/packag

 # Install a hypervisor

-When setting up Kata using a [packaged installation method](install/README.md#installing-on-a-linux-system), the `qemu-lite` hypervisor is installed automatically. For other installation methods, you will need to manually install a suitable hypervisor.
+When setting up Kata using a [packaged installation method](install/README.md#installing-on-a-linux-system), the
+`QEMU` VMM is installed automatically. Cloud-Hypervisor and Firecracker VMMs are available from the [release tarballs](https://github.com/kata-containers/kata-containers/releases), as well as through [`kata-deploy`](../tools/packaging/kata-deploy/README.md).
+You may choose to manually build your VMM/hypervisor.

 ## Build a custom QEMU

-Your QEMU directory need to be prepared with source code. Alternatively, you can use the [Kata containers QEMU](https://github.com/kata-containers/qemu/tree/master) and checkout the recommended branch:
+Kata Containers makes use of upstream QEMU branch. The exact version
+and repository utilized can be found by looking at the [versions file](../versions.yaml).

-```
-$ go get -d github.com/kata-containers/qemu
-$ qemu_branch=$(grep qemu-lite- ${GOPATH}/src/github.com/kata-containers/kata-containers/versions.yaml | cut -d '"' -f2)
-$ cd ${GOPATH}/src/github.com/kata-containers/qemu
-$ git checkout -b $qemu_branch remotes/origin/$qemu_branch
-$ your_qemu_directory=${GOPATH}/src/github.com/kata-containers/qemu
-```
-
-To build a version of QEMU using the same options as the default `qemu-lite` version , you could use the `configure-hypervisor.sh` script:
+Kata often utilizes patches for not-yet-upstream fixes for components,
+including QEMU. These can be found in the [packaging/QEMU directory](../tools/packaging/qemu/patches)
+
+To build utilizing the same options as Kata, you should make use of the `configure-hypervisor.sh` script. For example:
 ```
 $ go get -d github.com/kata-containers/kata-containers/tools/packaging
 $ cd $your_qemu_directory

@@ -407,6 +403,8 @@ $ make -j $(nproc)
 $ sudo -E make install
 ```

+See the [static-build script for QEMU](../tools/packaging/static-build/qemu/build-static-qemu.sh) for a reference on how to get, setup, configure and build QEMU for Kata.
+
 ### Build a custom QEMU for aarch64/arm64 - REQUIRED
 > **Note:**
 >

@@ -618,8 +616,11 @@ sudo sed -i -e 's/^kernel_params = "\(.*\)"/kernel_params = "\1 agent.debug_cons
 > **Note** Ports 1024 and 1025 are reserved for communication with the agent
 > and gathering of agent logs respectively.

-Next, connect to the debug console. The VSOCKS paths vary slightly between
-cloud-hypervisor and firecracker.
+##### Connecting to the debug console
+
+Next, connect to the debug console. The VSOCKS paths vary slightly between each
+VMM solution.
+
 In case of cloud-hypervisor, connect to the `vsock` as shown:
 ```
 $ sudo su -c 'cd /var/run/vc/vm/{sandbox_id}/root/ && socat stdin unix-connect:clh.sock'

@@ -636,6 +637,12 @@ CONNECT 1026

 **Note**: You need to press the `RETURN` key to see the shell prompt.

+For QEMU, connect to the `vsock` as shown:
+```
+$ sudo su -c 'cd /var/run/vc/vm/{sandbox_id} && socat "stdin,raw,echo=0,escape=0x11" "unix-connect:console.sock"
+```
+
 To disconnect from the virtual machine, type `CONTROL+q` (hold down the
 `CONTROL` key and press `q`).
@@ -19,6 +19,8 @@
 * [Support for joining an existing VM network](#support-for-joining-an-existing-vm-network)
 * [docker --net=host](#docker---nethost)
 * [docker run --link](#docker-run---link)
+* [Storage limitations](#storage-limitations)
+* [Kubernetes `volumeMounts.subPaths`](#kubernetes-volumemountssubpaths)
 * [Host resource sharing](#host-resource-sharing)
 * [docker run --privileged](#docker-run---privileged)
 * [Miscellaneous](#miscellaneous)

@@ -216,6 +218,17 @@ Equivalent functionality can be achieved with the newer docker networking commands.
 See more documentation at
 [docs.docker.com](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/).

+## Storage limitations
+
+### Kubernetes `volumeMounts.subPaths`
+
+Kubernetes `volumeMount.subPath` is not supported by Kata Containers at the
+moment.
+
+See [this issue](https://github.com/kata-containers/runtime/issues/2812) for more details.
+[Another issue](https://github.com/kata-containers/kata-containers/issues/1728) focuses on the case of `emptyDir`.
+
+
 ## Host resource sharing

 ### docker run --privileged

@@ -224,7 +237,7 @@ Privileged support in Kata is essentially different from `runc` containers.
 Kata does support `docker run --privileged` command, but in this case full access
 to the guest VM is provided in addition to some host access.

-The container runs with elevated capabilities within the guest and is granted
+The container runs with elevated capabilities within the guest and is granted
 access to guest devices instead of the host devices.
 This is also true with using `securityContext privileged=true` with Kubernetes.
@@ -48,6 +48,7 @@ Documents that help to understand and contribute to Kata Containers.
 ### Design and Implementations

 * [Kata Containers Architecture](design/architecture.md): Architectural overview of Kata Containers
+* [Kata Containers E2E Flow](design/end-to-end-flow.md): The entire end-to-end flow of Kata Containers
 * [Kata Containers design](./design/README.md): More Kata Containers design documents

 ### How to Contribute
@@ -79,9 +79,9 @@
 ```
 $ cd ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/release
 # Note: OLD_VERSION is where the script should start to get changes.
-$ ./runtime-release-notes.sh ${OLD_VERSION} ${NEW_VERSION} > notes.md
+$ ./release-notes.sh ${OLD_VERSION} ${NEW_VERSION} > notes.md
 # Edit the `notes.md` file to review and make any changes to the release notes.
-# Add the release notes in GitHub runtime.
+# Add the release notes in the project's GitHub.
 $ hub release edit -F notes.md "${NEW_VERSION}"
 ```
docs/design/arch-images/katacontainers-e2e-with-bg.jpg (new binary file, 1.2 MiB; not shown)

docs/design/arch-images/katacontainers-e2e.svg (new file, 16 lines, 1.0 MiB; file diff suppressed because one or more lines are too long)
docs/design/end-to-end-flow.md (new file, 4 lines)

@@ -0,0 +1,4 @@
+# Kata Containers E2E Flow
+
+
+
@@ -185,7 +185,7 @@ in Kibana:
 .

 We can however further sub-parse the Kata entries using the
-[Fluentd plugins](https://docs.fluentbit.io/manual/parser/logfmt) that will parse
+[Fluentd plugins](https://docs.fluentbit.io/manual/v/1.3/parser/logfmt) that will parse
 `logfmt` formatted data. We can utilise these to parse the sub-fields using a Fluentd filter
 section. At the same time, we will prefix the new fields with `kata_` to make it clear where
 they have come from:
@@ -80,6 +80,8 @@ There are several kinds of Kata configurations and they are listed below.

 In case of CRI-O, all annotations specified in the pod spec are passed down to Kata.

+# containerd Configuration
+
 For containerd, annotations specified in the pod spec are passed down to Kata
 starting with version `1.3.0` of containerd. Additionally, extra configuration is
 needed for containerd, by providing a `pod_annotations` field in the containerd config

@@ -92,11 +94,9 @@ for passing annotations to Kata from containerd:
 $ cat /etc/containerd/config
 ....

-[plugins.cri.containerd.runtimes.kata]
-   runtime_type = "io.containerd.runc.v1"
+[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
+   runtime_type = "io.containerd.kata.v2"
    pod_annotations = ["io.katacontainers.*"]
-   [plugins.cri.containerd.runtimes.kata.options]
-     BinaryName = "/usr/bin/kata-runtime"
 ....

 ```
@@ -7,9 +7,10 @@
 * [Configure Kubelet to use containerd](#configure-kubelet-to-use-containerd)
 * [Configure HTTP proxy - OPTIONAL](#configure-http-proxy---optional)
 * [Start Kubernetes](#start-kubernetes)
-* [Install a Pod Network](#install-a-pod-network)
+* [Configure Pod Network](#configure-pod-network)
 * [Allow pods to run in the master node](#allow-pods-to-run-in-the-master-node)
-* [Create an untrusted pod using Kata Containers](#create-an-untrusted-pod-using-kata-containers)
+* [Create runtime class for Kata Containers](#create-runtime-class-for-kata-containers)
+* [Run pod in Kata Containers](#run-pod-in-kata-containers)
 * [Delete created pod](#delete-created-pod)

 This document describes how to set up a single-machine Kubernetes (k8s) cluster.

@@ -18,9 +19,6 @@ The Kubernetes cluster will use the
 [CRI containerd plugin](https://github.com/containerd/cri) and
 [Kata Containers](https://katacontainers.io) to launch untrusted workloads.

-For Kata Containers 1.5.0-rc2 and above, we will use `containerd-shim-kata-v2` (short as `shimv2` in this documentation)
-to launch Kata Containers. For the previous version of Kata Containers, the Pods are launched with `kata-runtime`.
-
 ## Requirements

 - Kubernetes, Kubelet, `kubeadm`

@@ -125,43 +123,33 @@ $ sudo systemctl daemon-reload
 $ sudo -E kubectl get pods
 ```

-## Install a Pod Network
+## Configure Pod Network

 A pod network plugin is needed to allow pods to communicate with each other.
+You can find more about CNI plugins from the [Creating a cluster with `kubeadm`](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#instructions) guide.

-- Install the `flannel` plugin by following the
-  [Using `kubeadm` to Create a Cluster](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#instructions)
-  guide, starting from the **Installing a pod network** section.
-
-- Create a pod network using flannel
-
-  > **Note:** There is no known way to determine programmatically the best version (commit) to use.
-  > See https://github.com/coreos/flannel/issues/995.
+By default the CNI plugin binaries is installed under `/opt/cni/bin` (in package `kubernetes-cni`), you only need to create a configuration file for CNI plugin.

-  ```bash
-  $ sudo -E kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
-  ```
+```bash
+$ sudo -E mkdir -p /etc/cni/net.d

-- Wait for the pod network to become available
-
-  ```bash
-  # number of seconds to wait for pod network to become available
-  $ timeout_dns=420
-
-  $ while [ "$timeout_dns" -gt 0 ]; do
-      if sudo -E kubectl get pods --all-namespaces | grep dns | grep Running; then
-        break
-      fi
-
-      sleep 1s
-      ((timeout_dns--))
-    done
-  ```
-
-- Check the pod network is running
-
-  ```bash
-  $ sudo -E kubectl get pods --all-namespaces | grep dns | grep Running && echo "OK" || ( echo "FAIL" && false )
+$ sudo -E cat > /etc/cni/net.d/10-mynet.conf <<EOF
+{
+  "cniVersion": "0.2.0",
+  "name": "mynet",
+  "type": "bridge",
+  "bridge": "cni0",
+  "isGateway": true,
+  "ipMasq": true,
+  "ipam": {
+    "type": "host-local",
+    "subnet": "172.19.0.0/24",
+    "routes": [
+      { "dst": "0.0.0.0/0" }
+    ]
+  }
+}
+EOF
 ```

 ## Allow pods to run in the master node

@@ -172,24 +160,38 @@ By default, the cluster will not schedule pods in the master node. To enable mas
 $ sudo -E kubectl taint nodes --all node-role.kubernetes.io/master-
 ```

-## Create an untrusted pod using Kata Containers
+## Create runtime class for Kata Containers

 By default, all pods are created with the default runtime configured in CRI containerd plugin.
+From Kubernetes v1.12, users can use [`RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/#runtime-class) to specify a different runtime for Pods.

-If a pod has the `io.kubernetes.cri.untrusted-workload` annotation set to `"true"`, the CRI plugin runs the pod with the
+```bash
+$ cat > runtime.yaml <<EOF
+apiVersion: node.k8s.io/v1beta1
+kind: RuntimeClass
+metadata:
+  name: kata
+handler: kata
+EOF
+
+$ sudo -E kubectl apply -f runtime.yaml
+```
+
+## Run pod in Kata Containers
+
+If a pod has the `runtimeClassName` set to `kata`, the CRI plugin runs the pod with the
 [Kata Containers runtime](../../src/runtime/README.md).

-- Create an untrusted pod configuration
+- Create an pod configuration that using Kata Containers runtime

   ```bash
-  $ cat << EOT | tee nginx-untrusted.yaml
+  $ cat << EOT | tee nginx-kata.yaml
   apiVersion: v1
   kind: Pod
   metadata:
-    name: nginx-untrusted
-    annotations:
-      io.kubernetes.cri.untrusted-workload: "true"
+    name: nginx-kata
   spec:
+    runtimeClassName: kata
     containers:
     - name: nginx
       image: nginx

@@ -197,9 +199,9 @@ If a pod has the `io.kubernetes.cri.untrusted-workload` annotation set to `"true
   EOT
   ```

-- Create an untrusted pod
+- Create the pod
   ```bash
-  $ sudo -E kubectl apply -f nginx-untrusted.yaml
+  $ sudo -E kubectl apply -f nginx-kata.yaml
   ```

 - Check pod is running

@@ -216,5 +218,5 @@ If a pod has the `io.kubernetes.cri.untrusted-workload` annotation set to `"true
 ## Delete created pod

 ```bash
-$ sudo -E kubectl delete -f nginx-untrusted.yaml
+$ sudo -E kubectl delete -f nginx-kata.yaml
 ```
@@ -1,61 +1,12 @@
 # Kata Containers with virtio-fs

-- [Introduction](#introduction)
-- [Pre-requisites](#pre-requisites)
-- [Install Kata Containers with virtio-fs support](#install-kata-containers-with-virtio-fs-support)
-- [Run a Kata Container utilizing virtio-fs](#run-a-kata-container-utilizing-virtio-fs)
+- [Kata Containers with virtio-fs](#kata-containers-with-virtio-fs)
+  - [Introduction](#introduction)

 ## Introduction

 Container deployments utilize explicit or implicit file sharing between host filesystem and containers. From a trust perspective, avoiding a shared file-system between the trusted host and untrusted container is recommended. This is not always feasible. In Kata Containers, block-based volumes are preferred as they allow usage of either device pass through or `virtio-blk` for access within the virtual machine.

-As of the 1.7 release of Kata Containers, [9pfs](https://www.kernel.org/doc/Documentation/filesystems/9p.txt) is the default filesystem sharing mechanism. While this does allow for workload compatibility, it does so with degraded performance and potential for POSIX compliance limitations.
+As of the 2.0 release of Kata Containers, [virtio-fs](https://virtio-fs.gitlab.io/) is the default filesystem sharing mechanism.

-To help address these limitations, [virtio-fs](https://virtio-fs.gitlab.io/) has been developed. virtio-fs is a shared file system that lets virtual machines access a directory tree on the host. In Kata Containers, virtio-fs can be used to share container volumes, secrets, config-maps, configuration files (hostname, hosts, `resolv.conf`) and the container rootfs on the host with the guest. virtio-fs provides significant performance and POSIX compliance improvements compared to 9pfs.
-
-Enabling of virtio-fs requires changes in the guest kernel as well as the VMM. For Kata Containers, experimental virtio-fs support is enabled through `qemu` and `cloud-hypervisor` VMMs.
-
-**Note: virtio-fs support is experimental in the 1.7 release of Kata Containers. Work is underway to improve stability, performance and upstream integration. This is available for early preview - use at your own risk**
-
-This document describes how to get Kata Containers to work with virtio-fs.
-
-## Pre-requisites
-
-Before Kata 1.8 this feature required the host to have hugepages support enabled. Enable this with the `sysctl vm.nr_hugepages=1024` command on the host.In later versions of Kata, virtio-fs leverages `/dev/shm` as the shared memory backend. The default size of `/dev/shm` on a system is typically half of the total system memory. This can pose a physical limit to the maximum number of pods that can be launched with virtio-fs. This can be overcome by increasing the size of `/dev/shm` as shown below:
-
-```bash
-$ mount -o remount,size=${desired_shm_size} /dev/shm
-```
-
-## Install Kata Containers with virtio-fs support
-
-The Kata Containers `qemu` configuration with virtio-fs and the `virtiofs` daemon are available in the [Kata Container release](https://github.com/kata-containers/runtime/releases) artifacts starting with the 1.9 release. Installation is available through [distribution packages](https://github.com/kata-containers/documentation/blob/master/install/README.md#supported-distributions) as well through [`kata-deploy`](https://github.com/kata-containers/packaging/tree/master/kata-deploy).
-
-**Note: Support for virtio-fs was first introduced in `NEMU` hypervisor in Kata 1.8 release. This hypervisor has been deprecated.**
-
-Install the latest release of Kata with `kata-deploy` as follows:
-```
-docker run --runtime=runc -v /opt/kata:/opt/kata -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd -v /etc/docker:/etc/docker -it katadocker/kata-deploy kata-deploy-docker install
-```
-
-This will place the Kata release artifacts in `/opt/kata`, and update Docker's configuration to include a runtime target, `kata-qemu-virtiofs`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](https://github.com/kata-containers/packaging/tree/master/kata-deploy#kubernetes-quick-start).
-
-## Run a Kata Container utilizing virtio-fs
-
-Once installed, start a new container, utilizing `qemu` + `virtiofs`:
-```bash
-$ docker run --runtime=kata-qemu-virtiofs -it busybox
-```
-
-Verify the new container is running with the `qemu` hypervisor as well as using `virtiofsd`. To do this look for the hypervisor path and the `virtiofs` daemon process on the host:
-```bash
-$ ps -aux | grep virtiofs
-root ... /home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt
-    ... -machine virt,accel=kvm,kernel_irqchip,nvdimm ...
-root ... /home/foo/build-x86_64_virt/virtiofsd-x86_64 ...
-```
-
-You can also try out virtio-fs using `cloud-hypervisor` VMM:
-```bash
-$ docker run --runtime=kata-clh -it busybox
-```
+virtio-fs support works out of the box for `cloud-hypervisor` and `qemu`, when Kata Containers is deployed using `kata-deploy`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](https://github.com/kata-containers/packaging/tree/master/kata-deploy#kubernetes-quick-start).
@@ -52,7 +52,6 @@ Kata packages are provided by official distribution repositories for:
 | [CentOS](centos-installation-guide.md) | 8 |
 | [Fedora](fedora-installation-guide.md) | 32, Rawhide |
 | [openSUSE](opensuse-installation-guide.md) | [Leap 15.1](opensuse-leap-15.1-installation-guide.md)<br>Leap 15.2, Tumbleweed |
-| [SUSE Linux Enterprise (SLE)](sle-installation-guide.md) | SLE 15 SP1, 15 SP2 |

 > **Note::**
 >
@@ -3,15 +3,9 @@
 1. Install the Kata Containers components with the following commands:

    ```bash
+   $ sudo -E dnf install -y centos-release-advanced-virtualization
+   $ sudo -E dnf module disable -y virt:rhel
    $ source /etc/os-release
-   $ cat <<EOF | sudo -E tee /etc/yum.repos.d/advanced-virt.repo
-   [advanced-virt]
-   name=Advanced Virtualization
-   baseurl=http://mirror.centos.org/\$contentdir/\$releasever/virt/\$basearch/advanced-virtualization
-   enabled=1
-   gpgcheck=1
-   skip_if_unavailable=1
-   EOF
    $ cat <<EOF | sudo -E tee /etc/yum.repos.d/kata-containers.repo
    [kata-containers]
    name=Kata Containers

@@ -20,8 +14,7 @@
    gpgcheck=1
    skip_if_unavailable=1
    EOF
-   $ sudo -E dnf module disable -y virt:rhel
-   $ sudo -E dnf install -y kata-runtime
+   $ sudo -E dnf install -y kata-containers
    ```

 2. Decide which container manager to use and select the corresponding link that follows:
@@ -3,7 +3,7 @@
 1. Install the Kata Containers components with the following commands:

    ```bash
-   $ sudo -E dnf -y install kata-runtime
+   $ sudo -E dnf -y install kata-containers
    ```

 2. Decide which container manager to use and select the corresponding link that follows:
@@ -1,13 +0,0 @@
-# Install Kata Containers on SLE
-
-1. Install the Kata Containers components with the following commands:
-
-   ```bash
-   $ source /etc/os-release
-   $ DISTRO_VERSION=$(sed "s/-/_/g" <<< "$VERSION")
-   $ sudo -E zypper addrepo --refresh "https://download.opensuse.org/repositories/devel:/kubic/SLE_${DISTRO_VERSION}_Backports/devel:kubic.repo"
-   $ sudo -E zypper -n --gpg-auto-import-keys install katacontainers
-   ```
-
-2. Decide which container manager to use and select the corresponding link that follows:
-   - [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
@@ -1,13 +1,58 @@
-# Install Kata Containers from `snapcraft.io`
+# Kata Containers snap package
+
+* [Install Kata Containers](#install-kata-containers)
+* [Configure Kata Containers](#configure-kata-containers)
+* [Integration with shim v2 Container Engines](#integration-with-shim-v2-container-engines)
+* [Remove Kata Containers snap package](#remove-kata-containers-snap-package)
+
+
+## Install Kata Containers

 Kata Containers can be installed in any Linux distribution that supports
 [snapd](https://docs.snapcraft.io/installing-snapd).

-Run the following command to install Kata Containers:
+Run the following command to install **Kata Containers**:

-```bash
-$ sudo snap install kata-containers --classic
-```
+```sh
+$ sudo snap install kata-containers --candidate --classic
+```

-For further information on integrating and configuring the `snap` Kata Containers install,
-refer to the [Kata Containers packaging `snap` documentation](https://github.com/kata-containers/packaging/blob/master/snap/README.md#configure-kata-containers).
+## Configure Kata Containers
+
+By default Kata Containers snap image is mounted at `/snap/kata-containers` as a
+read-only file system, therefore default configuration file can not be edited.
+Fortunately Kata Containers supports loading a configuration file from another
+path than the default.
+
+```sh
+$ sudo mkdir -p /etc/kata-containers
+$ sudo cp /snap/kata-containers/current/usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers/
+$ $EDITOR /etc/kata-containers/configuration.toml
+```
+
+## Integration with shim v2 Container Engines
+
+The Container engine daemon (`cri-o`, `containerd`, etc) needs to be able to find the
+`containerd-shim-kata-v2` binary to allow Kata Containers to be created.
+Run the following command to create a symbolic link to the shim v2 binary.
+
+```sh
+$ sudo ln -sf /snap/kata-containers/current/usr/bin/containerd-shim-kata-v2 /usr/local/bin/containerd-shim-kata-v2
+```
+
+Once the symbolic link has been created and the engine daemon configured, `io.containerd.kata.v2`
+can be used as runtime.
+
+Read the following documents to know how to run Kata Containers 2.x with `containerd`.
+
+* [How to use Kata Containers and Containerd](https://github.com/kata-containers/kata-containers/blob/main/docs/how-to/containerd-kata.md)
+* [Install Kata Containers with containerd](https://github.com/kata-containers/kata-containers/blob/main/docs/install/container-manager/containerd/containerd-install.md)
+
+
+## Remove Kata Containers snap package
+
+Run the following command to remove the Kata Containers snap:
+
+```sh
+$ sudo snap remove kata-containers
+```
@@ -1,15 +0,0 @@
-# Install Kata Containers on Ubuntu
-
-1. Install the Kata Containers components with the following commands:
-
-   ```bash
-   $ ARCH=$(arch)
-   $ BRANCH="${BRANCH:-master}"
-   $ sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/ /' > /etc/apt/sources.list.d/kata-containers.list"
-   $ curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/xUbuntu_$(lsb_release -rs)/Release.key | sudo apt-key add -
-   $ sudo -E apt-get update
-   $ sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
-   ```
-
-2. Decide which container manager to use and select the corresponding link that follows:
-   - [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
@@ -10,9 +10,6 @@ Currently, the instructions are based on the following links:

 - https://docs.openstack.org/zun/latest/admin/clear-containers.html

-- ../install/ubuntu-installation-guide.md
-
-
 ## Install Git to use with DevStack

 ```sh

@@ -54,7 +51,7 @@ $ zun delete test

 ## Install Kata Containers

-Follow [these instructions](../install/ubuntu-installation-guide.md)
+Follow [these instructions](../install/README.md)
 to install the Kata Containers components.

 ## Update Docker with new Kata Containers runtime
@@ -93,9 +93,7 @@ impl HashSerializer {
         // Take care to only add the first instance of a key. This matters for loggers (but not
         // Records) since a child loggers have parents and the loggers are serialised child first
         // meaning the *newest* fields are serialised first.
-        if !self.fields.contains_key(&key) {
-            self.fields.insert(key, value);
-        }
+        self.fields.entry(key).or_insert(value);
     }

     fn remove_field(&mut self, key: &str) {
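The hunk above swaps an explicit `contains_key`/`insert` guard for the `HashMap` entry API without changing behavior: `or_insert` only writes when the key is vacant, which preserves the first-wins semantics the comment describes (child logger fields are serialized first and must not be overwritten by parent fields). A minimal standalone sketch of that property, independent of the kata sources:

```rust
use std::collections::HashMap;

fn main() {
    let mut fields: HashMap<String, String> = HashMap::new();

    // Fields arrive child-logger first, so the first value seen for a
    // key is the newest one and must be the value that survives.
    for (key, value) in [("module", "child"), ("module", "parent")] {
        // entry() resolves to a Vacant or Occupied entry; or_insert()
        // only writes into the Vacant case, so repeats are ignored.
        fields.entry(key.to_string()).or_insert(value.to_string());
    }

    // The first ("child") value wins; the later ("parent") one is dropped.
    assert_eq!(fields["module"], "child");
}
```

A side benefit of the entry form is that it performs a single hash lookup where the `contains_key` plus `insert` pair performed two.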
@@ -69,7 +69,7 @@ parts:
       tar -xf ${tarfile} --strip-components=1

   image:
-    after: [godeps]
+    after: [godeps, qemu, kernel]
     plugin: nil
     build-packages:
       - docker.io

@@ -89,6 +89,8 @@ parts:
       export GOROOT=${SNAPCRAFT_STAGE}
       export PATH="${GOROOT}/bin:${PATH}"

+      http_proxy=${http_proxy:-""}
+      https_proxy=${https_proxy:-""}
       if [ -n "$http_proxy" ]; then
         echo "Setting proxy $http_proxy"
         sudo -E systemctl set-environment http_proxy=$http_proxy || true

@@ -169,7 +171,7 @@ parts:
       fi

   kernel:
-    after: [godeps, image]
+    after: [godeps]
     plugin: nil
     build-packages:
       - libelf-dev

@@ -183,8 +185,8 @@ parts:

       cd ${kata_dir}/tools/packaging/kernel

-      # Say 'no' to everithing, fix issues with incomplete .config files
-      yes "n" | ./build-kernel.sh setup
+      # Setup and build kernel
+      ./build-kernel.sh -d setup
       kernel_dir_prefix="kata-linux-"
       cd ${kernel_dir_prefix}*
       version=$(basename ${PWD} | sed 's|'"${kernel_dir_prefix}"'||' | cut -d- -f1)

@@ -206,7 +208,7 @@ parts:

   qemu:
     plugin: make
-    after: [godeps, runtime]
+    after: [godeps]
     build-packages:
       - gcc
       - python3
src/agent/.gitignore (vendored, new file, 1 line)

@@ -0,0 +1 @@
+tarpaulin-report.html
src/agent/.rustfmt.toml (new file, 1 line)

@@ -0,0 +1 @@
+edition = "2018"
@@ -3,6 +3,11 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# To show variables or targets help on `make help`
|
||||
# Use the following format:
|
||||
# '##VAR VARIABLE_NAME: help about variable'
|
||||
# '##TARGET TARGET_NAME: help about target'
|
||||
|
||||
PROJECT_NAME = Kata Containers
|
||||
PROJECT_URL = https://github.com/kata-containers
|
||||
PROJECT_COMPONENT = kata-agent
|
||||
@@ -23,9 +28,12 @@ COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)
|
||||
# Exported to allow cargo to see it
|
||||
export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
|
||||
|
||||
##VAR BUILD_TYPE=release|debug type of rust build
|
||||
BUILD_TYPE = release
|
||||
|
||||
##VAR ARCH=arch target to build (format: uname -m)
|
||||
ARCH = $(shell uname -m)
|
||||
##VAR LIBC=musl|gnu
|
||||
LIBC ?= musl
|
||||
ifneq ($(LIBC),musl)
|
||||
ifeq ($(LIBC),gnu)
|
||||
@@ -41,6 +49,11 @@ ifeq ($(ARCH), ppc64le)
|
||||
$(warning "WARNING: powerpc64le-unknown-linux-musl target is unavailable")
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH), s390x)
|
||||
override LIBC = gnu
|
||||
$(warning "WARNING: s390x-unknown-linux-musl target is unavailable")
|
||||
endif
|
||||
|
||||
|
||||
EXTRA_RUSTFLAGS :=
|
||||
ifeq ($(ARCH), aarch64)
|
||||
@@ -52,10 +65,12 @@ TRIPLE = $(ARCH)-unknown-linux-$(LIBC)
|
||||
|
||||
TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
|
||||
|
||||
##VAR DESTDIR=<path> is a directory prepended to each installed target file
|
||||
DESTDIR :=
|
||||
##VAR BINDIR=<path> is a directory for installing executable programs
|
||||
BINDIR := /usr/bin
|
||||
|
||||
# Define if agent will be installed as init
|
||||
##VAR INIT=yes|no define if agent will be installed as init
|
||||
INIT := no
|
||||
|
||||
# Path to systemd unit directory if installed as not init.
|
||||
@@ -103,6 +118,7 @@ define INSTALL_FILE
|
||||
install -D -m 644 $1 $(DESTDIR)$2/$1 || exit 1;
|
||||
endef
|
||||
|
||||
##TARGET default: build code
|
||||
default: $(TARGET) show-header
|
||||
|
||||
$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
@@ -110,36 +126,54 @@ $(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
 $(TARGET_PATH): $(SOURCES) | show-summary
 @RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)

-optimize: $(SOURCES) | show-summary show-header
-@RUSTFLAGS="-C link-arg=-s $(EXTRA_RUSTFLAGS) --deny-warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
-
-show-header:
-@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
-
-$(GENERATED_FILES): %: %.in
-@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
-
-install: build-service
+##TARGET optimize: optimized build
+optimize: $(SOURCES) | show-summary show-header
+@RUSTFLAGS="-C link-arg=-s $(EXTRA_RUSTFLAGS) --deny-warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
+
+##TARGET clippy: run clippy linter
+clippy: $(GENERATED_CODE)
+cargo clippy --all-targets --all-features --release \
+-- \
+-Aclippy::redundant_allocation \
+-D warnings
+
+format:
+cargo fmt -- --check
+
+
+##TARGET install: install agent
+install: install-services
 @install -D $(TARGET_PATH) $(DESTDIR)/$(BINDIR)/$(TARGET)

+##TARGET clean: clean build
 clean:
 @cargo clean
 @rm -f $(GENERATED_FILES)
+@rm -f tarpaulin-report.html

+##TARGET test: run cargo tests
 test:
 @cargo test --all --target $(TRIPLE)

-check: test
+##TARGET check: run test
+check: clippy format

+##TARGET run: build and run agent
 run:
 @cargo run --target $(TRIPLE)

-build-service: $(GENERATED_FILES)
+install-services: $(GENERATED_FILES)
 ifeq ($(INIT),no)
 @echo "Installing systemd unit files..."
 $(foreach f,$(UNIT_FILES),$(call INSTALL_FILE,$f,$(UNIT_DIR)))
 endif

+show-header:
+@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
+
 show-summary: show-header
 @printf "project:\n"
 @printf " name: $(PROJECT_NAME)\n"
@@ -155,7 +189,35 @@ show-summary: show-header
 @printf " %s\n" "$(call get_toolchain_version)"
 @printf "\n"

-help: show-summary
+## help: Show help comments that start with `##VAR` and `##TARGET`
+help: Makefile show-summary
+@echo "==========================Help============================="
+@echo "Variables:"
+@sed -n 's/^##VAR//p' $< | sort
+@echo ""
+@echo "Targets:"
+@sed -n 's/^##TARGET//p' $< | sort
+
+TARPAULIN_ARGS:=-v --workspace
+install-tarpaulin:
+cargo install cargo-tarpaulin
+
+# Check if cargo tarpaulin is installed
+HAS_TARPAULIN:= $(shell cargo --list | grep tarpaulin 2>/dev/null)
+check_tarpaulin:
+ifndef HAS_TARPAULIN
+$(error "tarpaulin is not available. Please run: make install-tarpaulin")
+else
+$(info OK: tarpaulin installed)
+endif
+
+##TARGET codecov: Generate code coverage report
+codecov: check_tarpaulin
+cargo tarpaulin $(TARPAULIN_ARGS)
+
+##TARGET codecov-html: Generate code coverage html report
+codecov-html: check_tarpaulin
+cargo tarpaulin $(TARPAULIN_ARGS) -o Html
+
 .PHONY: \
 help \
@@ -163,5 +225,6 @@ help: show-summary
 show-summary \
 optimize

+##TARGET generate-protocols: generate/update grpc agent protocols
 generate-protocols:
 protocols/hack/update-generated-proto.sh all
@@ -784,7 +784,17 @@ pub struct LinuxIntelRdt {
 pub l3_cache_schema: String,
 }

-#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq)]
+#[serde(rename_all = "lowercase")]
+pub enum ContainerState {
+CREATING,
+CREATED,
+RUNNING,
+STOPPED,
+PAUSED,
+}
+
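With `rename_all = "lowercase"`, the new enum round-trips through serde as lowercase strings. A minimal sketch of that behavior (assuming serde_json is available, as in the crate's tests):

    let s = serde_json::to_string(&ContainerState::RUNNING).unwrap();
    assert_eq!(s, "\"running\"");
    let v: ContainerState = serde_json::from_str("\"paused\"").unwrap();
    assert_eq!(v, ContainerState::PAUSED);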
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
 pub struct State {
 #[serde(
 default,
@@ -794,8 +804,7 @@ pub struct State {
 pub version: String,
 #[serde(default, skip_serializing_if = "String::is_empty")]
 pub id: String,
-#[serde(default, skip_serializing_if = "String::is_empty")]
-pub status: String,
+pub status: ContainerState,
 #[serde(default)]
 pub pid: i32,
 #[serde(default, skip_serializing_if = "String::is_empty")]
@@ -806,6 +815,8 @@ pub struct State {

 #[cfg(test)]
 mod tests {
+use super::*;
+
 #[test]
 fn test_deserialize_state() {
 let data = r#"{
@@ -818,10 +829,10 @@ mod tests {
 "myKey": "myValue"
 }
 }"#;
-let expected = crate::State {
+let expected = State {
 version: "0.2.0".to_string(),
 id: "oci-container1".to_string(),
-status: "running".to_string(),
+status: ContainerState::RUNNING,
 pid: 4422,
 bundle: "/containers/redis".to_string(),
 annotations: [("myKey".to_string(), "myValue".to_string())]
@@ -4,7 +4,6 @@
 //

 use serde::{Deserialize, Serialize};
 use serde_json;

-use std::error;
 use std::fmt::{Display, Formatter, Result as FmtResult};

@@ -52,4 +52,3 @@ fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std

 Ok(())
 }
-
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 #![allow(bare_trait_objects)]
+#![allow(clippy::redundant_field_names)]

 pub mod agent;
 pub mod agent_ttrpc;
@@ -11,11 +12,3 @@ pub mod health;
 pub mod health_ttrpc;
 pub mod oci;
 pub mod types;
-
-#[cfg(test)]
-mod tests {
-#[test]
-fn it_works() {
-assert_eq!(2 + 2, 4);
-}
-}
@@ -6,8 +6,6 @@
 // looks like we can use caps to manipulate capabilities
 // conveniently, use caps to do it directly.. maybe

-use lazy_static;
-
 use crate::log_child;
 use crate::sync::write_count;
 use anyhow::{anyhow, Result};
@@ -21,7 +21,6 @@ use cgroups::{
 use crate::cgroups::Manager as CgroupManager;
 use crate::container::DEFAULT_DEVICES;
 use anyhow::{anyhow, Context, Result};
-use lazy_static;
 use libc::{self, pid_t};
 use nix::errno::Errno;
 use oci::{
@@ -46,18 +45,19 @@ macro_rules! sl {
 }

 pub fn load_or_create<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Cgroup<'a> {
-let valid_path = path.trim_start_matches("/").to_string();
+let valid_path = path.trim_start_matches('/').to_string();
 let cg = load(h.clone(), &valid_path);
-if cg.is_none() {
-info!(sl!(), "create new cgroup: {}", &valid_path);
-cgroups::Cgroup::new(h, valid_path.as_str())
-} else {
-cg.unwrap()
+match cg {
+Some(cg) => cg,
+None => {
+info!(sl!(), "create new cgroup: {}", &valid_path);
+cgroups::Cgroup::new(h, valid_path.as_str())
+}
 }
 }

 pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup<'a>> {
-let valid_path = path.trim_start_matches("/").to_string();
+let valid_path = path.trim_start_matches('/').to_string();
 let cg = cgroups::Cgroup::load(h, valid_path.as_str());
 let cpu_controller: &CpuController = cg.controller_of().unwrap();
 if cpu_controller.exists() {
@@ -131,21 +131,21 @@ impl CgroupManager for Manager {

 // set block_io resources
 if let Some(blkio) = &r.block_io {
-set_block_io_resources(&cg, blkio, res)?;
+set_block_io_resources(&cg, blkio, res);
 }

 // set hugepages resources
-if r.hugepage_limits.len() > 0 {
-set_hugepages_resources(&cg, &r.hugepage_limits, res)?;
+if !r.hugepage_limits.is_empty() {
+set_hugepages_resources(&cg, &r.hugepage_limits, res);
 }

 // set network resources
 if let Some(network) = &r.network {
-set_network_resources(&cg, network, res)?;
+set_network_resources(&cg, network, res);
 }

 // set devices resources
-set_devices_resources(&cg, &r.devices, res)?;
+set_devices_resources(&cg, &r.devices, res);
 info!(sl!(), "resources after processed {:?}", res);

 // apply resources
@@ -219,8 +219,8 @@ impl CgroupManager for Manager {
 let h = cgroups::hierarchies::auto();
 let h = Box::new(&*h);
 let cg = load(h, &self.cpath);
-if cg.is_some() {
-cg.unwrap().delete();
+if let Some(cg) = cg {
+cg.delete();
 }
 Ok(())
 }
@@ -241,7 +241,7 @@ fn set_network_resources(
 _cg: &cgroups::Cgroup,
 network: &LinuxNetwork,
 res: &mut cgroups::Resources,
-) -> Result<()> {
+) {
 info!(sl!(), "cgroup manager set network");

 // set classid
@@ -263,14 +263,13 @@ fn set_network_resources(

 res.network.update_values = true;
 res.network.priorities = priorities;
-Ok(())
 }

 fn set_devices_resources(
 _cg: &cgroups::Cgroup,
-device_resources: &Vec<LinuxDeviceCgroup>,
+device_resources: &[LinuxDeviceCgroup],
 res: &mut cgroups::Resources,
-) -> Result<()> {
+) {
 info!(sl!(), "cgroup manager set devices");
 let mut devices = vec![];

@@ -294,15 +293,13 @@ fn set_devices_resources(

 res.devices.update_values = true;
 res.devices.devices = devices;
-
-Ok(())
 }

 fn set_hugepages_resources(
 _cg: &cgroups::Cgroup,
-hugepage_limits: &Vec<LinuxHugepageLimit>,
+hugepage_limits: &[LinuxHugepageLimit],
 res: &mut cgroups::Resources,
-) -> Result<()> {
+) {
 info!(sl!(), "cgroup manager set hugepage");
 res.hugepages.update_values = true;
 let mut limits = vec![];
@@ -315,42 +312,26 @@ fn set_hugepages_resources(
 limits.push(hr);
 }
 res.hugepages.limits = limits;
-
-Ok(())
 }

 fn set_block_io_resources(
-cg: &cgroups::Cgroup,
+_cg: &cgroups::Cgroup,
 blkio: &LinuxBlockIO,
 res: &mut cgroups::Resources,
-) -> Result<()> {
+) {
 info!(sl!(), "cgroup manager set block io");
 res.blkio.update_values = true;

-if cg.v2() {
-res.blkio.weight = convert_blk_io_to_v2_value(blkio.weight);
-res.blkio.leaf_weight = convert_blk_io_to_v2_value(blkio.leaf_weight);
-} else {
-res.blkio.weight = blkio.weight;
-res.blkio.leaf_weight = blkio.leaf_weight;
-}
+res.blkio.weight = blkio.weight;
+res.blkio.leaf_weight = blkio.leaf_weight;

 let mut blk_device_resources = vec![];
 for d in blkio.weight_device.iter() {
-let (w, lw) = if cg.v2() {
-(
-convert_blk_io_to_v2_value(blkio.weight),
-convert_blk_io_to_v2_value(blkio.leaf_weight),
-)
-} else {
-(blkio.weight, blkio.leaf_weight)
-};
-
 let dr = BlkIoDeviceResource {
 major: d.blk.major as u64,
 minor: d.blk.minor as u64,
-weight: w,
-leaf_weight: lw,
+weight: blkio.weight,
+leaf_weight: blkio.leaf_weight,
 };
 blk_device_resources.push(dr);
 }
@@ -364,8 +345,6 @@ fn set_block_io_resources(
 build_blk_io_device_throttle_resource(&blkio.throttle_read_iops_device);
 res.blkio.throttle_write_iops_device =
 build_blk_io_device_throttle_resource(&blkio.throttle_write_iops_device);
-
-Ok(())
 }

 fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCPU) -> Result<()> {
@@ -431,7 +410,7 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
 }

 if let Some(swappiness) = memory.swappiness {
-if swappiness >= 0 && swappiness <= 100 {
+if (0..=100).contains(&swappiness) {
 mem_controller.set_swappiness(swappiness as u64)?;
 } else {
 return Err(anyhow!(
@@ -462,7 +441,7 @@ fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
 }

 fn build_blk_io_device_throttle_resource(
-input: &Vec<oci::LinuxThrottleDevice>,
+input: &[oci::LinuxThrottleDevice],
 ) -> Vec<BlkIoDeviceThrottleResource> {
 let mut blk_io_device_throttle_resources = vec![];
 for d in input.iter() {
@@ -690,7 +669,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {

 // use_hierarchy
 let value = memory.use_hierarchy;
-let use_hierarchy = if value == 1 { true } else { false };
+let use_hierarchy = value == 1;

 // get memory data
 let usage = SingularPtrField::some(MemoryData {
@@ -744,13 +723,12 @@ fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
 let current = pid_controller.get_pid_current().unwrap_or(0);
 let max = pid_controller.get_pid_max();

-let limit = if max.is_err() {
-0
-} else {
-match max.unwrap() {
+let limit = match max {
+Err(_) => 0,
+Ok(max) => match max {
 MaxValue::Value(v) => v,
 MaxValue::Max => 0,
-}
+},
 } as u64;

 SingularPtrField::some(PidsStats {
@@ -793,9 +771,9 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8
 Total 0
 */

-fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioStatsEntry> {
+fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStatsEntry> {
 let mut m = RepeatedField::new();
-if blkiodata.len() == 0 {
+if blkiodata.is_empty() {
 return m;
 }

@@ -815,10 +793,10 @@ fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioSt
 m
 }

-fn get_blkio_stat_ioservice(services: &Vec<IoService>) -> RepeatedField<BlkioStatsEntry> {
+fn get_blkio_stat_ioservice(services: &[IoService]) -> RepeatedField<BlkioStatsEntry> {
 let mut m = RepeatedField::new();

-if services.len() == 0 {
+if services.is_empty() {
 return m;
 }

@@ -839,7 +817,7 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
 major: major as u64,
 minor: minor as u64,
 op: op.to_string(),
-value: value,
+value,
 unknown_fields: UnknownFields::default(),
 cached_size: CachedSize::default(),
 }
@@ -880,7 +858,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
 let mut m = BlkioStats::new();
 let io_serviced_recursive = blkio.io_serviced_recursive;

-if io_serviced_recursive.len() == 0 {
+if io_serviced_recursive.is_empty() {
 // fall back to generic stats
 // blkio.throttle.io_service_bytes,
 // maybe io_service_bytes_recursive?
@@ -935,8 +913,8 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap<String, HugetlbStats> {
 h
 }

-pub const PATHS: &'static str = "/proc/self/cgroup";
-pub const MOUNTS: &'static str = "/proc/self/mountinfo";
+pub const PATHS: &str = "/proc/self/cgroup";
+pub const MOUNTS: &str = "/proc/self/mountinfo";

 pub fn get_paths() -> Result<HashMap<String, String>> {
 let mut m = HashMap::new();
@@ -1022,11 +1000,11 @@ impl Manager {
 })
 }

-pub fn update_cpuset_path(&self, cpuset_cpus: &str) -> Result<()> {
-if cpuset_cpus == "" {
+pub fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
+if guest_cpuset.is_empty() {
 return Ok(());
 }
-info!(sl!(), "update_cpuset_path to: {}", cpuset_cpus);
+info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);

 let h = cgroups::hierarchies::auto();
 let h = Box::new(&*h);
@@ -1040,8 +1018,8 @@ impl Manager {
 let h = cgroups::hierarchies::auto();
 let h = Box::new(&*h);
 let cg = load_or_create(h, &self.cpath);
-let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
-let path = cpuset_controller.path();
+let container_cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
+let path = container_cpuset_controller.path();
 let container_path = Path::new(path);
 info!(sl!(), "container cpuset path: {:?}", &path);

@@ -1050,18 +1028,16 @@ impl Manager {
 if ancestor == root_path {
 break;
 }
-if ancestor != container_path {
-paths.push(ancestor);
-}
+paths.push(ancestor);
 }
-info!(sl!(), "paths to update cpuset: {:?}", &paths);
+info!(sl!(), "parent paths to update cpuset: {:?}", &paths);

 let mut i = paths.len();
 loop {
 if i == 0 {
 break;
 }
-i = i - 1;
+i -= 1;
 let h = cgroups::hierarchies::auto();
 let h = Box::new(&*h);

@@ -1070,10 +1046,20 @@ impl Manager {
 .to_str()
 .unwrap()
 .trim_start_matches(root_path.to_str().unwrap());
-info!(sl!(), "updating cpuset for path {:?}", &r_path);
+info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
 let cg = load_or_create(h, &r_path);
 let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
-cpuset_controller.set_cpus(cpuset_cpus)?;
+cpuset_controller.set_cpus(guest_cpuset)?;
 }

+if !container_cpuset.is_empty() {
+info!(
+sl!(),
+"updating cpuset for container path: {:?} cpuset: {}",
+&container_path,
+container_cpuset
+);
+container_cpuset_controller.set_cpus(container_cpuset)?;
+}
+
 Ok(())
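A usage sketch of the reworked signature (the manager value and cpuset strings are illustrative): the guest-level cpuset is propagated to every parent cgroup first, then the container's own cgroup is pinned to its subset.

    // vCPUs 0-3 are online in the guest; the container is limited to 0-1
    mgr.update_cpuset_path("0-3", "0-1")?;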
@@ -1149,20 +1135,6 @@ fn convert_memory_swap_to_v2_value(memory_swap: i64, memory: i64) -> Result<i64>
 Ok(memory_swap - memory)
 }

-// Since the OCI spec is designed for cgroup v1, in some cases
-// there is need to convert from the cgroup v1 configuration to cgroup v2
-// the formula for BlkIOWeight is y = (1 + (x - 10) * 9999 / 990)
-// convert linearly from [10-1000] to [1-10000]
-// https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c86414/libcontainer/cgroups/utils.go#L382
-fn convert_blk_io_to_v2_value(blk_io_weight: Option<u16>) -> Option<u16> {
-let v = blk_io_weight.unwrap_or(0);
-if v != 0 {
-return None;
-}
-
-Some(1 + (v - 10) * 9999 / 990 as u16)
-}
-
 #[cfg(test)]
 mod tests {
 use super::*;
src/agent/rustjail/src/cgroups/mock.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
+// Copyright (c) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+use protobuf::{CachedSize, SingularPtrField, UnknownFields};
+
+use crate::cgroups::Manager as CgroupManager;
+use crate::protocols::agent::{BlkioStats, CgroupStats, CpuStats, MemoryStats, PidsStats};
+use anyhow::Result;
+use cgroups::freezer::FreezerState;
+use libc::{self, pid_t};
+use oci::LinuxResources;
+use std::collections::HashMap;
+use std::string::String;
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct Manager {
+pub paths: HashMap<String, String>,
+pub mounts: HashMap<String, String>,
+pub cpath: String,
+}
+
+impl CgroupManager for Manager {
+fn apply(&self, _: pid_t) -> Result<()> {
+Ok(())
+}
+
+fn set(&self, _: &LinuxResources, _: bool) -> Result<()> {
+Ok(())
+}
+
+fn get_stats(&self) -> Result<CgroupStats> {
+Ok(CgroupStats {
+cpu_stats: SingularPtrField::some(CpuStats::default()),
+memory_stats: SingularPtrField::some(MemoryStats::new()),
+pids_stats: SingularPtrField::some(PidsStats::new()),
+blkio_stats: SingularPtrField::some(BlkioStats::new()),
+hugetlb_stats: HashMap::new(),
+unknown_fields: UnknownFields::default(),
+cached_size: CachedSize::default(),
+})
+}
+
+fn freeze(&self, _: FreezerState) -> Result<()> {
+Ok(())
+}
+
+fn destroy(&mut self) -> Result<()> {
+Ok(())
+}
+
+fn get_pids(&self) -> Result<Vec<pid_t>> {
+Ok(Vec::new())
+}
+}
+
+impl Manager {
+pub fn new(cpath: &str) -> Result<Self> {
+Ok(Self {
+paths: HashMap::new(),
+mounts: HashMap::new(),
+cpath: cpath.to_string(),
+})
+}
+
+pub fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
+Ok(())
+}
+
+pub fn get_cg_path(&self, _: &str) -> Option<String> {
+Some("".to_string())
+}
+}
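The mock manager implements the same trait surface as the real cgroup manager while touching no cgroupfs, which is useful where tests run without a cgroup hierarchy. A small sketch (module path assumed to be in scope):

    let mgr = Manager::new("/kata")?;
    mgr.apply(1234)?; // no-op
    let stats = mgr.get_stats()?; // default-initialized, empty stats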
@@ -41,7 +41,7 @@ fn get_value_from_cgroup(path: &PathBuf, key: &str) -> Result<i64> {
 );

 for line in content.lines() {
-let arr: Vec<&str> = line.split(" ").collect();
+let arr: Vec<&str> = line.split(' ').collect();
 if arr.len() == 2 && arr[0] == key {
 let r = arr[1].parse::<i64>()?;
 return Ok(r);
@@ -130,7 +130,7 @@ fn register_memory_event_v2(
 // notify_on_oom returns a channel on which an OOM event can be received;
 // if the process dies without an OOM, the channel is closed.
 fn notify_on_oom(cid: &str, dir: String) -> Result<Receiver<String>> {
-if dir == "" {
+if dir.is_empty() {
 return Err(anyhow!("memory controller missing"));
 }
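A hedged usage sketch of the OOM channel semantics described above (the cgroup directory is illustrative):

    let rx = notify_on_oom("cid", "/sys/fs/cgroup/memory/kata/cid".to_string())?;
    match rx.recv() {
        Ok(cid) => println!("OOM event for container {}", cid),
        Err(_) => println!("channel closed: process exited without OOM"),
    }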
@@ -139,7 +139,7 @@ fn notify_on_oom(cid: &str, dir: String) -> Result<Receiver<String>> {

 // level is one of "low", "medium", or "critical"
 fn notify_memory_pressure(cid: &str, dir: String, level: &str) -> Result<Receiver<String>> {
-if dir == "" {
+if dir.is_empty() {
 return Err(anyhow!("memory controller missing"));
 }

@@ -163,7 +163,7 @@ fn register_memory_event(

 let event_control_path = Path::new(&cg_dir).join("cgroup.event_control");
 let data;
-if arg == "" {
+if arg.is_empty() {
 data = format!("{} {}", eventfd, event_file.as_raw_fd());
 } else {
 data = format!("{} {} {}", eventfd, event_file.as_raw_fd(), arg);
@@ -4,15 +4,11 @@
 //

 use anyhow::{anyhow, Context, Result};
-use dirs;
-use lazy_static;
 use libc::pid_t;
+use oci::{ContainerState, LinuxDevice, LinuxIDMapping};
 use oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
-use oci::{LinuxDevice, LinuxIDMapping};
-use serde_json;
 use std::clone::Clone;
 use std::ffi::{CStr, CString};
-use std::fmt;
-use std::fmt::Display;
 use std::fs;
 use std::os::unix::io::RawFd;
@@ -43,7 +39,6 @@ use nix::sys::signal::{self, Signal};
 use nix::sys::stat::{self, Mode};
 use nix::unistd::{self, ForkResult, Gid, Pid, Uid};

-use libc;
 use protobuf::SingularPtrField;

 use oci::State as OCIState;
@@ -54,9 +49,9 @@ use std::os::unix::io::FromRawFd;

 use slog::{info, o, Logger};

-const STATE_FILENAME: &'static str = "state.json";
-const EXEC_FIFO_FILENAME: &'static str = "exec.fifo";
-const VER_MARKER: &'static str = "1.2.5";
+const STATE_FILENAME: &str = "state.json";
+const EXEC_FIFO_FILENAME: &str = "exec.fifo";
+const VER_MARKER: &str = "1.2.5";
 const PID_NS_PATH: &str = "/proc/self/ns/pid";

 const INIT: &str = "INIT";
@@ -67,37 +62,29 @@ const CLOG_FD: &str = "CLOG_FD";
 const FIFO_FD: &str = "FIFO_FD";
 const HOME_ENV_KEY: &str = "HOME";

-#[derive(PartialEq, Clone, Copy)]
-pub enum Status {
-CREATED,
-RUNNING,
-STOPPED,
-PAUSED,
-}
-
 #[derive(Debug)]
 pub struct ContainerStatus {
-pre_status: Status,
-cur_status: Status,
+pre_status: ContainerState,
+cur_status: ContainerState,
 }

 impl ContainerStatus {
 fn new() -> Self {
 ContainerStatus {
-pre_status: Status::CREATED,
-cur_status: Status::CREATED,
+pre_status: ContainerState::CREATED,
+cur_status: ContainerState::CREATED,
 }
 }

-fn status(&self) -> Status {
+fn status(&self) -> ContainerState {
 self.cur_status
 }

-fn pre_status(&self) -> Status {
+fn pre_status(&self) -> ContainerState {
 self.pre_status
 }

-fn transition(&mut self, to: Status) {
+fn transition(&mut self, to: ContainerState) {
 self.pre_status = self.status();
 self.cur_status = to;
 }
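A short sketch of the bookkeeping above: `transition()` records the current state as `pre_status` before switching, which is exactly what `test_status_transition` further down exercises.

    let mut st = ContainerStatus::new();
    st.transition(ContainerState::RUNNING);
    assert_eq!(st.pre_status(), ContainerState::CREATED);
    assert_eq!(st.status(), ContainerState::RUNNING);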
@@ -106,17 +93,6 @@ impl ContainerStatus {
 pub type Config = CreateOpts;
 type NamespaceType = String;

-impl fmt::Debug for Status {
-fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-match self {
-Status::CREATED => write!(f, "{:?}", "created"),
-Status::RUNNING => write!(f, "{:?}", "running"),
-Status::STOPPED => write!(f, "{:?}", "stopped"),
-Status::PAUSED => write!(f, "{:?}", "paused"),
-}
-}
-}
-
 lazy_static! {
 static ref NAMESPACES: HashMap<&'static str, CloneFlags> = {
 let mut m = HashMap::new();
@@ -215,7 +191,7 @@ pub struct BaseState {

 pub trait BaseContainer {
 fn id(&self) -> String;
-fn status(&self) -> Status;
+fn status(&self) -> ContainerState;
 fn state(&self) -> Result<State>;
 fn oci_state(&self) -> Result<OCIState>;
 fn config(&self) -> Result<&Config>;
@@ -279,7 +255,7 @@ pub trait Container: BaseContainer {
 impl Container for LinuxContainer {
 fn pause(&mut self) -> Result<()> {
 let status = self.status();
-if status != Status::RUNNING && status != Status::CREATED {
+if status != ContainerState::RUNNING && status != ContainerState::CREATED {
 return Err(anyhow!(
 "failed to pause container: current status is: {:?}",
 status
@@ -292,7 +268,7 @@ impl Container for LinuxContainer {
 .unwrap()
 .freeze(FreezerState::Frozen)?;

-self.status.transition(Status::PAUSED);
+self.status.transition(ContainerState::PAUSED);
 return Ok(());
 }
 Err(anyhow!("failed to get container's cgroup manager"))
@@ -300,7 +276,7 @@ impl Container for LinuxContainer {

 fn resume(&mut self) -> Result<()> {
 let status = self.status();
-if status != Status::PAUSED {
+if status != ContainerState::PAUSED {
 return Err(anyhow!("container status is: {:?}, not paused", status));
 }

@@ -310,7 +286,7 @@ impl Container for LinuxContainer {
 .unwrap()
 .freeze(FreezerState::Thawed)?;

-self.status.transition(Status::RUNNING);
+self.status.transition(ContainerState::RUNNING);
 return Ok(());
 }
 Err(anyhow!("failed to get container's cgroup manager"))
@@ -551,7 +527,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {

 setid(uid, gid)?;

-if guser.additional_gids.len() > 0 {
+if !guser.additional_gids.is_empty() {
 setgroups(guser.additional_gids.as_slice()).map_err(|e| {
 let _ = write_sync(
 cwfd,
@@ -595,7 +571,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {

 // setup the envs
 for e in env.iter() {
-let v: Vec<&str> = e.splitn(2, "=").collect();
+let v: Vec<&str> = e.splitn(2, '=').collect();
 if v.len() != 2 {
 continue;
 }
@@ -648,7 +624,7 @@ impl BaseContainer for LinuxContainer {
 self.id.clone()
 }

-fn status(&self) -> Status {
+fn status(&self) -> ContainerState {
 self.status.status()
 }

@@ -659,7 +635,7 @@ impl BaseContainer for LinuxContainer {
 fn oci_state(&self) -> Result<OCIState> {
 let oci = self.config.spec.as_ref().unwrap();
 let status = self.status();
-let pid = if status != Status::STOPPED {
+let pid = if status != ContainerState::STOPPED {
 self.init_process_pid
 } else {
 0
@@ -671,7 +647,7 @@ impl BaseContainer for LinuxContainer {
 Ok(OCIState {
 version: oci.version.clone(),
 id: self.id(),
-status: format!("{:?}", status),
+status,
 pid,
 bundle,
 annotations: oci.annotations.clone(),
@@ -731,7 +707,7 @@ impl BaseContainer for LinuxContainer {
 info!(logger, "enter container.start!");
 let mut fifofd: RawFd = -1;
 if p.init {
-if let Ok(_) = stat::stat(fifo_file.as_str()) {
+if stat::stat(fifo_file.as_str()).is_ok() {
 return Err(anyhow!("exec fifo exists"));
 }
 unistd::mkfifo(fifo_file.as_str(), Mode::from_bits(0o622).unwrap())?;
@@ -754,8 +730,6 @@ impl BaseContainer for LinuxContainer {
 }
 let linux = spec.linux.as_ref().unwrap();

-let st = self.oci_state()?;
-
 let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;

 let _ = fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
@@ -885,6 +859,8 @@ impl BaseContainer for LinuxContainer {

 info!(logger, "child pid: {}", p.pid);

+let st = self.oci_state()?;
+
 join_namespaces(
 &logger,
 &spec,
@@ -931,7 +907,7 @@ impl BaseContainer for LinuxContainer {
 .join()
 .map_err(|e| warn!(logger, "joining log handler {:?}", e));
 info!(logger, "create process completed");
-return Ok(());
+Ok(())
 }

 fn run(&mut self, p: Process) -> Result<()> {
@@ -940,7 +916,7 @@ impl BaseContainer for LinuxContainer {

 if init {
 self.exec()?;
-self.status.transition(Status::RUNNING);
+self.status.transition(ContainerState::RUNNING);
 }

 Ok(())
@@ -962,12 +938,16 @@ impl BaseContainer for LinuxContainer {
 }
 }

-self.status.transition(Status::STOPPED);
+self.status.transition(ContainerState::STOPPED);
 nix::mount::umount2(
 spec.root.as_ref().unwrap().path.as_str(),
 MntFlags::MNT_DETACH,
 )?;
 fs::remove_dir_all(&self.root)?;
+
+if let Some(cgm) = self.cgroup_manager.as_mut() {
+cgm.destroy().context("destroy cgroups")?;
+}
 Ok(())
 }

@@ -994,7 +974,7 @@ impl BaseContainer for LinuxContainer {
 .unwrap()
 .as_secs();

-self.status.transition(Status::RUNNING);
+self.status.transition(ContainerState::RUNNING);
 unistd::close(fd)?;

 Ok(())
@@ -1056,7 +1036,7 @@ fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Resul
 TYPETONAME.get(namespace.r#type.as_str()).unwrap()
 );

-if namespace.path == "" {
+if namespace.path.is_empty() {
 namespace.path = ns_path;
 }
 }
@@ -1068,7 +1048,7 @@ fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Resul
 fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<Option<RawFd>> {
 for ns in &linux.namespaces {
 if ns.r#type == "pid" {
-if ns.path == "" {
+if ns.path.is_empty() {
 return Ok(None);
 }

@@ -1096,7 +1076,7 @@ fn is_userns_enabled(linux: &Linux) -> bool {
 linux
 .namespaces
 .iter()
-.any(|ns| ns.r#type == "user" && ns.path == "")
+.any(|ns| ns.r#type == "user" && ns.path.is_empty())
 }

 fn get_namespaces(linux: &Linux) -> Vec<LinuxNamespace> {
@@ -1164,11 +1144,9 @@ fn join_namespaces(
 }

 // apply cgroups
-if p.init {
-if res.is_some() {
-info!(logger, "apply cgroups!");
-cm.set(res.unwrap(), false)?;
-}
+if p.init && res.is_some() {
+info!(logger, "apply cgroups!");
+cm.set(res.unwrap(), false)?;
 }

 if res.is_some() {
@@ -1464,7 +1442,7 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
 }
 }

-return Ok(());
+Ok(())
 }

 ForkResult::Child => {
@@ -1567,13 +1545,11 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
 error
 }
 }
-} else {
-if let Ok(s) = rx.recv() {
-s
-} else {
-let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
--libc::EPIPE
-}
+} else if let Ok(s) = rx.recv() {
+s
+} else {
+let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
+-libc::EPIPE
 }
 };

@@ -1591,15 +1567,22 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
 #[cfg(test)]
 mod tests {
 use super::*;
+use tempfile::tempdir;
+
+macro_rules! sl {
+() => {
+slog_scope::logger()
+};
+}

 #[test]
 fn test_status_transition() {
 let mut status = ContainerStatus::new();
-let status_table: [Status; 4] = [
-Status::CREATED,
-Status::RUNNING,
-Status::PAUSED,
-Status::STOPPED,
+let status_table: [ContainerState; 4] = [
+ContainerState::CREATED,
+ContainerState::RUNNING,
+ContainerState::PAUSED,
+ContainerState::STOPPED,
 ];

 for s in status_table.iter() {
@@ -1609,4 +1592,96 @@ mod tests {
 assert_eq!(pre_status, status.pre_status());
 }
 }

+#[test]
+fn test_namespaces() {
+lazy_static::initialize(&NAMESPACES);
+assert_eq!(NAMESPACES.len(), 7);
+
+let ns = NAMESPACES.get("user");
+assert!(ns.is_some());
+
+let ns = NAMESPACES.get("ipc");
+assert!(ns.is_some());
+
+let ns = NAMESPACES.get("pid");
+assert!(ns.is_some());
+
+let ns = NAMESPACES.get("network");
+assert!(ns.is_some());
+
+let ns = NAMESPACES.get("mount");
+assert!(ns.is_some());
+
+let ns = NAMESPACES.get("uts");
+assert!(ns.is_some());
+
+let ns = NAMESPACES.get("cgroup");
+assert!(ns.is_some());
+}
+
+#[test]
+fn test_typetoname() {
+lazy_static::initialize(&TYPETONAME);
+assert_eq!(TYPETONAME.len(), 7);
+
+let ns = TYPETONAME.get("user");
+assert!(ns.is_some());
+
+let ns = TYPETONAME.get("ipc");
+assert!(ns.is_some());
+
+let ns = TYPETONAME.get("pid");
+assert!(ns.is_some());
+
+let ns = TYPETONAME.get("network");
+assert!(ns.is_some());
+
+let ns = TYPETONAME.get("mount");
+assert!(ns.is_some());
+
+let ns = TYPETONAME.get("uts");
+assert!(ns.is_some());
+
+let ns = TYPETONAME.get("cgroup");
+assert!(ns.is_some());
+}
+
+fn create_dummy_opts() -> CreateOpts {
+let mut root = oci::Root::default();
+root.path = "/tmp".to_string();
+
+let linux = Linux::default();
+let mut spec = Spec::default();
+spec.root = Some(root).into();
+spec.linux = Some(linux).into();
+
+CreateOpts {
+cgroup_name: "".to_string(),
+use_systemd_cgroup: false,
+no_pivot_root: false,
+no_new_keyring: false,
+spec: Some(spec),
+rootless_euid: false,
+rootless_cgroup: false,
+}
+}
+
+fn new_linux_container() -> (Result<LinuxContainer>, tempfile::TempDir) {
+// Create a temporary directory
+let dir = tempdir()
+.map_err(|e| anyhow!(e).context("tempdir failed"))
+.unwrap();
+
+// Create a new container
+(
+LinuxContainer::new(
+"some_id",
+&dir.path().join("rootfs").to_str().unwrap(),
+create_dummy_opts(),
+&slog_scope::logger(),
+),
+dir,
+)
+}
 }
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-use anyhow::{anyhow, bail, Context, Error, Result};
+use anyhow::{anyhow, bail, Context, Result};
 use libc::uid_t;
 use nix::errno::Errno;
 use nix::fcntl::{self, OFlag};
@@ -22,13 +22,11 @@ use std::os::unix::io::RawFd;
 use std::path::{Path, PathBuf};

 use path_absolutize::*;
-use scan_fmt;
 use std::fs::File;
 use std::io::{BufRead, BufReader};

 use crate::container::DEFAULT_DEVICES;
 use crate::sync::write_count;
-use lazy_static;
 use std::string::ToString;

 use crate::log_child;
@@ -50,7 +48,7 @@ pub struct Info {
 vfs_opts: String,
 }

-const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
+const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
 const PROC_PATH: &str = "/proc";

 // since libc doesn't define this const for musl, it is redefined here.
@@ -153,7 +151,7 @@ pub fn init_rootfs(
 let linux = &spec
 .linux
 .as_ref()
-.ok_or::<Error>(anyhow!("Could not get linux configuration from spec"))?;
+.ok_or_else(|| anyhow!("Could not get linux configuration from spec"))?;

 let mut flags = MsFlags::MS_REC;
 match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
@@ -164,14 +162,14 @@ pub fn init_rootfs(
 let root = spec
 .root
 .as_ref()
-.ok_or(anyhow!("Could not get rootfs path from spec"))
+.ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
 .and_then(|r| {
 fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
 })?;

 let rootfs = (*root)
 .to_str()
-.ok_or(anyhow!("Could not convert rootfs path to string"))?;
+.ok_or_else(|| anyhow!("Could not convert rootfs path to string"))?;

 mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;

@@ -188,7 +186,7 @@ pub fn init_rootfs(
 let mut bind_mount_dev = false;
 for m in &spec.mounts {
 let (mut flags, data) = parse_mount(&m);
-if !m.destination.starts_with("/") || m.destination.contains("..") {
+if !m.destination.starts_with('/') || m.destination.contains("..") {
 return Err(anyhow!(
 "the mount destination {} is invalid",
 m.destination
@@ -232,7 +230,7 @@ pub fn init_rootfs(
 if m.r#type == "bind" {
 for o in &m.options {
 if let Some(fl) = PROPAGATION.get(o.as_str()) {
-let dest = format!("{}{}", &rootfs, &m.destination);
+let dest = secure_join(rootfs, &m.destination);
 mount(None::<&str>, dest.as_str(), None::<&str>, *fl, None::<&str>)?;
 }
 }
@@ -282,9 +280,9 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
 // only allow a mount on top of proc if its source is "proc"
 unsafe {
 let mut stats = MaybeUninit::<libc::statfs>::uninit();
-if let Ok(_) = m
-.source
+if m.source
 .with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
+.is_ok()
 {
 if stats.assume_init().f_type == PROC_SUPER_MAGIC {
 return Ok(());
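A self-contained sketch of the same check (the helper name is illustrative; 0x9fa0 is PROC_SUPER_MAGIC from the kernel's linux/magic.h):

    use std::ffi::CStr;
    use std::mem::MaybeUninit;

    // true when `path` is backed by procfs
    fn is_procfs(path: &CStr) -> bool {
        unsafe {
            let mut stats = MaybeUninit::<libc::statfs>::uninit();
            libc::statfs(path.as_ptr(), stats.as_mut_ptr()) == 0
                && stats.assume_init().f_type as i64 == 0x9fa0
        }
    }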
@@ -307,7 +305,7 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
 )));
 }

-return Ok(());
+Ok(())
 }

 fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
@@ -595,15 +593,14 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
 let abs_root_buf = root_path.absolutize()?;
 let abs_root = abs_root_buf
 .to_str()
-.ok_or::<Error>(anyhow!("failed to parse {} to absolute path", rootfs))?;
+.ok_or_else(|| anyhow!("failed to parse {} to absolute path", rootfs))?;

 for info in mount_infos.iter() {
 let mount_point = Path::new(&info.mount_point);
 let abs_mount_buf = mount_point.absolutize()?;
-let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(anyhow!(
-"failed to parse {} to absolute path",
-info.mount_point
-))?;
+let abs_mount_point = abs_mount_buf
+.to_str()
+.ok_or_else(|| anyhow!("failed to parse {} to absolute path", info.mount_point))?;
 let abs_mount_point_string = String::from(abs_mount_point);

 // Unmount every sysfs and proc file system, except those under the container rootfs
@@ -675,6 +672,52 @@ fn parse_mount(m: &Mount) -> (MsFlags, String) {
 (flags, data.join(","))
 }

+// This function constructs a canonicalized path by combining the `rootfs` and `unsafe_path` elements.
+// The resulting path is guaranteed to be ("below" / "in a directory under") the `rootfs` directory.
+//
+// Parameters:
+//
+// - `rootfs` is the absolute path to the root of the container's root filesystem directory.
+// - `unsafe_path` is a path inside the container. It is unsafe since it may try to "escape" from the
+// container's rootfs via one or more "../" path elements, or may be a symlink to an arbitrary path.
+fn secure_join(rootfs: &str, unsafe_path: &str) -> String {
+let mut path = PathBuf::from(format!("{}/", rootfs));
+let unsafe_p = Path::new(&unsafe_path);
+
+for it in unsafe_p.iter() {
+let it_p = Path::new(&it);
+
+// if it_p starts with "/", path.push(it) would replace the whole path, so skip "/"
+if it_p.has_root() {
+continue;
+};
+
+path.push(it);
+if let Ok(v) = path.read_link() {
+if v.is_absolute() {
+path = PathBuf::from(format!("{}{}", rootfs, v.to_str().unwrap().to_string()));
+} else {
+path.pop();
+for it in v.iter() {
+path.push(it);
+if path.exists() {
+path = path.canonicalize().unwrap();
+if !path.starts_with(rootfs) {
+path = PathBuf::from(rootfs.to_string());
+}
+}
+}
+}
+}
+// skip any ".."
+if path.ends_with("..") {
+path.pop();
+}
+}
+
+path.to_str().unwrap().to_string()
+}
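A usage sketch matching the test table added further down: the join can never escape the rootfs, and ".." components are dropped.

    assert_eq!(secure_join("/home/rootfs", "a/b/c"), "/home/rootfs/a/b/c");
    assert_eq!(secure_join("/home/rootfs", "../../../a/b/c"), "/home/rootfs/a/b/c");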

 fn mount_from(
 cfd_log: RawFd,
 m: &Mount,
@@ -684,7 +727,7 @@ fn mount_from(
 _label: &str,
 ) -> Result<()> {
 let d = String::from(data);
-let dest = format!("{}{}", rootfs, &m.destination);
+let dest = secure_join(rootfs, &m.destination);

 let src = if m.r#type.as_str() == "bind" {
 let src = fs::canonicalize(m.source.as_str())?;
@@ -764,7 +807,7 @@ fn mount_from(
 Ok(())
 }

-static SYMLINKS: &'static [(&'static str, &'static str)] = &[
+static SYMLINKS: &[(&str, &str)] = &[
 ("/proc/self/fd", "dev/fd"),
 ("/proc/self/fd/0", "dev/stdin"),
 ("/proc/self/fd/1", "dev/stdout"),
@@ -897,7 +940,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
 }

 fn mask_path(path: &str) -> Result<()> {
-if !path.starts_with("/") || path.contains("..") {
+if !path.starts_with('/') || path.contains("..") {
 return Err(nix::Error::Sys(Errno::EINVAL).into());
 }

@@ -926,7 +969,7 @@ fn mask_path(path: &str) -> Result<()> {
 }

 fn readonly_path(path: &str) -> Result<()> {
-if !path.starts_with("/") || path.contains("..") {
+if !path.starts_with('/') || path.contains("..") {
 return Err(nix::Error::Sys(Errno::EINVAL).into());
 }

@@ -968,6 +1011,10 @@ fn readonly_path(path: &str) -> Result<()> {
 mod tests {
 use super::*;
 use crate::skip_if_not_root;
+use std::fs::create_dir;
+use std::fs::create_dir_all;
+use std::fs::remove_dir_all;
 use std::os::unix::fs;
 use std::os::unix::io::AsRawFd;
 use tempfile::tempdir;
@@ -997,7 +1044,7 @@ mod tests {
 );

 let rootfs = tempdir().unwrap();
-let ret = fs::create_dir(rootfs.path().join("dev"));
+let ret = create_dir(rootfs.path().join("dev"));
 assert!(ret.is_ok(), "Got: {:?}", ret);

 spec.root = Some(oci::Root {
@@ -1008,8 +1055,8 @@ mod tests {
 // there is no spec.mounts, but should pass
 let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
 assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
-let _ = fs::remove_dir_all(rootfs.path().join("dev"));
-let _ = fs::create_dir(rootfs.path().join("dev"));
+let _ = remove_dir_all(rootfs.path().join("dev"));
+let _ = create_dir(rootfs.path().join("dev"));

 // Adding bad mount point to spec.mounts
 spec.mounts.push(oci::Mount {
@@ -1027,8 +1074,8 @@ mod tests {
 ret
 );
 spec.mounts.pop();
-let _ = fs::remove_dir_all(rootfs.path().join("dev"));
-let _ = fs::create_dir(rootfs.path().join("dev"));
+let _ = remove_dir_all(rootfs.path().join("dev"));
+let _ = create_dir(rootfs.path().join("dev"));

 // mounting a cgroup
 spec.mounts.push(oci::Mount {
@@ -1041,8 +1088,8 @@ mod tests {
 let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
 assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
 spec.mounts.pop();
-let _ = fs::remove_dir_all(rootfs.path().join("dev"));
-let _ = fs::create_dir(rootfs.path().join("dev"));
+let _ = remove_dir_all(rootfs.path().join("dev"));
+let _ = create_dir(rootfs.path().join("dev"));

 // mounting /dev
 spec.mounts.push(oci::Mount {
@@ -1079,11 +1126,11 @@ mod tests {
 cgroup_mounts.insert("cpu".to_string(), "cpu".to_string());
 cgroup_mounts.insert("memory".to_string(), "memory".to_string());

-let ret = fs::create_dir_all(tempdir.path().join("cgroups"));
+let ret = create_dir_all(tempdir.path().join("cgroups"));
 assert!(ret.is_ok(), "Should pass. Got {:?}", ret);
-let ret = fs::create_dir_all(tempdir.path().join("cpu"));
+let ret = create_dir_all(tempdir.path().join("cpu"));
 assert!(ret.is_ok(), "Should pass. Got {:?}", ret);
-let ret = fs::create_dir_all(tempdir.path().join("memory"));
+let ret = create_dir_all(tempdir.path().join("memory"));
 assert!(ret.is_ok(), "Should pass. Got {:?}", ret);

 let ret = mount_cgroups(
@@ -1231,4 +1278,89 @@ mod tests {

 assert!(check_proc_mount(&mount).is_err());
 }

+#[test]
+fn test_secure_join() {
+#[derive(Debug)]
+struct TestData<'a> {
+name: &'a str,
+rootfs: &'a str,
+unsafe_path: &'a str,
+symlink_path: &'a str,
+result: &'a str,
+}
+
+// create a temporary directory to simulate a container rootfs with symlinks
+let rootfs_dir = tempdir().expect("failed to create tmpdir");
+let rootfs_path = rootfs_dir.path().to_str().unwrap();
+
+let tests = &[
+TestData {
+name: "rootfs_not_exist",
+rootfs: "/home/rootfs",
+unsafe_path: "a/b/c",
+symlink_path: "",
+result: "/home/rootfs/a/b/c",
+},
+TestData {
+name: "relative_path",
+rootfs: "/home/rootfs",
+unsafe_path: "../../../a/b/c",
+symlink_path: "",
+result: "/home/rootfs/a/b/c",
+},
+TestData {
+name: "skip any ..",
+rootfs: "/home/rootfs",
+unsafe_path: "../../../a/../../b/../../c",
+symlink_path: "",
+result: "/home/rootfs/a/b/c",
+},
+TestData {
+name: "rootfs is null",
+rootfs: "",
+unsafe_path: "",
+symlink_path: "",
+result: "/",
+},
+TestData {
+name: "relative softlink beyond container rootfs",
+rootfs: rootfs_path,
+unsafe_path: "1",
+symlink_path: "../../../",
+result: rootfs_path,
+},
+TestData {
+name: "abs softlink points to the non-exist directory",
+rootfs: rootfs_path,
+unsafe_path: "2",
+symlink_path: "/dddd",
+result: &format!("{}/dddd", rootfs_path).as_str().to_owned(),
+},
+TestData {
+name: "abs softlink points to the root",
+rootfs: rootfs_path,
+unsafe_path: "3",
+symlink_path: "/",
+result: &format!("{}/", rootfs_path).as_str().to_owned(),
+},
+];
+
+for (i, t) in tests.iter().enumerate() {
+// Create a string containing details of the test
+let msg = format!("test[{}]: {:?}", i, t);
+
+// if a symlink is involved, prepare the symlink environment first
+if t.symlink_path != "" {
+fs::symlink(t.symlink_path, format!("{}/{}", t.rootfs, t.unsafe_path)).unwrap();
+}
+let result = secure_join(t.rootfs, t.unsafe_path);
+
+// Update the test details string with the results of the call
+let msg = format!("{}, result: {:?}", msg, result);
+
+// Perform the checks
+assert!(result == t.result, msg);
+}
+}
 }
@@ -96,14 +96,14 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
 let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
 let msg: i32 = i32::from_be_bytes(buf_array);
 match msg {
-SYNC_SUCCESS => return Ok(Vec::new()),
+SYNC_SUCCESS => Ok(Vec::new()),
 SYNC_DATA => {
 let buf = read_count(fd, MSG_SIZE)?;
 let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
 let msg_length: i32 = i32::from_be_bytes(buf_array);
 let data_buf = read_count(fd, msg_length as usize)?;

-return Ok(data_buf);
+Ok(data_buf)
 }
 SYNC_FAILED => {
 let mut error_buf = vec![];
@@ -127,9 +127,9 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
 }
 };

-return Err(anyhow!(error_str));
+Err(anyhow!(error_str))
 }
-_ => return Err(anyhow!("error in receive sync message")),
+_ => Err(anyhow!("error in receive sync message")),
 }
 }
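The match above implies a simple framing on the sync pipe: a 4-byte big-endian tag, and, for data messages, a 4-byte big-endian length followed by the payload. A minimal sketch of the write side under that assumption (the function name is illustrative, not the agent's API):

    const MSG_SIZE: usize = 4;

    // frame a data message: [BE i32 tag][BE i32 length][payload]
    fn frame_data(tag: i32, payload: &[u8]) -> Vec<u8> {
        let mut buf = Vec::with_capacity(2 * MSG_SIZE + payload.len());
        buf.extend_from_slice(&tag.to_be_bytes());
        buf.extend_from_slice(&(payload.len() as i32).to_be_bytes());
        buf.extend_from_slice(payload);
        buf
    }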
@@ -5,13 +5,12 @@

 use crate::container::Config;
 use anyhow::{anyhow, Context, Result};
-use lazy_static;
 use nix::errno::Errno;
 use oci::{LinuxIDMapping, LinuxNamespace, Spec};
 use std::collections::HashMap;
 use std::path::{Component, PathBuf};

-fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
+fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
 for ns in nses {
 if ns.r#type.as_str() == key {
 return true;
@@ -21,7 +20,7 @@ fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
 false
 }

-fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> {
+fn get_namespace_path(nses: &[LinuxNamespace], key: &str) -> Result<String> {
 for ns in nses {
 if ns.r#type.as_str() == key {
 return Ok(ns.path.clone());
@@ -41,10 +40,8 @@ fn rootfs(root: &str) -> Result<()> {
 // symbolic link? ..?
 let mut stack: Vec<String> = Vec::new();
 for c in path.components() {
-if stack.is_empty() {
-if c == Component::RootDir || c == Component::ParentDir {
-continue;
-}
+if stack.is_empty() && (c == Component::RootDir || c == Component::ParentDir) {
+continue;
 }

 if c == Component::ParentDir {
@@ -73,12 +70,8 @@ fn rootfs(root: &str) -> Result<()> {
 Ok(())
 }

-fn network(_oci: &Spec) -> Result<()> {
-Ok(())
-}
-
 fn hostname(oci: &Spec) -> Result<()> {
-if oci.hostname.is_empty() || oci.hostname == "".to_string() {
+if oci.hostname.is_empty() {
 return Ok(());
 }
@@ -111,7 +104,7 @@ fn security(oci: &Spec) -> Result<()> {
 Ok(())
 }

-fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
+fn idmapping(maps: &[LinuxIDMapping]) -> Result<()> {
 for map in maps {
 if map.size > 0 {
 return Ok(());
@@ -137,7 +130,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
 idmapping(&linux.gid_mappings).context("idmapping gid")?;
 } else {
 // no user namespace but idmap
-if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
+if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
 return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
 }
 }
@@ -242,7 +235,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
 return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
 }

-if linux.uid_mappings.len() == 0 || linux.gid_mappings.len() == 0 {
+if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
 // rootless containers require at least one UID/GID mapping
 return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
 }
@@ -250,7 +243,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
 Ok(())
 }

-fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool {
+fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
 for map in maps {
 if id >= map.container_id && id < map.container_id + map.size {
 return true;
@@ -279,16 +272,12 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
 .parse::<u32>()
 .context(format!("parse field {}", &fields[1]))?;

-if opt.starts_with("uid=") {
-if !has_idmapping(&linux.uid_mappings, id) {
-return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
-}
+if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
+return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
 }

-if opt.starts_with("gid=") {
-if !has_idmapping(&linux.gid_mappings, id) {
-return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
-}
+if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
+return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
 }
 }
 }
@@ -319,7 +308,6 @@ pub fn validate(conf: &Config) -> Result<()> {
 };

 rootfs(root).context("rootfs")?;
-network(oci).context("network")?;
 hostname(oci).context("hostname")?;
 security(oci).context("security")?;
 usernamespace(oci).context("usernamespace")?;
@@ -144,7 +144,7 @@ impl agentConfig {
 }

 fn get_vsock_port(p: &str) -> Result<i32> {
-let fields: Vec<&str> = p.split("=").collect();
+let fields: Vec<&str> = p.split('=').collect();
 if fields.len() != 2 {
 return Err(anyhow!("invalid port parameter"));
 }
@@ -180,7 +180,7 @@ fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
 }

 fn get_log_level(param: &str) -> Result<slog::Level> {
-let fields: Vec<&str> = param.split("=").collect();
+let fields: Vec<&str> = param.split('=').collect();

 if fields.len() != 2 {
 return Err(anyhow!("invalid log level parameter"));
@@ -194,7 +194,7 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
 }

 fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
-let fields: Vec<&str> = param.split("=").collect();
+let fields: Vec<&str> = param.split('=').collect();

 if fields.len() != 2 {
 return Err(anyhow!("invalid hotplug timeout parameter"));
@@ -214,7 +214,7 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
 }

 fn get_bool_value(param: &str) -> Result<bool> {
-let fields: Vec<&str> = param.split("=").collect();
+let fields: Vec<&str> = param.split('=').collect();

 if fields.len() != 2 {
 return Ok(false);
@@ -225,18 +225,14 @@ fn get_bool_value(param: &str) -> Result<bool> {
 // first try to parse as bool value
 v.parse::<bool>().or_else(|_err1| {
 // then try to parse as integer value
-v.parse::<u64>().or_else(|_err2| Ok(0)).and_then(|v| {
-// only `0` returns false, otherwise returns true
-Ok(match v {
-0 => false,
-_ => true,
-})
-})
+v.parse::<u64>()
+.or_else(|_err2| Ok(0))
+.map(|v| !matches!(v, 0))
 })
 }
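The parsing rule above: booleans win, then integers where only 0 means false, and anything unparseable falls back to false. Illustrative expectations (using the `debug_console` parameter seen in the tests below):

    assert_eq!(get_bool_value("debug_console=true").unwrap(), true);
    assert_eq!(get_bool_value("debug_console=1").unwrap(), true);
    assert_eq!(get_bool_value("debug_console=0").unwrap(), false);
    assert_eq!(get_bool_value("debug_console=garbage").unwrap(), false);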
|
||||

 fn get_container_pipe_size(param: &str) -> Result<i32> {
-    let fields: Vec<&str> = param.split("=").collect();
+    let fields: Vec<&str> = param.split('=').collect();

     if fields.len() != 2 {
         return Err(anyhow!("invalid container pipe size parameter"));
@@ -634,10 +630,10 @@ mod tests {
             let filename = file_path.to_str().expect("failed to create filename");

             let mut file =
-                File::create(filename).expect(&format!("{}: failed to create file", msg));
+                File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));

             file.write_all(d.contents.as_bytes())
-                .expect(&format!("{}: failed to write file contents", msg));
+                .unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));

             let mut config = agentConfig::new();
             assert_eq!(config.debug_console, false, "{}", msg);
@@ -737,7 +733,7 @@ mod tests {

             let msg = format!("{}: result: {:?}", msg, result);

-            assert_result!(d.result, result, format!("{}", msg));
+            assert_result!(d.result, result, msg);
         }
     }

@@ -831,7 +827,7 @@ mod tests {

             let msg = format!("{}: result: {:?}", msg, result);

-            assert_result!(d.result, result, format!("{}", msg));
+            assert_result!(d.result, result, msg);
         }
     }

@@ -901,7 +897,7 @@ mod tests {

             let msg = format!("{}: result: {:?}", msg, result);

-            assert_result!(d.result, result, format!("{}", msg));
+            assert_result!(d.result, result, msg);
         }
     }

@@ -975,7 +971,7 @@ mod tests {

             let msg = format!("{}: result: {:?}", msg, result);

-            assert_result!(d.result, result, format!("{}", msg));
+            assert_result!(d.result, result, msg);
         }
     }
 }
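
The test hunks above are the clippy::expect_fun_call fix: `expect(&format!(..))` builds its message even on the success path, while `unwrap_or_else(|_| panic!(..))` formats only on failure. A minimal sketch, with an illustrative file name:

use std::fs::File;

fn main() {
    let name = "/tmp/example-file";

    // Eager: the format! would run whether or not the call fails.
    // let f = File::create(name).expect(&format!("{}: create failed", name));

    // Lazy: the message is only formatted in the error case.
    let f = File::create(name).unwrap_or_else(|_| panic!("{}: create failed", name));
    drop(f);
}
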
@@ -38,8 +38,8 @@ struct DevIndex(HashMap<String, DevIndexEntry>);
 // DeviceHandler is the type of callback to be defined to handle every type of device driver.
 type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>, &DevIndex) -> Result<()>;

-// DeviceHandlerList lists the supported drivers.
-#[cfg_attr(rustfmt, rustfmt_skip)]
+// DEVICEHANDLERLIST lists the supported drivers.
+#[rustfmt::skip]
 lazy_static! {
     static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
         let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new();
@@ -65,7 +65,7 @@ pub fn online_device(path: &str) -> Result<()> {
 // Here, bridgeAddr is the address at which the bridge is attached on the root bus,
 // while deviceAddr is the address at which the device is attached on the bridge.
 fn get_pci_device_address(pci_id: &str) -> Result<String> {
-    let tokens: Vec<&str> = pci_id.split("/").collect();
+    let tokens: Vec<&str> = pci_id.split('/').collect();

     if tokens.len() != 2 {
         return Err(anyhow!(
@@ -165,7 +165,7 @@ pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Resul

 /// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
 fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
-    let tokens: Vec<&str> = scsi_addr.split(":").collect();
+    let tokens: Vec<&str> = scsi_addr.split(':').collect();
     if tokens.len() != 2 {
         return Err(anyhow!(
             "Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
@@ -204,7 +204,7 @@ fn update_spec_device_list(device: &Device, spec: &mut Spec, devidx: &DevIndex)

     // If no container_path is provided, we won't be able to match and
     // update the device in the OCI spec device list. This is an error.
-    if device.container_path == "" {
+    if device.container_path.is_empty() {
         return Err(anyhow!(
             "container_path cannot empty for device {:?}",
             device
@@ -280,7 +280,7 @@ fn virtiommio_blk_device_handler(
     _sandbox: &Arc<Mutex<Sandbox>>,
     devidx: &DevIndex,
 ) -> Result<()> {
-    if device.vm_path == "" {
+    if device.vm_path.is_empty() {
         return Err(anyhow!("Invalid path for virtio mmio blk device"));
     }

@@ -300,7 +300,7 @@ fn virtio_blk_device_handler(

     // When "Id (PCIAddr)" is not set, we allow to use the predicted "VmPath" passed from kata-runtime
     // Note this is a special code path for cloud-hypervisor when BDF information is not available
-    if device.id != "" {
+    if !device.id.is_empty() {
         dev.vm_path = get_pci_device_name(sandbox, &device.id)?;
     }

@@ -325,7 +325,7 @@ fn virtio_nvdimm_device_handler(
     _sandbox: &Arc<Mutex<Sandbox>>,
     devidx: &DevIndex,
 ) -> Result<()> {
-    if device.vm_path == "" {
+    if device.vm_path.is_empty() {
         return Err(anyhow!("Invalid path for nvdimm device"));
     }

@@ -336,11 +336,11 @@ impl DevIndex {
     fn new(spec: &Spec) -> DevIndex {
         let mut map = HashMap::new();

-        for linux in spec.linux.as_ref() {
+        if let Some(linux) = spec.linux.as_ref() {
             for (i, d) in linux.devices.iter().enumerate() {
                 let mut residx = Vec::new();

-                for linuxres in linux.resources.as_ref() {
+                if let Some(linuxres) = linux.resources.as_ref() {
                     for (j, r) in linuxres.devices.iter().enumerate() {
                         if r.r#type == d.r#type
                             && r.major == Some(d.major)
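
The DevIndex hunk rewrites `for x in option.as_ref()` as `if let Some(x)`: Option is IntoIterator, so the loop compiles, but it only ever runs zero or one time and the conditional states that intent directly. A short sketch with illustrative names:

fn main() {
    let linux: Option<Vec<&str>> = Some(vec!["dev0", "dev1"]);

    // Before: a "loop" that runs at most once.
    for devices in linux.as_ref() {
        println!("found {} devices", devices.len());
    }

    // After: the same control flow, written as a conditional.
    if let Some(devices) = linux.as_ref() {
        println!("found {} devices", devices.len());
    }
}
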
@@ -381,15 +381,15 @@ fn add_device(
     info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}",
           device.id, device.field_type, device.vm_path, device.container_path, device.options);

-    if device.field_type == "" {
+    if device.field_type.is_empty() {
         return Err(anyhow!("invalid type for device {:?}", device));
     }

-    if device.id == "" && device.vm_path == "" {
+    if device.id.is_empty() && device.vm_path.is_empty() {
         return Err(anyhow!("invalid ID and VM path for device {:?}", device));
     }

-    if device.container_path == "" {
+    if device.container_path.is_empty() {
         return Err(anyhow!("invalid container path for device {:?}", device));
     }

@@ -436,9 +436,10 @@ mod tests {

     #[test]
     fn test_update_device_cgroup() {
-        let mut spec = Spec::default();
-
-        spec.linux = Some(Linux::default());
+        let mut spec = Spec {
+            linux: Some(Linux::default()),
+            ..Default::default()
+        };

         update_device_cgroup(&mut spec).unwrap();
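
The test hunk switches from mutating a `Default` value to struct-update syntax (clippy::field_reassign_with_default): build the value in one expression and take the remaining fields from `Default`. A sketch with a stand-in Spec type, not the OCI one:

#[derive(Default, Debug)]
struct Spec {
    linux: Option<String>,
    hostname: String,
}

fn main() {
    // Before:
    // let mut spec = Spec::default();
    // spec.linux = Some("config".to_string());

    // After: one initializer, remaining fields from Default.
    let spec = Spec {
        linux: Some("config".to_string()),
        ..Default::default()
    };
    println!("{:?}", spec);
}
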
@@ -246,8 +246,8 @@ fn start_sandbox(logger: &Logger, config: &agentConfig, init_mode: bool) -> Resu
     let (tx, rx) = mpsc::channel::<i32>();
     sandbox.lock().unwrap().sender = Some(tx);

-    //vsock:///dev/vsock, port
-    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str());
+    // vsock:///dev/vsock, port
+    let mut server = rpc::start(sandbox, config.server_addr.as_str());

     let _ = server.start().unwrap();

@@ -272,8 +272,6 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result

     let signals = Signals::new(&[SIGCHLD])?;

-    let s = sandbox.clone();
-
     thread::spawn(move || {
         'outer: for sig in signals.forever() {
             info!(logger, "received signal"; "signal" => sig);
@@ -303,13 +301,13 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
             };

             let pid = wait_status.pid();
-            if pid.is_some() {
-                let raw_pid = pid.unwrap().as_raw();
+            if let Some(pid) = pid {
+                let raw_pid = pid.as_raw();
                 let child_pid = format!("{}", raw_pid);

                 let logger = logger.new(o!("child-pid" => child_pid));

-                let mut sandbox = s.lock().unwrap();
+                let mut sandbox = sandbox.lock().unwrap();
                 let process = sandbox.find_process(raw_pid);
                 if process.is_none() {
                     info!(logger, "child exited unexpectedly");
@@ -366,7 +364,8 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result

     env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");

-    let contents = std::fs::read_to_string("/etc/hostname").unwrap_or(String::from("localhost"));
+    let contents =
+        std::fs::read_to_string("/etc/hostname").unwrap_or_else(|_| String::from("localhost"));
     let contents_array: Vec<&str> = contents.split(' ').collect();
     let hostname = contents_array[0].trim();

@@ -481,8 +480,8 @@ where

     // write and return
     match writer.write_all(&buf[..buf_len]) {
-        Ok(_) => return Ok(buf_len as u64),
-        Err(err) => return Err(err),
+        Ok(_) => Ok(buf_len as u64),
+        Err(err) => Err(err),
     }
 }
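
The /etc/hostname hunk is the clippy::or_fun_call fix: `unwrap_or(String::from(..))` allocates the fallback string even when the read succeeds, while `unwrap_or_else` defers it to the error path. A minimal sketch of the same read:

fn main() {
    let contents = std::fs::read_to_string("/etc/hostname")
        .unwrap_or_else(|_| String::from("localhost"));

    let hostname = contents.split(' ').next().unwrap_or("").trim();
    println!("hostname: {}", hostname);
}
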
@@ -8,7 +8,6 @@ extern crate procfs;
 use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};

 use anyhow::Result;
-use protocols;

 const NAMESPACE_KATA_AGENT: &str = "kata_agent";
 const NAMESPACE_KATA_GUEST: &str = "kata_guest";
@@ -85,17 +84,15 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
     let encoder = TextEncoder::new();
     encoder.encode(&metric_families, &mut buffer).unwrap();

-    Ok(String::from_utf8(buffer.clone()).unwrap())
+    Ok(String::from_utf8(buffer).unwrap())
 }

 fn update_agent_metrics() {
     let me = procfs::process::Process::myself();
-    match me {
-        Err(err) => {
-            error!(sl!(), "failed to create process instance: {:?}", err);
-            return;
-        }
-        Ok(_) => {}
+
+    if let Err(err) = me {
+        error!(sl!(), "failed to create process instance: {:?}", err);
+        return;
     }

     let me = me.unwrap();
|
||||
use std::ffi::CString;
|
||||
use std::fs;
|
||||
use std::io;
|
||||
use std::iter::FromIterator;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
|
||||
use std::path::Path;
|
||||
@@ -39,7 +38,7 @@ pub const DRIVERLOCALTYPE: &str = "local";
|
||||
|
||||
pub const TYPEROOTFS: &str = "rootfs";
|
||||
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
#[rustfmt::skip]
|
||||
lazy_static! {
|
||||
pub static ref FLAGS: HashMap<&'static str, (bool, MsFlags)> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -88,7 +87,7 @@ pub struct INIT_MOUNT {
|
||||
options: Vec<&'static str>,
|
||||
}
|
||||
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
#[rustfmt::skip]
|
||||
lazy_static!{
|
||||
static ref CGROUPS: HashMap<&'static str, &'static str> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -109,7 +108,7 @@ lazy_static!{
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
#[rustfmt::skip]
|
||||
lazy_static! {
|
||||
pub static ref INIT_ROOTFS_MOUNTS: Vec<INIT_MOUNT> = vec![
|
||||
INIT_MOUNT{fstype: "proc", src: "proc", dest: "/proc", options: vec!["nosuid", "nodev", "noexec"]},
|
||||
@@ -126,7 +125,7 @@ lazy_static! {
|
||||
type StorageHandler = fn(&Logger, &Storage, Arc<Mutex<Sandbox>>) -> Result<String>;
|
||||
|
||||
// STORAGEHANDLERLIST lists the supported drivers.
|
||||
#[cfg_attr(rustfmt, rustfmt_skip)]
|
||||
#[rustfmt::skip]
|
||||
lazy_static! {
|
||||
pub static ref STORAGEHANDLERLIST: HashMap<&'static str, StorageHandler> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -173,9 +172,9 @@ impl<'a> BareMount<'a> {
|
||||
BareMount {
|
||||
source: s,
|
||||
destination: d,
|
||||
fs_type: fs_type,
|
||||
flags: flags,
|
||||
options: options,
|
||||
fs_type,
|
||||
flags,
|
||||
options,
|
||||
logger: logger.new(o!("subsystem" => "baremount")),
|
||||
}
|
||||
}
|
||||
@@ -190,11 +189,11 @@ impl<'a> BareMount<'a> {
|
||||
let cstr_dest: CString;
|
||||
let cstr_fs_type: CString;
|
||||
|
||||
if self.source.len() == 0 {
|
||||
if self.source.is_empty() {
|
||||
return Err(anyhow!("need mount source"));
|
||||
}
|
||||
|
||||
if self.destination.len() == 0 {
|
||||
if self.destination.is_empty() {
|
||||
return Err(anyhow!("need mount destination"));
|
||||
}
|
||||
|
||||
@@ -204,14 +203,14 @@ impl<'a> BareMount<'a> {
|
||||
cstr_dest = CString::new(self.destination)?;
|
||||
dest = cstr_dest.as_ptr();
|
||||
|
||||
if self.fs_type.len() == 0 {
|
||||
if self.fs_type.is_empty() {
|
||||
return Err(anyhow!("need mount FS type"));
|
||||
}
|
||||
|
||||
cstr_fs_type = CString::new(self.fs_type)?;
|
||||
fs_type = cstr_fs_type.as_ptr();
|
||||
|
||||
if self.options.len() > 0 {
|
||||
if !self.options.is_empty() {
|
||||
cstr_options = CString::new(self.options)?;
|
||||
options = cstr_options.as_ptr() as *const c_void;
|
||||
}
|
||||
@@ -243,8 +242,7 @@ fn ephemeral_storage_handler(
|
||||
storage: &Storage,
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
) -> Result<String> {
|
||||
let s = sandbox.clone();
|
||||
let mut sb = s.lock().unwrap();
|
||||
let mut sb = sandbox.lock().unwrap();
|
||||
let new_storage = sb.set_sandbox_storage(&storage.mount_point);
|
||||
|
||||
if !new_storage {
|
||||
@@ -262,8 +260,7 @@ fn local_storage_handler(
|
||||
storage: &Storage,
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
) -> Result<String> {
|
||||
let s = sandbox.clone();
|
||||
let mut sb = s.lock().unwrap();
|
||||
let mut sb = sandbox.lock().unwrap();
|
||||
let new_storage = sb.set_sandbox_storage(&storage.mount_point);
|
||||
|
||||
if !new_storage {
|
||||
@@ -279,8 +276,7 @@ fn local_storage_handler(
|
||||
|
||||
let opts = parse_options(opts_vec);
|
||||
let mode = opts.get("mode");
|
||||
if mode.is_some() {
|
||||
let mode = mode.unwrap();
|
||||
if let Some(mode) = mode {
|
||||
let mut permission = fs::metadata(&storage.mount_point)?.permissions();
|
||||
|
||||
let o_mode = u32::from_str_radix(mode, 8)?;
|
||||
@@ -383,7 +379,7 @@ fn mount_storage(logger: &Logger, storage: &Storage) -> Result<()> {
|
||||
}
|
||||
|
||||
let options_vec = storage.options.to_vec();
|
||||
let options_vec = Vec::from_iter(options_vec.iter().map(String::as_str));
|
||||
let options_vec = options_vec.iter().map(String::as_str).collect();
|
||||
let (flags, options) = parse_mount_flags_and_options(options_vec);
|
||||
|
||||
info!(logger, "mounting storage";
|
||||
@@ -410,17 +406,17 @@ fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (MsFlags, String) {
|
||||
let mut options: String = "".to_string();
|
||||
|
||||
for opt in options_vec {
|
||||
if opt.len() != 0 {
|
||||
if !opt.is_empty() {
|
||||
match FLAGS.get(opt) {
|
||||
Some(x) => {
|
||||
let (_, f) = *x;
|
||||
flags = flags | f;
|
||||
flags |= f;
|
||||
}
|
||||
None => {
|
||||
if options.len() > 0 {
|
||||
if !options.is_empty() {
|
||||
options.push_str(format!(",{}", opt).as_str());
|
||||
} else {
|
||||
options.push_str(format!("{}", opt).as_str());
|
||||
options.push_str(opt.to_string().as_str());
|
||||
}
|
||||
}
|
||||
};
|
||||
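
The parse_mount_flags_and_options hunk combines two small cleanups: `flags = flags | f` becomes the compound assignment `flags |= f`, and length comparisons become is_empty(). A toy version with u32 standing in for nix::mount::MsFlags:

use std::collections::HashMap;

fn parse_flags(options: Vec<&str>, table: &HashMap<&str, u32>) -> (u32, String) {
    let mut flags = 0u32;
    let mut rest = String::new();

    for opt in options {
        if !opt.is_empty() {
            match table.get(opt) {
                // Accumulate recognized mount flags.
                Some(f) => flags |= f,
                // Everything else becomes a comma-joined option string.
                None => {
                    if !rest.is_empty() {
                        rest.push(',');
                    }
                    rest.push_str(opt);
                }
            }
        }
    }

    (flags, rest)
}

fn main() {
    let mut table = HashMap::new();
    table.insert("ro", 1u32);
    table.insert("nosuid", 2u32);

    let (flags, rest) = parse_flags(vec!["ro", "nosuid", "mode=0755"], &table);
    assert_eq!(flags, 3);
    assert_eq!(rest, "mode=0755");
}
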
@@ -458,7 +454,7 @@ pub fn add_storages(
         // Todo need to rollback the mounted storage if err met.
         let mount_point = handler(&logger, &storage, sandbox.clone())?;

-        if mount_point.len() > 0 {
+        if !mount_point.is_empty() {
             mount_list.push(mount_point);
         }
     }
@@ -509,7 +505,7 @@ pub fn get_mount_fs_type(mount_point: &str) -> Result<String> {
 // get_mount_fs_type_from_file returns the FS type corresponding to the passed mount point and
 // any error ecountered.
 pub fn get_mount_fs_type_from_file(mount_file: &str, mount_point: &str) -> Result<String> {
-    if mount_point == "" {
+    if mount_point.is_empty() {
         return Err(anyhow!("Invalid mount point {}", mount_point));
     }

@@ -570,10 +566,10 @@ pub fn get_cgroup_mounts(
     'outer: for (_, line) in reader.lines().enumerate() {
         let line = line?;

-        let fields: Vec<&str> = line.split("\t").collect();
+        let fields: Vec<&str> = line.split('\t').collect();

         // Ignore comment header
-        if fields[0].starts_with("#") {
+        if fields[0].starts_with('#') {
             continue;
         }

@@ -594,7 +590,7 @@ pub fn get_cgroup_mounts(
             }
         }

-        if fields[0] == "" {
+        if fields[0].is_empty() {
             continue;
         }

@@ -643,7 +639,7 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
     Ok(())
 }

-pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
+pub fn remove_mounts(mounts: &[String]) -> Result<()> {
     for m in mounts.iter() {
         mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
     }
@@ -675,7 +671,7 @@ fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
 fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
     let mut options = HashMap::new();
     for opt in option_list.iter() {
-        let fields: Vec<&str> = opt.split("=").collect();
+        let fields: Vec<&str> = opt.split('=').collect();
         if fields.len() != 2 {
             continue;
         }
@@ -801,7 +797,7 @@ mod tests {
         let src_filename: String;
         let dest_filename: String;

-        if d.src != "" {
+        if !d.src.is_empty() {
             src = dir.path().join(d.src.to_string());
             src_filename = src
                 .to_str()
@@ -811,7 +807,7 @@ mod tests {
             src_filename = "".to_owned();
         }

-        if d.dest != "" {
+        if !d.dest.is_empty() {
             dest = dir.path().join(d.dest.to_string());
             dest_filename = dest
                 .to_str()
@@ -823,7 +819,7 @@ mod tests {

         // Create the mount directories
         for d in [src_filename.clone(), dest_filename.clone()].iter() {
-            if d == "" {
+            if d.is_empty() {
                 continue;
             }

@@ -843,7 +839,7 @@ mod tests {

         let msg = format!("{}: result: {:?}", msg, result);

-        if d.error_contains == "" {
+        if d.error_contains.is_empty() {
             assert!(result.is_ok(), msg);

             // Cleanup
@@ -856,7 +852,7 @@ mod tests {

             let msg = format!("{}: umount result: {:?}", msg, result);

-            assert!(ret == 0, format!("{}", msg));
+            assert!(ret == 0, msg);
         };

         continue;
@@ -914,7 +910,8 @@ mod tests {
             .expect("failed to create mount destination filename");

         for d in [test_dir_filename, mnt_src_filename, mnt_dest_filename].iter() {
-            std::fs::create_dir_all(d).expect(&format!("failed to create directory {}", d));
+            std::fs::create_dir_all(d)
+                .unwrap_or_else(|_| panic!("failed to create directory {}", d));
         }

         // Create an actual mount
@@ -960,7 +957,7 @@ mod tests {

         let msg = format!("{}: result: {:?}", msg, result);

-        if d.error_contains == "" {
+        if d.error_contains.is_empty() {
             assert!(result.is_ok(), msg);
             continue;
         }
@@ -1055,20 +1052,20 @@ mod tests {

             let filename = file_path
                 .to_str()
-                .expect(&format!("{}: failed to create filename", msg));
+                .unwrap_or_else(|| panic!("{}: failed to create filename", msg));

             let mut file =
-                File::create(filename).expect(&format!("{}: failed to create file", msg));
+                File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));

             file.write_all(d.contents.as_bytes())
-                .expect(&format!("{}: failed to write file contents", msg));
+                .unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));

             let result = get_mount_fs_type_from_file(filename, d.mount_point);

             // add more details if an assertion fails
             let msg = format!("{}: result: {:?}", msg, result);

-            if d.error_contains == "" {
+            if d.error_contains.is_empty() {
                 let fs_type = result.unwrap();

                 assert!(d.fs_type == fs_type, msg);
@@ -1217,15 +1214,15 @@ mod tests {
             .expect("failed to create cgroup file filename");

         let mut file =
-            File::create(filename).expect(&format!("{}: failed to create file", msg));
+            File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));

         file.write_all(d.contents.as_bytes())
-            .expect(&format!("{}: failed to write file contents", msg));
+            .unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));

         let result = get_cgroup_mounts(&logger, filename, false);
         let msg = format!("{}: result: {:?}", msg, result);

-        if d.error_contains != "" {
+        if !d.error_contains.is_empty() {
             assert!(result.is_err(), msg);

             let error_msg = format!("{}", result.unwrap_err());
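
The remove_mounts signature change above is the clippy::ptr_arg fix: a `&[String]` slice parameter accepts a Vec, an array, or a slice, without forcing callers to own a Vec. A sketch (the umount call is replaced with a print so it runs anywhere):

fn remove_mounts(mounts: &[String]) -> Result<(), String> {
    for m in mounts {
        println!("would umount {:?}", m);
    }
    Ok(())
}

fn main() {
    let v = vec!["/run/a".to_string(), "/run/b".to_string()];
    remove_mounts(&v).unwrap(); // &Vec<String> coerces to &[String]
    remove_mounts(&["/run/c".to_string()]).unwrap(); // arrays work too
}
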
@@ -51,20 +51,20 @@ impl Namespace {
         }
     }

-    pub fn as_ipc(mut self) -> Self {
+    pub fn get_ipc(mut self) -> Self {
         self.ns_type = NamespaceType::IPC;
         self
     }

-    pub fn as_uts(mut self, hostname: &str) -> Self {
+    pub fn get_uts(mut self, hostname: &str) -> Self {
         self.ns_type = NamespaceType::UTS;
-        if hostname != "" {
+        if !hostname.is_empty() {
             self.hostname = Some(String::from(hostname));
         }
         self
     }

-    pub fn as_pid(mut self) -> Self {
+    pub fn get_pid(mut self) -> Self {
         self.ns_type = NamespaceType::PID;
         self
     }
@@ -99,7 +99,7 @@ impl Namespace {
             File::open(Path::new(&origin_ns_path))?;

             // Create a new netns on the current thread.
-            let cf = ns_type.get_flags().clone();
+            let cf = ns_type.get_flags();

             unshare(cf)?;

@@ -112,12 +112,9 @@ impl Namespace {

             let mut flags = MsFlags::empty();

-            match FLAGS.get("rbind") {
-                Some(x) => {
-                    let (_, f) = *x;
-                    flags = flags | f;
-                }
-                None => (),
+            if let Some(x) = FLAGS.get("rbind") {
+                let (_, f) = *x;
+                flags |= f;
             };

             let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
@@ -196,30 +193,30 @@ mod tests {
         let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();

         let ns_ipc = Namespace::new(&logger)
-            .as_ipc()
+            .get_ipc()
             .set_root_dir(tmpdir.path().to_str().unwrap())
             .setup();

         assert!(ns_ipc.is_ok());
-        assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok());
+        assert!(remove_mounts(&[ns_ipc.unwrap().path]).is_ok());

         let logger = slog::Logger::root(slog::Discard, o!());
         let tmpdir = Builder::new().prefix("uts").tempdir().unwrap();

         let ns_uts = Namespace::new(&logger)
-            .as_uts("test_hostname")
+            .get_uts("test_hostname")
             .set_root_dir(tmpdir.path().to_str().unwrap())
             .setup();

         assert!(ns_uts.is_ok());
-        assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok());
+        assert!(remove_mounts(&[ns_uts.unwrap().path]).is_ok());

         // Check it cannot persist pid namespaces.
         let logger = slog::Logger::root(slog::Discard, o!());
         let tmpdir = Builder::new().prefix("pid").tempdir().unwrap();

         let ns_pid = Namespace::new(&logger)
-            .as_pid()
+            .get_pid()
             .set_root_dir(tmpdir.path().to_str().unwrap())
             .setup();
src/agent/src/netlink.rs (new file, 1028 lines): diff suppressed because it is too large.
@@ -48,7 +48,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
 fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
     let logger = logger.new(o!( "subsystem" => "network"));

-    if dns_list.len() == 0 {
+    if dns_list.is_empty() {
         info!(
             logger,
             "Did not set sandbox DNS as DNS not received as part of request."
@@ -117,12 +117,12 @@ mod tests {
         ];

         // write to /run/kata-containers/sandbox/resolv.conf
-        let mut src_file =
-            File::create(src_filename).expect(&format!("failed to create file {:?}", src_filename));
+        let mut src_file = File::create(src_filename)
+            .unwrap_or_else(|_| panic!("failed to create file {:?}", src_filename));
         let content = dns.join("\n");
         src_file
             .write_all(content.as_bytes())
-            .expect(&format!("failed to write file contents"));
+            .expect("failed to write file contents");

         // call do_setup_guest_dns
         let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
@@ -4,7 +4,6 @@
 //

 use anyhow::Result;
-use libc;
 use nix::errno::Errno;
 use nix::fcntl::{self, OFlag};
 use nix::sys::stat::Mode;
@@ -6,7 +6,7 @@
 use std::path::Path;
 use std::sync::mpsc::channel;
 use std::sync::{Arc, Mutex};
-use ttrpc;
+use ttrpc::{self, error::get_rpc_status as ttrpc_error};

 use anyhow::{anyhow, Context, Result};
 use oci::{LinuxNamespace, Root, Spec};
@@ -21,7 +21,6 @@ use protocols::health::{
     HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
 };
 use protocols::types::Interface;
-use rustjail;
 use rustjail::cgroups::notifier;
 use rustjail::container::{BaseContainer, Container, LinuxContainer};
 use rustjail::process::Process;
@@ -47,7 +46,6 @@ use crate::AGENT_CONFIG;
 use netlink::{RtnlHandle, NETLINK_ROUTE};

 use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
-use serde_json;
 use std::convert::TryFrom;
 use std::fs;
 use std::os::unix::io::RawFd;
@@ -152,14 +150,13 @@ impl agentService {

         let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
         let p = if oci.process.is_some() {
-            let tp = Process::new(
+            Process::new(
                 &sl!(),
                 &oci.process.as_ref().unwrap(),
                 cid.as_str(),
                 true,
                 pipe_size,
-            )?;
-            tp
+            )?
         } else {
             info!(sl!(), "no process configurations!");
             return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
@@ -175,7 +172,7 @@ impl agentService {
     }

     fn do_start_container(&self, req: protocols::agent::StartContainerRequest) -> Result<()> {
-        let cid = req.container_id.clone();
+        let cid = req.container_id;

         let sandbox = self.sandbox.clone();
         let mut s = sandbox.lock().unwrap();
@@ -183,7 +180,7 @@ impl agentService {

         let ctr = s
             .get_container(&cid)
-            .ok_or(anyhow!("Invalid container id"))?;
+            .ok_or_else(|| anyhow!("Invalid container id"))?;

         ctr.exec()?;
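
The ok_or to ok_or_else change that recurs through these hunks is again clippy::or_fun_call: ok_or's error value is built eagerly even when the Option is Some, while ok_or_else builds it only on the None path. A sketch with an illustrative lookup:

use std::collections::HashMap;

fn get_container<'a>(
    containers: &'a HashMap<String, String>,
    cid: &str,
) -> Result<&'a String, String> {
    containers
        .get(cid)
        // The error string is only formatted when the id is missing.
        .ok_or_else(|| format!("Invalid container id: {}", cid))
}

fn main() {
    let mut containers = HashMap::new();
    containers.insert("abc".to_string(), "running".to_string());

    assert!(get_container(&containers, "abc").is_ok());
    assert!(get_container(&containers, "missing").is_err());
}
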
@@ -206,9 +203,7 @@ impl agentService {
         let mut remove_container_resources = |sandbox: &mut Sandbox| -> Result<()> {
             // Find the sandbox storage used by this container
             let mounts = sandbox.container_mounts.get(&cid);
-            if mounts.is_some() {
-                let mounts = mounts.unwrap();
-
+            if let Some(mounts) = mounts {
                 remove_mounts(&mounts)?;

                 for m in mounts.iter() {
@@ -232,7 +227,7 @@ impl agentService {
             let mut sandbox = s.lock().unwrap();
             let ctr = sandbox
                 .get_container(&cid)
-                .ok_or(anyhow!("Invalid container id"))?;
+                .ok_or_else(|| anyhow!("Invalid container id"))?;

             ctr.destroy()?;

@@ -250,11 +245,11 @@ impl agentService {
             let mut sandbox = s.lock().unwrap();
             let _ctr = sandbox
                 .get_container(&cid2)
-                .ok_or(anyhow!("Invalid container id"))
-                .and_then(|ctr| {
+                .ok_or_else(|| anyhow!("Invalid container id"))
+                .map(|ctr| {
                     ctr.destroy().unwrap();
                     tx.send(1).unwrap();
-                    Ok(ctr)
+                    ctr
                 });

@@ -277,7 +272,7 @@ impl agentService {
         let cid = req.container_id.clone();
         let exec_id = req.exec_id.clone();

-        info!(sl!(), "cid: {} eid: {}", cid.clone(), exec_id.clone());
+        info!(sl!(), "cid: {} eid: {}", cid, exec_id);

         let s = self.sandbox.clone();
         let mut sandbox = s.lock().unwrap();
@@ -294,7 +289,7 @@ impl agentService {

         let ctr = sandbox
             .get_container(&cid)
-            .ok_or(anyhow!("Invalid container id"))?;
+            .ok_or_else(|| anyhow!("Invalid container id"))?;

         ctr.run(p)?;

@@ -315,7 +310,7 @@ impl agentService {
             "exec-id" => eid.clone(),
         );

-        if eid == "" {
+        if eid.is_empty() {
             init = true;
         }

@@ -340,7 +335,7 @@ impl agentService {
         req: protocols::agent::WaitProcessRequest,
     ) -> Result<protocols::agent::WaitProcessResponse> {
         let cid = req.container_id.clone();
-        let eid = req.exec_id.clone();
+        let eid = req.exec_id;
         let s = self.sandbox.clone();
         let mut resp = WaitProcessResponse::new();
         let pid: pid_t;
@@ -376,7 +371,7 @@ impl agentService {
         let mut sandbox = s.lock().unwrap();
         let ctr = sandbox
             .get_container(&cid)
-            .ok_or(anyhow!("Invalid container id"))?;
+            .ok_or_else(|| anyhow!("Invalid container id"))?;

         let mut p = match ctr.processes.get_mut(&pid) {
             Some(p) => p,
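
The hunks that follow all lean on the import alias introduced earlier, `use ttrpc::{self, error::get_rpc_status as ttrpc_error};`, which shortens every `ttrpc::Error::RpcStatus(ttrpc::get_status(..))` to a single call. A sketch of the aliasing idea with toy stand-ins, not the real ttrpc API:

mod ttrpc {
    #[derive(Debug)]
    pub enum Code {
        Internal,
    }

    pub mod error {
        use super::Code;

        // Stand-in for a helper that wraps a code and message into an error.
        pub fn get_rpc_status(code: Code, msg: String) -> String {
            format!("{:?}: {}", code, msg)
        }
    }
}

use ttrpc::error::get_rpc_status as ttrpc_error;

fn main() {
    let err = ttrpc_error(ttrpc::Code::Internal, "Invalid container id".to_string());
    println!("{}", err);
}
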
@@ -519,10 +514,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::CreateContainerRequest,
     ) -> ttrpc::Result<Empty> {
         match self.do_create_container(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
+            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
             Ok(_) => Ok(Empty::new()),
         }
     }
@@ -533,10 +525,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::StartContainerRequest,
     ) -> ttrpc::Result<Empty> {
         match self.do_start_container(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
+            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
             Ok(_) => Ok(Empty::new()),
         }
     }
@@ -547,10 +536,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::RemoveContainerRequest,
     ) -> ttrpc::Result<Empty> {
         match self.do_remove_container(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
+            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
             Ok(_) => Ok(Empty::new()),
         }
     }
@@ -561,10 +547,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::ExecProcessRequest,
     ) -> ttrpc::Result<Empty> {
         match self.do_exec_process(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
+            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
             Ok(_) => Ok(Empty::new()),
         }
     }
@@ -575,10 +558,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::SignalProcessRequest,
     ) -> ttrpc::Result<Empty> {
         match self.do_signal_process(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
+            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
             Ok(_) => Ok(Empty::new()),
         }
     }
@@ -588,9 +568,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::WaitProcessRequest,
     ) -> ttrpc::Result<WaitProcessResponse> {
-        self.do_wait_process(req).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })
+        self.do_wait_process(req)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
     }

     fn list_processes(
@@ -600,18 +579,18 @@ impl protocols::agent_ttrpc::AgentService for agentService {
     ) -> ttrpc::Result<ListProcessesResponse> {
         let cid = req.container_id.clone();
         let format = req.format.clone();
-        let mut args = req.args.clone().into_vec();
+        let mut args = req.args.into_vec();
         let mut resp = ListProcessesResponse::new();

         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

-        let ctr = sandbox
-            .get_container(&cid)
-            .ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
                 "invalid container id".to_string(),
-            )))?;
+            )
+        })?;

         let pids = ctr.processes().unwrap();

@@ -622,15 +601,15 @@ impl protocols::agent_ttrpc::AgentService for agentService {
                 return Ok(resp);
             }
             _ => {
-                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
+                return Err(ttrpc_error(
                     ttrpc::Code::INVALID_ARGUMENT,
                     "invalid format!".to_string(),
-                )));
+                ));
             }
         }

         // format "table"
-        if args.len() == 0 {
+        if args.is_empty() {
             // default argument
             args = vec!["-ef".to_string()];
         }
@@ -688,12 +667,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

-        let ctr = sandbox
-            .get_container(&cid)
-            .ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
                 "invalid container id".to_string(),
-            )))?;
+            )
+        })?;

         let resp = Empty::new();

@@ -701,10 +680,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             let ociRes = rustjail::resources_grpc_to_oci(&res.unwrap());
             match ctr.set(ociRes) {
                 Err(e) => {
-                    return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                        ttrpc::Code::INTERNAL,
-                        e.to_string(),
-                    )));
+                    return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
                 }

                 Ok(_) => return Ok(resp),
@@ -719,20 +695,19 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::StatsContainerRequest,
     ) -> ttrpc::Result<StatsContainerResponse> {
-        let cid = req.container_id.clone();
+        let cid = req.container_id;
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

-        let ctr = sandbox
-            .get_container(&cid)
-            .ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
                 "invalid container id".to_string(),
-            )))?;
+            )
+        })?;

-        ctr.stats().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })
+        ctr.stats()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
     }

     fn pause_container(
@@ -744,17 +719,16 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

-        let ctr = sandbox
-            .get_container(&cid)
-            .ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
                 "invalid container id".to_string(),
-            )))?;
-
-        ctr.pause().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+            )
         })?;

+        ctr.pause()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
+
         Ok(Empty::new())
     }

@@ -767,17 +741,16 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

-        let ctr = sandbox
-            .get_container(&cid)
-            .ok_or(ttrpc::Error::RpcStatus(ttrpc::get_status(
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
                 "invalid container id".to_string(),
-            )))?;
-
-        ctr.resume().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+            )
         })?;

+        ctr.resume()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
+
         Ok(Empty::new())
     }

@@ -786,9 +759,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::WriteStreamRequest,
     ) -> ttrpc::Result<WriteStreamResponse> {
-        self.do_write_stream(req).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })
+        self.do_write_stream(req)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
     }

     fn read_stdout(
@@ -796,9 +768,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::ReadStreamRequest,
     ) -> ttrpc::Result<ReadStreamResponse> {
-        self.do_read_stream(req, true).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })
+        self.do_read_stream(req, true)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
     }

     fn read_stderr(
@@ -806,9 +777,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::ReadStreamRequest,
     ) -> ttrpc::Result<ReadStreamResponse> {
-        self.do_read_stream(req, false).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })
+        self.do_read_stream(req, false)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
     }

     fn close_stdin(
@@ -817,15 +787,15 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::CloseStdinRequest,
     ) -> ttrpc::Result<Empty> {
         let cid = req.container_id.clone();
-        let eid = req.exec_id.clone();
+        let eid = req.exec_id;
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

         let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(
+            ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
                 format!("invalid argument: {:?}", e),
-            ))
+            )
         })?;

         if p.term_master.is_some() {
@@ -851,17 +821,14 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
         let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(
+            ttrpc_error(
                 ttrpc::Code::UNAVAILABLE,
                 format!("invalid argument: {:?}", e),
-            ))
+            )
         })?;

         if p.term_master.is_none() {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::UNAVAILABLE,
-                "no tty".to_string(),
-            )));
+            return Err(ttrpc_error(ttrpc::Code::UNAVAILABLE, "no tty".to_string()));
         }

         let fd = p.term_master.unwrap();
@@ -874,12 +841,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         };

         let err = libc::ioctl(fd, TIOCSWINSZ, &win);
-        Errno::result(err).map(drop).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                format!("ioctl error: {:?}", e),
-            ))
-        })?;
+        Errno::result(err)
+            .map(drop)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("ioctl error: {:?}", e)))?;
     }

     Ok(Empty::new())
@@ -891,13 +855,13 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::UpdateInterfaceRequest,
     ) -> ttrpc::Result<Interface> {
         if req.interface.is_none() {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
+            return Err(ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
-                format!("empty update interface request"),
-            )));
+                "empty update interface request".to_string(),
+            ));
         }

-        let interface = req.interface.clone();
+        let interface = req.interface;
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();

@@ -910,10 +874,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let iface = rtnl
             .update_interface(interface.as_ref().unwrap())
             .map_err(|e| {
-                ttrpc::Error::RpcStatus(ttrpc::get_status(
-                    ttrpc::Code::INTERNAL,
-                    format!("update interface: {:?}", e),
-                ))
+                ttrpc_error(ttrpc::Code::INTERNAL, format!("update interface: {:?}", e))
             })?;

         Ok(iface)
@@ -926,13 +887,13 @@ impl protocols::agent_ttrpc::AgentService for agentService {
     ) -> ttrpc::Result<Routes> {
         let mut routes = protocols::agent::Routes::new();
         if req.routes.is_none() {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
+            return Err(ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
-                format!("empty update routes request"),
-            )));
+                "empty update routes request".to_string(),
+            ));
        }

-        let rs = req.routes.clone().unwrap().Routes.into_vec();
+        let rs = req.routes.unwrap().Routes.into_vec();

         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
@@ -944,12 +905,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let rtnl = sandbox.rtnl.as_mut().unwrap();

         // get current routes to return when error out
-        let crs = rtnl.list_routes().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                format!("update routes: {:?}", e),
-            ))
-        })?;
+        let crs = rtnl
+            .list_routes()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("update routes: {:?}", e)))?;

         let v = match rtnl.update_routes(rs.as_ref()) {
             Ok(value) => value,
@@ -975,12 +933,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         }

         let rtnl = sandbox.rtnl.as_mut().unwrap();
-        let v = rtnl.list_interfaces().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                format!("list interface: {:?}", e),
-            ))
-        })?;
+        let v = rtnl
+            .list_interfaces()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("list interface: {:?}", e)))?;

         interface.set_Interfaces(RepeatedField::from_vec(v));

@@ -1002,12 +957,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {

         let rtnl = sandbox.rtnl.as_mut().unwrap();

-        let v = rtnl.list_routes().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                format!("list routes: {:?}", e),
-            ))
-        })?;
+        let v = rtnl
+            .list_routes()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("list routes: {:?}", e)))?;

         routes.set_Routes(RepeatedField::from_vec(v));

@@ -1055,19 +1007,17 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             });
         }

-        if req.sandbox_id.len() > 0 {
+        if !req.sandbox_id.is_empty() {
             s.id = req.sandbox_id.clone();
         }

         for m in req.kernel_modules.iter() {
-            let _ = load_kernel_module(m).map_err(|e| {
-                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-            })?;
+            let _ = load_kernel_module(m)
+                .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
         }

-        s.setup_shared_namespaces().map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        s.setup_shared_namespaces()
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
     }

     match add_storages(sl!(), req.storages.to_vec(), self.sandbox.clone()) {
@@ -1076,30 +1026,20 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             let mut s = sandbox.lock().unwrap();
             s.mounts = m
         }
-        Err(e) => {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )))
-        }
+        Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
     };

     match setup_guest_dns(sl!(), req.dns.to_vec()) {
         Ok(_) => {
             let sandbox = self.sandbox.clone();
             let mut s = sandbox.lock().unwrap();
-            let _ = req
+            let _dns = req
                 .dns
                 .to_vec()
                 .iter()
                 .map(|dns| s.network.set_dns(dns.to_string()));
         }
-        Err(e) => {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )))
-        }
+        Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
     };

     Ok(Empty::new())
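
Several of the hunks above also apply clippy::useless_format: `format!` with no arguments only adds a formatting pass, so a literal message becomes `.to_string()`. A one-line sketch:

fn main() {
    // Before:
    // let msg = format!("empty update interface request");
    let msg = "empty update interface request".to_string();
    assert_eq!(msg.len(), 30);
}
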
@@ -1128,13 +1068,13 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::AddARPNeighborsRequest,
     ) -> ttrpc::Result<Empty> {
         if req.neighbors.is_none() {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
+            return Err(ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
-                format!("empty add arp neighbours request"),
-            )));
+                "empty add arp neighbours request".to_string(),
+            ));
         }

-        let neighs = req.neighbors.clone().unwrap().ARPNeighbors.into_vec();
+        let neighs = req.neighbors.unwrap().ARPNeighbors.into_vec();

         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
@@ -1145,9 +1085,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {

         let rtnl = sandbox.rtnl.as_mut().unwrap();

-        rtnl.add_arp_neighbors(neighs.as_ref()).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        rtnl.add_arp_neighbors(neighs.as_ref())
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

         Ok(Empty::new())
     }
@@ -1160,9 +1099,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let sandbox = s.lock().unwrap();

-        sandbox.online_cpu_memory(&req).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        sandbox
+            .online_cpu_memory(&req)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

         Ok(Empty::new())
     }
@@ -1172,9 +1111,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::ReseedRandomDevRequest,
     ) -> ttrpc::Result<Empty> {
-        random::reseed_rng(req.data.as_slice()).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        random::reseed_rng(req.data.as_slice())
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

         Ok(Empty::new())
     }
@@ -1194,10 +1132,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             }
             Err(e) => {
                 info!(sl!(), "fail to get memory info!");
-                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                    ttrpc::Code::INTERNAL,
-                    e.to_string(),
-                )));
+                return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
             }
         }

@@ -1213,9 +1148,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::MemHotplugByProbeRequest,
     ) -> ttrpc::Result<Empty> {
-        do_mem_hotplug_by_probe(&req.memHotplugProbeAddr).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        do_mem_hotplug_by_probe(&req.memHotplugProbeAddr)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

         Ok(Empty::new())
     }
@@ -1225,9 +1159,8 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::SetGuestDateTimeRequest,
     ) -> ttrpc::Result<Empty> {
-        do_set_guest_date_time(req.Sec, req.Usec).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        do_set_guest_date_time(req.Sec, req.Usec)
+            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

         Ok(Empty::new())
     }
@@ -1237,9 +1170,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::CopyFileRequest,
     ) -> ttrpc::Result<Empty> {
-        do_copy_file(&req).map_err(|e| {
-            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
-        })?;
+        do_copy_file(&req).map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

         Ok(Empty::new())
     }
@@ -1250,10 +1181,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::GetMetricsRequest,
     ) -> ttrpc::Result<Metrics> {
         match get_metrics(&req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
+            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
             Ok(s) => {
                 let mut metrics = Metrics::new();
                 metrics.set_metrics(s);
@@ -1275,17 +1203,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         drop(sandbox);

         match event_rx.recv() {
-            Err(err) => {
-                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                    ttrpc::Code::INTERNAL,
-                    err.to_string(),
-                )))
-            }
+            Err(err) => Err(ttrpc_error(ttrpc::Code::INTERNAL, err.to_string())),
             Ok(container_id) => {
                 info!(sl!(), "get_oom_event return {}", &container_id);
                 let mut resp = OOMEvent::new();
                 resp.container_id = container_id;
-                return Ok(resp);
+                Ok(resp)
             }
         }
     }
@@ -1325,7 +1248,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
     if block_size {
         match fs::read_to_string(SYSFS_MEMORY_BLOCK_SIZE_PATH) {
             Ok(v) => {
-                if v.len() == 0 {
+                if v.is_empty() {
                     info!(sl!(), "string in empty???");
                     return Err(anyhow!("Invalid block size"));
                 }
@@ -1404,7 +1327,7 @@ fn read_stream(fd: RawFd, l: usize) -> Result<Vec<u8>> {
         }
         Err(e) => match e {
            nix::Error::Sys(errno) => match errno {
-                Errno::EAGAIN => v.resize(0, 0),
+                Errno::EAGAIN => v.clear(),
                 _ => return Err(anyhow!(nix::Error::Sys(errno))),
             },
             _ => return Err(anyhow!("read error")),
@@ -1422,13 +1345,13 @@ fn find_process<'a>(
 ) -> Result<&'a mut Process> {
     let ctr = sandbox
         .get_container(cid)
-        .ok_or(anyhow!("Invalid container id"))?;
+        .ok_or_else(|| anyhow!("Invalid container id"))?;

-    if init || eid == "" {
+    if init || eid.is_empty() {
         return ctr
             .processes
             .get_mut(&ctr.init_process_pid)
-            .ok_or(anyhow!("cannot find init process!"));
+            .ok_or_else(|| anyhow!("cannot find init process!"));
     }

     ctr.get_process(eid).map_err(|_| anyhow!("Invalid exec id"))
@@ -1478,7 +1401,7 @@ fn update_container_namespaces(
     let linux = spec
         .linux
         .as_mut()
-        .ok_or(anyhow!("Spec didn't container linux field"))?;
+        .ok_or_else(|| anyhow!("Spec didn't container linux field"))?;

     let namespaces = linux.namespaces.as_mut_slice();
     for namespace in namespaces.iter_mut() {
@@ -1492,8 +1415,10 @@ fn update_container_namespaces(
         }
     }
     // update pid namespace
-    let mut pid_ns = LinuxNamespace::default();
-    pid_ns.r#type = NSTYPEPID.to_string();
+    let mut pid_ns = LinuxNamespace {
+        r#type: NSTYPEPID.to_string(),
+        ..Default::default()
+    };

     // Use shared pid ns if useSandboxPidns has been set in either
     // the create_sandbox request or create_container request.
@@ -1546,7 +1471,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
         }
     };
     if line.starts_with("SigCgt:") {
-        let mask_vec: Vec<&str> = line.split(":").collect();
+        let mask_vec: Vec<&str> = line.split(':').collect();
         if mask_vec.len() != 2 {
             warn!(sl!(), "parse the SigCgt field failed\n");
             return false;
@@ -1566,7 +1491,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
     false
 }

-fn do_mem_hotplug_by_probe(addrs: &Vec<u64>) -> Result<()> {
+fn do_mem_hotplug_by_probe(addrs: &[u64]) -> Result<()> {
     for addr in addrs.iter() {
         fs::write(SYSFS_MEMORY_HOTPLUG_PROBE_PATH, format!("{:#X}", *addr))?;
     }
@@ -1579,8 +1504,12 @@ fn do_set_guest_date_time(sec: i64, usec: i64) -> Result<()> {
         tv_usec: usec,
     };

-    let ret =
-        unsafe { libc::settimeofday(&tv as *const libc::timeval, 0 as *const libc::timezone) };
+    let ret = unsafe {
+        libc::settimeofday(
+            &tv as *const libc::timeval,
+            std::ptr::null::<libc::timezone>(),
+        )
+    };

     Errno::result(ret).map(drop)?;
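
The settimeofday hunk is the clippy::zero_ptr fix: a typed null-pointer constructor instead of an integer cast. A tiny sketch with a toy FFI-style function, not the real libc call:

fn takes_nullable(tz: *const i64) -> bool {
    tz.is_null()
}

fn main() {
    // Before: let tz = 0 as *const i64;
    let tz = std::ptr::null::<i64>();
    assert!(takes_nullable(tz));
}
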
@@ -1596,8 +1525,8 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {

     let parent = path.parent();

-    let dir = if parent.is_some() {
-        parent.unwrap().to_path_buf()
+    let dir = if let Some(parent) = parent {
+        parent.to_path_buf()
     } else {
         PathBuf::from("/")
     };
@@ -1657,8 +1586,8 @@ fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
     let spec_root = spec.root.as_ref().unwrap();

     let bundle_path = Path::new(CONTAINER_BASE).join(cid);
-    let config_path = bundle_path.clone().join("config.json");
-    let rootfs_path = bundle_path.clone().join("rootfs");
+    let config_path = bundle_path.join("config.json");
+    let rootfs_path = bundle_path.join("rootfs");

     fs::create_dir_all(&rootfs_path)?;
     BareMount::new(
@@ -1689,7 +1618,7 @@ fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
 }

 fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
-    if module.name == "" {
+    if module.name.is_empty() {
         return Err(anyhow!("Kernel module name is empty"));
     }

@@ -1722,9 +1651,9 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
                     "load_kernel_module return code: {} stdout:{} stderr:{}",
                     code, std_out, std_err
                 );
-                return Err(anyhow!(msg));
+                Err(anyhow!(msg))
             }
-            None => return Err(anyhow!("Process terminated by signal")),
+            None => Err(anyhow!("Process terminated by signal")),
         }
     }

@@ -1736,17 +1665,16 @@ mod tests {
     use std::sync::mpsc::{Receiver, Sender};
     use ttrpc::{MessageHeader, TtrpcContext};

-    fn mk_ttrpc_context() -> (TtrpcContext, Receiver<(MessageHeader, Vec<u8>)>) {
+    type Message = (MessageHeader, Vec<u8>);
+
+    fn mk_ttrpc_context() -> (TtrpcContext, Receiver<Message>) {
         let mh = MessageHeader::default();

-        let (tx, rx): (
-            Sender<(MessageHeader, Vec<u8>)>,
-            Receiver<(MessageHeader, Vec<u8>)>,
-        ) = channel();
+        let (tx, rx): (Sender<Message>, Receiver<Message>) = channel();

         let ctx = TtrpcContext {
             fd: -1,
-            mh: mh,
+            mh,
             res_tx: tx,
         };

@@ -1755,10 +1683,12 @@ mod tests {

     #[test]
     fn test_load_kernel_module() {
-        let mut m = protocols::agent::KernelModule::default();
+        let mut m = protocols::agent::KernelModule {
+            name: "module_not_exists".to_string(),
+            ..Default::default()
+        };

         // case 1: module not exists
-        m.name = "module_not_exists".to_string();
         let result = load_kernel_module(&m);
         assert!(result.is_err(), "load module should failed");
@@ -74,7 +74,7 @@ impl Sandbox {
sender: None,
rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
hooks: None,
event_rx: event_rx,
event_rx,
event_tx: tx,
})
}
@@ -111,14 +111,14 @@ impl Sandbox {
// acquiring a lock on sandbox.
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
match self.storages.get_mut(path) {
None => return Err(anyhow!("Sandbox storage with path {} not found", path)),
None => Err(anyhow!("Sandbox storage with path {} not found", path)),
Some(count) => {
*count -= 1;
if *count < 1 {
self.storages.remove(path);
return Ok(true);
}
return Ok(false);
Ok(false)
}
}
}
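unset_sandbox_storage decrements a per-path reference count and only removes the storage entry when the count drops below one; the boolean return tells the caller whether the backing mount can now be torn down. A small Go sketch of that pattern (types and names hypothetical):

```go
package main

import "fmt"

// storageRefs maps a storage path to the number of containers using it.
type storageRefs map[string]int

// unset decrements the count for path; it returns true only when the
// last user is gone and the entry has been removed, so the caller knows
// the backing mount is safe to unmount.
func (s storageRefs) unset(path string) (bool, error) {
	count, ok := s[path]
	if !ok {
		return false, fmt.Errorf("sandbox storage with path %s not found", path)
	}
	count--
	if count < 1 {
		delete(s, path)
		return true, nil
	}
	s[path] = count
	return false, nil
}
```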
@@ -160,13 +160,13 @@ impl Sandbox {
pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
// Set up shared IPC namespace
self.shared_ipcns = Namespace::new(&self.logger)
.as_ipc()
.get_ipc()
.setup()
.context("Failed to setup persistent IPC namespace")?;

// // Set up shared UTS namespace
self.shared_utsns = Namespace::new(&self.logger)
.as_uts(self.hostname.as_str())
.get_uts(self.hostname.as_str())
.setup()
.context("Failed to setup persistent UTS namespace")?;

@@ -183,7 +183,7 @@ impl Sandbox {
// This means a separate pause process has not been created. We treat the
// first container created as the infra container in that case
// and use its pid namespace in case pid namespace needs to be shared.
if self.sandbox_pidns.is_none() && self.containers.len() == 0 {
if self.sandbox_pidns.is_none() && self.containers.is_empty() {
let init_pid = c.init_process_pid;
if init_pid == -1 {
return Err(anyhow!(
@@ -191,7 +191,7 @@ impl Sandbox {
));
}

let mut pid_ns = Namespace::new(&self.logger).as_pid();
let mut pid_ns = Namespace::new(&self.logger).get_pid();
pid_ns.path = format!("/proc/{}/ns/pid", init_pid);

self.sandbox_pidns = Some(pid_ns);
@@ -215,7 +215,7 @@ impl Sandbox {
}

pub fn destroy(&mut self) -> Result<()> {
for (_, ctr) in &mut self.containers {
for ctr in self.containers.values_mut() {
ctr.destroy()?;
}
Ok(())
@@ -236,14 +236,29 @@ impl Sandbox {
return Ok(());
}

let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
let guest_cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;

for (_, ctr) in self.containers.iter() {
let cpu = ctr
.config
.spec
.as_ref()
.unwrap()
.linux
.as_ref()
.unwrap()
.resources
.as_ref()
.unwrap()
.cpu
.as_ref();
let container_cpust = if let Some(c) = cpu { &c.cpus } else { "" };

info!(self.logger, "updating {}", ctr.id.as_str());
ctr.cgroup_manager
.as_ref()
.unwrap()
.update_cpuset_path(cpuset.as_str())?;
.update_cpuset_path(guest_cpuset.as_str(), &container_cpust)?;
}

Ok(())
@@ -335,7 +350,7 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
}
let c = c.unwrap();

if c.trim().contains("0") {
if c.trim().contains('0') {
let r = fs::write(file.as_str(), "1");
if r.is_err() {
continue;
@@ -609,13 +624,16 @@ mod tests {
}

fn create_dummy_opts() -> CreateOpts {
let mut root = Root::default();
root.path = String::from("/");
let root = Root {
path: String::from("/"),
..Default::default()
};

let linux = Linux::default();
let mut spec = Spec::default();
spec.root = Some(root).into();
spec.linux = Some(linux).into();
let spec = Spec {
linux: Some(Linux::default()),
root: Some(root),
..Default::default()
};

CreateOpts {
cgroup_name: "".to_string(),

@@ -2,6 +2,7 @@
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::module_inception)]

#[cfg(test)]
mod test_utils {

@@ -51,7 +51,7 @@ impl Uevent {
self.action == U_EVENT_ACTION_ADD
&& self.subsystem == "block"
&& self.devpath.starts_with(PCI_ROOT_BUS_PATH)
&& self.devname != ""
&& !self.devname.is_empty()
}

fn handle_block_add_event(&self, sandbox: &Arc<Mutex<Sandbox>>) {
src/runtime/.gitignore (vendored)
@@ -8,9 +8,7 @@ coverage.html
/cli/config/configuration-acrn.toml
/cli/config/configuration-clh.toml
/cli/config/configuration-fc.toml
/cli/config/configuration-nemu.toml
/cli/config/configuration-qemu.toml
/cli/config/configuration-qemu-virtiofs.toml
/cli/config/configuration-clh.toml
/cli/config-generated.go
/cli/containerd-shim-kata-v2/config-generated.go

@@ -93,6 +93,9 @@ DEFAULTSDIR := $(SHAREDIR)/defaults
COLLECT_SCRIPT = data/kata-collect-data.sh
COLLECT_SCRIPT_SRC = $(COLLECT_SCRIPT).in

# @RUNTIME_NAME@ should be replaced with the target in generated files
RUNTIME_NAME = $(TARGET)

GENERATED_FILES += $(COLLECT_SCRIPT)
GENERATED_VARS = \
VERSION \
@@ -282,7 +285,7 @@ ifneq (,$(CLHCMD))
DEFENABLEHUGEPAGES_CLH := true
DEFNETWORKMODEL_CLH := tcfilter
KERNELTYPE_CLH = uncompressed
KERNEL_NAME_CLH = $(call MAKE_KERNEL_VIRTIOFS_NAME,$(KERNELTYPE_CLH))
KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH))
KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
endif

@@ -600,8 +603,9 @@ $(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && ln -fs $(GENERATED_CONFIG))
$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)

$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
$(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
$(QUIET_BUILD)(cd $(MONITOR_DIR)/ && CGO_ENABLED=0 go build \
--ldflags "-X main.GitCommit=$(shell cat .git-commit)" $(BUILDFLAGS) -buildmode=exe -o $@ .)

.PHONY: \
check \
@@ -634,7 +638,6 @@ go-test: $(GENERATED_FILES)
go test -v -mod=vendor ./...

check-go-static:
$(QUIET_CHECK)../../ci/static-checks.sh
$(QUIET_CHECK)../../ci/go-no-os-exit.sh ./cli
$(QUIET_CHECK)../../ci/go-no-os-exit.sh ./virtcontainers

@@ -12,9 +12,6 @@ CPUFEATURES := pmu=off

QEMUCMD := qemu-system-x86_64

# Qemu experimental with virtiofs
QEMUVIRTIOFSCMD := qemu-virtiofs-system-x86_64

# Firecracker binary name
FCCMD := firecracker
# Firecracker's jailer binary name

@@ -113,7 +113,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_ACRN@"
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered will scanning for hooks,
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"

@@ -115,6 +115,23 @@ block_device_driver = "virtio-blk"
# Default false
#enable_debug = true

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
#
[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
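The configuration comments above describe how the agent discovers guest hooks: each hook type lives in its own subdirectory of guest_hook_path, and executable files are picked up in lexicographical order, with scan errors logged but not fatal. A hedged Go sketch of that scan (the real implementation is in the Rust agent; the helper name is hypothetical):

```go
package main

import (
	"os"
	"path/filepath"
	"sort"
)

// findHooks lists executable files under <guestHookPath>/<hookType>
// (e.g. "prestart"), sorted lexicographically, as the docs describe.
func findHooks(guestHookPath, hookType string) ([]string, error) {
	dir := filepath.Join(guestHookPath, hookType)
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err // a missing directory just means "no hooks"
	}
	var hooks []string
	for _, e := range entries {
		info, err := e.Info()
		if err != nil || info.IsDir() || info.Mode()&0111 == 0 {
			continue // skip directories and non-executables
		}
		hooks = append(hooks, filepath.Join(dir, e.Name()))
	}
	sort.Strings(hooks) // os.ReadDir already sorts; kept for explicitness
	return hooks, nil
}
```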
@@ -305,7 +305,7 @@ valid_file_mem_backends = @DEFVALIDFILEMEMBACKENDS@
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered will scanning for hooks,
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
#

@@ -63,7 +63,6 @@ const (
moduleParamDir = "parameters"
successMessageCapable = "System is capable of running " + project
successMessageCreate = "System can currently create " + project
successMessageVersion = "Version consistency of " + project + " is verified"
failMessage = "System is not capable of running " + project
kernelPropertyCorrect = "Kernel property value correct"

@@ -389,7 +388,7 @@ EXAMPLES:
span, _ := katautils.Trace(ctx, "kata-check")
defer span.Finish()

if context.Bool("no-network-checks") == false && os.Getenv(noNetworkEnvVar) == "" {
if !context.Bool("no-network-checks") && os.Getenv(noNetworkEnvVar) == "" {
cmd := RelCmdCheck

if context.Bool("only-list-releases") {
@@ -62,9 +62,6 @@ var originalLoggerLevel = logrus.WarnLevel

var debug = false

// if true, coredump when an internal error occurs or a fatal signal is received
var crashOnError = false

// concrete virtcontainer implementation
var virtcontainersImpl = &vc.VCImpl{}

@@ -325,7 +322,6 @@ func beforeSubcommands(c *cli.Context) error {
}
if !subCmdIsCheckCmd {
debug = runtimeConfig.Debug
crashOnError = runtimeConfig.Debug

if traceRootSpan != "" {
// Create the tracer.

@@ -8,7 +8,6 @@ package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"flag"
"fmt"
@@ -28,7 +27,6 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/assert"
jaeger "github.com/uber/jaeger-client-go"
@@ -43,10 +41,8 @@ const (
// small docker image used to create root filesystems from
testDockerImage = "busybox"

testSandboxID = "99999999-9999-9999-99999999999999999"
testContainerID = "1"
testBundle = "bundle"
testConsole = "/dev/pts/999"
testBundle = "bundle"
testConsole = "/dev/pts/999"
)

var (
@@ -386,44 +382,6 @@ func makeOCIBundle(bundleDir string) error {
return nil
}

func writeOCIConfigFile(spec specs.Spec, configPath string) error {
if configPath == "" {
return errors.New("BUG: need config file path")
}

bytes, err := json.MarshalIndent(spec, "", "\t")
if err != nil {
return err
}

return ioutil.WriteFile(configPath, bytes, testFileMode)
}

func newSingleContainerStatus(containerID string, containerState types.ContainerState, annotations map[string]string, spec *specs.Spec) vc.ContainerStatus {
return vc.ContainerStatus{
ID: containerID,
State: containerState,
Annotations: annotations,
Spec: spec,
}
}

func execCLICommandFunc(assertHandler *assert.Assertions, cliCommand cli.Command, set *flag.FlagSet, expectedErr bool) {
ctx := createCLIContext(set)
ctx.App.Name = "foo"

fn, ok := cliCommand.Action.(func(context *cli.Context) error)
assertHandler.True(ok)

err := fn(ctx)

if expectedErr {
assertHandler.Error(err)
} else {
assertHandler.Nil(err)
}
}

func createCLIContextWithApp(flagSet *flag.FlagSet, app *cli.App) *cli.Context {
ctx := cli.NewContext(app, flagSet, nil)
@@ -189,21 +189,3 @@ func constructVersionInfo(version string) VersionInfo {
}

}

func versionEqual(a VersionInfo, b VersionInfo) bool {
av, err := semver.Make(a.Semver)
if err != nil {
return false
}

bv, err := semver.Make(b.Semver)
if err != nil {
return false
}

if av.Major == bv.Major && av.Minor == bv.Minor && av.Patch == bv.Patch {
return true
}

return false
}

@@ -1,10 +0,0 @@
// +build !s390x
//
// SPDX-License-Identifier: Apache-2.0
//

package main

func archConvertStatFs(cgroupFsType int) int64 {
return int64(cgroupFsType)
}
@@ -1,10 +0,0 @@
// Copyright (c) 2018 IBM
//
// SPDX-License-Identifier: Apache-2.0
//

package main

func archConvertStatFs(cgroupFsType int) uint32 {
return uint32(cgroupFsType)
}
@@ -87,6 +87,12 @@ func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*con
return nil, err
}
s.sandbox = sandbox
pid, err := s.sandbox.GetHypervisorPid()
if err != nil {
return nil, err
}
s.hpid = uint32(pid)

go s.startManagementServer(ctx, ociSpec)

case vc.PodContainer:

@@ -29,7 +29,9 @@ func deleteContainer(ctx context.Context, s *service, c *container) error {

// Run post-stop OCI hooks.
if err := katautils.PostStopHooks(ctx, *c.spec, s.sandbox.ID(), c.bundle); err != nil {
return err
// log warning and continue, as defined in oci runtime spec
// https://github.com/opencontainers/runtime-spec/blob/master/runtime.md#lifecycle
shimLog.WithError(err).Warn("Failed to run post-stop hooks")
}

if c.mounted {

@@ -59,7 +59,10 @@ var (
var vci vc.VC = &vc.VCImpl{}

// shimLog is logger for shim package
var shimLog = logrus.WithField("source", "containerd-kata-shim-v2")
var shimLog = logrus.WithFields(logrus.Fields{
"source": "containerd-kata-shim-v2",
"name": "containerd-shim-v2",
})

// New returns a new shim service that can be used via GRPC
func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shim, error) {
@@ -110,9 +113,12 @@ type service struct {
mu sync.Mutex
eventSendMu sync.Mutex

// pid Since this shimv2 cannot get the container processes pid from VM,
// thus for the returned values needed pid, just return this shim's
// hypervisor pid, Since this shimv2 cannot get the container processes pid from VM,
// thus for the returned values needed pid, just return the hypervisor's
// pid directly.
hpid uint32

// shim's pid
pid uint32

ctx context.Context
@@ -367,11 +373,11 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *
Terminal: r.Terminal,
},
Checkpoint: r.Checkpoint,
Pid: s.pid,
Pid: s.hpid,
})

return &taskAPI.CreateTaskResponse{
Pid: s.pid,
Pid: s.hpid,
}, nil
}

@@ -403,7 +409,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP
}
s.send(&eventstypes.TaskStart{
ContainerID: c.id,
Pid: s.pid,
Pid: s.hpid,
})
} else {
//start an exec
@@ -414,12 +420,12 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP
s.send(&eventstypes.TaskExecStarted{
ContainerID: c.id,
ExecID: r.ExecID,
Pid: s.pid,
Pid: s.hpid,
})
}

return &taskAPI.StartResponse{
Pid: s.pid,
Pid: s.hpid,
}, nil
}

@@ -446,7 +452,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task

s.send(&eventstypes.TaskDelete{
ContainerID: c.id,
Pid: s.pid,
Pid: s.hpid,
ExitStatus: c.exit,
ExitedAt: c.exitTime,
})
@@ -454,7 +460,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
return &taskAPI.DeleteResponse{
ExitStatus: c.exit,
ExitedAt: c.exitTime,
Pid: s.pid,
Pid: s.hpid,
}, nil
}
//deal with the exec case
@@ -468,7 +474,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
return &taskAPI.DeleteResponse{
ExitStatus: uint32(execs.exitCode),
ExitedAt: execs.exitTime,
Pid: s.pid,
Pid: s.hpid,
}, nil
}

@@ -563,7 +569,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
return &taskAPI.StateResponse{
ID: c.id,
Bundle: c.bundle,
Pid: s.pid,
Pid: s.hpid,
Status: c.status,
Stdin: c.stdin,
Stdout: c.stdout,
@@ -582,7 +588,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
return &taskAPI.StateResponse{
ID: execs.id,
Bundle: c.bundle,
Pid: s.pid,
Pid: s.hpid,
Status: execs.status,
Stdin: execs.tty.stdin,
Stdout: execs.tty.stdout,
@@ -732,7 +738,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.
}()

pInfo := task.ProcessInfo{
Pid: s.pid,
Pid: s.hpid,
}
processes = append(processes, &pInfo)

@@ -804,7 +810,7 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta
return &taskAPI.ConnectResponse{
ShimPid: s.pid,
//Since kata cannot get the container's pid in VM, thus only return the shim's pid.
TaskPid: s.pid,
TaskPid: s.hpid,
}, nil
}

@@ -65,8 +65,7 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
// encode the metrics
encoder := expfmt.NewEncoder(w, expfmt.FmtText)
for _, mf := range mfs {
if err := encoder.Encode(mf); err != nil {
}
encoder.Encode(mf)
}

// if using an old agent, only collect shim/sandbox metrics.
@@ -149,7 +148,7 @@ func (s *service) startManagementServer(ctx context.Context, ociSpec *specs.Spec

shimMgtLog.Info("kata management inited")

// bind hanlder
// bind handler
m := http.NewServeMux()
m.Handle("/metrics", http.HandlerFunc(s.serveMetrics))
m.Handle("/agent-url", http.HandlerFunc(s.agentURL))

@@ -176,7 +176,7 @@ func calcOverhead(initialSandboxStats, finishSandboxStats vc.SandboxStats, initi
cpuUsageGuest := float64(guestFinalCPU-guestInitCPU) / deltaTime * 100
cpuUsageHost := float64(hostFinalCPU-hostInitCPU) / deltaTime * 100

return float64(hostMemoryUsage - guestMemoryUsage), float64(cpuUsageHost - cpuUsageGuest)
return float64(hostMemoryUsage - guestMemoryUsage), cpuUsageHost - cpuUsageGuest
}

func (s *service) getPodOverhead() (float64, float64, error) {
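calcOverhead derives pod overhead by subtracting guest usage from host usage, with CPU time normalized to a percentage of the sampling window. A worked illustration with made-up numbers:

```go
// Illustrative numbers only: with deltaTime = 1e9 ns (one second),
// host CPU time rising by 5e8 ns and guest CPU time by 3e8 ns:
//
//   cpuUsageHost  = (5e8 / 1e9) * 100 = 50.0 %
//   cpuUsageGuest = (3e8 / 1e9) * 100 = 30.0 %
//   cpu overhead  = 50.0 - 30.0      = 20.0 %
//
// Memory overhead is simply hostMemoryUsage - guestMemoryUsage, in bytes.
```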
@@ -96,7 +96,7 @@ func TestStatsSandbox(t *testing.T) {
sandbox.StatsFunc = getSandboxCPUFunc(2000, 110000)
sandbox.StatsContainerFunc = getStatsContainerCPUFunc(200, 400, 20000, 40000)

finishSandboxStats, finishContainersStats, err := s.statsSandbox()
finishSandboxStats, finishContainersStats, _ := s.statsSandbox()

// calc overhead
mem, cpu := calcOverhead(initialSandboxStats, finishSandboxStats, initialContainerStats, finishContainersStats, 1e9)

@@ -52,7 +52,9 @@ func startContainer(ctx context.Context, s *service, c *container) error {
return katautils.PostStartHooks(ctx, *c.spec, s.sandbox.ID(), c.bundle)
})
if err != nil {
return err
// log warning and continue, as defined in oci runtime spec
// https://github.com/opencontainers/runtime-spec/blob/master/runtime.md#lifecycle
shimLog.WithError(err).Warn("Failed to run post-start hooks")
}

c.status = task.StatusRunning

@@ -24,7 +24,7 @@ import (
func cReap(s *service, status int, id, execid string, exitat time.Time) {
s.ec <- exit{
timestamp: exitat,
pid: s.pid,
pid: s.hpid,
status: status,
id: id,
execid: execid,
@@ -107,14 +107,14 @@ func TestEncodeMetricFamily(t *testing.T) {
scrapeCount.Inc()
scrapeCount.Inc()

mfs, err := prometheus.DefaultGatherer.Gather()
mfs, _ := prometheus.DefaultGatherer.Gather()

// create encoder
buf := bytes.NewBufferString("")
encoder := expfmt.NewEncoder(buf, expfmt.FmtText)

// encode metrics to text format
err = encodeMetricFamily(mfs, encoder)
err := encodeMetricFamily(mfs, encoder)
assert.Nil(err, "encodeMetricFamily should not return error")

// here will be to many metrics,

@@ -38,7 +38,7 @@ type KataMonitor struct {
// NewKataMonitor create and return a new KataMonitor instance
func NewKataMonitor(containerdAddr, containerdConfigFile string) (*KataMonitor, error) {
if containerdAddr == "" {
return nil, fmt.Errorf("Containerd serve address missing.")
return nil, fmt.Errorf("containerd serve address missing")
}

containerdConf := &srvconfig.Config{
@@ -82,7 +82,7 @@ func (km *KataMonitor) initSandboxCache() error {

// GetAgentURL returns agent URL
func (km *KataMonitor) GetAgentURL(w http.ResponseWriter, r *http.Request) {
sandboxID, err := getSandboxIdFromReq(r)
sandboxID, err := getSandboxIDFromReq(r)
if err != nil {
commonServeError(w, http.StatusBadRequest, err)
return

@@ -21,7 +21,7 @@ func serveError(w http.ResponseWriter, status int, txt string) {
}

func (km *KataMonitor) composeSocketAddress(r *http.Request) (string, error) {
sandbox, err := getSandboxIdFromReq(r)
sandbox, err := getSandboxIDFromReq(r)
if err != nil {
return "", err
}

@@ -157,7 +157,7 @@ func (sc *sandboxCache) startEventsListener(addr string) error {
// if the container is a sandbox container,
// means the VM is started, and can start to collect metrics from the VM.
if isSandboxContainer(&c) {
// we can simply put the contaienrid in sandboxes list if the conatiner is a sandbox container
// we can simply put the contaienrid in sandboxes list if the container is a sandbox container
sc.putIfNotExists(cc.ID, e.Namespace)
monitorLog.WithField("container", cc.ID).Info("add sandbox to cache")
}

@@ -25,7 +25,7 @@ func commonServeError(w http.ResponseWriter, status int, err error) {
}
}

func getSandboxIdFromReq(r *http.Request) (string, error) {
func getSandboxIDFromReq(r *http.Request) (string, error) {
sandbox := r.URL.Query().Get("sandbox")
if sandbox != "" {
return sandbox, nil

@@ -17,7 +17,6 @@ var defaultInitrdPath = "/usr/share/kata-containers/kata-containers-initrd.img"
var defaultFirmwarePath = ""
var defaultMachineAccelerators = ""
var defaultCPUFeatures = ""
var defaultShimPath = "/usr/libexec/kata-containers/kata-shim"
var systemdUnitName = "kata-containers.target"

const defaultKernelParams = ""
@@ -71,12 +71,9 @@ type factory struct {

type hypervisor struct {
Path string `toml:"path"`
HypervisorPathList []string `toml:"valid_hypervisor_paths"`
JailerPath string `toml:"jailer_path"`
JailerPathList []string `toml:"valid_jailer_paths"`
Kernel string `toml:"kernel"`
CtlPath string `toml:"ctlpath"`
CtlPathList []string `toml:"valid_ctlpaths"`
Initrd string `toml:"initrd"`
Image string `toml:"image"`
Firmware string `toml:"firmware"`
@@ -88,16 +85,23 @@ type hypervisor struct {
EntropySource string `toml:"entropy_source"`
SharedFS string `toml:"shared_fs"`
VirtioFSDaemon string `toml:"virtio_fs_daemon"`
VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"`
VirtioFSCache string `toml:"virtio_fs_cache"`
VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"`
VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"`
BlockDeviceCacheSet bool `toml:"block_device_cache_set"`
BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
VhostUserStorePath string `toml:"vhost_user_store_path"`
FileBackedMemRootDir string `toml:"file_mem_backend"`
GuestHookPath string `toml:"guest_hook_path"`
GuestMemoryDumpPath string `toml:"guest_memory_dump_path"`
HypervisorPathList []string `toml:"valid_hypervisor_paths"`
JailerPathList []string `toml:"valid_jailer_paths"`
CtlPathList []string `toml:"valid_ctlpaths"`
VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"`
VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"`
PFlashList []string `toml:"pflashes"`
VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"`
FileBackedMemRootList []string `toml:"valid_file_mem_backends"`
EnableAnnotations []string `toml:"enable_annotations"`
RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"`
TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"`
NumVCPUs int32 `toml:"default_vcpus"`
DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"`
MemorySize uint32 `toml:"default_memory"`
@@ -106,14 +110,16 @@ type hypervisor struct {
DefaultBridges uint32 `toml:"default_bridges"`
Msize9p uint32 `toml:"msize_9p"`
PCIeRootPort uint32 `toml:"pcie_root_port"`
BlockDeviceCacheSet bool `toml:"block_device_cache_set"`
BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
DisableBlockDeviceUse bool `toml:"disable_block_device_use"`
MemPrealloc bool `toml:"enable_mem_prealloc"`
HugePages bool `toml:"enable_hugepages"`
VirtioMem bool `toml:"enable_virtio_mem"`
IOMMU bool `toml:"enable_iommu"`
IOMMUPlatform bool `toml:"enable_iommu_platform"`
FileBackedMemRootDir string `toml:"file_mem_backend"`
FileBackedMemRootList []string `toml:"valid_file_mem_backends"`
Swap bool `toml:"enable_swap"`
Debug bool `toml:"enable_debug"`
DisableNestingChecks bool `toml:"disable_nesting_checks"`
@@ -121,29 +127,30 @@ type hypervisor struct {
DisableImageNvdimm bool `toml:"disable_image_nvdimm"`
HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"`
DisableVhostNet bool `toml:"disable_vhost_net"`
GuestHookPath string `toml:"guest_hook_path"`
RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"`
TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
EnableAnnotations []string `toml:"enable_annotations"`
GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"`
}

type runtime struct {
InterNetworkModel string `toml:"internetworking_model"`
JaegerEndpoint string `toml:"jaeger_endpoint"`
JaegerUser string `toml:"jaeger_user"`
JaegerPassword string `toml:"jaeger_password"`
SandboxBindMounts []string `toml:"sandbox_bind_mounts"`
Experimental []string `toml:"experimental"`
Debug bool `toml:"enable_debug"`
Tracing bool `toml:"enable_tracing"`
DisableNewNetNs bool `toml:"disable_new_netns"`
DisableGuestSeccomp bool `toml:"disable_guest_seccomp"`
SandboxCgroupOnly bool `toml:"sandbox_cgroup_only"`
Experimental []string `toml:"experimental"`
InterNetworkModel string `toml:"internetworking_model"`
EnablePprof bool `toml:"enable_pprof"`
}

type agent struct {
Debug bool `toml:"enable_debug"`
Tracing bool `toml:"enable_tracing"`
TraceMode string `toml:"trace_mode"`
TraceType string `toml:"trace_type"`
KernelModules []string `toml:"kernel_modules"`
Debug bool `toml:"enable_debug"`
Tracing bool `toml:"enable_tracing"`
DebugConsoleEnabled bool `toml:"debug_console_enabled"`
}

@@ -424,20 +431,12 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error)
return
}

func (h hypervisor) getRxRateLimiterCfg() (uint64, error) {
if h.RxRateLimiterMaxRate < 0 {
return 0, fmt.Errorf("rx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.RxRateLimiterMaxRate)
}

return h.RxRateLimiterMaxRate, nil
func (h hypervisor) getRxRateLimiterCfg() uint64 {
return h.RxRateLimiterMaxRate
}

func (h hypervisor) getTxRateLimiterCfg() (uint64, error) {
if h.TxRateLimiterMaxRate < 0 {
return 0, fmt.Errorf("tx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.TxRateLimiterMaxRate)
}

return h.TxRateLimiterMaxRate, nil
func (h hypervisor) getTxRateLimiterCfg() uint64 {
return h.TxRateLimiterMaxRate
}

func (h hypervisor) getIOMMUPlatform() bool {
@@ -522,15 +521,8 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
return vc.HypervisorConfig{}, err
}

rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
if err != nil {
return vc.HypervisorConfig{}, err
}

txRateLimiterMaxRate, err := h.getTxRateLimiterCfg()
if err != nil {
return vc.HypervisorConfig{}, err
}
rxRateLimiterMaxRate := h.getRxRateLimiterCfg()
txRateLimiterMaxRate := h.getTxRateLimiterCfg()

return vc.HypervisorConfig{
HypervisorPath: hypervisor,
@@ -626,15 +618,8 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
return vc.HypervisorConfig{}, err
}

rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
if err != nil {
return vc.HypervisorConfig{}, err
}

txRateLimiterMaxRate, err := h.getTxRateLimiterCfg()
if err != nil {
return vc.HypervisorConfig{}, err
}
rxRateLimiterMaxRate := h.getRxRateLimiterCfg()
txRateLimiterMaxRate := h.getTxRateLimiterCfg()

return vc.HypervisorConfig{
HypervisorPath: hypervisor,
@@ -842,6 +827,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus,
PCIeRootPort: h.PCIeRootPort,
DisableVhostNet: true,
GuestHookPath: h.guestHookPath(),
VirtioFSExtraArgs: h.VirtioFSExtraArgs,
EnableAnnotations: h.EnableAnnotations,
}, nil
@@ -15,7 +15,6 @@ import (
"testing"

ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
@@ -74,16 +73,6 @@ func newBasicTestCmd() types.Cmd {
return cmd
}

func rmSandboxDir(sid string) error {
store, err := persist.GetDriver()
if err != nil {
return fmt.Errorf("failed to get fs persist driver: %v", err)
}

store.Destroy(sid)
return nil
}

func newTestSandboxConfigNoop() SandboxConfig {
bundlePath := filepath.Join(testDir, testBundle)
containerAnnotations[annotations.BundlePathKey] = bundlePath
@@ -203,26 +192,6 @@ func TestCreateSandboxFailing(t *testing.T) {
* Benchmarks
*/

func createNewSandboxConfig(hType HypervisorType) SandboxConfig {
hypervisorConfig := HypervisorConfig{
KernelPath: "/usr/share/kata-containers/vmlinux.container",
ImagePath: "/usr/share/kata-containers/kata-containers.img",
HypervisorPath: "/usr/bin/qemu-system-x86_64",
}

netConfig := NetworkConfig{}

return SandboxConfig{
ID: testSandboxID,
HypervisorType: hType,
HypervisorConfig: hypervisorConfig,

AgentConfig: KataAgentConfig{},

NetworkConfig: netConfig,
}
}

func newTestContainerConfigNoop(contID string) ContainerConfig {
// Define the container command and bundle.
container := ContainerConfig{

@@ -435,7 +435,7 @@ func (c *Container) setContainerState(state types.StateString) error {
return nil
}

func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, guestSharedDir string) (string, bool, error) {
func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, guestSharedDir string) (string, bool, error) {
randBytes, err := utils.GenerateRandomBytes(8)
if err != nil {
return "", false, err
@@ -469,9 +469,34 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, guestSharedDir s
}
} else {
// These mounts are created in the shared dir
mountDest := filepath.Join(hostSharedDir, filename)
if err := bindMount(c.ctx, m.Source, mountDest, false, "private"); err != nil {
return "", false, err
mountDest := filepath.Join(hostMountDir, filename)
if !m.ReadOnly {
if err := bindMount(c.ctx, m.Source, mountDest, false, "private"); err != nil {
return "", false, err
}
} else {
// For RO mounts, bindmount remount event is not propagated to mount subtrees,
// and it doesn't present in the virtiofsd standalone mount namespace either.
// So we end up a bit tricky:
// 1. make a private bind mount to the mount source
// 2. make another ro bind mount on the private mount
// 3. move the ro bind mount to mountDest
// 4. umount the private bind mount created in step 1
privateDest := filepath.Join(getPrivatePath(c.sandboxID), filename)
if err := bindMount(c.ctx, m.Source, privateDest, false, "private"); err != nil {
return "", false, err
}
defer func() {
syscall.Unmount(privateDest, syscall.MNT_DETACH|UmountNoFollow)
}()
if err := bindMount(c.ctx, privateDest, privateDest, true, "private"); err != nil {
return "", false, err
}
if err := moveMount(c.ctx, privateDest, mountDest); err != nil {
return "", false, err
}

syscall.Unmount(privateDest, syscall.MNT_DETACH|UmountNoFollow)
}
// Save HostPath mount value into the mount list of the container.
c.mounts[idx].HostPath = mountDest
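The four-step read-only dance in the hunk above exists because an MS_RDONLY remount is not propagated into mount subtrees, and the remount is invisible to virtiofsd's separate mount namespace. A standalone Go sketch of the same sequence using raw mount(2) flags (paths and the helper name are hypothetical; the runtime goes through its own bindMount/moveMount wrappers):

```go
package main

import "golang.org/x/sys/unix"

// shareReadOnly publishes src read-only at dest by way of a private
// staging mount, mirroring steps 1-4 of the comment above.
func shareReadOnly(src, privateDest, dest string) error {
	// 1. private bind mount of the source onto a staging point
	if err := unix.Mount(src, privateDest, "bind", unix.MS_BIND, ""); err != nil {
		return err
	}
	defer unix.Unmount(privateDest, unix.MNT_DETACH) // 4. drop the staging mount
	if err := unix.Mount("none", privateDest, "", unix.MS_PRIVATE, ""); err != nil {
		return err
	}
	// 2. remount the staging bind mount read-only
	if err := unix.Mount(privateDest, privateDest, "bind",
		unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, ""); err != nil {
		return err
	}
	// 3. move the now read-only mount to its final destination
	return unix.Mount(privateDest, dest, "", unix.MS_MOVE, "")
}
```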
@@ -485,7 +510,7 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, guestSharedDir s
// It also updates the container mount list with the HostPath info, and store
// container mounts to the storage. This way, we will have the HostPath info
// available when we will need to unmount those mounts.
func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (sharedDirMounts map[string]Mount, ignoredMounts map[string]Mount, err error) {
func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestSharedDir string) (sharedDirMounts map[string]Mount, ignoredMounts map[string]Mount, err error) {
sharedDirMounts = make(map[string]Mount)
ignoredMounts = make(map[string]Mount)
var devicesToDetach []string
@@ -536,7 +561,7 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (

var ignore bool
var guestDest string
guestDest, ignore, err = c.shareFiles(m, idx, hostSharedDir, guestSharedDir)
guestDest, ignore, err = c.shareFiles(m, idx, hostSharedDir, hostMountDir, guestSharedDir)
if err != nil {
return nil, nil, err
}
@@ -547,22 +572,12 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (
continue
}

// Check if mount is readonly, let the agent handle the readonly mount
// within the VM.
readonly := false
for _, flag := range m.Options {
if flag == "ro" {
readonly = true
break
}
}

sharedDirMount := Mount{
Source: guestDest,
Destination: m.Destination,
Type: m.Type,
Options: m.Options,
ReadOnly: readonly,
ReadOnly: m.ReadOnly,
}

sharedDirMounts[sharedDirMount.Destination] = sharedDirMount
@@ -1029,7 +1044,7 @@ func (c *Container) stop(force bool) error {
return err
}

shareDir := filepath.Join(kataHostSharedDir(), c.sandbox.id, c.id)
shareDir := filepath.Join(getMountPath(c.sandbox.id), c.id)
if err := syscall.Rmdir(shareDir); err != nil {
c.Logger().WithError(err).WithField("share-dir", shareDir).Warn("Could not remove container share dir")
}

@@ -1243,7 +1243,6 @@ func revertBytes(num uint64) uint64 {
b := num % 1000
if a == 0 {
return num
} else {
return 1024*revertBytes(a) + b
}
return 1024*revertBytes(a) + b
}
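revertBytes recursively reinterprets the decimal groups of a byte count as powers of 1024, so a "1000-based" quantity becomes the corresponding "1024-based" one. A few worked evaluations of the function as written above:

```go
// Sample evaluations of revertBytes (a = num/1000, b = num%1000):
//
//   revertBytes(500)     == 500                        // a == 0, returned as-is
//   revertBytes(1000)    == 1*1024 + 0   == 1024
//   revertBytes(2048)    == 2*1024 + 48  == 2096
//   revertBytes(1024000) == revertBytes(1024)*1024 + 0
//                        == (1*1024 + 24)*1024 == 1073152
```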
@@ -174,11 +174,11 @@ func registerFirecrackerMetrics() {

// updateFirecrackerMetrics update all metrics to the latest values.
func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
// set metrics for ApiServerMetrics
apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.ApiServer.ProcessStartupTimeUs))
apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.ApiServer.ProcessStartupTimeCpuUs))
apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.ApiServer.SyncResponseFails))
apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.ApiServer.SyncVmmSendTimeoutCount))
// set metrics for APIServerMetrics
apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.APIServer.ProcessStartupTimeUs))
apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.APIServer.ProcessStartupTimeCPUUs))
apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.APIServer.SyncResponseFails))
apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.APIServer.SyncVmmSendTimeoutCount))

// set metrics for BlockDeviceMetrics
blockDeviceMetrics.WithLabelValues("activate_fails").Set(float64(fm.Block.ActivateFails))
@@ -199,10 +199,10 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
blockDeviceMetrics.WithLabelValues("rate_limiter_throttled_events").Set(float64(fm.Block.RateLimiterThrottledEvents))

// set metrics for GetRequestsMetrics
getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetApiRequests.InstanceInfoCount))
getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetApiRequests.InstanceInfoFails))
getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetApiRequests.MachineCfgCount))
getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetApiRequests.MachineCfgFails))
getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetAPIRequests.InstanceInfoCount))
getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetAPIRequests.InstanceInfoFails))
getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetAPIRequests.MachineCfgCount))
getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetAPIRequests.MachineCfgFails))

// set metrics for I8042DeviceMetrics
i8042DeviceMetrics.WithLabelValues("error_count").Set(float64(fm.I8042.ErrorCount))
@@ -216,13 +216,13 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
performanceMetrics.WithLabelValues("full_create_snapshot").Set(float64(fm.LatenciesUs.FullCreateSnapshot))
performanceMetrics.WithLabelValues("diff_create_snapshot").Set(float64(fm.LatenciesUs.DiffCreateSnapshot))
performanceMetrics.WithLabelValues("load_snapshot").Set(float64(fm.LatenciesUs.LoadSnapshot))
performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVm))
performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVm))
performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVM))
performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVM))
performanceMetrics.WithLabelValues("vmm_full_create_snapshot").Set(float64(fm.LatenciesUs.VmmFullCreateSnapshot))
performanceMetrics.WithLabelValues("vmm_diff_create_snapshot").Set(float64(fm.LatenciesUs.VmmDiffCreateSnapshot))
performanceMetrics.WithLabelValues("vmm_load_snapshot").Set(float64(fm.LatenciesUs.VmmLoadSnapshot))
performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVm))
performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVm))
performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVM))
performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVM))

// set metrics for LoggerSystemMetrics
loggerSystemMetrics.WithLabelValues("missed_metrics_count").Set(float64(fm.Logger.MissedMetricsCount))
@@ -273,28 +273,28 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
netDeviceMetrics.WithLabelValues("tx_spoofed_mac_count").Set(float64(fm.Net.TxSpoofedMacCount))

// set metrics for PatchRequestsMetrics
patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchApiRequests.DriveCount))
patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchApiRequests.DriveFails))
patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchApiRequests.NetworkCount))
patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchApiRequests.NetworkFails))
patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchApiRequests.MachineCfgCount))
patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchApiRequests.MachineCfgFails))
patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchAPIRequests.DriveCount))
patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchAPIRequests.DriveFails))
patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchAPIRequests.NetworkCount))
patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchAPIRequests.NetworkFails))
patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchAPIRequests.MachineCfgCount))
patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchAPIRequests.MachineCfgFails))

// set metrics for PutRequestsMetrics
putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutApiRequests.ActionsCount))
putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutApiRequests.ActionsFails))
putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutApiRequests.BootSourceCount))
putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutApiRequests.BootSourceFails))
putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutApiRequests.DriveCount))
putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutApiRequests.DriveFails))
putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutApiRequests.LoggerCount))
putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutApiRequests.LoggerFails))
putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutApiRequests.MachineCfgCount))
putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutApiRequests.MachineCfgFails))
putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutApiRequests.MetricsCount))
putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutApiRequests.MetricsFails))
putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutApiRequests.NetworkCount))
putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutApiRequests.NetworkFails))
putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutAPIRequests.ActionsCount))
putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutAPIRequests.ActionsFails))
putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutAPIRequests.BootSourceCount))
putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutAPIRequests.BootSourceFails))
putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutAPIRequests.DriveCount))
putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutAPIRequests.DriveFails))
putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutAPIRequests.LoggerCount))
putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutAPIRequests.LoggerFails))
putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutAPIRequests.MachineCfgCount))
putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutAPIRequests.MachineCfgFails))
putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutAPIRequests.MetricsCount))
putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutAPIRequests.MetricsFails))
putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutAPIRequests.NetworkCount))
putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutAPIRequests.NetworkFails))

// set metrics for RTCDeviceMetrics
rTCDeviceMetrics.WithLabelValues("error_count").Set(float64(fm.Rtc.ErrorCount))
@@ -310,7 +310,7 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
vcpuMetrics.WithLabelValues("exit_mmio_read").Set(float64(fm.Vcpu.ExitMmioRead))
vcpuMetrics.WithLabelValues("exit_mmio_write").Set(float64(fm.Vcpu.ExitMmioWrite))
vcpuMetrics.WithLabelValues("failures").Set(float64(fm.Vcpu.Failures))
vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCpuid))
vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCPUid))

// set metrics for VmmMetrics
vmmMetrics.WithLabelValues("device_events").Set(float64(fm.Vmm.DeviceEvents))
@@ -355,11 +355,11 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
// Structure storing all metrics while enforcing serialization support on them.
type FirecrackerMetrics struct {
// API Server related metrics.
ApiServer ApiServerMetrics `json:"api_server"`
APIServer APIServerMetrics `json:"api_server"`
// A block device's related metrics.
Block BlockDeviceMetrics `json:"block"`
// Metrics related to API GET requests.
GetApiRequests GetRequestsMetrics `json:"get_api_requests"`
GetAPIRequests GetRequestsMetrics `json:"get_api_requests"`
// Metrics related to the i8042 device.
I8042 I8042DeviceMetrics `json:"i8042"`
// Metrics related to performance measurements.
@@ -371,9 +371,9 @@ type FirecrackerMetrics struct {
// A network device's related metrics.
Net NetDeviceMetrics `json:"net"`
// Metrics related to API PATCH requests.
PatchApiRequests PatchRequestsMetrics `json:"patch_api_requests"`
PatchAPIRequests PatchRequestsMetrics `json:"patch_api_requests"`
// Metrics related to API PUT requests.
PutApiRequests PutRequestsMetrics `json:"put_api_requests"`
PutAPIRequests PutRequestsMetrics `json:"put_api_requests"`
// Metrics related to the RTC device.
Rtc RTCDeviceMetrics `json:"rtc"`
// Metrics related to seccomp filtering.
@@ -391,11 +391,11 @@ type FirecrackerMetrics struct {
}

// API Server related metrics.
type ApiServerMetrics struct {
type APIServerMetrics struct {
// Measures the process's startup time in microseconds.
ProcessStartupTimeUs uint64 `json:"process_startup_time_us"`
// Measures the cpu's startup time in microseconds.
ProcessStartupTimeCpuUs uint64 `json:"process_startup_time_cpu_us"`
ProcessStartupTimeCPUUs uint64 `json:"process_startup_time_cpu_us"`
// Number of failures on API requests triggered by internal errors.
SyncResponseFails uint64 `json:"sync_response_fails"`
// Number of timeouts during communication with the VMM.
@@ -475,9 +475,9 @@ type PerformanceMetrics struct {
// Measures the snapshot load time, at the API (user) level, in microseconds.
LoadSnapshot uint64 `json:"load_snapshot"`
// Measures the microVM pausing duration, at the API (user) level, in microseconds.
PauseVm uint64 `json:"pause_vm"`
PauseVM uint64 `json:"pause_vm"`
// Measures the microVM resuming duration, at the API (user) level, in microseconds.
ResumeVm uint64 `json:"resume_vm"`
ResumeVM uint64 `json:"resume_vm"`
// Measures the snapshot full create time, at the VMM level, in microseconds.
VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"`
// Measures the snapshot diff create time, at the VMM level, in microseconds.
@@ -485,9 +485,9 @@ type PerformanceMetrics struct {
// Measures the snapshot load time, at the VMM level, in microseconds.
VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"`
// Measures the microVM pausing duration, at the VMM level, in microseconds.
VmmPauseVm uint64 `json:"vmm_pause_vm"`
VmmPauseVM uint64 `json:"vmm_pause_vm"`
// Measures the microVM resuming duration, at the VMM level, in microseconds.
VmmResumeVm uint64 `json:"vmm_resume_vm"`
VmmResumeVM uint64 `json:"vmm_resume_vm"`
}

// Logging related metrics.
@@ -662,7 +662,7 @@ type VcpuMetrics struct {
// Number of errors during this VCPU's run.
Failures uint64 `json:"failures"`
// Failures in configuring the CPUID.
FilterCpuid uint64 `json:"filter_cpuid"`
FilterCPUid uint64 `json:"filter_cpuid"`
}

// Metrics related to the virtual machine manager.
@@ -72,6 +72,7 @@ type VCSandbox interface {
|
||||
ListRoutes() ([]*pbTypes.Route, error)
|
||||
|
||||
GetOOMEvent() (string, error)
|
||||
GetHypervisorPid() (int, error)
|
||||
|
||||
UpdateRuntimeMetrics() error
|
||||
GetAgentMetrics() (string, error)
|
||||
|
||||
@@ -26,7 +26,6 @@ import (
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
	vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
	vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
	ns "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/nsenter"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/uuid"
@@ -149,9 +148,10 @@ var kataHostSharedDir = func() string {
}

// Shared path handling:
-// 1. create two directories for each sandbox:
+// 1. create three directories for each sandbox:
+// -. /run/kata-containers/shared/sandboxes/$sbx_id/mounts/, a directory to hold all host/guest shared mounts
// -. /run/kata-containers/shared/sandboxes/$sbx_id/shared/, a host/guest shared directory (9pfs/virtiofs source dir)
// -. /run/kata-containers/shared/sandboxes/$sbx_id/private/, a directory to hold all temporary private mounts when creating ro mounts
//
// 2. /run/kata-containers/shared/sandboxes/$sbx_id/mounts/ is bind mounted readonly to /run/kata-containers/shared/sandboxes/$sbx_id/shared/, so guest cannot modify it
//
@@ -164,6 +164,10 @@ func getMountPath(id string) string {
	return filepath.Join(kataHostSharedDir(), id, "mounts")
}

+func getPrivatePath(id string) string {
+	return filepath.Join(kataHostSharedDir(), id, "private")
+}
+
func getSandboxPath(id string) string {
	return filepath.Join(kataHostSharedDir(), id)
}
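Step 2 of the comment above hinges on a read-only bind mount. A sketch under stated assumptions (illustrative paths, root required to actually run) of how such a mount is typically made: a plain MS_BIND first, then a remount adding MS_RDONLY, since read-only cannot be applied at bind time:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

// bindReadonly bind mounts src onto dst, then remounts it read-only so the
// guest-visible view (the virtiofs/9pfs source dir) cannot be modified.
func bindReadonly(src, dst string) error {
	if err := syscall.Mount(src, dst, "bind", syscall.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind %s -> %s: %w", src, dst, err)
	}
	// A bind mount ignores most flags at creation; read-only needs a remount.
	return syscall.Mount("", dst, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, "")
}

func main() {
	sbx := "/run/kata-containers/shared/sandboxes/sbx-0" // illustrative sandbox id
	if err := bindReadonly(sbx+"/mounts", sbx+"/shared"); err != nil {
		fmt.Println("mount failed (expected unless run as root):", err)
	}
}
```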
@@ -963,6 +967,10 @@ func (k *kataAgent) constraintGRPCSpec(grpcSpec *grpc.Spec, passSeccomp bool) {
	grpcSpec.Linux.Resources.BlockIO = nil
	grpcSpec.Linux.Resources.HugepageLimits = nil
	grpcSpec.Linux.Resources.Network = nil
+	if grpcSpec.Linux.Resources.CPU != nil {
+		grpcSpec.Linux.Resources.CPU.Cpus = ""
+		grpcSpec.Linux.Resources.CPU.Mems = ""
+	}

	// There are three main reasons to do not apply systemd cgroups in the VM
	// - Initrd image doesn't have systemd.
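The added CPU block masks host-relative cpuset strings before the spec is shipped to the guest, whose vCPU topology is different (a host pinning of "0-3" is meaningless inside a one-vCPU microVM). A minimal sketch of the same masking against runtime-spec types; the helper name is hypothetical:

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// clearHostCpusets zeroes cpuset/mems fields that only make sense on the host.
func clearHostCpusets(r *specs.LinuxResources) {
	if r == nil || r.CPU == nil {
		return
	}
	r.CPU.Cpus = ""
	r.CPU.Mems = ""
}

func main() {
	res := &specs.LinuxResources{CPU: &specs.LinuxCPU{Cpus: "0-3", Mems: "0"}}
	clearHostCpusets(res)
	fmt.Printf("cpus=%q mems=%q\n", res.CPU.Cpus, res.CPU.Mems) // cpus="" mems=""
}
```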
@@ -1257,7 +1265,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
	}

	// Handle container mounts
-	newMounts, ignoredMounts, err := c.mountSharedDirMounts(getMountPath(sandbox.id), kataGuestSharedDir())
+	newMounts, ignoredMounts, err := c.mountSharedDirMounts(getSharePath(sandbox.id), getMountPath(sandbox.id), kataGuestSharedDir())
	if err != nil {
		return nil, err
	}
@@ -1328,14 +1336,6 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
		return nil, err
	}

-	enterNSList := []ns.Namespace{}
-	if sandbox.networkNS.NetNsPath != "" {
-		enterNSList = append(enterNSList, ns.Namespace{
-			Path: sandbox.networkNS.NetNsPath,
-			Type: ns.NSTypeNet,
-		})
-	}
-
	return buildProcessFromExecID(req.ExecId)
}
@@ -1966,7 +1966,7 @@ func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
	k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request")

	defer func() {
-		agentRpcDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
+		agentRPCDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
	}()
	return handler(ctx, request)
}
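agentRPCDurationsHistogram is a Prometheus histogram labelled by message name; the deferred Observe records the elapsed time in milliseconds. A self-contained sketch of the same pattern using prometheus/client_golang (the metric name and handler here are illustrative); time.Duration.Milliseconds() is the modern equivalent of Nanoseconds()/int64(time.Millisecond):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Histogram labelled by RPC name, mirroring how the diff above uses it.
var rpcDurationsHistogram = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "example_agent_rpc_durations_histogram_milliseconds", // hypothetical
		Help: "RPC latency distributions by message name.",
	},
	[]string{"action"},
)

// timedRPC runs handler and records its wall-clock duration on the way out.
func timedRPC(msgName string, handler func() error) error {
	start := time.Now()
	defer func() {
		rpcDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Milliseconds()))
	}()
	return handler()
}

func main() {
	prometheus.MustRegister(rpcDurationsHistogram)
	_ = timedRPC("grpc.CreateContainerRequest", func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	})
	fmt.Println("observed one RPC duration")
}
```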
@@ -22,6 +22,7 @@ import (
type mockAgent struct {
}

+// nolint:golint
func NewMockAgent() agent {
	return &mockAgent{}
}
@@ -237,6 +238,6 @@ func (n *mockAgent) getOOMEvent() (string, error) {
	return "", nil
}

-func (k *mockAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
+func (n *mockAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
	return nil, nil
}
@@ -212,6 +212,42 @@ func isDeviceMapper(major, minor int) (bool, error) {

const mountPerm = os.FileMode(0755)

+func evalMountPath(source, destination string) (string, string, error) {
+	if source == "" {
+		return "", "", fmt.Errorf("source must be specified")
+	}
+	if destination == "" {
+		return "", "", fmt.Errorf("destination must be specified")
+	}
+
+	absSource, err := filepath.EvalSymlinks(source)
+	if err != nil {
+		return "", "", fmt.Errorf("Could not resolve symlink for source %v", source)
+	}
+
+	if err := ensureDestinationExists(absSource, destination); err != nil {
+		return "", "", fmt.Errorf("Could not create destination mount point %v: %v", destination, err)
+	}
+
+	return absSource, destination, nil
+}
+
+// moveMount moves a mountpoint to another path with some bookkeeping:
+// * evaluate all symlinks
+// * ensure the source exists
+// * recursively create the destination
+func moveMount(ctx context.Context, source, destination string) error {
+	span, _ := trace(ctx, "moveMount")
+	defer span.Finish()
+
+	source, destination, err := evalMountPath(source, destination)
+	if err != nil {
+		return err
+	}
+
+	return syscall.Mount(source, destination, "move", syscall.MS_MOVE, "")
+}
+
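moveMount wraps MS_MOVE, which atomically relocates an existing mountpoint; evalMountPath supplies the bookkeeping (symlink resolution, destination creation) that MS_MOVE itself does not perform. A sketch demonstrating the syscall under stated assumptions (illustrative /tmp paths, root required):

```go
//go:build linux

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	src, dst := "/tmp/move-src", "/tmp/move-dst" // illustrative paths
	for _, d := range []string{src, dst} {
		if err := os.MkdirAll(d, 0o755); err != nil {
			panic(err)
		}
	}
	// MS_MOVE needs src to already be a mountpoint, so mount a tmpfs first.
	if err := syscall.Mount("tmpfs", src, "tmpfs", 0, ""); err != nil {
		fmt.Println("tmpfs mount failed (expected unless root):", err)
		return
	}
	// Atomically detach the mount from src and reattach it at dst.
	if err := syscall.Mount(src, dst, "", syscall.MS_MOVE, ""); err != nil {
		fmt.Println("move failed:", err)
		return
	}
	fmt.Println("mount moved from", src, "to", dst)
}
```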
// bindMount bind mounts a source in to a destination. This will
// do some bookkeeping:
// * evaluate all symlinks
@@ -222,20 +258,9 @@ func bindMount(ctx context.Context, source, destination string, readonly bool, p
	span, _ := trace(ctx, "bindMount")
	defer span.Finish()

-	if source == "" {
-		return fmt.Errorf("source must be specified")
-	}
-	if destination == "" {
-		return fmt.Errorf("destination must be specified")
-	}
-
-	absSource, err := filepath.EvalSymlinks(source)
+	absSource, destination, err := evalMountPath(source, destination)
	if err != nil {
-		return fmt.Errorf("Could not resolve symlink for source %v", source)
-	}
-
-	if err := ensureDestinationExists(absSource, destination); err != nil {
-		return fmt.Errorf("Could not create destination mount point %v: %v", destination, err)
+		return err
	}

	if err := syscall.Mount(absSource, destination, "bind", syscall.MS_BIND, ""); err != nil {
@@ -35,8 +35,7 @@ const (
	MockHybridVSockScheme = "mock"
)

-var defaultDialTimeout = 15 * time.Second
-var defaultCloseTimeout = 5 * time.Second
+var defaultDialTimeout = 30 * time.Second

var hybridVSockPort uint32
@@ -72,8 +71,7 @@ func NewAgentClient(ctx context.Context, sock string) (*AgentClient, error) {
	}

	var conn net.Conn
-	var d dialer
-	d = agentDialer(parsedAddr)
+	var d = agentDialer(parsedAddr)
	conn, err = d(grpcAddr, defaultDialTimeout)
	if err != nil {
		return nil, err
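agentDialer presumably returns a scheme-specific dial function that is then invoked with defaultDialTimeout. A hedged sketch of that shape; the dialer type and the TCP example are illustrative, not the real vsock/hybrid-vsock implementations:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// dialer is the assumed shape of what agentDialer returns: a function that
// connects to an already-parsed address within a timeout.
type dialer func(addr string, timeout time.Duration) (net.Conn, error)

// tcpDialer is a stand-in implementation for the sketch.
func tcpDialer(addr string, timeout time.Duration) (net.Conn, error) {
	return net.DialTimeout("tcp", addr, timeout)
}

func main() {
	var d dialer = tcpDialer
	conn, err := d("127.0.0.1:1", 30*time.Second) // mirrors the 30s default above
	if err != nil {
		fmt.Println("dial failed (expected, nothing listens there):", err)
		return
	}
	conn.Close()
}
```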
@@ -53,9 +53,6 @@ const (
)

var (
-	// If set to true, expects cgroupsPath to be of form "slice:prefix:name", otherwise cgroups creation will fail
-	systemdCgroup *bool
-
	cgroupsLogger = logrus.WithField("source", "virtcontainers/pkg/cgroups")
)
@@ -66,18 +63,6 @@ func SetLogger(logger *logrus.Entry) {
	cgroupsLogger = logger.WithFields(fields)
}

-func EnableSystemdCgroup() {
-	systemd := true
-	systemdCgroup = &systemd
-}
-
-func UseSystemdCgroup() bool {
-	if systemdCgroup != nil {
-		return *systemdCgroup
-	}
-	return false
-}
-
// returns the list of devices that a hypervisor may need
func hypervisorDevices() []specs.LinuxDeviceCgroup {
	devices := []specs.LinuxDeviceCgroup{}
@@ -107,7 +92,6 @@ func hypervisorDevices() []specs.LinuxDeviceCgroup {
// New creates a new CgroupManager
func New(config *Config) (*Manager, error) {
	var err error
-	useSystemdCgroup := UseSystemdCgroup()

	devices := config.Resources.Devices
	devices = append(devices, hypervisorDevices()...)
@@ -125,6 +109,9 @@ func New(config *Config) (*Manager, error) {
	cgroups := config.Cgroups
	cgroupPaths := config.CgroupPaths

+	// determine if we are utilizing systemd managed cgroups based on the path provided
+	useSystemdCgroup := IsSystemdCgroup(config.CgroupPath)
+
	// Create a new cgroup if the current one is nil
	// this cgroups must be saved later
	if cgroups == nil {
@@ -220,7 +207,14 @@ func (m *Manager) moveToParent() error {
	m.Lock()
	defer m.Unlock()
	for _, cgroupPath := range m.mgr.GetPaths() {
+
		pids, err := readPids(cgroupPath)
+		// possible that the cgroupPath doesn't exist. If so, skip:
+		if os.IsNotExist(err) {
+			// The cgroup is not present on the filesystem: no pids to move. The systemd cgroup
+			// manager lists all of the subsystems, including those that are not actually being managed.
+			continue
+		}
		if err != nil {
			return err
		}
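The skip works because readPids surfaces the raw filesystem error to its caller. A plausible sketch of such a helper (the real one is not shown in this diff): read cgroup.procs and let os.IsNotExist detect subsystems that systemd lists but never materialized on disk:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// readPids parses one PID per line from cgroup.procs. Returning the raw
// error lets callers, like moveToParent above, use os.IsNotExist to skip
// cgroup paths that do not exist.
func readPids(cgroupPath string) ([]int, error) {
	data, err := os.ReadFile(filepath.Join(cgroupPath, "cgroup.procs"))
	if err != nil {
		return nil, err // may satisfy os.IsNotExist
	}
	var pids []int
	for _, field := range strings.Fields(string(data)) {
		pid, err := strconv.Atoi(field)
		if err != nil {
			return nil, err
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

func main() {
	pids, err := readPids("/sys/fs/cgroup/does-not-exist")
	fmt.Println(pids, os.IsNotExist(err)) // [] true
}
```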
@@ -286,7 +280,10 @@ func (m *Manager) GetPaths() map[string]string {
func (m *Manager) Destroy() error {
	// cgroup can't be destroyed if it contains running processes
	if err := m.moveToParent(); err != nil {
-		return fmt.Errorf("Could not move processes into parent cgroup: %v", err)
+		// If the process migration to the parent cgroup fails, then
+		// we expect the Destroy to fail as well. Let's log an error here
+		// and attempt to execute the Destroy still to help cleanup the hosts' FS.
+		m.logger().WithError(err).Error("Could not move processes into parent cgroup")
	}

	m.Lock()
@@ -11,34 +11,11 @@ import (
	"github.com/stretchr/testify/assert"
)

-func TestEnableSystemdCgroup(t *testing.T) {
-	assert := assert.New(t)
-
-	orgSystemdCgroup := systemdCgroup
-	defer func() {
-		systemdCgroup = orgSystemdCgroup
-	}()
-
-	useSystemdCgroup := UseSystemdCgroup()
-	if systemdCgroup != nil {
-		assert.Equal(*systemdCgroup, useSystemdCgroup)
-	} else {
-		assert.False(useSystemdCgroup)
-	}
-
-	EnableSystemdCgroup()
-	assert.True(UseSystemdCgroup())
-}
-
//very very basic test; should be expanded
func TestNew(t *testing.T) {
	assert := assert.New(t)
-	useSystemdCgroup := false
-	orgSystemdCgroup := systemdCgroup
-	defer func() {
-		systemdCgroup = orgSystemdCgroup
-	}()
-	systemdCgroup = &useSystemdCgroup

	// create a cgroupfs cgroup manager
	c := &Config{
		Cgroups:    nil,
		CgroupPath: "",
@@ -48,8 +25,14 @@ func TestNew(t *testing.T) {
	assert.NoError(err)
	assert.NotNil(mgr.mgr)

-	useSystemdCgroup = true
-	mgr, err = New(c)
-	assert.Error(err)
-	assert.Nil(mgr)
+	// create a systemd cgroup manager
+	s := &Config{
+		Cgroups:    nil,
+		CgroupPath: "system.slice:kubepod:container",
+	}
+
+	mgr, err = New(s)
+	assert.NoError(err)
+	assert.NotNil(mgr.mgr)

}
@@ -9,7 +9,7 @@ import (
	"fmt"
	"os"
	"path/filepath"
-	"regexp"
+	"strings"

	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runtime-spec/specs-go"
@@ -60,13 +60,20 @@ func ValidCgroupPath(path string, systemdCgroup bool) (string, error) {
}

func IsSystemdCgroup(cgroupPath string) bool {
-	// systemd cgroup path: slice:prefix:name
-	re := regexp.MustCompile(`([[:alnum:]]|\.)+:([[:alnum:]]|\.)+:([[:alnum:]]|\.)+`)
-	found := re.FindStringIndex(cgroupPath)
-
-	// if found string is equal to cgroupPath then
-	// it's a correct systemd cgroup path.
-	return found != nil && cgroupPath[found[0]:found[1]] == cgroupPath
+	// If we are utilizing systemd to manage cgroups, we expect to receive a path
+	// in the format slice:scopeprefix:name. A typical example would be:
+	//
+	// system.slice:docker:6b4c4a4d0cc2a12c529dcb13a2b8e438dfb3b2a6af34d548d7d
+	//
+	// Based on this, let's split by the ':' delimiter and verify that the first
+	// section has .slice as a suffix.
+	parts := strings.Split(cgroupPath, ":")
+	if len(parts) == 3 && strings.HasSuffix(parts[0], ".slice") {
+		return true
+	}
+
+	return false
}

func DeviceToCgroupDevice(device string) (*configs.Device, error) {
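The new check is simple enough to exercise standalone. The function body below is reproduced from the diff; the sample paths mirror the updated tests:

```go
package main

import (
	"fmt"
	"strings"
)

// IsSystemdCgroup, as rewritten above: a systemd path is slice:scopeprefix:name
// where the slice component ends in ".slice".
func IsSystemdCgroup(cgroupPath string) bool {
	parts := strings.Split(cgroupPath, ":")
	return len(parts) == 3 && strings.HasSuffix(parts[0], ".slice")
}

func main() {
	for _, p := range []string{
		"system.slice:docker:6b4c4a4d0cc2", // true
		"foo.slice:kata:afhts2e5d4g5s",     // true
		"slice:kata:afhts2e5d4g5s",         // false: no ".slice" suffix, unlike the old regex
		"/kata/afhts2e5d4g5s",              // false
	} {
		fmt.Printf("%-36s %v\n", p, IsSystemdCgroup(p))
	}
}
```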
@@ -22,8 +22,8 @@ func TestIsSystemdCgroup(t *testing.T) {
		path     string
		expected bool
	}{
-		{"slice:kata:afhts2e5d4g5s", true},
-		{"slice.system:kata:afhts2e5d4g5s", true},
+		{"foo.slice:kata:afhts2e5d4g5s", true},
+		{"system.slice:kata:afhts2e5d4g5s", true},
		{"/kata/afhts2e5d4g5s", false},
		{"a:b:c:d", false},
		{":::", false},
@@ -78,9 +78,9 @@ func TestValidCgroupPath(t *testing.T) {
		{":a:b", true, true},
		{"@:@:@", true, true},

-		// valid system paths
-		{"slice:kata:55555", true, false},
-		{"slice.system:kata:afhts2e5d4g5s", true, false},
+		// valid systemd paths
+		{"x.slice:kata:55555", true, false},
+		{"system.slice:kata:afhts2e5d4g5s", true, false},
	} {
		path, err := ValidCgroupPath(t.path, t.systemdCgroup)
		if t.error {
@@ -4,7 +4,7 @@
# SPDX-License-Identifier: Apache-2.0
#

-all: | update-yaml generate-client-code
+all: | update-yaml generate-client-code go-fmt
MK_DIR := $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
YQ_INSTALLER := "$(MK_DIR)/../../../../../ci/install_yq.sh"
VERSIONS_FILE := "$(MK_DIR)/../../../../../versions.yaml"
@@ -17,8 +17,9 @@ generate-client-code: clean-generated-code
		-i /local/cloud-hypervisor.yaml \
		-g go \
		-o /local/client

+go-fmt:
+	rm client/go.mod; \
+	go fmt ./...
+
update-yaml:
ifndef YQ
@@ -36,6 +36,7 @@ Class | Method | HTTP request | Description
*DefaultApi* | [**CreateVM**](docs/DefaultApi.md#createvm) | **Put** /vm.create | Create the cloud-hypervisor Virtual Machine (VM) instance. The instance is not booted, only created.
*DefaultApi* | [**DeleteVM**](docs/DefaultApi.md#deletevm) | **Put** /vm.delete | Delete the cloud-hypervisor Virtual Machine (VM) instance.
*DefaultApi* | [**PauseVM**](docs/DefaultApi.md#pausevm) | **Put** /vm.pause | Pause a previously booted VM instance.
+*DefaultApi* | [**PowerButtonVM**](docs/DefaultApi.md#powerbuttonvm) | **Put** /vm.power-button | Trigger a power button in the VM
*DefaultApi* | [**RebootVM**](docs/DefaultApi.md#rebootvm) | **Put** /vm.reboot | Reboot the VM instance.
*DefaultApi* | [**ResumeVM**](docs/DefaultApi.md#resumevm) | **Put** /vm.resume | Resume a previously paused VM instance.
*DefaultApi* | [**ShutdownVM**](docs/DefaultApi.md#shutdownvm) | **Put** /vm.shutdown | Shut the VM instance down.
@@ -64,6 +65,7 @@ Class | Method | HTTP request | Description
- [CpuTopology](docs/CpuTopology.md)
- [CpusConfig](docs/CpusConfig.md)
- [DeviceConfig](docs/DeviceConfig.md)
+- [DeviceNode](docs/DeviceNode.md)
- [DiskConfig](docs/DiskConfig.md)
- [FsConfig](docs/FsConfig.md)
- [InitramfsConfig](docs/InitramfsConfig.md)
@@ -75,9 +77,11 @@ Class | Method | HTTP request | Description
- [NumaDistance](docs/NumaDistance.md)
- [PciDeviceInfo](docs/PciDeviceInfo.md)
- [PmemConfig](docs/PmemConfig.md)
+- [RateLimiterConfig](docs/RateLimiterConfig.md)
- [RestoreConfig](docs/RestoreConfig.md)
- [RngConfig](docs/RngConfig.md)
- [SgxEpcConfig](docs/SgxEpcConfig.md)
+- [TokenBucket](docs/TokenBucket.md)
- [VmAddDevice](docs/VmAddDevice.md)
- [VmConfig](docs/VmConfig.md)
- [VmInfo](docs/VmInfo.md)
Some files were not shown because too many files have changed in this diff.