mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-03-16 01:32:26 +00:00
Compare commits
316 Commits
2.0.0-alph
...
2.0.0-rc1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9b969bb7da | ||
|
|
fb2f3cfce2 | ||
|
|
f32a741c76 | ||
|
|
512e79f61a | ||
|
|
aa70080423 | ||
|
|
34015bae12 | ||
|
|
93b60a8327 | ||
|
|
aa9951f2cd | ||
|
|
9d8c72998b | ||
|
|
033ed13202 | ||
|
|
c058d04b94 | ||
|
|
9d2bb0c452 | ||
|
|
627d062fb2 | ||
|
|
96afe62576 | ||
|
|
d946016eb7 | ||
|
|
37f1a77a6a | ||
|
|
450a81cc54 | ||
|
|
c09f02e6f6 | ||
|
|
58c7469110 | ||
|
|
c36ea0968d | ||
|
|
ba197302e2 | ||
|
|
725ad067c1 | ||
|
|
9858c23c59 | ||
|
|
fc8f1ff03c | ||
|
|
f7b4f76082 | ||
|
|
4fd66fa689 | ||
|
|
e6ff42b8ad | ||
|
|
6710d87c6a | ||
|
|
178b79f122 | ||
|
|
bc545c6549 | ||
|
|
585481990a | ||
|
|
0057f86cfa | ||
|
|
fa0401793f | ||
|
|
60b7265961 | ||
|
|
57b53dbae8 | ||
|
|
ddf1a545d1 | ||
|
|
cbdf6400ae | ||
|
|
ceeecf9c66 | ||
|
|
7c53baea8a | ||
|
|
b549d354bf | ||
|
|
9f3113e1f6 | ||
|
|
ef94742320 | ||
|
|
d71764985d | ||
|
|
0fc04a269d | ||
|
|
8d7ac5f01c | ||
|
|
612acbe319 | ||
|
|
f3a487cd41 | ||
|
|
3a559521d1 | ||
|
|
567daf5a42 | ||
|
|
c7d913f436 | ||
|
|
7bd410c725 | ||
|
|
7fbc789855 | ||
|
|
7fc41a771a | ||
|
|
a31d82fec2 | ||
|
|
9ef4c80340 | ||
|
|
6a4e413758 | ||
|
|
678d4d189d | ||
|
|
718f718764 | ||
|
|
d860ded3f0 | ||
|
|
a141da8a20 | ||
|
|
aaaaee7a4b | ||
|
|
21efaf1fca | ||
|
|
2056623e13 | ||
|
|
34126ee704 | ||
|
|
980a338454 | ||
|
|
e14f766895 | ||
|
|
2e0731f479 | ||
|
|
addf62087c | ||
|
|
c24b68dc4f | ||
|
|
24677d7484 | ||
|
|
9e74c28158 | ||
|
|
b7aae33cc1 | ||
|
|
6d9d58278e | ||
|
|
1bc6fbda8c | ||
|
|
d39f5a85e6 | ||
|
|
d90a0eefbe | ||
|
|
2618c014a0 | ||
|
|
5c4878f37e | ||
|
|
bd6b169e98 | ||
|
|
5770336572 | ||
|
|
45daec7b37 | ||
|
|
ed5a7dc022 | ||
|
|
6fc7c77721 | ||
|
|
84af1c20f3 | ||
|
|
3f39bdfc3e | ||
|
|
6c034baa00 | ||
|
|
81d8b48301 | ||
|
|
18d325edb0 | ||
|
|
d1c9ba5043 | ||
|
|
e6f16b7bb1 | ||
|
|
65c0464222 | ||
|
|
5c676689df | ||
|
|
e579321cc6 | ||
|
|
729a3b1d33 | ||
|
|
7e33e36f4a | ||
|
|
922a55e4fe | ||
|
|
54aa6ede96 | ||
|
|
615ffb93e5 | ||
|
|
f13ca94e10 | ||
|
|
c823b4cd99 | ||
|
|
0bb3117a51 | ||
|
|
357d788517 | ||
|
|
a06142fc5d | ||
|
|
22876b2da6 | ||
|
|
295f5100a3 | ||
|
|
6487044fa1 | ||
|
|
325a4f868d | ||
|
|
d7c77b69dc | ||
|
|
15065e4472 | ||
|
|
059e6426e9 | ||
|
|
b5e9d6060f | ||
|
|
d4a5258234 | ||
|
|
2ce97ec680 | ||
|
|
b081f26a7e | ||
|
|
6e328c0599 | ||
|
|
6520320fc6 | ||
|
|
37e3f89424 | ||
|
|
90e0dc8809 | ||
|
|
91dec532e5 | ||
|
|
e3ea8544f4 | ||
|
|
55354d5509 | ||
|
|
c133a4561d | ||
|
|
20a084ae79 | ||
|
|
63138a4f28 | ||
|
|
d86e74674c | ||
|
|
b6ee8ae149 | ||
|
|
8667df4d09 | ||
|
|
ebd3f316f2 | ||
|
|
7f20587433 | ||
|
|
1ab1d0e728 | ||
|
|
2dfb8bc549 | ||
|
|
fcfe00369b | ||
|
|
1a734bbb79 | ||
|
|
b30dc26986 | ||
|
|
2086e2e9e1 | ||
|
|
2019f00e2c | ||
|
|
677c87d248 | ||
|
|
fd3ce81518 | ||
|
|
d2a89d668e | ||
|
|
16f4ff38e5 | ||
|
|
99811645d2 | ||
|
|
0be02a8fd3 | ||
|
|
8b07bc2c80 | ||
|
|
6c96d66667 | ||
|
|
46d7b9b8dc | ||
|
|
fbb79739c9 | ||
|
|
33759af548 | ||
|
|
c192446a59 | ||
|
|
2e3e2ce114 | ||
|
|
6a4c9b14f2 | ||
|
|
359286a87d | ||
|
|
dd60e56f28 | ||
|
|
6e54767344 | ||
|
|
cb9993759b | ||
|
|
0d198f930e | ||
|
|
1de9bc0fa7 | ||
|
|
85642c328d | ||
|
|
c7745a3350 | ||
|
|
60ea0a5c37 | ||
|
|
7e4673d542 | ||
|
|
92dfa4634b | ||
|
|
b474828052 | ||
|
|
82efd2f267 | ||
|
|
8666e01e11 | ||
|
|
2d12da8ed0 | ||
|
|
cf3ac9f72a | ||
|
|
11e8a49410 | ||
|
|
00bd04f923 | ||
|
|
517dda02a3 | ||
|
|
ae98ea450d | ||
|
|
f5b71d3455 | ||
|
|
fcd29a28cc | ||
|
|
c422d061cb | ||
|
|
d20ea14c8a | ||
|
|
e429f79d36 | ||
|
|
dae6c7d967 | ||
|
|
1236e22475 | ||
|
|
8df06a046e | ||
|
|
65970d3858 | ||
|
|
c624fa7469 | ||
|
|
b24f2cb969 | ||
|
|
cf1b72d6c6 | ||
|
|
7b5ab58689 | ||
|
|
76c18aa345 | ||
|
|
5216815d1c | ||
|
|
aa3fb4db28 | ||
|
|
86a6e0b3e7 | ||
|
|
ceebd06b64 | ||
|
|
dadab1febf | ||
|
|
1bd5825955 | ||
|
|
f56f68bf2f | ||
|
|
60245a83fa | ||
|
|
7b54ef69f9 | ||
|
|
a145f22ddf | ||
|
|
66d3e4f727 | ||
|
|
66f308c6af | ||
|
|
4c328b3bb4 | ||
|
|
544219d9ad | ||
|
|
fd8f3ee951 | ||
|
|
be2f7e6ad9 | ||
|
|
bc9c8ddf8e | ||
|
|
bba8bcb340 | ||
|
|
10b1deb274 | ||
|
|
f5598a1bc2 | ||
|
|
f879acd6e7 | ||
|
|
7be95b15eb | ||
|
|
5b0e6f375b | ||
|
|
adf9ecc50c | ||
|
|
ad7dce47ca | ||
|
|
32b86a8d8b | ||
|
|
fd3b9ff9ed | ||
|
|
f1fd00da78 | ||
|
|
26506d832e | ||
|
|
bee17d1c8f | ||
|
|
219f93ffad | ||
|
|
4b62fc165e | ||
|
|
f7ff6d3297 | ||
|
|
c14d44aba8 | ||
|
|
0a9b8e0ae2 | ||
|
|
6f2eab40d3 | ||
|
|
b4c105336e | ||
|
|
81644003eb | ||
|
|
8e18cec804 | ||
|
|
5fbac0a380 | ||
|
|
bb30759e4e | ||
|
|
095ebb8ca3 | ||
|
|
03a4d107ba | ||
|
|
e7bfeb418a | ||
|
|
2ee40027e5 | ||
|
|
60770f4629 | ||
|
|
e6757de2ac | ||
|
|
4c30b2554f | ||
|
|
282bff9f94 | ||
|
|
29aae01779 | ||
|
|
5b707234d8 | ||
|
|
9f1a3d15db | ||
|
|
f13506163b | ||
|
|
b67325c3ac | ||
|
|
454dd85432 | ||
|
|
62b45064d4 | ||
|
|
6dca74ba72 | ||
|
|
7c85decc82 | ||
|
|
efe51b291c | ||
|
|
08d046d994 | ||
|
|
7b49fa121b | ||
|
|
ccfb73cb8c | ||
|
|
fd13c93c6a | ||
|
|
c3fc09b95b | ||
|
|
942041ecd7 | ||
|
|
965825564d | ||
|
|
d12f920b3f | ||
|
|
572de288f0 | ||
|
|
d5fbba3b0a | ||
|
|
1b2fe4a5be | ||
|
|
bac79eeef0 | ||
|
|
e2952b5354 | ||
|
|
cfa35a90b2 | ||
|
|
bba2773d7d | ||
|
|
39b53f4467 | ||
|
|
04b156f604 | ||
|
|
3ec05a9f95 | ||
|
|
45e32e1b77 | ||
|
|
895959d007 | ||
|
|
3159438701 | ||
|
|
89836cd31a | ||
|
|
8d5a60ac0a | ||
|
|
76a64667d5 | ||
|
|
5f2a9fad4d | ||
|
|
bfd78104b2 | ||
|
|
03b7bc17de | ||
|
|
485fc8ddc8 | ||
|
|
2d28043cb2 | ||
|
|
646148b2d4 | ||
|
|
ecaa1f9e25 | ||
|
|
37a331a1e8 | ||
|
|
64b0694436 | ||
|
|
2511cabbc3 | ||
|
|
17e2a35cbe | ||
|
|
de3fd3f325 | ||
|
|
5c7f0016fb | ||
|
|
58dfd50317 | ||
|
|
e79c57274b | ||
|
|
d0a45637ba | ||
|
|
c1d3e8f7fa | ||
|
|
2889af7710 | ||
|
|
9f0fef5add | ||
|
|
d81af48ae5 | ||
|
|
8c46a41b96 | ||
|
|
2466ac73bb | ||
|
|
e7d3ba12d0 | ||
|
|
ba70a15798 | ||
|
|
50c76b696f | ||
|
|
998a634366 | ||
|
|
5231a3eddf | ||
|
|
0a233ff4bd | ||
|
|
c305911def | ||
|
|
bd78ccaf31 | ||
|
|
93b55280b0 | ||
|
|
e43a49eafc | ||
|
|
06834931a6 | ||
|
|
b03cd1bf46 | ||
|
|
ec84a94a61 | ||
|
|
c15ef219e5 | ||
|
|
5726926201 | ||
|
|
5010e3a368 | ||
|
|
b94b46db08 | ||
|
|
3871b3c67c | ||
|
|
a3c300f06b | ||
|
|
61d133f941 | ||
|
|
31d601b772 | ||
|
|
7169a269e1 | ||
|
|
f24ad25d7b | ||
|
|
1637e9d367 | ||
|
|
b61c9ca25a | ||
|
|
e1a79e6945 | ||
|
|
d1d5c69b64 |
17
.github/ISSUE_TEMPLATE.md
vendored
17
.github/ISSUE_TEMPLATE.md
vendored
@@ -1,17 +0,0 @@
|
||||
# Description of problem
|
||||
|
||||
(replace this text with the list of steps you followed)
|
||||
|
||||
# Expected result
|
||||
|
||||
(replace this text with an explanation of what you thought would happen)
|
||||
|
||||
# Actual result
|
||||
|
||||
(replace this text with details of what actually happened)
|
||||
|
||||
---
|
||||
|
||||
(replace this text with the output of the `kata-collect-data.sh` script, after
|
||||
you have reviewed its content to ensure it does not contain any private
|
||||
information).
|
||||
55
.github/workflows/add-issues-to-project.yaml
vendored
Normal file
55
.github/workflows/add-issues-to-project.yaml
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
name: Add newly created issues to the backlog project
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
|
||||
jobs:
|
||||
add-new-issues-to-backlog:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install hub
|
||||
run: |
|
||||
HUB_ARCH="amd64"
|
||||
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
|
||||
jq -r .tag_name | sed 's/^v//')
|
||||
curl -sL \
|
||||
"https://github.com/github/hub/releases/download/v${HUB_VER}/hub-linux-${HUB_ARCH}-${HUB_VER}.tgz" |\
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && \
|
||||
sudo install hub /usr/local/bin
|
||||
|
||||
- name: Install hub extension script
|
||||
run: |
|
||||
# Clone into a temporary directory to avoid overwriting
|
||||
# any existing github directory.
|
||||
pushd $(mktemp -d) &>/dev/null
|
||||
git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
|
||||
sudo install hub-util.sh /usr/local/bin
|
||||
popd &>/dev/null
|
||||
|
||||
- name: Checkout code to allow hub to communicate with the project
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Add issue to issue backlog
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
|
||||
run: |
|
||||
issue=${{ github.event.issue.number }}
|
||||
|
||||
project_name="Issue backlog"
|
||||
project_type="org"
|
||||
project_column="To do"
|
||||
|
||||
hub-util.sh \
|
||||
add-issue \
|
||||
"$issue" \
|
||||
"$project_name" \
|
||||
"$project_type" \
|
||||
"$project_column"
|
||||
91
.github/workflows/commit-message-check.yaml
vendored
Normal file
91
.github/workflows/commit-message-check.yaml
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
name: Commit Message Check
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
env:
|
||||
error_msg: |+
|
||||
See the document below for help on formatting commits for the project.
|
||||
|
||||
https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md#patch-forma
|
||||
|
||||
jobs:
|
||||
commit-message-check:
|
||||
runs-on: ubuntu-latest
|
||||
name: Commit Message Check
|
||||
steps:
|
||||
- name: Get PR Commits
|
||||
id: 'get-pr-commits'
|
||||
uses: tim-actions/get-pr-commits@v1.0.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: DCO Check
|
||||
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
|
||||
- name: Commit Body Missing Check
|
||||
if: ${{ success() || failure() }}
|
||||
uses: tim-actions/commit-body-check@v1.0.2
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
|
||||
- name: Check Subject Line Length
|
||||
if: ${{ success() || failure() }}
|
||||
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
pattern: '^.{0,75}(\n.*)*$'
|
||||
error: 'Subject too long (max 75)'
|
||||
post_error: ${{ env.error_msg }}
|
||||
|
||||
- name: Check Body Line Length
|
||||
if: ${{ success() || failure() }}
|
||||
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
# Notes:
|
||||
#
|
||||
# - The subject line is not enforced here (see other check), but has
|
||||
# to be specified at the start of the regex as the action is passed
|
||||
# the entire commit message.
|
||||
#
|
||||
# - Body lines *can* be longer than the maximum if they start
|
||||
# with a non-alphabetic character.
|
||||
#
|
||||
# This allows stack traces, log files snippets, emails, long URLs,
|
||||
# etc to be specified. Some of these naturally "work" as they start
|
||||
# with numeric timestamps or addresses. Emails can but quoted using
|
||||
# the normal ">" character, markdown bullets ("-", "*") are also
|
||||
# useful for lists of URLs, but it is always possible to override
|
||||
# the check by simply space indenting the content you need to add.
|
||||
#
|
||||
# - A SoB comment can be any length (as it is unreasonable to penalise
|
||||
# people with long names/email addresses :)
|
||||
pattern: '^.+(\n([a-zA-Z].{0,149}|[^a-zA-Z\n].*|Signed-off-by:.*|))+$'
|
||||
error: 'Body line too long (max 72)'
|
||||
post_error: ${{ env.error_msg }}
|
||||
|
||||
- name: Check Fixes
|
||||
if: ${{ success() || failure() }}
|
||||
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
pattern: '\s*Fixes\s*:?\s*(#\d+|github\.com\/kata-containers\/[a-z-.]*#\d+)|^\s*release\s*:'
|
||||
flags: 'i'
|
||||
error: 'No "Fixes" found'
|
||||
post_error: ${{ env.error_msg }}
|
||||
one_pass_all_pass: 'true'
|
||||
|
||||
- name: Check Subsystem
|
||||
if: ${{ success() || failure() }}
|
||||
uses: tim-actions/commit-message-checker-with-regex@v0.3.1
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:'
|
||||
error: 'Failed to find subsystem in subject'
|
||||
post_error: ${{ env.error_msg }}
|
||||
22
.github/workflows/dco-check.yaml
vendored
22
.github/workflows/dco-check.yaml
vendored
@@ -1,22 +0,0 @@
|
||||
name: DCO check
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
jobs:
|
||||
dco_check_job:
|
||||
runs-on: ubuntu-latest
|
||||
name: DCO Check
|
||||
steps:
|
||||
- name: Get PR Commits
|
||||
id: 'get-pr-commits'
|
||||
uses: tim-actions/get-pr-commits@ed97a21c3f83c3417e67a4733ea76887293a2c8f
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: DCO Check
|
||||
uses: tim-actions/dco@2fd0504dc0d27b33f542867c300c60840c6dcb20
|
||||
with:
|
||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
||||
34
.github/workflows/generate-local-artifact-tarball.sh
vendored
Executable file
34
.github/workflows/generate-local-artifact-tarball.sh
vendored
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
# Copyright (c) 2020 Ant Group
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
|
||||
main() {
|
||||
artifact_stage=${1:-}
|
||||
artifact=$(echo ${artifact_stage} | sed -n -e 's/^install_//p' | sed -r 's/_/-/g')
|
||||
if [ -z "${artifact}" ]; then
|
||||
"Scripts needs artifact name to build"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
pushd $GITHUB_WORKSPACE/tools/packaging
|
||||
git checkout $tag
|
||||
./scripts/gen_versions_txt.sh $tag
|
||||
popd
|
||||
|
||||
pushd $GITHUB_WORKSPACE/tools/packaging/release
|
||||
source ./kata-deploy-binaries.sh
|
||||
${artifact_stage} $tag
|
||||
popd
|
||||
|
||||
mv $GITHUB_WORKSPACE/tools/packaging/release/kata-static-${artifact}.tar.gz .
|
||||
}
|
||||
|
||||
main $@
|
||||
53
.github/workflows/kata-deploy-test.yaml
vendored
Normal file
53
.github/workflows/kata-deploy-test.yaml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
on: issue_comment
|
||||
name: test-kata-deploy
|
||||
jobs:
|
||||
check_comments:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check for Command
|
||||
id: command
|
||||
uses: kata-containers/slash-command-action@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
command: "test-kata-deploy"
|
||||
reaction: "true"
|
||||
reaction-type: "eyes"
|
||||
allow-edits: "false"
|
||||
permission-level: admin
|
||||
- name: verify command arg is kata-deploy
|
||||
run: |
|
||||
echo "The command was '${{ steps.command.outputs.command-name }}' with arguments '${{ steps.command.outputs.command-arguments }}'"
|
||||
create-and-test-container:
|
||||
needs: check_comments
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: get-PR-ref
|
||||
id: get-PR-ref
|
||||
run: |
|
||||
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
|
||||
echo "reference for PR: " ${ref}
|
||||
echo "##[set-output name=pr-ref;]${ref}"
|
||||
- uses: actions/checkout@v2-beta
|
||||
with:
|
||||
ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
|
||||
- name: build-container-image
|
||||
id: build-container-image
|
||||
run: |
|
||||
PR_SHA=$(git log --format=format:%H -n1)
|
||||
VERSION=$(curl https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/VERSION)
|
||||
ARTIFACT_URL="https://github.com/kata-containers/kata-containers/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
|
||||
wget "${ARTIFACT_URL}" -O ./kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} ./kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$PR_SHA
|
||||
echo "##[set-output name=pr-sha;]${PR_SHA}"
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./kata-deploy/action
|
||||
with:
|
||||
packaging-sha: ${{ steps.build-container-image.outputs.pr-sha }}
|
||||
env:
|
||||
PKG_SHA: ${{ steps.build-container-image.outputs.pr-sha }}
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
7
.github/workflows/main.yaml
vendored
7
.github/workflows/main.yaml
vendored
@@ -2,7 +2,7 @@ name: Publish release tarball
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
- '1.*'
|
||||
|
||||
jobs:
|
||||
get-artifact-list:
|
||||
@@ -10,12 +10,11 @@ jobs:
|
||||
steps:
|
||||
- name: get the list
|
||||
run: |
|
||||
git clone https://github.com/kata-containers/packaging
|
||||
pushd packaging
|
||||
pushd $GITHUB_WORKSPACE
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
git checkout $tag
|
||||
popd
|
||||
./packaging/artifact-list.sh > artifact-list.txt
|
||||
$GITHUB_WORKSPACE/tools/packaging/artifact-list.sh > artifact-list.txt
|
||||
- name: save-artifact-list
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
|
||||
78
.github/workflows/move-issues-to-in-progress.yaml
vendored
Normal file
78
.github/workflows/move-issues-to-in-progress.yaml
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
name: Move issues to "In progress" in backlog project when referenced by a PR
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
|
||||
jobs:
|
||||
move-linked-issues-to-in-progress:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install hub
|
||||
run: |
|
||||
HUB_ARCH="amd64"
|
||||
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
|
||||
jq -r .tag_name | sed 's/^v//')
|
||||
curl -sL \
|
||||
"https://github.com/github/hub/releases/download/v${HUB_VER}/hub-linux-${HUB_ARCH}-${HUB_VER}.tgz" |\
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && \
|
||||
sudo install hub /usr/local/bin
|
||||
|
||||
- name: Install hub extension script
|
||||
run: |
|
||||
# Clone into a temporary directory to avoid overwriting
|
||||
# any existing github directory.
|
||||
pushd $(mktemp -d) &>/dev/null
|
||||
git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
|
||||
sudo install hub-util.sh /usr/local/bin
|
||||
popd &>/dev/null
|
||||
|
||||
- name: Checkout code to allow hub to communicate with the project
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Move issue to "In progress"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
|
||||
run: |
|
||||
pr=${{ github.event.pull_request.number }}
|
||||
|
||||
linked_issue_urls=$(hub-util.sh \
|
||||
list-issues-for-pr "$pr" |\
|
||||
grep -v "^\#" |\
|
||||
cut -d';' -f3 || true)
|
||||
|
||||
# PR doesn't have any linked issues
|
||||
# (it should, but maybe a new user forgot to add a "Fixes: #XXX" commit).
|
||||
[ -z "$linked_issue_urls" ] && {
|
||||
echo "::error::No linked issues for PR $pr"
|
||||
exit 1
|
||||
}
|
||||
|
||||
project_name="Issue backlog"
|
||||
project_type="org"
|
||||
project_column="In progress"
|
||||
|
||||
for issue_url in $(echo "$linked_issue_urls")
|
||||
do
|
||||
issue=$(echo "$issue_url"| awk -F\/ '{print $NF}' || true)
|
||||
|
||||
[ -z "$issue" ] && {
|
||||
echo "::error::Cannot determine issue number from $issue_url for PR $pr"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Move the issue to the correct column on the project board
|
||||
hub-util.sh \
|
||||
move-issue \
|
||||
"$issue" \
|
||||
"$project_name" \
|
||||
"$project_type" \
|
||||
"$project_column"
|
||||
done
|
||||
321
.github/workflows/release.yaml
vendored
Normal file
321
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,321 @@
|
||||
name: Publish Kata 2.x release artifacts
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '2.*'
|
||||
|
||||
jobs:
|
||||
get-artifact-list:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get the list
|
||||
run: |
|
||||
pushd $GITHUB_WORKSPACE
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
git checkout $tag
|
||||
popd
|
||||
$GITHUB_WORKSPACE/tools/packaging/artifact-list.sh > artifact-list.txt
|
||||
- name: save-artifact-list
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
path: artifact-list.txt
|
||||
|
||||
build-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-kernel
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kernel.tar.gz
|
||||
|
||||
build-experimental-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_experimental_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-experimental-kernel
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-experimental-kernel.tar.gz
|
||||
|
||||
build-qemu:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_qemu"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-qemu
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-qemu.tar.gz
|
||||
|
||||
build-qemu-virtiofsd:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_qemu_virtiofsd"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-qemu-virtiofsd
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-qemu-virtiofsd.tar.gz
|
||||
|
||||
build-image:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_image"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-image
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-image.tar.gz
|
||||
|
||||
build-firecracker:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_firecracker"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-firecracker
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-firecracker.tar.gz
|
||||
|
||||
|
||||
build-clh:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_clh"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-clh
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-clh.tar.gz
|
||||
|
||||
build-kata-components:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kata_components"
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-kata-components
|
||||
run: |
|
||||
if grep -q $buildstr artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-local-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kata-components.tar.gz
|
||||
|
||||
gather-artifacts:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: [build-experimental-kernel, build-kernel, build-qemu, build-qemu-virtiofsd, build-image, build-firecracker, build-kata-components, build-clh]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-artifacts
|
||||
- name: colate-artifacts
|
||||
run: |
|
||||
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: release-candidate
|
||||
path: kata-static.tar.xz
|
||||
|
||||
kata-deploy:
|
||||
needs: gather-artifacts
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: release-candidate
|
||||
- name: build-and-push-kata-deploy-ci
|
||||
id: build-and-push-kata-deploy-ci
|
||||
run: |
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
pushd $GITHUB_WORKSPACE
|
||||
git checkout $tag
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
|
||||
echo "##[set-output name=PKG_SHA;]${pkg_sha}"
|
||||
echo ::set-env name=TAG::$tag
|
||||
mkdir -p packaging/kata-deploy
|
||||
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./packaging/kata-deploy/action
|
||||
with:
|
||||
packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
|
||||
env:
|
||||
PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
- name: push-tarball
|
||||
run: |
|
||||
# tag the container image we created and push to DockerHub
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
|
||||
docker push katadocker/kata-deploy:${tag}
|
||||
|
||||
upload-static-tarball:
|
||||
needs: kata-deploy
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: download-artifacts
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: release-candidate
|
||||
- name: install hub
|
||||
run: |
|
||||
HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
|
||||
wget -q -O- https://github.com/github/hub/releases/download/v$HUB_VER/hub-linux-amd64-$HUB_VER.tgz | \
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && sudo mv hub /usr/local/bin/hub
|
||||
- name: push static tarball to github
|
||||
run: |
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
tarball="kata-static-$tag-x86_64.tar.xz"
|
||||
mv kata-static.tar.xz "$GITHUB_WORKSPACE/${tarball}"
|
||||
pushd $GITHUB_WORKSPACE
|
||||
echo "uploading asset '${tarball}' for tag: ${tag}"
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
|
||||
51
.github/workflows/require-pr-porting-labels.yaml
vendored
Normal file
51
.github/workflows/require-pr-porting-labels.yaml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
name: Ensure PR has required porting labels
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
jobs:
|
||||
check-pr-porting-labels:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Install hub
|
||||
run: |
|
||||
HUB_ARCH="amd64"
|
||||
HUB_VER=$(curl -sL "https://api.github.com/repos/github/hub/releases/latest" |\
|
||||
jq -r .tag_name | sed 's/^v//')
|
||||
curl -sL \
|
||||
"https://github.com/github/hub/releases/download/v${HUB_VER}/hub-linux-${HUB_ARCH}-${HUB_VER}.tgz" |\
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && \
|
||||
sudo install hub /usr/local/bin
|
||||
|
||||
- name: Checkout code to allow hub to communicate with the project
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
token: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Install porting checker script
|
||||
run: |
|
||||
# Clone into a temporary directory to avoid overwriting
|
||||
# any existing github directory.
|
||||
pushd $(mktemp -d) &>/dev/null
|
||||
git clone --single-branch --depth 1 "https://github.com/kata-containers/.github" && cd .github/scripts
|
||||
sudo install pr-porting-checks.sh /usr/local/bin
|
||||
popd &>/dev/null
|
||||
|
||||
- name: Stop PR being merged unless it has a correct set of porting labels
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}
|
||||
run: |
|
||||
pr=${{ github.event.number }}
|
||||
repo=${{ github.repository }}
|
||||
|
||||
pr-porting-checks.sh "$pr" "$repo"
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -2,3 +2,6 @@
|
||||
**/*.orig
|
||||
**/*.rej
|
||||
**/target
|
||||
**/.vscode
|
||||
src/agent/src/version.rs
|
||||
src/agent/kata-agent.service
|
||||
|
||||
27
.travis.yml
27
.travis.yml
@@ -5,27 +5,43 @@
|
||||
|
||||
dist: bionic
|
||||
os: linux
|
||||
language: go
|
||||
go: 1.14.4
|
||||
env: target_branch=$TRAVIS_BRANCH
|
||||
|
||||
# set cache directories manually, because
|
||||
# we are using a non-standard directory struct
|
||||
# cargo root is in srs/agent
|
||||
#
|
||||
# If needed, caches can be cleared
|
||||
# by ways documented in
|
||||
# https://docs.travis-ci.com/user/caching#clearing-caches
|
||||
language: rust
|
||||
rust:
|
||||
- 1.44.1
|
||||
cache:
|
||||
cargo: true
|
||||
directories:
|
||||
- src/agent/target
|
||||
|
||||
before_install:
|
||||
- git remote set-branches --add origin "${TRAVIS_BRANCH}"
|
||||
- git fetch
|
||||
- export RUST_BACKTRACE=1
|
||||
- export target_branch=$TRAVIS_BRANCH
|
||||
- "ci/setup.sh"
|
||||
|
||||
# we use install to run check agent
|
||||
# so that it is easy to skip for non-amd64 platform
|
||||
install:
|
||||
- "ci/install_rust.sh"
|
||||
- export PATH=$PATH:"$HOME/.cargo/bin"
|
||||
- export RUST_AGENT=yes
|
||||
- rustup target add x86_64-unknown-linux-musl
|
||||
- sudo ln -sf /usr/bin/g++ /bin/musl-g++
|
||||
- rustup component add rustfmt
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
|
||||
before_script:
|
||||
- "ci/install_go.sh"
|
||||
- "ci/install_vc.sh"
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
@@ -40,6 +56,7 @@ jobs:
|
||||
- name: ppc64le test
|
||||
os: linux-ppc64le
|
||||
install: skip
|
||||
script: skip
|
||||
allow_failures:
|
||||
- name: ppc64le test
|
||||
fast_finish: true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2019 Intel Corporation.
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
@@ -10,4 +10,3 @@
|
||||
# used. See https://help.github.com/articles/about-code-owners/
|
||||
|
||||
*.md @kata-containers/documentation
|
||||
|
||||
30
ci/go-no-os-exit.sh
Executable file
30
ci/go-no-os-exit.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Check there are no os.Exit() calls creeping into the code
|
||||
# We don't use that exit path in the Kata codebase.
|
||||
|
||||
# Allow the path to check to be over-ridden.
|
||||
# Default to the current directory.
|
||||
go_packages=${1:-.}
|
||||
|
||||
echo "Checking for no os.Exit() calls for package [${go_packages}]"
|
||||
|
||||
candidates=`go list -f '{{.Dir}}/*.go' $go_packages`
|
||||
for f in $candidates; do
|
||||
filename=`basename $f`
|
||||
# skip all go test files
|
||||
[[ $filename == *_test.go ]] && continue
|
||||
# skip exit.go where, the only file we should call os.Exit() from.
|
||||
[[ $filename == "exit.go" ]] && continue
|
||||
files="$f $files"
|
||||
done
|
||||
|
||||
[ -z "$files" ] && echo "No files to check, skipping" && exit 0
|
||||
|
||||
if egrep -n '\<os\.Exit\>' $files; then
|
||||
echo "Direct calls to os.Exit() are forbidden, please use exit() so atexit() works"
|
||||
exit 1
|
||||
fi
|
||||
23
ci/install_musl.sh
Executable file
23
ci/install_musl.sh
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2020 Ant Group
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
install_aarch64_musl() {
|
||||
local arch=$(uname -m)
|
||||
if [ "${arch}" == "aarch64" ]; then
|
||||
local musl_tar="${arch}-linux-musl-native.tgz"
|
||||
local musl_dir="${arch}-linux-musl-native"
|
||||
pushd /tmp
|
||||
curl -sLO https://musl.cc/${musl_tar}
|
||||
tar -zxf ${musl_tar}
|
||||
mkdir -p /usr/local/musl/
|
||||
cp -r ${musl_dir}/* /usr/local/musl/
|
||||
popd
|
||||
fi
|
||||
}
|
||||
|
||||
install_aarch64_musl
|
||||
10
tools/osbuilder/scripts/install-yq.sh → ci/install_yq.sh
Normal file → Executable file
10
tools/osbuilder/scripts/install-yq.sh → ci/install_yq.sh
Normal file → Executable file
@@ -56,12 +56,13 @@ function install_yq() {
|
||||
die "Please install curl"
|
||||
fi
|
||||
|
||||
local yq_version=2.3.0
|
||||
local yq_version=3.1.0
|
||||
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
|
||||
curl -o "${yq_path}" -LSsf ${yq_url}
|
||||
## NOTE: ${var,,} => gives lowercase value of var
|
||||
local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos,,}_${goarch}"
|
||||
curl -o "${yq_path}" -LSsf "${yq_url}"
|
||||
[ $? -ne 0 ] && die "Download ${yq_url} failed"
|
||||
chmod +x ${yq_path}
|
||||
chmod +x "${yq_path}"
|
||||
|
||||
if ! command -v "${yq_path}" >/dev/null; then
|
||||
die "Cannot not get ${yq_path} executable"
|
||||
@@ -69,4 +70,3 @@ function install_yq() {
|
||||
}
|
||||
|
||||
install_yq
|
||||
|
||||
12
ci/lib.sh
12
ci/lib.sh
@@ -5,26 +5,26 @@
|
||||
|
||||
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
|
||||
export tests_repo_dir="$GOPATH/src/$tests_repo"
|
||||
export branch="${branch:-2.0-dev}"
|
||||
|
||||
clone_tests_repo()
|
||||
{
|
||||
# KATA_CI_NO_NETWORK is (has to be) ignored if there is
|
||||
# no existing clone.
|
||||
if [ -d "$tests_repo_dir" -a -n "$KATA_CI_NO_NETWORK" ]
|
||||
if [ -d "$tests_repo_dir" -a -n "$CI" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
|
||||
go get -d -u "$tests_repo" || true
|
||||
|
||||
if [ -n "${TRAVIS_BRANCH:-}" ]; then
|
||||
( cd "${tests_repo_dir}" && git checkout "${TRAVIS_BRANCH}" )
|
||||
fi
|
||||
pushd "${tests_repo_dir}" && git checkout "${branch}" && popd
|
||||
}
|
||||
|
||||
run_static_checks()
|
||||
{
|
||||
clone_tests_repo
|
||||
# Make sure we have the targeting branch
|
||||
git remote set-branches --add origin "${branch}"
|
||||
git fetch -a
|
||||
bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
## Kata Containers Documentation Code of Conduct
|
||||
|
||||
Kata Containers follows the [OpenStack Foundation Code of Conduct](https://www.openstack.org/legal/community-code-of-conduct/).
|
||||
@@ -1,5 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
## This repo is part of [Kata Containers](https://katacontainers.io)
|
||||
|
||||
For details on how to contribute to the Kata Containers project, please see the main [contributing document](https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md).
|
||||
@@ -13,8 +13,6 @@
|
||||
* [journald rate limiting](#journald-rate-limiting)
|
||||
* [`systemd-journald` suppressing messages](#systemd-journald-suppressing-messages)
|
||||
* [Disabling `systemd-journald` rate limiting](#disabling-systemd-journald-rate-limiting)
|
||||
* [Build and install Kata proxy](#build-and-install-kata-proxy)
|
||||
* [Build and install Kata shim](#build-and-install-kata-shim)
|
||||
* [Create and install rootfs and initrd image](#create-and-install-rootfs-and-initrd-image)
|
||||
* [Build a custom Kata agent - OPTIONAL](#build-a-custom-kata-agent---optional)
|
||||
* [Get the osbuilder](#get-the-osbuilder)
|
||||
@@ -31,26 +29,24 @@
|
||||
* [Install a hypervisor](#install-a-hypervisor)
|
||||
* [Build a custom QEMU](#build-a-custom-qemu)
|
||||
* [Build a custom QEMU for aarch64/arm64 - REQUIRED](#build-a-custom-qemu-for-aarch64arm64---required)
|
||||
* [Run Kata Containers with Docker](#run-kata-containers-with-docker)
|
||||
* [Update the Docker systemd unit file](#update-the-docker-systemd-unit-file)
|
||||
* [Create a container using Kata](#create-a-container-using-kata)
|
||||
* [Run Kata Containers with Containerd](#run-kata-containers-with-containerd)
|
||||
* [Run Kata Containers with Kubernetes](#run-kata-containers-with-kubernetes)
|
||||
* [Troubleshoot Kata Containers](#troubleshoot-kata-containers)
|
||||
* [Appendices](#appendices)
|
||||
* [Checking Docker default runtime](#checking-docker-default-runtime)
|
||||
* [Set up a debug console](#set-up-a-debug-console)
|
||||
* [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
|
||||
* [Create a debug systemd service](#create-a-debug-systemd-service)
|
||||
* [Build the debug image](#build-the-debug-image)
|
||||
* [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
|
||||
* [Ensure debug options are valid](#ensure-debug-options-are-valid)
|
||||
* [Create a container](#create-a-container)
|
||||
* [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
|
||||
* [Obtain details of the image](#obtain-details-of-the-image)
|
||||
* [Simple debug console setup](#simple-debug-console-setup)
|
||||
* [Enable agent debug console](#enable-agent-debug-console)
|
||||
* [Start `kata-monitor`](#start-kata-monitor)
|
||||
* [Connect to debug console](#connect-to-debug-console)
|
||||
* [Traditional debug console setup](#traditional-debug-console-setup)
|
||||
* [Create a custom image containing a shell](#create-a-custom-image-containing-a-shell)
|
||||
* [Create a debug systemd service](#create-a-debug-systemd-service)
|
||||
* [Build the debug image](#build-the-debug-image)
|
||||
* [Configure runtime for custom debug image](#configure-runtime-for-custom-debug-image)
|
||||
* [Create a container](#create-a-container)
|
||||
* [Connect to the virtual machine using the debug console](#connect-to-the-virtual-machine-using-the-debug-console)
|
||||
* [Capturing kernel boot logs](#capturing-kernel-boot-logs)
|
||||
* [Running standalone](#running-standalone)
|
||||
* [Create an OCI bundle](#create-an-oci-bundle)
|
||||
* [Launch the runtime to create a container](#launch-the-runtime-to-create-a-container)
|
||||
|
||||
# Warning
|
||||
|
||||
@@ -67,7 +63,7 @@ The recommended way to create a development environment is to first
|
||||
to create a working system.
|
||||
|
||||
The installation guide instructions will install all required Kata Containers
|
||||
components, plus Docker*, the hypervisor, and the Kata Containers image and
|
||||
components, plus *Docker*, the hypervisor, and the Kata Containers image and
|
||||
guest kernel.
|
||||
|
||||
# Requirements to build individual components
|
||||
@@ -77,7 +73,7 @@ You need to install the following to build Kata Containers components:
|
||||
- [golang](https://golang.org/dl)
|
||||
|
||||
To view the versions of go known to work, see the `golang` entry in the
|
||||
[versions database](https://github.com/kata-containers/runtime/blob/master/versions.yaml).
|
||||
[versions database](../versions.yaml).
|
||||
|
||||
- `make`.
|
||||
- `gcc` (required for building the shim and runtime).
|
||||
@@ -85,14 +81,14 @@ You need to install the following to build Kata Containers components:
|
||||
# Build and install the Kata Containers runtime
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/runtime
|
||||
$ cd $GOPATH/src/github.com/kata-containers/runtime
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/runtime
|
||||
$ make && sudo -E PATH=$PATH make install
|
||||
```
|
||||
|
||||
The build will create the following:
|
||||
|
||||
- runtime binary: `/usr/local/bin/kata-runtime`
|
||||
- runtime binary: `/usr/local/bin/kata-runtime` and `/usr/local/bin/containerd-shim-kata-v2`
|
||||
- configuration file: `/usr/share/defaults/kata-containers/configuration.toml`
|
||||
|
||||
# Check hardware requirements
|
||||
@@ -243,20 +239,6 @@ Restart `systemd-journald` for the changes to take effect:
|
||||
$ sudo systemctl restart systemd-journald
|
||||
```
|
||||
|
||||
# Build and install Kata proxy
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/proxy
|
||||
$ cd $GOPATH/src/github.com/kata-containers/proxy && make && sudo make install
|
||||
```
|
||||
|
||||
# Build and install Kata shim
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/shim
|
||||
$ cd $GOPATH/src/github.com/kata-containers/shim && make && sudo make install
|
||||
```
|
||||
|
||||
# Create and install rootfs and initrd image
|
||||
|
||||
## Build a custom Kata agent - OPTIONAL
|
||||
@@ -266,14 +248,15 @@ $ cd $GOPATH/src/github.com/kata-containers/shim && make && sudo make install
|
||||
> - You should only do this step if you are testing with the latest version of the agent.
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/agent
|
||||
$ cd $GOPATH/src/github.com/kata-containers/agent && make
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/agent && make
|
||||
```
|
||||
|
||||
## Get the osbuilder
|
||||
|
||||
```
|
||||
$ go get -d -u github.com/kata-containers/osbuilder
|
||||
$ go get -d -u github.com/kata-containers/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder
|
||||
```
|
||||
|
||||
## Create a rootfs image
|
||||
@@ -284,16 +267,16 @@ able to run the `rootfs.sh` script with `USE_DOCKER=true` as expected in
|
||||
the following example.
|
||||
|
||||
```
|
||||
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/osbuilder/rootfs-builder/rootfs
|
||||
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
|
||||
$ sudo rm -rf ${ROOTFS_DIR}
|
||||
$ cd $GOPATH/src/github.com/kata-containers/osbuilder/rootfs-builder
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
|
||||
```
|
||||
You MUST choose one of `alpine`, `centos`, `clearlinux`, `debian`, `euleros`, `fedora`, `suse`, and `ubuntu` for `${distro}`. By default `seccomp` packages are not included in the rootfs image. Set `SECCOMP` to `yes` to include them.
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - Check the [compatibility matrix](https://github.com/kata-containers/osbuilder#platform-distro-compatibility-matrix) before creating rootfs.
|
||||
> - Check the [compatibility matrix](../tools/osbuilder/README.md#platform-distro-compatibility-matrix) before creating rootfs.
|
||||
> - You must ensure that the *default Docker runtime* is `runc` to make use of
|
||||
> the `USE_DOCKER` variable. If that is not the case, remove the variable
|
||||
> from the previous command. See [Checking Docker default runtime](#checking-docker-default-runtime).
|
||||
@@ -313,7 +296,7 @@ $ sudo install -o root -g root -m 0440 ../../agent/kata-containers.target ${ROOT
|
||||
### Build a rootfs image
|
||||
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/osbuilder/image-builder
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/image-builder
|
||||
$ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
|
||||
```
|
||||
|
||||
@@ -340,9 +323,9 @@ $ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
|
||||
## Create an initrd image - OPTIONAL
|
||||
### Create a local rootfs for initrd image
|
||||
```
|
||||
$ export ROOTFS_DIR="${GOPATH}/src/github.com/kata-containers/osbuilder/rootfs-builder/rootfs"
|
||||
$ export ROOTFS_DIR="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs"
|
||||
$ sudo rm -rf ${ROOTFS_DIR}
|
||||
$ cd $GOPATH/src/github.com/kata-containers/osbuilder/rootfs-builder
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
|
||||
```
|
||||
`AGENT_INIT` controls if the guest image uses the Kata agent as the guest `init` process. When you create an initrd image,
|
||||
@@ -352,7 +335,7 @@ You MUST choose one of `alpine`, `centos`, `clearlinux`, `euleros`, and `fedora`
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - Check the [compatibility matrix](https://github.com/kata-containers/osbuilder#platform-distro-compatibility-matrix) before creating rootfs.
|
||||
> - Check the [compatibility matrix](../tools/osbuilder/README.md#platform-distro-compatibility-matrix) before creating rootfs.
|
||||
|
||||
Optionally, add your custom agent binary to the rootfs with the following:
|
||||
```
|
||||
@@ -362,7 +345,7 @@ $ sudo install -o root -g root -m 0550 -T ../../agent/kata-agent ${ROOTFS_DIR}/s
|
||||
### Build an initrd image
|
||||
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/osbuilder/initrd-builder
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/initrd-builder
|
||||
$ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true ./initrd_builder.sh ${ROOTFS_DIR}'
|
||||
```
|
||||
|
||||
@@ -390,7 +373,7 @@ Your QEMU directory need to be prepared with source code. Alternatively, you can
|
||||
|
||||
```
|
||||
$ go get -d github.com/kata-containers/qemu
|
||||
$ qemu_branch=$(grep qemu-lite- ${GOPATH}/src/github.com/kata-containers/runtime/versions.yaml | cut -d '"' -f2)
|
||||
$ qemu_branch=$(grep qemu-lite- ${GOPATH}/src/github.com/kata-containers/kata-containers/versions.yaml | cut -d '"' -f2)
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/qemu
|
||||
$ git checkout -b $qemu_branch remotes/origin/$qemu_branch
|
||||
$ your_qemu_directory=${GOPATH}/src/github.com/kata-containers/qemu
|
||||
@@ -399,9 +382,9 @@ $ your_qemu_directory=${GOPATH}/src/github.com/kata-containers/qemu
|
||||
To build a version of QEMU using the same options as the default `qemu-lite` version , you could use the `configure-hypervisor.sh` script:
|
||||
|
||||
```
|
||||
$ go get -d github.com/kata-containers/packaging
|
||||
$ go get -d github.com/kata-containers/kata-containers/tools/packaging
|
||||
$ cd $your_qemu_directory
|
||||
$ ${GOPATH}/src/github.com/kata-containers/packaging/scripts/configure-hypervisor.sh qemu > kata.cfg
|
||||
$ ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/scripts/configure-hypervisor.sh qemu > kata.cfg
|
||||
$ eval ./configure "$(cat kata.cfg)"
|
||||
$ make -j $(nproc)
|
||||
$ sudo -E make install
|
||||
@@ -420,27 +403,11 @@ $ go get -d github.com/kata-containers/tests
|
||||
$ script -fec 'sudo -E ${GOPATH}/src/github.com/kata-containers/tests/.ci/install_qemu.sh'
|
||||
```
|
||||
|
||||
# Run Kata Containers with Docker
|
||||
|
||||
## Update the Docker systemd unit file
|
||||
|
||||
```
|
||||
$ dockerUnit=$(systemctl show -p FragmentPath docker.service | cut -d "=" -f 2)
|
||||
$ unitFile=${dockerUnit:-/etc/systemd/system/docker.service.d/kata-containers.conf}
|
||||
$ test -e "$unitFile" || { sudo mkdir -p "$(dirname $unitFile)"; echo -e "[Service]\nType=simple\nExecStart=\nExecStart=/usr/bin/dockerd -D --default-runtime runc" | sudo tee "$unitFile"; }
|
||||
$ grep -q "kata-runtime=" $unitFile || sudo sed -i 's!^\(ExecStart=[^$].*$\)!\1 --add-runtime kata-runtime=/usr/local/bin/kata-runtime!g' "$unitFile"
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
## Create a container using Kata
|
||||
|
||||
```
|
||||
$ sudo docker run -ti --runtime kata-runtime busybox sh
|
||||
```
|
||||
# Run Kata Containers with Containerd
|
||||
Refer to the [How to use Kata Containers and Containerd](how-to/containerd-kata.md) how-to guide.
|
||||
|
||||
# Run Kata Containers with Kubernetes
|
||||
Refer to to the [Run Kata Containers with Kubernetes](how-to/run-kata-with-k8s.md) how-to guide.
|
||||
Refer to the [Run Kata Containers with Kubernetes](how-to/run-kata-with-k8s.md) how-to guide.
|
||||
|
||||
# Troubleshoot Kata Containers
|
||||
|
||||
@@ -460,18 +427,6 @@ To perform analysis on Kata logs, use the
|
||||
[`kata-log-parser`](https://github.com/kata-containers/tests/tree/master/cmd/log-parser)
|
||||
tool, which can convert the logs into formats (e.g. JSON, TOML, XML, and YAML).
|
||||
|
||||
To obtain a full backtrace for the agent, proxy, runtime, or shim send the
|
||||
`SIGUSR1` signal to the process ID of the component. The component will send a
|
||||
backtrace to the system log on the host system and continue to run without
|
||||
interruption.
|
||||
|
||||
For example, to obtain a backtrace for `kata-proxy`:
|
||||
|
||||
```
|
||||
$ sudo kill -USR1 $kata_proxy_pid
|
||||
$ sudo journalctl -t kata-proxy
|
||||
```
|
||||
|
||||
See [Set up a debug console](#set-up-a-debug-console).
|
||||
|
||||
# Appendices
|
||||
@@ -481,9 +436,56 @@ See [Set up a debug console](#set-up-a-debug-console).
|
||||
```
|
||||
$ sudo docker info 2>/dev/null | grep -i "default runtime" | cut -d: -f2- | grep -q runc && echo "SUCCESS" || echo "ERROR: Incorrect default Docker runtime"
|
||||
```
|
||||
|
||||
## Set up a debug console
|
||||
|
||||
Kata containers provides two ways to connect to the guest. One is using traditional login service, which needs additional works. In contrast the simple debug console is easy to setup.
|
||||
|
||||
### Simple debug console setup
|
||||
|
||||
Kata Containers 2.0 supports a shell simulated *console* for quick debug purpose. This approach uses VSOCK to
|
||||
connect to the shell running inside the guest which the agent starts. This method only requires the guest image to
|
||||
contain either `/bin/sh` or `/bin/bash`.
|
||||
|
||||
#### Enable agent debug console
|
||||
|
||||
Enable debug_console_enabled in the `configuration.toml` configuration file:
|
||||
|
||||
```
|
||||
[agent.kata]
|
||||
debug_console_enabled = true
|
||||
```
|
||||
|
||||
This will pass `agent.debug_console agent.debug_console_vport=1026` to agent as kernel parameters, and sandboxes created using this parameters will start a shell in guest if new connection is accept from VSOCK.
|
||||
|
||||
#### Start `kata-monitor`
|
||||
|
||||
The `kata-runtime exec` command needs `kata-monitor` to get the sandbox's `vsock` address to connect to, first start `kata-monitor`.
|
||||
|
||||
```
|
||||
$ sudo kata-monitor
|
||||
```
|
||||
|
||||
`kata-monitor` will serve at `localhost:8090` by default.
|
||||
|
||||
|
||||
#### Connect to debug console
|
||||
|
||||
Command `kata-runtime exec` is used to connect to the debug console.
|
||||
|
||||
```
|
||||
$ kata-runtime exec 1a9ab65be63b8b03dfd0c75036d27f0ed09eab38abb45337fea83acd3cd7bacd
|
||||
bash-4.2# id
|
||||
uid=0(root) gid=0(root) groups=0(root)
|
||||
bash-4.2# pwd
|
||||
/
|
||||
bash-4.2# exit
|
||||
exit
|
||||
```
|
||||
|
||||
If you want to access guest OS through a traditional way, see [Traditional debug console setup)](#traditional-debug-console-setup).
|
||||
|
||||
### Traditional debug console setup
|
||||
|
||||
By default you cannot login to a virtual machine, since this can be sensitive
|
||||
from a security perspective. Also, allowing logins would require additional
|
||||
packages in the rootfs, which would increase the size of the image used to
|
||||
@@ -494,12 +496,12 @@ the following steps (using rootfs or initrd image).
|
||||
|
||||
> **Note:** The following debug console instructions assume a systemd-based guest
|
||||
> O/S image. This means you must create a rootfs for a distro that supports systemd.
|
||||
> Currently, all distros supported by [osbuilder](https://github.com/kata-containers/osbuilder) support systemd
|
||||
> Currently, all distros supported by [osbuilder](../tools/osbuilder) support systemd
|
||||
> except for Alpine Linux.
|
||||
>
|
||||
> Look for `INIT_PROCESS=systemd` in the `config.sh` osbuilder rootfs config file
|
||||
> to verify an osbuilder distro supports systemd for the distro you want to build rootfs for.
|
||||
> For an example, see the [Clear Linux config.sh file](https://github.com/kata-containers/osbuilder/blob/master/rootfs-builder/clearlinux/config.sh).
|
||||
> For an example, see the [Clear Linux config.sh file](../tools/osbuilder/rootfs-builder/clearlinux/config.sh).
|
||||
>
|
||||
> For a non-systemd-based distro, create an equivalent system
|
||||
> service using that distro’s init system syntax. Alternatively, you can build a distro
|
||||
@@ -509,7 +511,7 @@ the following steps (using rootfs or initrd image).
|
||||
>
|
||||
> Once these steps are taken you can connect to the virtual machine using the [debug console](Developer-Guide.md#connect-to-the-virtual-machine-using-the-debug-console).
|
||||
|
||||
### Create a custom image containing a shell
|
||||
#### Create a custom image containing a shell
|
||||
|
||||
To login to a virtual machine, you must
|
||||
[create a custom rootfs](#create-a-rootfs-image) or [custom initrd](#create-an-initrd-image---optional)
|
||||
@@ -519,12 +521,12 @@ an additional `coreutils` package.
|
||||
For example using CentOS:
|
||||
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/osbuilder/rootfs-builder
|
||||
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/osbuilder/rootfs-builder/rootfs
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
|
||||
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
|
||||
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true EXTRA_PKGS="bash coreutils" ./rootfs.sh centos'
|
||||
```
|
||||
|
||||
### Create a debug systemd service
|
||||
#### Create a debug systemd service
|
||||
|
||||
Create the service file that starts the shell in the rootfs directory:
|
||||
|
||||
@@ -553,12 +555,12 @@ Add a dependency to start the debug console:
|
||||
$ sudo sed -i '$a Requires=kata-debug.service' ${ROOTFS_DIR}/lib/systemd/system/kata-containers.target
|
||||
```
|
||||
|
||||
### Build the debug image
|
||||
#### Build the debug image
|
||||
|
||||
Follow the instructions in the [Build a rootfs image](#build-a-rootfs-image)
|
||||
section when using rootfs, or when using initrd, complete the steps in the [Build an initrd image](#build-an-initrd-image) section.
|
||||
|
||||
### Configure runtime for custom debug image
|
||||
#### Configure runtime for custom debug image
|
||||
|
||||
Install the image:
|
||||
|
||||
@@ -583,31 +585,18 @@ $ (cd /usr/share/kata-containers && sudo ln -sf "$name" kata-containers.img)
|
||||
**Note**: You should take care to undo this change after you finish debugging
|
||||
to avoid all subsequently created containers from using the debug image.
|
||||
|
||||
### Ensure debug options are valid
|
||||
#### Create a container
|
||||
|
||||
For the debug console to work, you **must** ensure that proxy debug is
|
||||
**disabled** in the configuration file. If proxy debug is enabled, you will
|
||||
not see any output when you connect to the virtual machine:
|
||||
Create a container as normal. For example using `crictl`:
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/kata-containers/
|
||||
$ sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
|
||||
$ sudo awk '{if (/^\[proxy\.kata\]/) {got=1}; if (got == 1 && /^.*enable_debug/) {print "#enable_debug = true"; got=0; next; } else {print}}' /etc/kata-containers/configuration.toml > /tmp/configuration.toml
|
||||
$ sudo install -o root -g root -m 0640 /tmp/configuration.toml /etc/kata-containers/
|
||||
$ sudo crictl run -r kata container.yaml pod.yaml
|
||||
```
|
||||
|
||||
### Create a container
|
||||
|
||||
Create a container as normal. For example using Docker:
|
||||
#### Connect to the virtual machine using the debug console
|
||||
|
||||
```
|
||||
$ sudo docker run -ti busybox sh
|
||||
```
|
||||
|
||||
### Connect to the virtual machine using the debug console
|
||||
|
||||
```
|
||||
$ id=$(sudo docker ps -q --no-trunc)
|
||||
$ id=$(sudo crictl pods --no-trunc -q)
|
||||
$ console="/var/run/vc/vm/${id}/console.sock"
|
||||
$ sudo socat "stdin,raw,echo=0,escape=0x11" "unix-connect:${console}"
|
||||
```
|
||||
@@ -617,10 +606,10 @@ $ sudo socat "stdin,raw,echo=0,escape=0x11" "unix-connect:${console}"
|
||||
To disconnect from the virtual machine, type `CONTROL+q` (hold down the
|
||||
`CONTROL` key and press `q`).
|
||||
|
||||
### Obtain details of the image
|
||||
## Obtain details of the image
|
||||
|
||||
If the image is created using
|
||||
[osbuilder](https://github.com/kata-containers/osbuilder), the following YAML
|
||||
[osbuilder](../tools/osbuilder), the following YAML
|
||||
file exists and contains details of the image and how it was created:
|
||||
|
||||
```
|
||||
@@ -637,54 +626,22 @@ command inside the container to view the kernel boot logs.
|
||||
If however you are unable to `exec` into the container, you can enable some debug
|
||||
options to have the kernel boot messages logged into the system journal.
|
||||
|
||||
Which debug options you enable depends on if you are using the hypervisor `vsock` mode
|
||||
or not, as defined by the `use_vsock` setting in the `[hypervisor.qemu]` section of
|
||||
the configuration file. The following details the settings:
|
||||
|
||||
- For `use_vsock = false`:
|
||||
- Set `enable_debug = true` in both the `[hypervisor.qemu]` and `[proxy.kata]` sections
|
||||
- For `use_vsock = true`:
|
||||
- Set `enable_debug = true` in both the `[hypervisor.qemu]` and `[shim.kata]` sections
|
||||
- Set `enable_debug = true` in the `[hypervisor.qemu]` and `[runtime]` sections
|
||||
|
||||
For generic information on enabling debug in the configuration file, see the
|
||||
[Enable full debug](#enable-full-debug) section.
|
||||
|
||||
The kernel boot messages will appear in the `kata-proxy` or `kata-shim` log appropriately,
|
||||
The kernel boot messages will appear in the `containerd` or `CRI-O` log appropriately,
|
||||
such as:
|
||||
|
||||
```bash
|
||||
$ sudo journalctl -t kata-proxy
|
||||
$ sudo journalctl -t containerd
|
||||
-- Logs begin at Thu 2020-02-13 16:20:40 UTC, end at Thu 2020-02-13 16:30:23 UTC. --
|
||||
...
|
||||
Feb 13 16:20:56 minikube kata-proxy[17371]: time="2020-02-13T16:20:56.608714324Z" level=info msg="[ 1.418768] brd: module loaded\n" name=kata-proxy pid=17371 sandbox=a13ffb2b9b5a66f7787bdae9a427fa954a4d21ec4031d0179eee2573986a8a6e source=agent
|
||||
Feb 13 16:20:56 minikube kata-proxy[17371]: time="2020-02-13T16:20:56.628493231Z" level=info msg="[ 1.438612] loop: module loaded\n" name=kata-proxy pid=17371 sandbox=a13ffb2b9b5a66f7787bdae9a427fa954a4d21ec4031d0179eee2573986a8a6e source=agent
|
||||
Feb 13 16:20:56 minikube kata-proxy[17371]: time="2020-02-13T16:20:56.67707956Z" level=info msg="[ 1.487165] pmem0: p1\n" name=kata-proxy pid=17371 sandbox=a13ffb2b9b5a66f7787bdae9a427fa954a4d21ec4031d0179eee2573986a8a6e source=agent
|
||||
time="2020-09-15T14:56:23.095113803+08:00" level=debug msg="reading guest console" console-protocol=unix console-url=/run/vc/vm/ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791/console.sock pid=107642 sandbox=ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791 source=virtcontainers subsystem=sandbox vmconsole="[ 0.395399] brd: module loaded"
|
||||
time="2020-09-15T14:56:23.102633107+08:00" level=debug msg="reading guest console" console-protocol=unix console-url=/run/vc/vm/ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791/console.sock pid=107642 sandbox=ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791 source=virtcontainers subsystem=sandbox vmconsole="[ 0.402845] random: fast init done"
|
||||
time="2020-09-15T14:56:23.103125469+08:00" level=debug msg="reading guest console" console-protocol=unix console-url=/run/vc/vm/ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791/console.sock pid=107642 sandbox=ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791 source=virtcontainers subsystem=sandbox vmconsole="[ 0.403544] random: crng init done"
|
||||
time="2020-09-15T14:56:23.105268162+08:00" level=debug msg="reading guest console" console-protocol=unix console-url=/run/vc/vm/ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791/console.sock pid=107642 sandbox=ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791 source=virtcontainers subsystem=sandbox vmconsole="[ 0.405599] loop: module loaded"
|
||||
time="2020-09-15T14:56:23.121121598+08:00" level=debug msg="reading guest console" console-protocol=unix console-url=/run/vc/vm/ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791/console.sock pid=107642 sandbox=ab9f633385d4987828d342e47554fc6442445b32039023eeddaa971c1bb56791 source=virtcontainers subsystem=sandbox vmconsole="[ 0.421324] memmap_init_zone_device initialised 32768 pages in 12ms"
|
||||
...
|
||||
```
|
||||
|
||||
## Running standalone
|
||||
|
||||
It is possible to start the runtime without a container manager. This is
|
||||
mostly useful for testing and debugging purposes.
|
||||
|
||||
### Create an OCI bundle
|
||||
|
||||
To build an
|
||||
[OCI bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md),
|
||||
required by the runtime:
|
||||
|
||||
```
|
||||
$ bundle="/tmp/bundle"
|
||||
$ rootfs="$bundle/rootfs"
|
||||
$ mkdir -p "$rootfs" && (cd "$bundle" && kata-runtime spec)
|
||||
$ sudo docker export $(sudo docker create busybox) | tar -C "$rootfs" -xvf -
|
||||
```
|
||||
|
||||
### Launch the runtime to create a container
|
||||
|
||||
Run the runtime standalone by providing it with the path to the
|
||||
previously-created [OCI bundle](#create-an-oci-bundle):
|
||||
|
||||
```
|
||||
$ sudo kata-runtime --log=/dev/stdout run --bundle "$bundle" foo
|
||||
```
|
||||
|
||||
@@ -78,7 +78,7 @@ The following link shows the latest list of limitations:
|
||||
If you would like to work on resolving a limitation, please refer to the
|
||||
[contributors guide](https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md).
|
||||
If you wish to raise an issue for a new limitation, either
|
||||
[raise an issue directly on the runtime](https://github.com/kata-containers/runtime/issues/new)
|
||||
[raise an issue directly on the runtime](https://github.com/kata-containers/kata-containers/issues/new)
|
||||
or see the
|
||||
[project table of contents](https://github.com/kata-containers/kata-containers)
|
||||
for advice on which repository to raise the issue against.
|
||||
|
||||
@@ -54,7 +54,7 @@ Documents that help to understand and contribute to Kata Containers.
|
||||
|
||||
* [Developer Guide](Developer-Guide.md): Setup the Kata Containers developing environments
|
||||
* [How to contribute to Kata Containers](https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md)
|
||||
* [Code of Conduct](CODE_OF_CONDUCT.md)
|
||||
* [Code of Conduct](../CODE_OF_CONDUCT.md)
|
||||
|
||||
### Code Licensing
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
- [Merge all bump version Pull requests](#merge-all-bump-version-pull-requests)
|
||||
- [Tag all Kata repositories](#tag-all-kata-repositories)
|
||||
- [Check Git-hub Actions](#check-git-hub-actions)
|
||||
- [Create OBS Packages](#create-obs-packages)
|
||||
- [Create release notes](#create-release-notes)
|
||||
- [Announce the release](#announce-the-release)
|
||||
<!-- TOC END -->
|
||||
@@ -42,7 +41,7 @@
|
||||
|
||||
Alternatively, you can also bump the repositories using a script in the Kata packaging repo
|
||||
```
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/packaging/release
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/release
|
||||
$ export NEW_VERSION=<the-new-kata-version>
|
||||
$ export BRANCH=<the-branch-you-want-to-bump>
|
||||
$ ./update-repository-version.sh -p "$NEW_VERSION" "$BRANCH"
|
||||
@@ -59,7 +58,7 @@
|
||||
Once all the pull requests to bump versions in all Kata repositories are merged,
|
||||
tag all the repositories as shown below.
|
||||
```
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/packaging/release
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/release
|
||||
$ git checkout <kata-branch-to-release>
|
||||
$ git pull
|
||||
$ ./tag_repos.sh -p -b "$BRANCH" tag
|
||||
@@ -71,33 +70,6 @@
|
||||
|
||||
Check the [actions status page](https://github.com/kata-containers/kata-containers/actions) to verify all steps in the actions workflow have completed successfully. On success, a static tarball containing Kata release artifacts will be uploaded to the [Release page](https://github.com/kata-containers/kata-containers/releases).
|
||||
|
||||
### Create OBS Packages
|
||||
|
||||
- We have set up an [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/) job
|
||||
to trigger generation of Kata packages in [OBS](https://build.opensuse.org/).
|
||||
Go to the [Azure Pipelines job that creates OBS packages](https://dev.azure.com/kata-containers/release-process/_release?_a=releases&view=mine&definitionId=1).
|
||||
- Click on "Create release" (blue button, at top right corner).
|
||||
It should prompt you for variables to be passed to the release job. They should look like:
|
||||
|
||||
```
|
||||
BRANCH="the-kata-branch-that-is-release"
|
||||
BUILD_HEAD=false
|
||||
OBS_BRANCH="the-kata-branch-that-is-release"
|
||||
```
|
||||
Note: If the release is `Alpha`, `Beta`, or `RC` (that is, part of a `master` release), please use `OBS_BRANCH=master`.
|
||||
|
||||
The above step will create OBS packages for Kata for the various distributions that Kata supports, and test them as well.
|
||||
- Verify that the packages have built successfully by checking the [Kata OBS project page](https://build.opensuse.org/project/subprojects/home:katacontainers).
|
||||
- Make sure packages work correctly. This can be done manually or via the [package testing pipeline](http://jenkins.katacontainers.io/job/package-release-testing).
|
||||
You have to make sure the packages are already published by OBS before this step.
|
||||
It should prompt you for variables to be passed to the pipeline:
|
||||
|
||||
```
|
||||
BRANCH="<kata-branch-to-release>"
|
||||
NEW_VERSION=<the-version-you-expect-to-be-packaged|latest>
|
||||
```
|
||||
Note: `latest` will verify that a package provides the latest Kata tag in that branch.
|
||||
|
||||
### Create release notes
|
||||
|
||||
We have a script in place in the packaging repository to create release notes that include a short-log of the commits across Kata components.
|
||||
@@ -105,12 +77,12 @@
|
||||
Run the script as shown below:
|
||||
|
||||
```
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/packaging/release
|
||||
$ cd ${GOPATH}/src/github.com/kata-containers/kata-containers/tools/packaging/release
|
||||
# Note: OLD_VERSION is where the script should start to get changes.
|
||||
$ ./runtime-release-notes.sh ${OLD_VERSION} ${NEW_VERSION} > notes.md
|
||||
# Edit the `notes.md` file to review and make any changes to the release notes.
|
||||
# Add the release notes in GitHub runtime.
|
||||
$ hub -C "${GOPATH}/src/github.com/kata-containers/runtime" release edit -F notes.md "${NEW_VERSION}"
|
||||
$ hub release edit -F notes.md "${NEW_VERSION}"
|
||||
```
|
||||
|
||||
### Announce the release
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 46 KiB |
File diff suppressed because one or more lines are too long
|
Before Width: | Height: | Size: 14 KiB |
@@ -1,72 +1,57 @@
|
||||
# Kata Containers Architecture
|
||||
|
||||
|
||||
* [Overview](#overview)
|
||||
* [Virtualization](#virtualization)
|
||||
* [Guest assets](#guest-assets)
|
||||
* [Guest kernel](#guest-kernel)
|
||||
* [Guest Image](#guest-image)
|
||||
* [Root filesystem image](#root-filesystem-image)
|
||||
* [Initrd image](#initrd-image)
|
||||
* [Agent](#agent)
|
||||
* [Runtime](#runtime)
|
||||
* [Configuration](#configuration)
|
||||
* [Significant OCI commands](#significant-oci-commands)
|
||||
* [create](#create)
|
||||
* [start](#start)
|
||||
* [exec](#exec)
|
||||
* [kill](#kill)
|
||||
* [delete](#delete)
|
||||
* [Networking](#networking)
|
||||
* [Storage](#storage)
|
||||
* [Kubernetes Support](#kubernetes-support)
|
||||
* [Problem Statement](#problem-statement)
|
||||
* [Containerd](#containerd)
|
||||
* [CRI-O](#cri-o)
|
||||
* [OCI Annotations](#oci-annotations)
|
||||
* [Mixing VM based and namespace based runtimes](#mixing-vm-based-and-namespace-based-runtimes)
|
||||
* [Appendices](#appendices)
|
||||
* [DAX](#dax)
|
||||
- [Kata Containers Architecture](#kata-containers-architecture)
|
||||
- [Overview](#overview)
|
||||
- [Virtualization](#virtualization)
|
||||
- [Guest assets](#guest-assets)
|
||||
- [Guest kernel](#guest-kernel)
|
||||
- [Guest image](#guest-image)
|
||||
- [Root filesystem image](#root-filesystem-image)
|
||||
- [Initrd image](#initrd-image)
|
||||
- [Agent](#agent)
|
||||
- [Runtime](#runtime)
|
||||
- [Configuration](#configuration)
|
||||
- [Networking](#networking)
|
||||
- [CNM](#cnm)
|
||||
- [Network Hotplug](#network-hotplug)
|
||||
- [Storage](#storage)
|
||||
- [Kubernetes support](#kubernetes-support)
|
||||
- [OCI annotations](#oci-annotations)
|
||||
- [Mixing VM based and namespace based runtimes](#mixing-vm-based-and-namespace-based-runtimes)
|
||||
- [Appendices](#appendices)
|
||||
- [DAX](#dax)
|
||||
|
||||
## Overview
|
||||
|
||||
This is an architectural overview of Kata Containers, based on the 1.5.0 release.
|
||||
This is an architectural overview of Kata Containers, based on the 2.0 release.
|
||||
|
||||
The two primary deliverables of the Kata Containers project are a container runtime
|
||||
and a CRI friendly shim. There is also a CRI friendly library API behind them.
|
||||
The primary deliverable of the Kata Containers project is a CRI friendly shim. There is also a CRI friendly library API behind them.
|
||||
|
||||
The [Kata Containers runtime (`kata-runtime`)](../../src/runtime)
|
||||
The [Kata Containers runtime](../../src/runtime)
|
||||
is compatible with the [OCI](https://github.com/opencontainers) [runtime specification](https://github.com/opencontainers/runtime-spec)
|
||||
and therefore works seamlessly with the
|
||||
[Docker\* Engine](https://www.docker.com/products/docker-engine) pluggable runtime
|
||||
architecture. It also supports the [Kubernetes\* Container Runtime Interface (CRI)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md)
|
||||
and therefore works seamlessly with the [Kubernetes\* Container Runtime Interface (CRI)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md)
|
||||
through the [CRI-O\*](https://github.com/kubernetes-incubator/cri-o) and
|
||||
[Containerd CRI Plugin\*](https://github.com/containerd/cri) implementation. In other words, you can transparently
|
||||
select between the [default Docker and CRI shim runtime (runc)](https://github.com/opencontainers/runc)
|
||||
and `kata-runtime`.
|
||||
[Containerd\*](https://github.com/containerd/containerd) implementation.
|
||||
|
||||
`kata-runtime` creates a QEMU\*/KVM virtual machine for each container or pod,
|
||||
the Docker engine or `kubelet` (Kubernetes) creates respectively.
|
||||
|
||||

|
||||
Kata Containers creates a QEMU\*/KVM virtual machine for each pod that `kubelet` (Kubernetes) creates.
|
||||
|
||||
The [`containerd-shim-kata-v2` (shown as `shimv2` from this point onwards)](../../src/runtime/containerd-shim-v2)
|
||||
is another Kata Containers entrypoint, which
|
||||
is the Kata Containers entrypoint, which
|
||||
implements the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2) for Kata.
|
||||
With `shimv2`, Kubernetes can launch Pod and OCI compatible containers with one shim (the `shimv2`) per Pod instead
|
||||
of `2N+1` shims (a `containerd-shim` and a `kata-shim` for each container and the Pod sandbox itself), and no standalone
|
||||
`kata-proxy` process even if no VSOCK is available.
|
||||
|
||||
Before `shimv2` (as done in [Kata Containers 1.x releases](https://github.com/kata-containers/runtime/releases)), we need to create a `containerd-shim` and a [`kata-shim`](https://github.com/kata-containers/shim) for each container and the Pod sandbox itself, plus an optional [`kata-proxy`](https://github.com/kata-containers/proxy) when VSOCK is not available. With `shimv2`, Kubernetes can launch Pod and OCI compatible containers with one shim (the `shimv2`) per Pod instead of `2N+1` shims, and no standalone `kata-proxy` process even if no VSOCK is available.
|
||||
|
||||

|
||||
|
||||
The container process is then spawned by
|
||||
[agent](../../src/agent), an agent process running
|
||||
as a daemon inside the virtual machine. `kata-agent` runs a gRPC server in
|
||||
[`kata-agent`](../../src/agent), an agent process running
|
||||
as a daemon inside the virtual machine. `kata-agent` runs a [`ttRPC`](https://github.com/containerd/ttrpc-rust) server in
|
||||
the guest using a VIRTIO serial or VSOCK interface which QEMU exposes as a socket
|
||||
file on the host. `kata-runtime` uses a gRPC protocol to communicate with
|
||||
file on the host. `shimv2` uses a `ttRPC` protocol to communicate with
|
||||
the agent. This protocol allows the runtime to send container management
|
||||
commands to the agent. The protocol is also used to carry the I/O streams (stdout,
|
||||
stderr, stdin) between the containers and the manage engines (e.g. Docker Engine).
|
||||
stderr, stdin) between the containers and the management engines (e.g. CRI-O or containerd).
|
||||
|
||||
For any given container, both the init process and all potentially executed
|
||||
commands within that container, together with their related I/O streams, need
|
||||
@@ -111,7 +96,7 @@ The only services running in the context of the mini O/S are the init daemon
|
||||
is created using libcontainer, creating a container in the same manner that is done
|
||||
by `runc`.
|
||||
|
||||
For example, when `docker run -ti ubuntu date` is run:
|
||||
For example, when `ctr run -ti ubuntu date` is run:
|
||||
|
||||
- The hypervisor will boot the mini-OS image using the guest kernel.
|
||||
- `systemd`, running inside the mini-OS context, will launch the `kata-agent` in
|
||||
@@ -130,170 +115,37 @@ The only service running in the context of the initrd is the [Agent](#agent) as
|
||||
|
||||
## Agent
|
||||
|
||||
[`kata-agent`](../../src/agent) is a process running in the
|
||||
guest as a supervisor for managing containers and processes running within
|
||||
those containers.
|
||||
[`kata-agent`](../../src/agent) is a process running in the guest as a supervisor for managing containers and processes running within those containers.
|
||||
|
||||
The `kata-agent` execution unit is the sandbox. A `kata-agent` sandbox is a container sandbox defined by a set of namespaces (NS, UTS, IPC and PID). `kata-runtime` can
|
||||
For the 2.0 release, the `kata-agent` is rewritten in the [Rust programming language](https://www.rust-lang.org/) so that we can minimize its memory footprint while keeping the memory safety of the original Go version of [`kata-agent` used in Kata Containers 1.x](https://github.com/kata-containers/agent). This memory footprint reduction is significant, from tens of megabytes down to less than 100 kilobytes, enabling Kata Containers in more use cases like functional computing and edge computing.
|
||||
|
||||
The `kata-agent` execution unit is the sandbox. A `kata-agent` sandbox is a container sandbox defined by a set of namespaces (NS, UTS, IPC and PID). `shimv2` can
|
||||
run several containers per VM to support container engines that require multiple
|
||||
containers running inside a pod. In the case of docker, `kata-runtime` creates a
|
||||
single container per pod.
|
||||
containers running inside a pod.
|
||||
|
||||
`kata-agent` communicates with the other Kata components over ttRPC.
|
||||
|
||||
### Agent gRPC protocol
|
||||
|
||||
placeholder
|
||||
`kata-agent` communicates with the other Kata components over `ttRPC`.
|
||||
|
||||
## Runtime
|
||||
|
||||
`kata-runtime` is an OCI compatible container runtime and is responsible for handling
|
||||
all commands specified by
|
||||
[the OCI runtime specification](https://github.com/opencontainers/runtime-spec)
|
||||
and launching `kata-shim` instances.
|
||||
`containerd-shim-kata-v2` is a [containerd runtime shimv2](https://github.com/containerd/containerd/blob/v1.4.1/runtime/v2/README.md) implementation and is responsible for handling the `runtime v2 shim APIs`, which is similar to [the OCI runtime specification](https://github.com/opencontainers/runtime-spec) but simplifies the architecture by loading the runtime once and making RPC calls to handle the various container lifecycle commands. This refinement is an improvement on the OCI specification, which requires the container manager to call the runtime binary multiple times, at least once for each lifecycle command.
|
||||
|
||||
`kata-runtime` heavily utilizes the
|
||||
[virtcontainers project](https://github.com/containers/virtcontainers), which
|
||||
provides a generic, runtime-specification agnostic, hardware-virtualized containers
|
||||
library.
|
||||
`containerd-shim-kata-v2` heavily utilizes the
|
||||
[virtcontainers package](../../src/runtime/virtcontainers/), which provides a generic, runtime-specification agnostic, hardware-virtualized containers library.
|
||||
|
||||
### Configuration
|
||||
|
||||
The runtime uses a TOML format configuration file called `configuration.toml`. By
|
||||
default this file is installed in the `/usr/share/defaults/kata-containers`
|
||||
directory and contains various settings such as the paths to the hypervisor,
|
||||
the guest kernel and the mini-OS image.
|
||||
The runtime uses a TOML format configuration file called `configuration.toml`. By default this file is installed in the `/usr/share/defaults/kata-containers` directory and contains various settings such as the paths to the hypervisor, the guest kernel and the mini-OS image.
|
||||
|
||||
The actual configuration file paths can be determined by running:
|
||||
```
|
||||
$ kata-runtime --kata-show-default-config-paths
|
||||
```
|
||||
Most users will not need to modify the configuration file.
|
||||
|
||||
The file is well commented and provides a few "knobs" that can be used to modify
|
||||
the behavior of the runtime.
|
||||
The file is well commented and provides a few "knobs" that can be used to modify the behavior of the runtime and your chosen hypervisor.
|
||||
|
||||
The configuration file is also used to enable runtime [debug output](../Developer-Guide.md#enable-full-debug).
|
||||
|
||||
### Significant OCI commands
|
||||
|
||||
Here we describe how `kata-runtime` handles the most important OCI commands.
|
||||
|
||||
#### `create`
|
||||
|
||||
When handling the OCI
|
||||
[`create`](https://github.com/kata-containers/runtime/blob/master/cli/create.go)
|
||||
command, `kata-runtime` goes through the following steps:
|
||||
|
||||
1. Create the network namespace where we will spawn VM and shims processes.
|
||||
2. Call into the pre-start hooks. One of them should be responsible for creating
|
||||
the `veth` network pair between the host network namespace and the network namespace
|
||||
freshly created.
|
||||
3. Scan the network from the new network namespace, and create a MACVTAP connection
|
||||
between the `veth` interface and a `tap` interface into the VM.
|
||||
4. Start the VM inside the network namespace by providing the `tap` interface
|
||||
previously created.
|
||||
5. Wait for the VM to be ready.
|
||||
6. Start `kata-proxy`, which will connect to the created VM. The `kata-proxy` process
|
||||
will take care of proxying all communications with the VM. Kata has a single proxy
|
||||
per VM.
|
||||
7. Communicate with `kata-agent` (through the proxy) to configure the sandbox
|
||||
inside the VM.
|
||||
8. Communicate with `kata-agent` to create the container, relying on the OCI
|
||||
configuration file `config.json` initially provided to `kata-runtime`. This
|
||||
spawns the container process inside the VM, leveraging the `libcontainer` package.
|
||||
9. Start `kata-shim`, which will connect to the gRPC server socket provided by the `kata-proxy`. `kata-shim` will spawn a few Go routines to parallelize blocking calls `ReadStdout()` , `ReadStderr()` and `WaitProcess()`. Both `ReadStdout()` and `ReadStderr()` are run through infinite loops since `kata-shim` wants the output of those until the container process terminates. `WaitProcess()` is a unique call which returns the exit code of the container process when it terminates inside the VM. Note that `kata-shim` is started inside the network namespace, to allow upper layers to determine which network namespace has been created and by checking the `kata-shim` process. It also creates a new PID namespace by entering into it. This ensures that all `kata-shim` processes belonging to the same container will get killed when the `kata-shim` representing the container process terminates.
|
||||
|
||||
At this point the container process is running inside of the VM, and it is represented
|
||||
on the host system by the `kata-shim` process.
|
||||
|
||||

|
||||
|
||||
#### `start`
|
||||
|
||||
With traditional containers, [`start`](https://github.com/kata-containers/runtime/blob/master/cli/start.go) launches a container process in its own set of namespaces. With Kata Containers, the main task of `kata-runtime` is to ask [`kata-agent`](#agent) to start the container workload inside the virtual machine. `kata-runtime` will run through the following steps:
|
||||
|
||||
1. Communicate with `kata-agent` (through the proxy) to start the container workload
|
||||
inside the VM. If, for example, the command to execute inside of the container is `top`,
|
||||
the `kata-shim`'s `ReadStdOut()` will start returning text output for top, and
|
||||
`WaitProcess()` will continue to block as long as the `top` process runs.
|
||||
2. Call into the post-start hooks. Usually, this is a no-op since nothing is provided
|
||||
(this needs clarification)
|
||||
|
||||

|
||||
|
||||
#### `exec`
|
||||
|
||||
OCI [`exec`](https://github.com/kata-containers/runtime/blob/master/cli/exec.go) allows you to run an additional command within an already running
|
||||
container. In Kata Containers, this is handled as follows:
|
||||
|
||||
1. A request is sent to the `kata agent` (through the proxy) to start a new process
|
||||
inside an existing container running within the VM.
|
||||
2. A new `kata-shim` is created within the same network and PID namespaces as the
|
||||
original `kata-shim` representing the container process. This new `kata-shim` is
|
||||
used for the new exec process.
|
||||
|
||||
Now the process started with `exec` is running within the VM, sharing `uts`, `pid`, `mnt` and `ipc` namespaces with the container process.
|
||||
|
||||

|
||||
|
||||
#### `kill`
|
||||
|
||||
When sending the OCI [`kill`](https://github.com/kata-containers/runtime/blob/master/cli/kill.go) command, the container runtime should send a
|
||||
[UNIX signal](https://en.wikipedia.org/wiki/Unix_signal) to the container process.
|
||||
A `kill` sending a termination signal such as `SIGKILL` or `SIGTERM` is expected
|
||||
to terminate the container process. In the context of a traditional container,
|
||||
this means stopping the container. For `kata-runtime`, this translates to stopping
|
||||
the container and the VM associated with it.
|
||||
|
||||
1. Send a request to kill the container process to the `kata-agent` (through the proxy).
|
||||
2. Wait for `kata-shim` process to exit.
|
||||
3. Force kill the container process if `kata-shim` process didn't return after a
|
||||
timeout. This is done by communicating with `kata-agent` (connecting the proxy),
|
||||
sending `SIGKILL` signal to the container process inside the VM.
|
||||
4. Wait for `kata-shim` process to exit, and return an error if we reach the
|
||||
timeout again.
|
||||
5. Communicate with `kata-agent` (through the proxy) to remove the container
|
||||
configuration from the VM.
|
||||
6. Communicate with `kata-agent` (through the proxy) to destroy the sandbox
|
||||
configuration from the VM.
|
||||
7. Stop the VM.
|
||||
8. Remove all network configurations inside the network namespace and delete the
|
||||
namespace.
|
||||
9. Execute post-stop hooks.
|
||||
|
||||
If `kill` was invoked with a non-termination signal, this simply signals the container process. Otherwise, everything has been torn down, and the VM has been removed.
|
||||
|
||||
#### `delete`
|
||||
|
||||
[`delete`](https://github.com/kata-containers/runtime/blob/master/cli/delete.go) removes all internal resources related to a container. A running container
|
||||
cannot be deleted unless the OCI runtime is explicitly being asked to, by using
|
||||
`--force` flag.
|
||||
|
||||
If the sandbox is not stopped, but the particular container process returned on
|
||||
its own already, the `kata-runtime` will first go through most of the steps a `kill`
|
||||
would go through for a termination signal. After this process, or if the `sandboxID` was already stopped to begin with, then `kata-runtime` will:
|
||||
|
||||
1. Remove container resources. Every file kept under `/var/{lib,run}/virtcontainers/sandboxes/<sandboxID>/<containerID>`.
|
||||
2. Remove sandbox resources. Every file kept under `/var/{lib,run}/virtcontainers/sandboxes/<sandboxID>`.
|
||||
|
||||
At this point, everything related to the container should have been removed from the host system, and no related process should be running.
|
||||
|
||||
#### `state`
|
||||
|
||||
[`state`](https://github.com/kata-containers/runtime/blob/master/cli/state.go)
|
||||
returns the status of the container. For `kata-runtime`, this means being
|
||||
able to detect if the container is still running by looking at the state of `kata-shim`
|
||||
process representing this container process.
|
||||
|
||||
1. Ask the container status by checking information stored on disk. (clarification needed)
|
||||
2. Check `kata-shim` process representing the container.
|
||||
3. In case the container status on disk was supposed to be `ready` or `running`,
|
||||
and the `kata-shim` process no longer exists, this involves the detection of a
|
||||
stopped container. This means that before returning the container status,
|
||||
the container has to be properly stopped. Here are the steps involved in this detection:
|
||||
1. Wait for `kata-shim` process to exit.
|
||||
2. Force kill the container process if `kata-shim` process didn't return after a timeout. This is done by communicating with `kata-agent` (connecting the proxy), sending `SIGKILL` signal to the container process inside the VM.
|
||||
3. Wait for `kata-shim` process to exit, and return an error if we reach the timeout again.
|
||||
4. Communicate with `kata-agent` (connecting the proxy) to remove the container configuration from the VM.
|
||||
4. Return container status.
|
||||
|
||||
## Networking
|
||||
|
||||
Containers will typically live in their own, possibly shared, networking namespace.
|
||||
@@ -310,7 +162,7 @@ cannot handle `veth` interfaces. Typically, `TAP` interfaces are created for VM
|
||||
connectivity.
|
||||
|
||||
To overcome incompatibility between typical container engines expectations
|
||||
and virtual machines, `kata-runtime` networking transparently connects `veth`
|
||||
and virtual machines, Kata Containers networking transparently connects `veth`
|
||||
interfaces with `TAP` ones using MACVTAP:
|
||||
|
||||

|
||||
@@ -375,35 +227,14 @@ The following diagram illustrates the Kata Containers network hotplug workflow.
|
||||

|
||||
|
||||
## Storage
|
||||
Container workloads are shared with the virtualized environment through [9pfs](https://www.kernel.org/doc/Documentation/filesystems/9p.txt).
|
||||
The devicemapper storage driver is a special case. The driver uses dedicated block
|
||||
devices rather than formatted filesystems, and operates at the block level rather
|
||||
than the file level. This knowledge is used to directly use the underlying block
|
||||
device instead of the overlay file system for the container root file system. The
|
||||
block device maps to the top read-write layer for the overlay. This approach gives
|
||||
much better I/O performance compared to using 9pfs to share the container file system.
|
||||
Container workloads are shared with the virtualized environment through [virtio-fs](https://virtio-fs.gitlab.io/).
|
||||
|
||||
The approach above does introduce a limitation in terms of dynamic file copy
|
||||
in/out of the container using the `docker cp` operations. The copy operation from
|
||||
host to container accesses the mounted file system on the host-side. This is
|
||||
not expected to work and may lead to inconsistencies as the block device will
|
||||
be simultaneously written to from two different mounts. The copy operation from
|
||||
container to host will work, provided the user calls `sync(1)` from within the
|
||||
container prior to the copy to make sure any outstanding cached data is written
|
||||
to the block device.
|
||||
The [devicemapper `snapshotter`](https://github.com/containerd/containerd/tree/master/snapshots/devmapper) is a special case. The `snapshotter` uses dedicated block devices rather than formatted filesystems, and operates at the block level rather than the file level. This knowledge is used to directly use the underlying block device instead of the overlay file system for the container root file system. The block device maps to the top read-write layer for the overlay. This approach gives much better I/O performance compared to using `virtio-fs` to share the container file system.
|
||||
|
||||
```
|
||||
docker cp [OPTIONS] CONTAINER:SRC_PATH HOST:DEST_PATH
|
||||
docker cp [OPTIONS] HOST:SRC_PATH CONTAINER:DEST_PATH
|
||||
```
|
||||
Kata Containers has the ability to hotplug and remove block devices, which makes it possible to use block devices for containers started after the VM has been launched.
|
||||
|
||||
Kata Containers has the ability to hotplug and remove block devices, which makes it
|
||||
possible to use block devices for containers started after the VM has been launched.
|
||||
|
||||
Users can check to see if the container uses the devicemapper block device as its
|
||||
rootfs by calling `mount(8)` within the container. If the devicemapper block device
|
||||
is used, `/` will be mounted on `/dev/vda`. Users can disable direct mounting
|
||||
of the underlying block device through the runtime configuration.
|
||||
Users can check to see if the container uses the devicemapper block device as its rootfs by calling `mount(8)` within the container. If the devicemapper block device
|
||||
is used, `/` will be mounted on `/dev/vda`. Users can disable direct mounting of the underlying block device through the runtime configuration.
|
||||
|
||||
## Kubernetes support
|
||||
|
||||
@@ -424,44 +255,13 @@ lifecycle management from container execution through the dedicated
|
||||
|
||||
In other words, a Kubelet is a CRI client and expects a CRI implementation to
|
||||
handle the server side of the interface.
|
||||
[CRI-O\*](https://github.com/kubernetes-incubator/cri-o) and [Containerd CRI Plugin\*](https://github.com/containerd/cri) are CRI implementations that rely on [OCI](https://github.com/opencontainers/runtime-spec)
|
||||
[CRI-O\*](https://github.com/kubernetes-incubator/cri-o) and [Containerd\*](https://github.com/containerd/containerd/) are CRI implementations that rely on [OCI](https://github.com/opencontainers/runtime-spec)
|
||||
compatible runtimes for managing container instances.
|
||||
|
||||
Kata Containers is an officially supported CRI-O and Containerd CRI Plugin runtime. It is OCI compatible and therefore aligns with project's architecture and requirements.
|
||||
However, due to the fact that Kubernetes execution units are sets of containers (also
|
||||
known as pods) rather than single containers, the Kata Containers runtime needs to
|
||||
get extra information to seamlessly integrate with Kubernetes.
|
||||
|
||||
### Problem statement
|
||||
|
||||
The Kubernetes\* execution unit is a pod that has specifications detailing constraints
|
||||
such as namespaces, groups, hardware resources, security contexts, *etc.* shared by all
|
||||
the containers within that pod.
|
||||
By default the Kubelet will send a container creation request to its CRI runtime for
|
||||
each pod and container creation. Without additional metadata from the CRI runtime,
|
||||
the Kata Containers runtime will thus create one virtual machine for each pod and for
|
||||
each container within a pod. However, the task of providing the Kubernetes pod semantics
|
||||
when creating one virtual machine for each container within the same pod is complex given
|
||||
the resources of these virtual machines (such as networking or PID) need to be shared.
|
||||
|
||||
The challenge with Kata Containers when working as a Kubernetes\* runtime is thus to know
|
||||
when to create a full virtual machine (for pods) and when to create a new container inside
|
||||
a previously created virtual machine. In both cases it will get called with very similar
|
||||
arguments, so it needs the help of the Kubernetes CRI runtime to be able to distinguish a
|
||||
pod creation request from a container one.
|
||||
|
||||
### Containerd
|
||||
|
||||
As of Kata Containers 1.5, using `shimv2` with containerd 1.2.0 or above is the preferred
|
||||
way to run Kata Containers with Kubernetes ([see the howto](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md#configure-containerd-to-use-kata-containers)).
|
||||
CRI-O will catch up soon ([`kubernetes-sigs/cri-o#2024`](https://github.com/kubernetes-sigs/cri-o/issues/2024)).
|
||||
|
||||
Refer to the following how-to guides:
|
||||
Kata Containers is an officially supported CRI-O and Containerd runtime. Refer to the following guides on how to set up Kata Containers with Kubernetes:
|
||||
|
||||
- [How to use Kata Containers and Containerd](../how-to/containerd-kata.md)
|
||||
- [How to use Kata Containers and CRI (containerd plugin) with Kubernetes](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md)
|
||||
|
||||
### CRI-O
|
||||
- [Run Kata Containers with Kubernetes](../how-to/run-kata-with-k8s.md)
|
||||
|
||||
#### OCI annotations
|
||||
|
||||
@@ -506,36 +306,10 @@ with a Kubernetes pod:
|
||||
|
||||
#### Mixing VM based and namespace based runtimes
|
||||
|
||||
> **Note:** Since Kubernetes 1.12, the [`Kubernetes RuntimeClass`](../how-to/containerd-kata.md#kubernetes-runtimeclass)
|
||||
> **Note:** Since Kubernetes 1.12, the [`Kubernetes RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/)
|
||||
> has been supported and the user can specify runtime without the non-standardized annotations.
|
||||
|
||||
One interesting evolution of the CRI-O support for `kata-runtime` is the ability
|
||||
to run virtual machine based pods alongside namespace ones. With CRI-O and Kata
|
||||
Containers, one can introduce the concept of workload trust inside a Kubernetes
|
||||
cluster.
|
||||
|
||||
A cluster operator can now tag (through Kubernetes annotations) container workloads
|
||||
as `trusted` or `untrusted`. The former labels known to be safe workloads while
|
||||
the latter describes potentially malicious or misbehaving workloads that need the
|
||||
highest degree of isolation. In a software development context, an example of a `trusted` workload would be a containerized continuous integration engine whereas all
|
||||
developers' applications would be `untrusted` by default. Developers' workloads can
|
||||
be buggy, unstable or even include malicious code and thus from a security perspective
|
||||
it makes sense to tag them as `untrusted`. A CRI-O and Kata Containers based
|
||||
Kubernetes cluster handles this use case transparently as long as the deployed
|
||||
containers are properly tagged. All `untrusted` containers will be handled by Kata Containers and thus run in a hardware virtualized secure sandbox while `runc`, for
|
||||
example, could handle the `trusted` ones.
|
||||
|
||||
CRI-O's default behavior is to trust all pods, except when they're annotated with
|
||||
`io.kubernetes.cri-o.TrustedSandbox` set to `false`. The default CRI-O trust level
|
||||
is set through its `configuration.toml` configuration file. Generally speaking,
|
||||
the CRI-O runtime selection between its trusted runtime (typically `runc`) and its untrusted one (`kata-runtime`) is a function of the pod `Privileged` setting, the `io.kubernetes.cri-o.TrustedSandbox` annotation value, and the default CRI-O trust
|
||||
level. When a pod is `Privileged`, the runtime will always be `runc`. However, when
|
||||
a pod is **not** `Privileged` the runtime selection is done as follows:
|
||||
|
||||
| | `io.kubernetes.cri-o.TrustedSandbox` not set | `io.kubernetes.cri-o.TrustedSandbox` = `true` | `io.kubernetes.cri-o.TrustedSandbox` = `false` |
|
||||
| :--- | :---: | :---: | :---: |
|
||||
| Default CRI-O trust level: `trusted` | runc | runc | Kata Containers |
|
||||
| Default CRI-O trust level: `untrusted` | Kata Containers | Kata Containers | Kata Containers |
|
||||
With `RuntimeClass`, users can define Kata Containers as a `RuntimeClass` and then explicitly specify that a pod being created as a Kata Containers pod. For details, please refer to [How to use Kata Containers and Containerd](../../docs/how-to/containerd-kata.md).
|
||||
|
||||
|
||||
# Appendices
|
||||
|
||||
@@ -220,6 +220,566 @@ components:
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- prefix: kata_firecracker
|
||||
title: Firecracker vmm metrics
|
||||
desc: Metrics for Firecracker vmm
|
||||
metrics:
|
||||
- name: kata_firecracker_api_server
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics related to the internal API server.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: process_startup_time_cpu_us
|
||||
desc: ""
|
||||
- value: process_startup_time_us
|
||||
desc: ""
|
||||
- value: sync_response_fails
|
||||
desc: ""
|
||||
- value: sync_vmm_send_timeout_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_block
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Block Device associated metrics.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: activate_fails
|
||||
desc: ""
|
||||
- value: cfg_fails
|
||||
desc: ""
|
||||
- value: event_fails
|
||||
desc: ""
|
||||
- value: execute_fails
|
||||
desc: ""
|
||||
- value: flush_count
|
||||
desc: ""
|
||||
- value: invalid_reqs_count
|
||||
desc: ""
|
||||
- value: no_avail_buffer
|
||||
desc: ""
|
||||
- value: queue_event_count
|
||||
desc: ""
|
||||
- value: rate_limiter_event_count
|
||||
desc: ""
|
||||
- value: rate_limiter_throttled_events
|
||||
desc: ""
|
||||
- value: read_bytes
|
||||
desc: ""
|
||||
- value: read_count
|
||||
desc: ""
|
||||
- value: update_count
|
||||
desc: ""
|
||||
- value: update_fails
|
||||
desc: ""
|
||||
- value: write_bytes
|
||||
desc: ""
|
||||
- value: write_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_get_api_requests
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to GET API Requests for counting user triggered actions and/or failures.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: instance_info_count
|
||||
desc: ""
|
||||
- value: instance_info_fails
|
||||
desc: ""
|
||||
- value: machine_cfg_count
|
||||
desc: ""
|
||||
- value: machine_cfg_fails
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_i8042
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to the i8042 device.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: error_count
|
||||
desc: ""
|
||||
- value: missed_read_count
|
||||
desc: ""
|
||||
- value: missed_write_count
|
||||
desc: ""
|
||||
- value: read_count
|
||||
desc: ""
|
||||
- value: reset_count
|
||||
desc: ""
|
||||
- value: write_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_latencies_us
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Performance metrics related for the moment only to snapshots.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: diff_create_snapshot
|
||||
desc: ""
|
||||
- value: full_create_snapshot
|
||||
desc: ""
|
||||
- value: load_snapshot
|
||||
desc: ""
|
||||
- value: pause_vm
|
||||
desc: ""
|
||||
- value: resume_vm
|
||||
desc: ""
|
||||
- value: vmm_diff_create_snapshot
|
||||
desc: ""
|
||||
- value: vmm_full_create_snapshot
|
||||
desc: ""
|
||||
- value: vmm_load_snapshot
|
||||
desc: ""
|
||||
- value: vmm_pause_vm
|
||||
desc: ""
|
||||
- value: vmm_resume_vm
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_logger
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics for the logging subsystem.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: log_fails
|
||||
desc: ""
|
||||
- value: metrics_fails
|
||||
desc: ""
|
||||
- value: missed_log_count
|
||||
desc: ""
|
||||
- value: missed_metrics_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_mmds
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics for the MMDS functionality.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: connections_created
|
||||
desc: ""
|
||||
- value: connections_destroyed
|
||||
desc: ""
|
||||
- value: rx_accepted
|
||||
desc: ""
|
||||
- value: rx_accepted_err
|
||||
desc: ""
|
||||
- value: rx_accepted_unusual
|
||||
desc: ""
|
||||
- value: rx_bad_eth
|
||||
desc: ""
|
||||
- value: rx_count
|
||||
desc: ""
|
||||
- value: tx_bytes
|
||||
desc: ""
|
||||
- value: tx_count
|
||||
desc: ""
|
||||
- value: tx_errors
|
||||
desc: ""
|
||||
- value: tx_frames
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_net
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Network-related metrics.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: activate_fails
|
||||
desc: ""
|
||||
- value: cfg_fails
|
||||
desc: ""
|
||||
- value: event_fails
|
||||
desc: ""
|
||||
- value: mac_address_updates
|
||||
desc: ""
|
||||
- value: no_rx_avail_buffer
|
||||
desc: ""
|
||||
- value: no_tx_avail_buffer
|
||||
desc: ""
|
||||
- value: rx_bytes_count
|
||||
desc: ""
|
||||
- value: rx_count
|
||||
desc: ""
|
||||
- value: rx_event_rate_limiter_count
|
||||
desc: ""
|
||||
- value: rx_fails
|
||||
desc: ""
|
||||
- value: rx_packets_count
|
||||
desc: ""
|
||||
- value: rx_partial_writes
|
||||
desc: ""
|
||||
- value: rx_queue_event_count
|
||||
desc: ""
|
||||
- value: rx_rate_limiter_throttled
|
||||
desc: ""
|
||||
- value: rx_tap_event_count
|
||||
desc: ""
|
||||
- value: tap_read_fails
|
||||
desc: ""
|
||||
- value: tap_write_fails
|
||||
desc: ""
|
||||
- value: tx_bytes_count
|
||||
desc: ""
|
||||
- value: tx_count
|
||||
desc: ""
|
||||
- value: tx_fails
|
||||
desc: ""
|
||||
- value: tx_malformed_frames
|
||||
desc: ""
|
||||
- value: tx_packets_count
|
||||
desc: ""
|
||||
- value: tx_partial_reads
|
||||
desc: ""
|
||||
- value: tx_queue_event_count
|
||||
desc: ""
|
||||
- value: tx_rate_limiter_event_count
|
||||
desc: ""
|
||||
- value: tx_rate_limiter_throttled
|
||||
desc: ""
|
||||
- value: tx_spoofed_mac_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_patch_api_requests
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to PATCH API Requests for counting user triggered actions and/or failures.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: drive_count
|
||||
desc: ""
|
||||
- value: drive_fails
|
||||
desc: ""
|
||||
- value: machine_cfg_count
|
||||
desc: ""
|
||||
- value: machine_cfg_fails
|
||||
desc: ""
|
||||
- value: network_count
|
||||
desc: ""
|
||||
- value: network_fails
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_put_api_requests
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to PUT API Requests for counting user triggered actions and/or failures.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: actions_count
|
||||
desc: ""
|
||||
- value: actions_fails
|
||||
desc: ""
|
||||
- value: boot_source_count
|
||||
desc: ""
|
||||
- value: boot_source_fails
|
||||
desc: ""
|
||||
- value: drive_count
|
||||
desc: ""
|
||||
- value: drive_fails
|
||||
desc: ""
|
||||
- value: logger_count
|
||||
desc: ""
|
||||
- value: logger_fails
|
||||
desc: ""
|
||||
- value: machine_cfg_count
|
||||
desc: ""
|
||||
- value: machine_cfg_fails
|
||||
desc: ""
|
||||
- value: metrics_count
|
||||
desc: ""
|
||||
- value: metrics_fails
|
||||
desc: ""
|
||||
- value: network_count
|
||||
desc: ""
|
||||
- value: network_fails
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_rtc
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to the RTC device.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: error_count
|
||||
desc: ""
|
||||
- value: missed_read_count
|
||||
desc: ""
|
||||
- value: missed_write_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_seccomp
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics for the seccomp filtering.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: num_faults
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_signals
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics related to signals.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: sigbus
|
||||
desc: ""
|
||||
- value: sigsegv
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_uart
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to the UART device.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: error_count
|
||||
desc: ""
|
||||
- value: flush_count
|
||||
desc: ""
|
||||
- value: missed_read_count
|
||||
desc: ""
|
||||
- value: missed_write_count
|
||||
desc: ""
|
||||
- value: read_count
|
||||
desc: ""
|
||||
- value: write_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_vcpu
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to VCPUs' mode of functioning.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: exit_io_in
|
||||
desc: ""
|
||||
- value: exit_io_out
|
||||
desc: ""
|
||||
- value: exit_mmio_read
|
||||
desc: ""
|
||||
- value: exit_mmio_write
|
||||
desc: ""
|
||||
- value: failures
|
||||
desc: ""
|
||||
- value: filter_cpuid
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_vmm
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Metrics specific to the machine manager as a whole.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: device_events
|
||||
desc: ""
|
||||
- value: panic_count
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- name: kata_firecracker_vsock
|
||||
type: GAUGE
|
||||
unit: ""
|
||||
help: Vsock-related metrics.
|
||||
labels:
|
||||
- name: item
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: true
|
||||
values:
|
||||
- value: activate_fails
|
||||
desc: ""
|
||||
- value: cfg_fails
|
||||
desc: ""
|
||||
- value: conn_event_fails
|
||||
desc: ""
|
||||
- value: conns_added
|
||||
desc: ""
|
||||
- value: conns_killed
|
||||
desc: ""
|
||||
- value: conns_removed
|
||||
desc: ""
|
||||
- value: ev_queue_event_fails
|
||||
desc: ""
|
||||
- value: killq_resync
|
||||
desc: ""
|
||||
- value: muxer_event_fails
|
||||
desc: ""
|
||||
- value: rx_bytes_count
|
||||
desc: ""
|
||||
- value: rx_packets_count
|
||||
desc: ""
|
||||
- value: rx_queue_event_count
|
||||
desc: ""
|
||||
- value: rx_queue_event_fails
|
||||
desc: ""
|
||||
- value: rx_read_fails
|
||||
desc: ""
|
||||
- value: tx_bytes_count
|
||||
desc: ""
|
||||
- value: tx_flush_fails
|
||||
desc: ""
|
||||
- value: tx_packets_count
|
||||
desc: ""
|
||||
- value: tx_queue_event_count
|
||||
desc: ""
|
||||
- value: tx_queue_event_fails
|
||||
desc: ""
|
||||
- value: tx_write_fails
|
||||
desc: ""
|
||||
- name: sandbox_id
|
||||
desc: ""
|
||||
manually_edit: false
|
||||
fixed: false
|
||||
values: []
|
||||
since: 2.0.0
|
||||
- prefix: kata_guest
|
||||
title: Kata guest OS metrics
|
||||
desc: Guest OS's metrics in hypervisor.
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
* [Metrics list](#metrics-list)
|
||||
* [Metric types](#metric-types)
|
||||
* [Kata agent metrics](#kata-agent-metrics)
|
||||
* [Firecracker metrics](#firecracker-metrics)
|
||||
* [Kata guest OS metrics](#kata-guest-os-metrics)
|
||||
* [Hypervisor metrics](#hypervisor-metrics)
|
||||
* [Kata monitor metrics](#kata-monitor-metrics)
|
||||
@@ -152,6 +153,7 @@ Metrics is categorized by component where metrics are collected from and for.
|
||||
|
||||
* [Metric types](#metric-types)
|
||||
* [Kata agent metrics](#kata-agent-metrics)
|
||||
* [Firecracker metrics](#firecracker-metrics)
|
||||
* [Kata guest OS metrics](#kata-guest-os-metrics)
|
||||
* [Hypervisor metrics](#hypervisor-metrics)
|
||||
* [Kata monitor metrics](#kata-monitor-metrics)
|
||||
@@ -198,6 +200,30 @@ Agent's metrics contains metrics about agent process.
|
||||
| `kata_agent_total_time`: <br> Agent process total time | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_agent_total_vm`: <br> Agent process total `vm` size | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
|
||||
### Firecracker metrics
|
||||
|
||||
Metrics for Firecracker vmm.
|
||||
|
||||
| Metric name | Type | Units | Labels | Introduced in Kata version |
|
||||
|---|---|---|---|---|
|
||||
| `kata_firecracker_api_server`: <br> Metrics related to the internal API server. | `GAUGE` | | <ul><li>`item`<ul><li>`process_startup_time_cpu_us`</li><li>`process_startup_time_us`</li><li>`sync_response_fails`</li><li>`sync_vmm_send_timeout_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_block`: <br> Block Device associated metrics. | `GAUGE` | | <ul><li>`item`<ul><li>`activate_fails`</li><li>`cfg_fails`</li><li>`event_fails`</li><li>`execute_fails`</li><li>`flush_count`</li><li>`invalid_reqs_count`</li><li>`no_avail_buffer`</li><li>`queue_event_count`</li><li>`rate_limiter_event_count`</li><li>`rate_limiter_throttled_events`</li><li>`read_bytes`</li><li>`read_count`</li><li>`update_count`</li><li>`update_fails`</li><li>`write_bytes`</li><li>`write_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_get_api_requests`: <br> Metrics specific to GET API Requests for counting user triggered actions and/or failures. | `GAUGE` | | <ul><li>`item`<ul><li>`instance_info_count`</li><li>`instance_info_fails`</li><li>`machine_cfg_count`</li><li>`machine_cfg_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_i8042`: <br> Metrics specific to the i8042 device. | `GAUGE` | | <ul><li>`item`<ul><li>`error_count`</li><li>`missed_read_count`</li><li>`missed_write_count`</li><li>`read_count`</li><li>`reset_count`</li><li>`write_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_latencies_us`: <br> Performance metrics related for the moment only to snapshots. | `GAUGE` | | <ul><li>`item`<ul><li>`diff_create_snapshot`</li><li>`full_create_snapshot`</li><li>`load_snapshot`</li><li>`pause_vm`</li><li>`resume_vm`</li><li>`vmm_diff_create_snapshot`</li><li>`vmm_full_create_snapshot`</li><li>`vmm_load_snapshot`</li><li>`vmm_pause_vm`</li><li>`vmm_resume_vm`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_logger`: <br> Metrics for the logging subsystem. | `GAUGE` | | <ul><li>`item`<ul><li>`log_fails`</li><li>`metrics_fails`</li><li>`missed_log_count`</li><li>`missed_metrics_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_mmds`: <br> Metrics for the MMDS functionality. | `GAUGE` | | <ul><li>`item`<ul><li>`connections_created`</li><li>`connections_destroyed`</li><li>`rx_accepted`</li><li>`rx_accepted_err`</li><li>`rx_accepted_unusual`</li><li>`rx_bad_eth`</li><li>`rx_count`</li><li>`tx_bytes`</li><li>`tx_count`</li><li>`tx_errors`</li><li>`tx_frames`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_net`: <br> Network-related metrics. | `GAUGE` | | <ul><li>`item`<ul><li>`activate_fails`</li><li>`cfg_fails`</li><li>`event_fails`</li><li>`mac_address_updates`</li><li>`no_rx_avail_buffer`</li><li>`no_tx_avail_buffer`</li><li>`rx_bytes_count`</li><li>`rx_count`</li><li>`rx_event_rate_limiter_count`</li><li>`rx_fails`</li><li>`rx_packets_count`</li><li>`rx_partial_writes`</li><li>`rx_queue_event_count`</li><li>`rx_rate_limiter_throttled`</li><li>`rx_tap_event_count`</li><li>`tap_read_fails`</li><li>`tap_write_fails`</li><li>`tx_bytes_count`</li><li>`tx_count`</li><li>`tx_fails`</li><li>`tx_malformed_frames`</li><li>`tx_packets_count`</li><li>`tx_partial_reads`</li><li>`tx_queue_event_count`</li><li>`tx_rate_limiter_event_count`</li><li>`tx_rate_limiter_throttled`</li><li>`tx_spoofed_mac_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_patch_api_requests`: <br> Metrics specific to PATCH API Requests for counting user triggered actions and/or failures. | `GAUGE` | | <ul><li>`item`<ul><li>`drive_count`</li><li>`drive_fails`</li><li>`machine_cfg_count`</li><li>`machine_cfg_fails`</li><li>`network_count`</li><li>`network_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_put_api_requests`: <br> Metrics specific to PUT API Requests for counting user triggered actions and/or failures. | `GAUGE` | | <ul><li>`item`<ul><li>`actions_count`</li><li>`actions_fails`</li><li>`boot_source_count`</li><li>`boot_source_fails`</li><li>`drive_count`</li><li>`drive_fails`</li><li>`logger_count`</li><li>`logger_fails`</li><li>`machine_cfg_count`</li><li>`machine_cfg_fails`</li><li>`metrics_count`</li><li>`metrics_fails`</li><li>`network_count`</li><li>`network_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_rtc`: <br> Metrics specific to the RTC device. | `GAUGE` | | <ul><li>`item`<ul><li>`error_count`</li><li>`missed_read_count`</li><li>`missed_write_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_seccomp`: <br> Metrics for the seccomp filtering. | `GAUGE` | | <ul><li>`item`<ul><li>`num_faults`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_signals`: <br> Metrics related to signals. | `GAUGE` | | <ul><li>`item`<ul><li>`sigbus`</li><li>`sigsegv`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_uart`: <br> Metrics specific to the UART device. | `GAUGE` | | <ul><li>`item`<ul><li>`error_count`</li><li>`flush_count`</li><li>`missed_read_count`</li><li>`missed_write_count`</li><li>`read_count`</li><li>`write_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_vcpu`: <br> Metrics specific to VCPUs' mode of functioning. | `GAUGE` | | <ul><li>`item`<ul><li>`exit_io_in`</li><li>`exit_io_out`</li><li>`exit_mmio_read`</li><li>`exit_mmio_write`</li><li>`failures`</li><li>`filter_cpuid`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_vmm`: <br> Metrics specific to the machine manager as a whole. | `GAUGE` | | <ul><li>`item`<ul><li>`device_events`</li><li>`panic_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
| `kata_firecracker_vsock`: <br> Vsock-related metrics. | `GAUGE` | | <ul><li>`item`<ul><li>`activate_fails`</li><li>`cfg_fails`</li><li>`conn_event_fails`</li><li>`conns_added`</li><li>`conns_killed`</li><li>`conns_removed`</li><li>`ev_queue_event_fails`</li><li>`killq_resync`</li><li>`muxer_event_fails`</li><li>`rx_bytes_count`</li><li>`rx_packets_count`</li><li>`rx_queue_event_count`</li><li>`rx_queue_event_fails`</li><li>`rx_read_fails`</li><li>`tx_bytes_count`</li><li>`tx_flush_fails`</li><li>`tx_packets_count`</li><li>`tx_queue_event_count`</li><li>`tx_queue_event_fails`</li><li>`tx_write_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
|
||||
|
||||
### Kata guest OS metrics
|
||||
|
||||
Guest OS's metrics in hypervisor.
|
||||
|
||||
@@ -10,43 +10,56 @@ To fulfill the [Kata design requirements](kata-design-requirements.md), and base
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`CreateSandbox(SandboxConfig)`| Create and start a sandbox, and return the sandbox structure.|
|
||||
|`FetchSandbox(ID)`| Connect to an existing sandbox and return the sandbox structure.|
|
||||
|`ListSandboxes()`| List all existing sandboxes with status. |
|
||||
|`CreateSandbox(SandboxConfig, Factory)`| Create a sandbox and its containers, based on `SandboxConfig` and `Factory`. Return the `Sandbox` structure, but do not start them.|
|
||||
|
||||
### Sandbox Operation API
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`sandbox.Pause()`| Pause the sandbox.|
|
||||
|`sandbox.Resume()`| Resume the paused sandbox.|
|
||||
|`sandbox.Release()`| Release a sandbox data structure, close connections to the agent, and quit any goroutines associated with the sandbox. Mostly used for daemon restart.|
|
||||
|`sandbox.Delete()`| Destroy the sandbox and remove all persistent metadata.|
|
||||
|`sandbox.Status()`| Get the status of the sandbox and containers.|
|
||||
|`sandbox.Delete()`| Shut down the VM in which the sandbox runs, then destroy the sandbox and remove all persistent metadata.|
|
||||
|`sandbox.Monitor()`| Return a context handler for caller to monitor sandbox callbacks such as error termination.|
|
||||
|`sandbox.CreateContainer()`| Create new container in the sandbox.|
|
||||
|`sandbox.DeleteContainer()`| Delete a container from the sandbox.|
|
||||
|`sandbox.StartContainer()`| Start a container in the sandbox.|
|
||||
|`sandbox.StatusContainer()`| Get the status of a container in the sandbox.|
|
||||
|`sandbox.EnterContainer()`| Run a new process in a container.|
|
||||
|`sandbox.WaitProcess()`| Wait on a process to terminate.|
|
||||
|`sandbox.Release()`| Release a sandbox data structure, close connections to the agent, and quit any goroutines associated with the Sandbox. Mostly used for daemon restart.|
|
||||
|`sandbox.Start()`| Start a sandbox and the containers making the sandbox.|
|
||||
|`sandbox.Stats()`| Get the stats of a running sandbox, return a `SandboxStats` structure.|
|
||||
|`sandbox.Status()`| Get the status of the sandbox and containers, return a `SandboxStatus` structure.|
|
||||
|`sandbox.Stop(force)`| Stop a sandbox and destroy the containers in the sandbox. When force is true, ignore guest related stop failures.|
|
||||
|`sandbox.CreateContainer(contConfig)`| Create new container in the sandbox with the `ContainerConfig` param. It will add new container config to `sandbox.config.Containers`.|
|
||||
|`sandbox.DeleteContainer(containerID)`| Delete a container from the sandbox by containerID, return a `Container` structure.|
|
||||
|`sandbox.EnterContainer(containerID, cmd)`| Run a new process in a container, executing customer's `types.Cmd` command.|
|
||||
|`sandbox.KillContainer(containerID, signal, all)`| Signal a container in the sandbox by the containerID.|
|
||||
|`sandbox.PauseContainer(containerID)`| Pause a running container in the sandbox by the containerID.|
|
||||
|`sandbox.ProcessListContainer(containerID, options)`| List every process running inside a specific container in the sandbox, return a `ProcessList` structure.|
|
||||
|`sandbox.ResumeContainer(containerID)`| Resume a paused container in the sandbox by the containerID.|
|
||||
|`sandbox.StartContainer(containerID)`| Start a container in the sandbox by the containerID.|
|
||||
|`sandbox.StatsContainer(containerID)`| Get the stats of a running container, return a `ContainerStats` structure.|
|
||||
|`sandbox.StatusContainer(containerID)`| Get the status of a container in the sandbox, return a `ContainerStatus` structure.|
|
||||
|`sandbox.StopContainer(containerID, force)`| Stop a container in the sandbox by the containerID.|
|
||||
|`sandbox.UpdateContainer(containerID, resources)`| Update a running container in the sandbox.|
|
||||
|`sandbox.WaitProcess(containerID, processID)`| Wait on a process to terminate.|
|
||||
### Sandbox Hotplug API
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`sandbox.AddDevice()`| Add new storage device to the sandbox.|
|
||||
|`sandbox.AddInterface()`| Add new NIC to the sandbox.|
|
||||
|`sandbox.RemoveInterface()`| Remove a NIC from the sandbox.|
|
||||
|`sandbox.ListInterfaces()`| List all NICs and their configurations in the sandbox.|
|
||||
|`sandbox.UpdateRoutes()`| Update the sandbox route table (e.g. for portmapping support).|
|
||||
|`sandbox.ListRoutes()`| List the sandbox route table.|
|
||||
|`sandbox.AddDevice(info)`| Add new storage device `DeviceInfo` to the sandbox, return a `Device` structure.|
|
||||
|`sandbox.AddInterface(inf)`| Add new NIC to the sandbox.|
|
||||
|`sandbox.RemoveInterface(inf)`| Remove a NIC from the sandbox.|
|
||||
|`sandbox.ListInterfaces()`| List all NICs and their configurations in the sandbox, return a `pbTypes.Interface` list.|
|
||||
|`sandbox.UpdateRoutes(routes)`| Update the sandbox route table (e.g. for portmapping support), return a `pbTypes.Route` list.|
|
||||
|`sandbox.ListRoutes()`| List the sandbox route table, return a `pbTypes.Route` list.|
|
||||
|
||||
### Sandbox Relay API
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`sandbox.WinsizeProcess(containerID, processID, Height, Width)`|Relay TTY resize request to a process.|
|
||||
|`sandbox.WinsizeProcess(containerID, processID, Height, Width)`| Relay TTY resize request to a process.|
|
||||
|`sandbox.SignalProcess(containerID, processID, signalID, signalALL)`| Relay a signal to a process or all processes in a container.|
|
||||
|`sandbox.IOStream(containerID, processID)`| Relay a process stdio. Return stdin/stdout/stderr pipes to the process stdin/stdout/stderr streams.|
|
||||
|
||||
### Sandbox Monitor API
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`sandbox.GetOOMEvent()`| Monitor the OOM events that occur in the sandbox.|
|
||||
|`sandbox.UpdateRuntimeMetrics()`| Update the shim/hypervisor's metrics of the running sandbox.|
|
||||
|`sandbox.GetAgentMetrics()`| Get metrics of the agent and the guest in the running sandbox.|
|
||||
|
||||
## Plugin framework for external proprietary Kata runtime extensions
|
||||
### Hypervisor plugin
|
||||
|
||||
|
||||
@@ -193,13 +193,16 @@ From Containerd v1.2.4 and Kata v1.6.0, there is a new runtime option supported,
|
||||
```toml
|
||||
[plugins.cri.containerd.runtimes.kata]
|
||||
runtime_type = "io.containerd.kata.v2"
|
||||
privileged_without_host_devices = true
|
||||
[plugins.cri.containerd.runtimes.kata.options]
|
||||
ConfigPath = "/etc/kata-containers/config.toml"
|
||||
```
|
||||
|
||||
`privileged_without_host_devices` tells containerd that a privileged Kata container should not have direct access to all host devices. If unset, containerd will pass all host devices to Kata container, which may cause security issues.
|
||||
|
||||
This `ConfigPath` option is optional. If you do not specify it, shimv2 first tries to get the configuration file from the environment variable `KATA_CONF_FILE`. If neither are set, shimv2 will use the default Kata configuration file paths (`/etc/kata-containers/configuration.toml` and `/usr/share/defaults/kata-containers/configuration.toml`).
|
||||
|
||||
If you use Containerd older than v1.2.4 or a version of Kata older than v1.6.0 and also want to specify a configuration file, you can use the following workaround, since the shimv2 accepts an environment variable, `KATA_CONF_FILE` for the configuration file path. Then, you can create a
|
||||
If you use Containerd older than v1.2.4 or a version of Kata older than v1.6.0 and also want to specify a configuration file, you can use the following workaround, since the shimv2 accepts an environment variable, `KATA_CONF_FILE` for the configuration file path. Then, you can create a
|
||||
shell script with the following:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -34,7 +34,7 @@ Also you should ensure that `kubectl` working correctly.
|
||||
Start Prometheus by utilizing our sample manifest:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/documentation/master/how-to/data/prometheus.yml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/prometheus.yml
|
||||
```
|
||||
|
||||
This will create a new namespace, `prometheus`, and create the following resources:
|
||||
@@ -60,7 +60,7 @@ go_gc_duration_seconds{quantile="0.75"} 0.000229911
|
||||
`kata-monitor` can be started on the cluster as follows:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/documentation/master/how-to/data/kata-monitor-daemontset.yml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/kata-monitor-daemonset.yml
|
||||
```
|
||||
|
||||
This will create a new namespace `kata-system` and a `daemonset` in it.
|
||||
@@ -73,7 +73,7 @@ Once the `daemonset` is running, Prometheus should discover `kata-monitor` as a
|
||||
Run this command to run Grafana in Kubernetes:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/documentation/master/how-to/data/grafana.yml
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/grafana.yml
|
||||
```
|
||||
|
||||
This will create deployment and service for Grafana under namespace `prometheus`.
|
||||
@@ -99,7 +99,7 @@ You can import this dashboard using Grafana UI, or using `curl` command in conso
|
||||
$ curl -XPOST -i localhost:3000/api/dashboards/import \
|
||||
-u admin:admin \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"dashboard\":$(curl -sL https://raw.githubusercontent.com/kata-containers/documentation/master/how-to/data/dashboard.json )}"
|
||||
-d "{\"dashboard\":$(curl -sL https://raw.githubusercontent.com/kata-containers/kata-containers/2.0-dev/docs/how-to/data/dashboard.json )}"
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
@@ -29,7 +29,7 @@ to launch Kata Containers. For the previous version of Kata Containers, the Pods
|
||||
|
||||
> **Note:** For information about the supported versions of these components,
|
||||
> see the Kata Containers
|
||||
> [`versions.yaml`](https://github.com/kata-containers/runtime/blob/master/versions.yaml)
|
||||
> [`versions.yaml`](../../versions.yaml)
|
||||
> file.
|
||||
|
||||
## Install and configure containerd
|
||||
|
||||
@@ -13,7 +13,7 @@ As of the 1.7 release of Kata Containers, [9pfs](https://www.kernel.org/doc/Docu
|
||||
|
||||
To help address these limitations, [virtio-fs](https://virtio-fs.gitlab.io/) has been developed. virtio-fs is a shared file system that lets virtual machines access a directory tree on the host. In Kata Containers, virtio-fs can be used to share container volumes, secrets, config-maps, configuration files (hostname, hosts, `resolv.conf`) and the container rootfs on the host with the guest. virtio-fs provides significant performance and POSIX compliance improvements compared to 9pfs.
|
||||
|
||||
Enabling of virtio-fs requires changes in the guest kernel as well as the VMM. For Kata Containers, experimental virtio-fs support is enabled through the [NEMU VMM](https://github.com/intel/nemu).
|
||||
Enabling of virtio-fs requires changes in the guest kernel as well as the VMM. For Kata Containers, experimental virtio-fs support is enabled through `qemu` and `cloud-hypervisor` VMMs.
|
||||
|
||||
**Note: virtio-fs support is experimental in the 1.7 release of Kata Containers. Work is underway to improve stability, performance and upstream integration. This is available for early preview - use at your own risk**
|
||||
|
||||
@@ -21,31 +21,41 @@ This document describes how to get Kata Containers to work with virtio-fs.
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
* Before Kata 1.8 this feature required the host to have hugepages support enabled. Enable this with the `sysctl vm.nr_hugepages=1024` command on the host.
|
||||
Before Kata 1.8 this feature required the host to have hugepages support enabled. Enable this with the `sysctl vm.nr_hugepages=1024` command on the host. In later versions of Kata, virtio-fs leverages `/dev/shm` as the shared memory backend. The default size of `/dev/shm` on a system is typically half of the total system memory. This can pose a physical limit to the maximum number of pods that can be launched with virtio-fs. This can be overcome by increasing the size of `/dev/shm` as shown below:
|
||||
|
||||
```bash
|
||||
$ mount -o remount,size=${desired_shm_size} /dev/shm
|
||||
```
|
||||
|
||||
## Install Kata Containers with virtio-fs support
|
||||
|
||||
The Kata Containers NEMU configuration, the NEMU VMM and the `virtiofs` daemon are available in the [Kata Container release](https://github.com/kata-containers/kata-containers/releases) artifacts starting with the 1.7 release. While the feature is experimental, distribution packages are not supported, but installation is available through [`kata-deploy`](../../tools/packaging/kata-deploy).
|
||||
The Kata Containers `qemu` configuration with virtio-fs and the `virtiofs` daemon are available in the [Kata Container release](https://github.com/kata-containers/runtime/releases) artifacts starting with the 1.9 release. Installation is available through [distribution packages](https://github.com/kata-containers/documentation/blob/master/install/README.md#supported-distributions) as well through [`kata-deploy`](https://github.com/kata-containers/packaging/tree/master/kata-deploy).
|
||||
|
||||
Install the latest release of Kata as follows:
|
||||
**Note: Support for virtio-fs was first introduced in `NEMU` hypervisor in Kata 1.8 release. This hypervisor has been deprecated.**
|
||||
|
||||
Install the latest release of Kata with `kata-deploy` as follows:
|
||||
```
|
||||
docker run --runtime=runc -v /opt/kata:/opt/kata -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd -v /etc/docker:/etc/docker -it katadocker/kata-deploy kata-deploy-docker install
|
||||
```
|
||||
|
||||
This will place the Kata release artifacts in `/opt/kata`, and update Docker's configuration to include a runtime target, `kata-nemu`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](../../tools/packaging/kata-deploy/README.md#kubernetes-quick-start).
|
||||
|
||||
This will place the Kata release artifacts in `/opt/kata`, and update Docker's configuration to include a runtime target, `kata-qemu-virtiofs`. Learn more about `kata-deploy` and how to use `kata-deploy` in Kubernetes [here](https://github.com/kata-containers/packaging/tree/master/kata-deploy#kubernetes-quick-start).
|
||||
|
||||
## Run a Kata Container utilizing virtio-fs
|
||||
|
||||
Once installed, start a new container, utilizing NEMU + `virtiofs`:
|
||||
Once installed, start a new container, utilizing `qemu` + `virtiofs`:
|
||||
```bash
|
||||
$ docker run --runtime=kata-nemu -it busybox
|
||||
$ docker run --runtime=kata-qemu-virtiofs -it busybox
|
||||
```
|
||||
|
||||
Verify the new container is running with the NEMU hypervisor as well as using `virtiofsd`. To do this look for the hypervisor path and the `virtiofs` daemon process on the host:
|
||||
Verify the new container is running with the `qemu` hypervisor as well as using `virtiofsd`. To do this look for the hypervisor path and the `virtiofs` daemon process on the host:
|
||||
```bash
|
||||
$ ps -aux | grep virtiofs
|
||||
root ... /home/foo/build-x86_64_virt/x86_64_virt-softmmu/qemu-system-x86_64_virt
|
||||
... -machine virt,accel=kvm,kernel_irqchip,nvdimm ...
|
||||
root ... /home/foo/build-x86_64_virt/virtiofsd-x86_64 ...
|
||||
```
|
||||
|
||||
You can also try out virtio-fs using `cloud-hypervisor` VMM:
|
||||
```bash
|
||||
$ docker run --runtime=kata-clh -it busybox
|
||||
```
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
# Kata Containers installation user guides
|
||||
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Packaged installation methods](#packaged-installation-methods)
|
||||
* [Supported Distributions](#supported-distributions)
|
||||
* [Official packages](#official-packages)
|
||||
* [Automatic Installation](#automatic-installation)
|
||||
* [Snap Installation](#snap-installation)
|
||||
* [Scripted Installation](#scripted-installation)
|
||||
* [Manual Installation](#manual-installation)
|
||||
* [Build from source installation](#build-from-source-installation)
|
||||
* [Installing on a Cloud Service Platform](#installing-on-a-cloud-service-platform)
|
||||
* [Further information](#further-information)
|
||||
- [Kata Containers installation user guides](#kata-containers-installation-user-guides)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Packaged installation methods](#packaged-installation-methods)
|
||||
- [Official packages](#official-packages)
|
||||
- [Automatic Installation](#automatic-installation)
|
||||
- [Snap Installation](#snap-installation)
|
||||
- [Scripted Installation](#scripted-installation)
|
||||
- [Manual Installation](#manual-installation)
|
||||
- [Build from source installation](#build-from-source-installation)
|
||||
- [Installing on a Cloud Service Platform](#installing-on-a-cloud-service-platform)
|
||||
- [Further information](#further-information)
|
||||
|
||||
The following is an overview of the different installation methods available. All of these methods equally result
|
||||
in a system configured to run Kata Containers.
|
||||
@@ -29,33 +29,22 @@ to see if your system is capable of running Kata Containers.
|
||||
|
||||
| Installation method | Description | Distributions supported |
|
||||
|------------------------------------------------------|-----------------------------------------------------------------------------------------|--------------------------------------|
|
||||
| [Automatic](#automatic-installation) |Run a single command to install a full system |[see table](#supported-distributions) |
|
||||
| [Automatic](#automatic-installation) |Run a single command to install a full system | |
|
||||
| [Using snap](#snap-installation) |Easy to install and automatic updates |any distro that supports snapd |
|
||||
| [Using official distro packages](#official-packages) |Kata packages provided by Linux distributions official repositories |[see table](#supported-distributions) |
|
||||
| [Scripted](#scripted-installation) |Generates an installation script which will result in a working system when executed |[see table](#supported-distributions) |
|
||||
| [Manual](#manual-installation) |Allows the user to read a brief document and execute the specified commands step-by-step |[see table](#supported-distributions) |
|
||||
| [Using official distro packages](#official-packages) |Kata packages provided by Linux distributions official repositories | |
|
||||
| [Scripted](#scripted-installation) |Generates an installation script which will result in a working system when executed | |
|
||||
| [Manual](#manual-installation) |Allows the user to read a brief document and execute the specified commands step-by-step | |
|
||||
|
||||
### Supported Distributions
|
||||
|
||||
Kata is packaged by the Kata community for:
|
||||
|
||||
|Distribution (link to installation guide) | Versions |
|
||||
|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
|
||||
|[CentOS](centos-installation-guide.md) | 7 |
|
||||
|[Debian](debian-installation-guide.md) | 9, 10 |
|
||||
|[Fedora](fedora-installation-guide.md) | 28, 29, 30 |
|
||||
|[openSUSE](opensuse-installation-guide.md) | [Leap](opensuse-leap-installation-guide.md) (15, 15.1)<br>[Tumbleweed](opensuse-tumbleweed-installation-guide.md) |
|
||||
|[Red Hat Enterprise Linux (RHEL)](rhel-installation-guide.md) | 7 |
|
||||
|[SUSE Linux Enterprise Server (SLES)](sles-installation-guide.md)| SLES 12 SP3 |
|
||||
|[Ubuntu](ubuntu-installation-guide.md) | 16.04, 18.04 |
|
||||
|
||||
#### Official packages
|
||||
### Official packages
|
||||
|
||||
Kata packages are provided by official distribution repositories for:
|
||||
|
||||
|Distribution (link to packages) | Versions |
|
||||
|-----------------------------------------------------------------|------------|
|
||||
|[openSUSE](https://software.opensuse.org/package/katacontainers) | Tumbleweed |
|
||||
| Distribution (link to packages) | Versions | Contacts |
|
||||
| -------------------------------------------------------- | ------------------------------------------------------------------------------ | -------- |
|
||||
| [CentOS](centos-installation-guide.md) | 8 | |
|
||||
| [Fedora](fedora-installation-guide.md) | 32, Rawhide | |
|
||||
| [SUSE Linux Enterprise (SLE)](sle-installation-guide.md) | SLE 15 SP1, 15 SP2 | |
|
||||
| [openSUSE](opensuse-installation-guide.md) | [Leap 15.1](opensuse-leap-15.1-installation-guide.md)<br>Leap 15.2, Tumbleweed | |
|
||||
|
||||
|
||||
### Automatic Installation
|
||||
@@ -72,11 +61,11 @@ Kata packages are provided by official distribution repositories for:
|
||||
[Use `kata-doc-to-script`](installing-with-kata-doc-to-script.md) to generate installation scripts that can be reviewed before they are executed.
|
||||
|
||||
### Manual Installation
|
||||
Manual installation instructions are available for [these distributions](#supported-distributions) and document how to:
|
||||
Manual installation instructions are available for [these distributions](#packaged-installation-methods) and document how to:
|
||||
1. Add the Kata Containers repository to your distro package manager, and import the packages signing key.
|
||||
2. Install the Kata Containers packages.
|
||||
3. Install a supported container manager.
|
||||
4. Configure the container manager to use `kata-runtime` as the default OCI runtime. Or, for Kata Containers 1.5.0 or above, configure the
|
||||
4. Configure the container manager to use Kata Containers as the default OCI runtime. Or, for Kata Containers 1.5.0 or above, configure the
|
||||
`io.containerd.kata.v2` to be the runtime shim (see [containerd runtime v2 (shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2)
|
||||
and [How to use Kata Containers and CRI (containerd plugin) with Kubernetes](../how-to/how-to-use-k8s-with-cri-containerd-and-kata.md)).
|
||||
|
||||
|
||||
@@ -15,4 +15,4 @@ Create a new virtual machine with:
|
||||
|
||||
## Set up with distribution specific quick start
|
||||
|
||||
Follow distribution specific [install guides](../install/README.md#supported-distributions).
|
||||
Follow distribution specific [install guides](../install/README.md#packaged-installation-methods).
|
||||
|
||||
@@ -4,14 +4,25 @@
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ sudo yum -y install yum-utils
|
||||
$ ARCH=$(arch)
|
||||
$ BRANCH="${BRANCH:-master}"
|
||||
$ sudo -E yum-config-manager --add-repo "http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/CentOS_${VERSION_ID}/home:katacontainers:releases:${ARCH}:${BRANCH}.repo"
|
||||
$ sudo -E yum -y install kata-runtime kata-proxy kata-shim
|
||||
$ cat <<EOF | sudo -E tee /etc/yum.repos.d/advanced-virt.repo
|
||||
[advanced-virt]
|
||||
name=Advanced Virtualization
|
||||
baseurl=http://mirror.centos.org/\$contentdir/\$releasever/virt/\$basearch/advanced-virtualization
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
skip_if_unavailable=1
|
||||
EOF
|
||||
$ cat <<EOF | sudo -E tee /etc/yum.repos.d/kata-containers.repo
|
||||
[kata-containers]
|
||||
name=Kata Containers
|
||||
baseurl=http://mirror.centos.org/\$contentdir/\$releasever/virt/\$basearch/kata-containers
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
skip_if_unavailable=1
|
||||
EOF
|
||||
$ sudo -E dnf module disable -y virt:rhel
|
||||
$ sudo -E dnf install -y kata-runtime
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
|
||||
- [Docker](docker/centos-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
# Install Kata Containers on Debian
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ export DEBIAN_FRONTEND=noninteractive
|
||||
$ ARCH=$(arch)
|
||||
$ BRANCH="${BRANCH:-master}"
|
||||
$ source /etc/os-release
|
||||
$ [ "$ID" = debian ] && [ -z "$VERSION_ID" ] && echo >&2 "ERROR: Debian unstable not supported.
|
||||
You can try stable packages here:
|
||||
http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}" && exit 1
|
||||
$ sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/Debian_${VERSION_ID}/ /' > /etc/apt/sources.list.d/kata-containers.list"
|
||||
$ curl -sL http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/Debian_${VERSION_ID}/Release.key | sudo apt-key add -
|
||||
$ sudo -E apt-get update
|
||||
$ sudo -E apt-get -y install kata-runtime kata-proxy kata-shim
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
|
||||
- [Docker](docker/debian-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -1,75 +0,0 @@
|
||||
# Install Docker for Kata Containers on CentOS
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../centos-installation-guide.md).
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
$ sudo yum -y install docker-ce
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://docs.docker.com/engine/installation/linux/centos).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
1. systemd (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ sudo mkdir -p /etc/systemd/system/docker.service.d/
|
||||
$ cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/kata-containers.conf
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -1,103 +0,0 @@
|
||||
# Install Docker for Kata Containers on Debian
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../debian-installation-guide.md).
|
||||
> - This guide allows for installation with `systemd` or `sysVinit` init systems.
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ sudo apt-get -y install apt-transport-https ca-certificates curl gnupg2 software-properties-common
|
||||
$ curl -fsSL https://download.docker.com/linux/$(. /etc/os-release; echo "$ID")/gpg | sudo apt-key add -
|
||||
$ sudo add-apt-repository "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") $(lsb_release -cs) stable"
|
||||
$ sudo apt-get update
|
||||
$ sudo -E apt-get -y install docker-ce
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://docs.docker.com/engine/installation/linux/debian).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
a. `sysVinit`
|
||||
|
||||
- with `sysVinit`, docker config is stored in `/etc/default/docker`, edit the options similar to the following:
|
||||
|
||||
```sh
|
||||
$ sudo sh -c "echo '# specify docker runtime for kata-containers
|
||||
DOCKER_OPTS=\"-D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime\"' >> /etc/default/docker"
|
||||
```
|
||||
|
||||
b. systemd (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ sudo mkdir -p /etc/systemd/system/docker.service.d/
|
||||
$ cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/kata-containers.conf
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime
|
||||
EOF
|
||||
```
|
||||
|
||||
c. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with one of the following (depending on init choice):
|
||||
|
||||
a. `sysVinit`
|
||||
|
||||
```sh
|
||||
$ sudo /etc/init.d/docker stop
|
||||
$ sudo /etc/init.d/docker start
|
||||
```
|
||||
|
||||
To watch for errors:
|
||||
|
||||
```sh
|
||||
$ tail -f /var/log/docker.log
|
||||
```
|
||||
|
||||
b. systemd
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -1,77 +0,0 @@
|
||||
# Install Docker for Kata Containers on Fedora
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../fedora-installation-guide.md).
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
|
||||
$ sudo dnf makecache
|
||||
$ sudo dnf -y install docker-ce
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://docs.docker.com/engine/installation/linux/fedora).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
1. systemd (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ sudo mkdir -p /etc/systemd/system/docker.service.d/
|
||||
$ cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/kata-containers.conf
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -1,75 +0,0 @@
|
||||
# Install Docker for Kata Containers on openSUSE
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../opensuse-installation-guide.md).
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ sudo zypper -n install docker
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://software.opensuse.org/package/docker).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
1. Specify the runtime options in `/etc/sysconfig/docker` (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ DOCKER_SYSCONFIG=/etc/sysconfig/docker
|
||||
# Add kata-runtime to the list of available runtimes, if not already listed
|
||||
$ grep -qE "^ *DOCKER_OPTS=.+--add-runtime[= ] *kata-runtime" $DOCKER_SYSCONFIG || sudo -E sed -i -E "s|^( *DOCKER_OPTS=.+)\" *$|\1 --add-runtime kata-runtime=/usr/bin/kata-runtime\"|g" $DOCKER_SYSCONFIG
|
||||
# If a current default runtime is specified, overwrite it with kata-runtime
|
||||
$ sudo -E sed -i -E "s|^( *DOCKER_OPTS=.+--default-runtime[= ] *)[^ \"]+(.*\"$)|\1kata-runtime\2|g" $DOCKER_SYSCONFIG
|
||||
# Add kata-runtime as default runtime, if no default runtime is specified
|
||||
$ grep -qE "^ *DOCKER_OPTS=.+--default-runtime" $DOCKER_SYSCONFIG || sudo -E sed -i -E "s|^( *DOCKER_OPTS=.+)(\"$)|\1 --default-runtime=kata-runtime\2|g" $DOCKER_SYSCONFIG
|
||||
```
|
||||
|
||||
2. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -1,14 +0,0 @@
|
||||
# Install Docker for Kata Containers on openSUSE Leap
|
||||
|
||||
Follow the instructions in the generic [openSUSE Docker install guide](opensuse-docker-install.md).
|
||||
<!--
|
||||
You can ignore the content of this comment.
|
||||
(test code run by test-install-docs.sh to validate code blocks this document)
|
||||
|
||||
```bash
|
||||
$ echo "NOTE: this document is just a link to the generic openSUSE install guide located at:
|
||||
https://raw.githubusercontent.com/kata-containers/documentation/master/install/docker/opensuse-docker-install.md
|
||||
|
||||
Please download this file and run kata-doc-to-script.sh again."
|
||||
```
|
||||
-->
|
||||
@@ -1,14 +0,0 @@
|
||||
# Install Docker for Kata Containers on openSUSE Tumbleweed
|
||||
|
||||
Follow the instructions in the generic [openSUSE Docker install guide](opensuse-docker-install.md).
|
||||
<!--
|
||||
You can ignore the content of this comment.
|
||||
(test code run by test-install-docs.sh to validate code blocks this document)
|
||||
|
||||
```bash
|
||||
$ echo "NOTE: this document is just a link to the generic openSUSE install guide located at:
|
||||
https://raw.githubusercontent.com/kata-containers/documentation/master/install/docker/opensuse-docker-install.md
|
||||
|
||||
Please download this file and run kata-doc-to-script.sh again."
|
||||
```
|
||||
-->
|
||||
@@ -1,76 +0,0 @@
|
||||
# Install Docker for Kata Containers on RHEL
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../rhel-installation-guide.md).
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ export rhel_devtoolset_version="7"
|
||||
$ sudo subscription-manager repos --enable=rhel-${rhel_devtoolset_version}-server-extras-rpms
|
||||
$ sudo yum -y install docker && systemctl enable --now docker
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/#getting_docker_in_rhel_7).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
1. systemd (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ sudo mkdir -p /etc/systemd/system/docker.service.d/
|
||||
$ cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/kata-containers.conf
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -1,74 +0,0 @@
|
||||
# Install Docker for Kata Containers on SLES
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../sles-installation-guide.md).
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ sudo zypper -n install docker
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://www.suse.com/documentation/sles-12/singlehtml/book_sles_docker/book_sles_docker.html).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
1. systemd (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ sudo mkdir -p /etc/systemd/system/docker.service.d/
|
||||
$ cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/kata-containers.conf
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -1,79 +0,0 @@
|
||||
# Install Docker for Kata Containers on Ubuntu
|
||||
|
||||
> **Note:**
|
||||
>
|
||||
> - This guide assumes you have
|
||||
> [already installed the Kata Containers packages](../ubuntu-installation-guide.md).
|
||||
|
||||
1. Install the latest version of Docker with the following commands:
|
||||
|
||||
> **Notes:**
|
||||
>
|
||||
> - This step is only required if Docker is not installed on the system.
|
||||
> - Docker version 18.09 [removed devicemapper support](https://github.com/kata-containers/documentation/issues/373).
|
||||
> If you wish to use a block based backend, see the options listed on https://github.com/kata-containers/documentation/issues/407.
|
||||
|
||||
```bash
|
||||
$ sudo -E apt-get -y install apt-transport-https ca-certificates software-properties-common
|
||||
$ curl -sL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
$ arch=$(dpkg --print-architecture)
|
||||
$ sudo -E add-apt-repository "deb [arch=${arch}] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
$ sudo -E apt-get update
|
||||
$ sudo -E apt-get -y install docker-ce
|
||||
```
|
||||
|
||||
For more information on installing Docker please refer to the
|
||||
[Docker Guide](https://docs.docker.com/engine/installation/linux/ubuntu).
|
||||
|
||||
2. Configure Docker to use Kata Containers by default with **ONE** of the following methods:
|
||||
|
||||
1. systemd (this is the default and is applied automatically if you select the
|
||||
[automatic installation](../../install/README.md#automatic-installation) option)
|
||||
|
||||
```bash
|
||||
$ sudo mkdir -p /etc/systemd/system/docker.service.d/
|
||||
$ cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/kata-containers.conf
|
||||
[Service]
|
||||
ExecStart=
|
||||
ExecStart=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/bin/kata-runtime --default-runtime=kata-runtime
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Docker `daemon.json`
|
||||
|
||||
Create docker configuration folder.
|
||||
|
||||
```
|
||||
$ sudo mkdir -p /etc/docker
|
||||
```
|
||||
|
||||
Add the following definitions to `/etc/docker/daemon.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"default-runtime": "kata-runtime",
|
||||
"runtimes": {
|
||||
"kata-runtime": {
|
||||
"path": "/usr/bin/kata-runtime"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart the Docker systemd service with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Run Kata Containers
|
||||
|
||||
You are now ready to run Kata Containers:
|
||||
|
||||
```bash
|
||||
$ sudo docker run busybox uname -a
|
||||
```
|
||||
|
||||
The previous command shows details of the kernel version running inside the
|
||||
container, which is different to the host kernel version.
|
||||
@@ -3,15 +3,8 @@
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ ARCH=$(arch)
|
||||
$ BRANCH="${BRANCH:-master}"
|
||||
$ sudo dnf -y install dnf-plugins-core
|
||||
$ sudo -E dnf config-manager --add-repo "http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/Fedora_${VERSION_ID}/home:katacontainers:releases:${ARCH}:${BRANCH}.repo"
|
||||
$ sudo -E dnf -y install kata-runtime kata-proxy kata-shim
|
||||
$ sudo -E dnf -y install kata-runtime
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
|
||||
- [Docker](docker/fedora-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* [Docker Installation and Setup](#docker-installation-and-setup)
|
||||
|
||||
## Introduction
|
||||
Use [these installation instructions](README.md#supported-distributions) together with
|
||||
Use [these installation instructions](README.md#packaged-installation-methods) together with
|
||||
[`kata-doc-to-script`](https://github.com/kata-containers/tests/blob/master/.ci/kata-doc-to-script.sh)
|
||||
to generate installation bash scripts.
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
* [Further Information](#further-information)
|
||||
|
||||
## Introduction
|
||||
`kata-manager` automates the Kata Containers installation procedure documented for [these Linux distributions](README.md#supported-distributions).
|
||||
`kata-manager` automates the Kata Containers installation procedure documented for [these Linux distributions](README.md#packaged-installation-methods).
|
||||
|
||||
> **Note**:
|
||||
> - `kata-manager` requires `curl` and `sudo` installed on your system.
|
||||
|
||||
@@ -126,10 +126,10 @@ Containers components to help with this, and then use `kubectl` on the host (tha
|
||||
configured for you) to deploy them:
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/kata-containers/packaging.git
|
||||
$ cd packaging/kata-deploy
|
||||
$ kubectl apply -f kata-rbac.yaml
|
||||
$ kubectl apply -f kata-deploy.yaml
|
||||
$ git clone https://github.com/kata-containers/kata-containers.git
|
||||
$ cd kata-containers/tools/packaging/kata-deploy
|
||||
$ kubectl apply -f kata-rbac/base/kata-rbac.yaml
|
||||
$ kubectl apply -f kata-deploy/base/kata-deploy.yaml
|
||||
```
|
||||
|
||||
This installs the Kata Containers components into `/opt/kata` inside the Minikube node. It can take
|
||||
@@ -166,7 +166,7 @@ $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/node-api/master/
|
||||
Now register the `kata qemu` runtime with that class. This should result in no errors:
|
||||
|
||||
```sh
|
||||
$ cd packaging/kata-deploy/k8s-1.14
|
||||
$ cd kata-containers/tools/packaging/kata-deploy/k8s-1.14
|
||||
$ kubectl apply -f kata-qemu-runtimeClass.yaml
|
||||
```
|
||||
|
||||
@@ -187,7 +187,7 @@ for more details.
|
||||
Perform the following action to launch a Kata Containers based Apache PHP pod:
|
||||
|
||||
```sh
|
||||
$ cd packaging/kata-deploy/examples
|
||||
$ cd kata-containers/tools/packaging/kata-deploy/examples
|
||||
$ kubectl apply -f test-deploy-kata-qemu.yaml
|
||||
```
|
||||
|
||||
|
||||
@@ -3,21 +3,8 @@
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ DISTRO_REPO=$(sed "s/ /_/g" <<< "$NAME")
|
||||
$ [ -n "$VERSION" ] && DISTRO_REPO+="_${VERSION}"
|
||||
$ DISTRO_REPO=$(echo $DISTRO_REPO | tr -d ' ')
|
||||
$ ARCH=$(arch)
|
||||
$ BRANCH="${BRANCH:-master}"
|
||||
$ REPO_ALIAS="kata-${BRANCH}"
|
||||
$ PUBKEY="/tmp/rpm-signkey.pub"
|
||||
$ curl -SsL -o "$PUBKEY" "https://raw.githubusercontent.com/kata-containers/tests/master/data/rpm-signkey.pub"
|
||||
$ sudo -E rpm --import "$PUBKEY"
|
||||
$ zypper lr "$REPO_ALIAS" && sudo -E zypper -n removerepo "$REPO_ALIAS"
|
||||
$ sudo -E zypper addrepo --refresh "http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/${DISTRO_REPO}/" "$REPO_ALIAS"
|
||||
$ sudo -E zypper -n install kata-runtime
|
||||
$ sudo -E zypper -n install katacontainers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Docker](docker/opensuse-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
|
||||
11
docs/install/opensuse-leap-15.1-installation-guide.md
Normal file
11
docs/install/opensuse-leap-15.1-installation-guide.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Install Kata Containers on openSUSE Leap 15.1
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ sudo -E zypper addrepo --refresh "https://download.opensuse.org/repositories/devel:/kubic/openSUSE_Leap_15.1/devel:kubic.repo"
|
||||
$ sudo -E zypper -n --gpg-auto-import-keys install katacontainers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -1,19 +0,0 @@
|
||||
# Install Kata Containers on openSUSE Leap
|
||||
|
||||
1. Install Kata Containers on openSUSE by following the instructions in the
|
||||
[openSUSE install guide](opensuse-installation-guide.md).
|
||||
<!--
|
||||
You can ignore the content of this comment.
|
||||
(test code run by test-install-docs.sh to validate code blocks this document)
|
||||
|
||||
```bash
|
||||
$ echo "NOTE: this document is just a link to the generic openSUSE install guide located at:
|
||||
https://raw.githubusercontent.com/kata-containers/documentation/master/install/opensuse-installation-guide.md
|
||||
|
||||
Please download this file and run kata-doc-to-script.sh again."
|
||||
```
|
||||
-->
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Docker](docker/opensuse-leap-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -1,19 +0,0 @@
|
||||
# Install Kata Containers on openSUSE Tumbleweed
|
||||
|
||||
1. Install Kata Containers on openSUSE by following the instructions in the
|
||||
[openSUSE install guide](opensuse-installation-guide.md).
|
||||
<!--
|
||||
You can ignore the content of this comment.
|
||||
(test code run by test-install-docs.sh to validate code blocks this document)
|
||||
|
||||
```bash
|
||||
$ echo "NOTE: this document is just a link to the generic openSUSE install guide located at:
|
||||
https://raw.githubusercontent.com/kata-containers/documentation/master/install/opensuse-installation-guide.md
|
||||
|
||||
Please download this file and run kata-doc-to-script.sh again."
|
||||
```
|
||||
-->
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Docker](docker/opensuse-tumbleweed-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -1,16 +0,0 @@
|
||||
# Install Kata Containers on RHEL
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ ARCH=$(arch)
|
||||
$ BRANCH="${BRANCH:-master}"
|
||||
$ sudo -E yum-config-manager --add-repo "http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/RHEL_${VERSION_ID}/home:katacontainers:releases:${ARCH}:${BRANCH}.repo"
|
||||
$ sudo -E yum -y install kata-runtime kata-proxy kata-shim
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
|
||||
- [Docker](docker/rhel-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
13
docs/install/sle-installation-guide.md
Normal file
13
docs/install/sle-installation-guide.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# Install Kata Containers on SLE
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ source /etc/os-release
|
||||
$ DISTRO_VERSION=$(sed "s/-/_/g" <<< "$VERSION")
|
||||
$ sudo -E zypper addrepo --refresh "https://download.opensuse.org/repositories/devel:/kubic/SLE_${DISTRO_VERSION}_Backports/devel:kubic.repo"
|
||||
$ sudo -E zypper -n --gpg-auto-import-keys install katacontainers
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -1,15 +0,0 @@
|
||||
# Install Kata Containers on SLES
|
||||
|
||||
1. Install the Kata Containers components with the following commands:
|
||||
|
||||
```bash
|
||||
$ ARCH=$(arch)
|
||||
$ BRANCH="${BRANCH:-master}"
|
||||
$ sudo -E zypper addrepo "http://download.opensuse.org/repositories/home:/katacontainers:/releases:/${ARCH}:/${BRANCH}/SLE_15_SP1/home:katacontainers:releases:${ARCH}:${BRANCH}.repo"
|
||||
$ sudo -E zypper -n --no-gpg-checks install kata-runtime kata-proxy kata-shim
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
|
||||
- [Docker](docker/sles-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
@@ -12,6 +12,4 @@
|
||||
```
|
||||
|
||||
2. Decide which container manager to use and select the corresponding link that follows:
|
||||
|
||||
- [Docker](docker/ubuntu-docker-install.md)
|
||||
- [Kubernetes](../Developer-Guide.md#run-kata-containers-with-kubernetes)
|
||||
|
||||
@@ -13,4 +13,4 @@ with v2). The recommended machine type for container workloads is `v2-highcpu`
|
||||
|
||||
## Set up with distribution specific quick start
|
||||
|
||||
Follow distribution specific [install guides](../install/README.md#supported-distributions).
|
||||
Follow distribution specific [install guides](../install/README.md#packaged-installation-methods).
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "logging"
|
||||
version = "0.1.0"
|
||||
authors = ["Tim Zhang <tim@hyper.sh>"]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
@@ -84,12 +84,12 @@ then a new configuration file can be [created](#configure-kata-containers)
|
||||
and [configured][7].
|
||||
|
||||
[1]: https://docs.snapcraft.io/snaps/intro
|
||||
[2]: ../../../docs/design/architecture.md#root-filesystem-image
|
||||
[2]: ../docs/design/architecture.md#root-filesystem-image
|
||||
[3]: https://docs.snapcraft.io/reference/confinement#classic
|
||||
[4]: https://github.com/kata-containers/runtime#configuration
|
||||
[5]: https://docs.docker.com/engine/reference/commandline/dockerd
|
||||
[6]: ../../../docs/install/docker/ubuntu-docker-install.md
|
||||
[7]: ../../../docs/Developer-Guide.md#configure-to-use-initrd-or-rootfs-image
|
||||
[6]: ../docs/install/docker/ubuntu-docker-install.md
|
||||
[7]: ../docs/Developer-Guide.md#configure-to-use-initrd-or-rootfs-image
|
||||
[8]: https://snapcraft.io/kata-containers
|
||||
[9]: ../../../docs/Developer-Guide.md#run-kata-containers-with-docker
|
||||
[10]: ../../../docs/Developer-Guide.md#run-kata-containers-with-kubernetes
|
||||
[9]: ../docs/Developer-Guide.md#run-kata-containers-with-docker
|
||||
[10]: ../docs/Developer-Guide.md#run-kata-containers-with-kubernetes
|
||||
323
snap/snapcraft.yaml
Normal file
323
snap/snapcraft.yaml
Normal file
@@ -0,0 +1,323 @@
|
||||
name: kata-containers
|
||||
summary: Build lightweight VMs that seamlessly plug into the containers ecosystem
|
||||
description: |
|
||||
Kata Containers is an open source project and community working to build a
|
||||
standard implementation of lightweight Virtual Machines (VMs) that feel and
|
||||
perform like containers, but provide the workload isolation and security
|
||||
advantages of VMs
|
||||
confinement: classic
|
||||
adopt-info: metadata
|
||||
base: core20
|
||||
|
||||
parts:
|
||||
metadata:
|
||||
plugin: nil
|
||||
prime:
|
||||
- -*
|
||||
build-packages:
|
||||
- git
|
||||
- git-extras
|
||||
override-pull: |
|
||||
version="9999"
|
||||
kata_url="https://github.com/kata-containers/kata-containers"
|
||||
|
||||
image_info="${SNAPCRAFT_IMAGE_INFO:-}"
|
||||
snap_env="$(echo "${image_info}" | egrep -o "build_url.*" | egrep -o "snap.*build" | cut -d/ -f2)"
|
||||
|
||||
case "${snap_env}" in
|
||||
stable)
|
||||
# Get the latest stable version
|
||||
version=$(git ls-remote --tags ${kata_url} | egrep -o "refs.*" | egrep -v "\-alpha|\-rc|{}" | egrep -o "[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+" | sort -V -r | head -1)
|
||||
git checkout ${version}
|
||||
;;
|
||||
|
||||
*-dev)
|
||||
version="${snap_env}"
|
||||
;;
|
||||
esac
|
||||
|
||||
snapcraftctl set-grade "stable"
|
||||
snapcraftctl set-version "${version}"
|
||||
|
||||
# setup GOPATH - this repo dir should be there
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
mkdir -p $(dirname ${kata_dir})
|
||||
ln -sf $(realpath "${SNAPCRAFT_STAGE}/..") ${kata_dir}
|
||||
|
||||
godeps:
|
||||
after: [metadata]
|
||||
plugin: nil
|
||||
prime:
|
||||
- -*
|
||||
build-packages:
|
||||
- curl
|
||||
override-build: |
|
||||
# put everything in stage
|
||||
cd ${SNAPCRAFT_STAGE}
|
||||
|
||||
yq_path="./yq"
|
||||
yq_pkg="github.com/mikefarah/yq"
|
||||
goos="linux"
|
||||
case "$(uname -m)" in
|
||||
aarch64) goarch="arm64";;
|
||||
ppc64le) goarch="ppc64le";;
|
||||
x86_64) goarch="amd64";;
|
||||
s390x) goarch="s390x";;
|
||||
*) echo "unsupported architecture: $(uname -m)"; exit 1;;
|
||||
esac
|
||||
|
||||
# Workaround to get latest release from github (to not use github token).
|
||||
# Get the redirection to latest release on github.
|
||||
yq_latest_url=$(curl -Ls -o /dev/null -w %{url_effective} "https://${yq_pkg}/releases/latest")
|
||||
# The redirected url should include the latest release version
|
||||
# https://github.com/mikefarah/yq/releases/tag/<VERSION-HERE>
|
||||
yq_version=$(basename "${yq_latest_url}")
|
||||
yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
|
||||
curl -o "${yq_path}" -LSsf ${yq_url}
|
||||
chmod +x ${yq_path}
|
||||
|
||||
kata_dir=gopath/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
version="$(${yq_path} r ${kata_dir}/versions.yaml languages.golang.meta.newest-version)"
|
||||
tarfile="go${version}.${goos}-${goarch}.tar.gz"
|
||||
curl -LO https://golang.org/dl/${tarfile}
|
||||
tar -xf ${tarfile} --strip-components=1
|
||||
|
||||
image:
|
||||
after: [godeps]
|
||||
plugin: nil
|
||||
build-packages:
|
||||
- docker.io
|
||||
- cpio
|
||||
- git
|
||||
- iptables
|
||||
- software-properties-common
|
||||
- uidmap
|
||||
- gnupg2
|
||||
override-build: |
|
||||
yq=${SNAPCRAFT_STAGE}/yq
|
||||
|
||||
# set GOPATH
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
export GOROOT=${SNAPCRAFT_STAGE}
|
||||
export PATH="${GOROOT}/bin:${PATH}"
|
||||
|
||||
if [ -n "$http_proxy" ]; then
|
||||
echo "Setting proxy $http_proxy"
|
||||
sudo -E systemctl set-environment http_proxy=$http_proxy || true
|
||||
sudo -E systemctl set-environment https_proxy=$https_proxy || true
|
||||
fi
|
||||
|
||||
# Copy yq binary. It's used in the container
|
||||
mkdir -p "${GOPATH}/bin/"
|
||||
cp -a "${yq}" "${GOPATH}/bin/"
|
||||
|
||||
echo "Unmasking docker service"
|
||||
sudo -E systemctl unmask docker.service || true
|
||||
sudo -E systemctl unmask docker.socket || true
|
||||
echo "Adding $USER into docker group"
|
||||
sudo -E gpasswd -a $USER docker
|
||||
echo "Starting docker"
|
||||
sudo -E systemctl start docker || true
|
||||
|
||||
cd ${kata_dir}/tools/osbuilder
|
||||
|
||||
# build image
|
||||
export AGENT_VERSION=$(cat ${kata_dir}/VERSION)
|
||||
export AGENT_INIT=yes
|
||||
export USE_DOCKER=1
|
||||
export DEBUG=1
|
||||
case "$(uname -m)" in
|
||||
aarch64|ppc64le|s390x)
|
||||
sudo -E PATH=$PATH make initrd DISTRO=alpine
|
||||
;;
|
||||
x86_64)
|
||||
# In some build systems it's impossible to build a rootfs image, try with the initrd image
|
||||
sudo -E PATH=$PATH make image DISTRO=clearlinux || sudo -E PATH=$PATH make initrd DISTRO=alpine
|
||||
;;
|
||||
*) echo "unsupported architecture: $(uname -m)"; exit 1;;
|
||||
esac
|
||||
|
||||
# Install image
|
||||
kata_image_dir=${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers
|
||||
mkdir -p ${kata_image_dir}
|
||||
cp kata-containers*.img ${kata_image_dir}
|
||||
|
||||
runtime:
|
||||
after: [godeps, image]
|
||||
plugin: nil
|
||||
build-attributes: [no-patchelf]
|
||||
override-build: |
|
||||
# set GOPATH
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
export GOROOT=${SNAPCRAFT_STAGE}
|
||||
export PATH="${GOROOT}/bin:${PATH}"
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
cd ${kata_dir}/src/runtime
|
||||
|
||||
# setup arch
|
||||
arch=$(uname -m)
|
||||
if [ ${arch} = "ppc64le" ]; then
|
||||
arch="ppc64"
|
||||
fi
|
||||
|
||||
# build and install runtime
|
||||
make \
|
||||
PREFIX=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr \
|
||||
SKIP_GO_VERSION_CHECK=1 \
|
||||
QEMUCMD=qemu-system-$arch
|
||||
make install \
|
||||
PREFIX=/usr \
|
||||
DESTDIR=${SNAPCRAFT_PART_INSTALL} \
|
||||
SKIP_GO_VERSION_CHECK=1 \
|
||||
QEMUCMD=qemu-system-$arch
|
||||
|
||||
if [ -e ${SNAPCRAFT_PART_INSTALL}/../../image/install/usr/share/kata-containers/kata-containers.img ]; then
|
||||
# Use rootfs image by default
|
||||
sed -i -e '/^initrd =/d' ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
else
|
||||
# Use initrd by default
|
||||
sed -i -e '/^image =/d' ${SNAPCRAFT_PART_INSTALL}/usr/share/defaults/${SNAPCRAFT_PROJECT_NAME}/configuration.toml
|
||||
fi
|
||||
|
||||
kernel:
|
||||
after: [godeps, image]
|
||||
plugin: nil
|
||||
build-packages:
|
||||
- libelf-dev
|
||||
- curl
|
||||
- build-essential
|
||||
- bison
|
||||
- flex
|
||||
override-build: |
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
cd ${kata_dir}/tools/packaging/kernel
|
||||
|
||||
# Say 'no' to everithing, fix issues with incomplete .config files
|
||||
yes "n" | ./build-kernel.sh setup
|
||||
kernel_dir_prefix="kata-linux-"
|
||||
cd ${kernel_dir_prefix}*
|
||||
version=$(basename ${PWD} | sed 's|'"${kernel_dir_prefix}"'||' | cut -d- -f1)
|
||||
make -j $(($(nproc)-1)) EXTRAVERSION=".container"
|
||||
|
||||
kernel_suffix=${version}.container
|
||||
kata_kernel_dir=${SNAPCRAFT_PART_INSTALL}/usr/share/kata-containers
|
||||
mkdir -p ${kata_kernel_dir}
|
||||
|
||||
# Install bz kernel
|
||||
make install INSTALL_PATH=${kata_kernel_dir} EXTRAVERSION=".container" || true
|
||||
vmlinuz_name=vmlinuz-${kernel_suffix}
|
||||
ln -sf ${vmlinuz_name} ${kata_kernel_dir}/vmlinuz.container
|
||||
|
||||
# Install raw kernel
|
||||
vmlinux_name=vmlinux-${kernel_suffix}
|
||||
cp vmlinux ${kata_kernel_dir}/${vmlinux_name}
|
||||
ln -sf ${vmlinux_name} ${kata_kernel_dir}/vmlinux.container
|
||||
|
||||
qemu:
|
||||
plugin: make
|
||||
after: [godeps, runtime]
|
||||
build-packages:
|
||||
- gcc
|
||||
- python
|
||||
- zlib1g-dev
|
||||
- libcap-ng-dev
|
||||
- libglib2.0-dev
|
||||
- libpixman-1-dev
|
||||
- libnuma-dev
|
||||
- libltdl-dev
|
||||
- libcap-dev
|
||||
- libattr1-dev
|
||||
- libfdt-dev
|
||||
- curl
|
||||
- libcapstone-dev
|
||||
- bc
|
||||
- libblkid-dev
|
||||
- libffi-dev
|
||||
- libmount-dev
|
||||
- libselinux1-dev
|
||||
override-build: |
|
||||
yq=${SNAPCRAFT_STAGE}/yq
|
||||
export GOPATH=${SNAPCRAFT_STAGE}/gopath
|
||||
kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
|
||||
|
||||
versions_file="${kata_dir}/versions.yaml"
|
||||
# arch-specific definition
|
||||
case "$(uname -m)" in
|
||||
"aarch64")
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.branch)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit="$(${yq} r ${versions_file} assets.hypervisor.qemu.architecture.aarch64.commit)"
|
||||
patches_dir="${kata_dir}/tools/packaging/obs-packaging/qemu-aarch64/patches/"
|
||||
;;
|
||||
|
||||
*)
|
||||
branch="$(${yq} r ${versions_file} assets.hypervisor.qemu.tag)"
|
||||
url="$(${yq} r ${versions_file} assets.hypervisor.qemu.url)"
|
||||
commit=""
|
||||
patches_dir="${kata_dir}/tools/packaging/qemu/patches/$(echo ${branch} | sed -e 's/.[[:digit:]]*$//' -e 's/^v//').x"
|
||||
;;
|
||||
esac
|
||||
|
||||
# download source
|
||||
qemu_dir=${SNAPCRAFT_STAGE}/qemu
|
||||
git clone --branch ${branch} --single-branch ${url} "${qemu_dir}"
|
||||
cd ${qemu_dir}
|
||||
[ -z "${commit}" ] || git checkout ${commit}
|
||||
|
||||
[ -n "$(ls -A ui/keycodemapdb)" ] || git clone https://github.com/qemu/keycodemapdb ui/keycodemapdb/
|
||||
[ -n "$(ls -A capstone)" ] || git clone https://github.com/qemu/capstone capstone
|
||||
|
||||
# Apply patches
|
||||
for patch in ${patches_dir}/*.patch; do
|
||||
echo "Applying $(basename "$patch") ..."
|
||||
patch \
|
||||
--batch \
|
||||
--forward \
|
||||
--strip 1 \
|
||||
--input "$patch"
|
||||
done
|
||||
|
||||
# Only x86_64 supports libpmem
|
||||
[ "$(uname -m)" = "x86_64" ] && sudo apt-get --no-install-recommends install -y apt-utils ca-certificates libpmem-dev
|
||||
|
||||
configure_hypervisor=${kata_dir}/tools/packaging/scripts/configure-hypervisor.sh
|
||||
chmod +x ${configure_hypervisor}
|
||||
# static build
|
||||
echo "$(${configure_hypervisor} -s qemu) \
|
||||
--disable-rbd
|
||||
--prefix=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr \
|
||||
--datadir=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/share \
|
||||
--libexecdir=/snap/${SNAPCRAFT_PROJECT_NAME}/current/usr/libexec/qemu" \
|
||||
| xargs ./configure
|
||||
|
||||
# Copy QEMU configurations (Kconfigs)
|
||||
cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs/
|
||||
|
||||
# build and install
|
||||
make -j $(($(nproc)-1))
|
||||
make install DESTDIR=${SNAPCRAFT_PART_INSTALL}
|
||||
prime:
|
||||
- -snap/
|
||||
- -usr/bin/qemu-ga
|
||||
- -usr/bin/qemu-pr-helper
|
||||
- -usr/bin/virtfs-proxy-helper
|
||||
- -usr/include/
|
||||
- -usr/libexec/
|
||||
- -usr/share/applications/
|
||||
- -usr/share/icons/
|
||||
- -usr/var/
|
||||
- usr/*
|
||||
- lib/*
|
||||
organize:
|
||||
# Hack: move qemu to /
|
||||
"snap/kata-containers/current/": "./"
|
||||
|
||||
apps:
|
||||
runtime:
|
||||
command: usr/bin/containerd-shim-kata-v2
|
||||
221
src/agent/Cargo.lock
generated
221
src/agent/Cargo.lock
generated
@@ -1,14 +1,5 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a49806b9dadc843c61e7c97e72490ad7f7220ae249012fbda9ad0609457c0543"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "adler32"
|
||||
version = "1.0.4"
|
||||
@@ -24,12 +15,30 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anyhow"
|
||||
version = "1.0.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b"
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "0.4.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62"
|
||||
|
||||
[[package]]
|
||||
name = "arrayref"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
|
||||
|
||||
[[package]]
|
||||
name = "arrayvec"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8"
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.0"
|
||||
@@ -37,17 +46,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.48"
|
||||
name = "base64"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0df2f85c8a2abbe3b7d7e748052fdd9b76a0458fdeb16ad4223f5eca78c7c130"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
]
|
||||
checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7"
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
@@ -55,6 +57,17 @@ version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
|
||||
[[package]]
|
||||
name = "blake2b_simd"
|
||||
version = "0.5.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"arrayvec",
|
||||
"constant_time_eq",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.4"
|
||||
@@ -84,6 +97,17 @@ version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
|
||||
|
||||
[[package]]
|
||||
name = "cgroups"
|
||||
version = "0.1.1-alpha.0"
|
||||
source = "git+https://github.com/kata-containers/cgroups-rs?branch=stable-0.1.1#8717524f2c95aacd30768b6f0f7d7f2fddef5cac"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.18.0",
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.11"
|
||||
@@ -95,6 +119,21 @@ dependencies = [
|
||||
"time",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cloudabi"
|
||||
version = "0.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "constant_time_eq"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.2.0"
|
||||
@@ -125,6 +164,26 @@ dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dirs"
|
||||
version = "3.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "142995ed02755914747cc6ca76fc7e4583cd18578746716d0508ea6ed558b9ff"
|
||||
dependencies = [
|
||||
"dirs-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dirs-sys"
|
||||
version = "0.3.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"redox_users",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.2.5"
|
||||
@@ -152,7 +211,6 @@ version = "0.12.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d371106cc88ffdfb1eabd7111e432da544f16f3e2d7bf1dfe8bf575f1df045cd"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
@@ -185,12 +243,6 @@ dependencies = [
|
||||
"wasi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.21.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c"
|
||||
|
||||
[[package]]
|
||||
name = "hex"
|
||||
version = "0.4.2"
|
||||
@@ -207,7 +259,8 @@ checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"error-chain",
|
||||
"anyhow",
|
||||
"cgroups",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"logging",
|
||||
@@ -239,9 +292,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.70"
|
||||
version = "0.2.77"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f"
|
||||
checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235"
|
||||
|
||||
[[package]]
|
||||
name = "libflate"
|
||||
@@ -261,6 +314,15 @@ version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3286f09f7d4926fc486334f28d8d2e6ebe4f7f9994494b6dab27ddfad2c9b11b"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"
|
||||
dependencies = [
|
||||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.8"
|
||||
@@ -331,6 +393,18 @@ dependencies = [
|
||||
"void",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nix"
|
||||
version = "0.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "83450fe6a6142ddd95fb064b746083fc4ef1705fe81f64a64e1d4b39f54a1055"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.42"
|
||||
@@ -350,12 +424,6 @@ dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.19.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2"
|
||||
|
||||
[[package]]
|
||||
name = "oci"
|
||||
version = "0.1.0"
|
||||
@@ -366,6 +434,30 @@ dependencies = [
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"
|
||||
dependencies = [
|
||||
"lock_api",
|
||||
"parking_lot_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cloudabi",
|
||||
"libc",
|
||||
"redox_syscall",
|
||||
"smallvec",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "path-absolutize"
|
||||
version = "1.2.1"
|
||||
@@ -398,7 +490,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "059a34f111a9dee2ce1ac2826a68b24601c4298cfeb1a587c3cb493d5ab46f52"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"nix 0.17.0",
|
||||
"nix 0.18.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -531,6 +623,17 @@ version = "0.1.56"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
|
||||
|
||||
[[package]]
|
||||
name = "redox_users"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
"redox_syscall",
|
||||
"rust-argon2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.3.7"
|
||||
@@ -565,17 +668,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac"
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.16"
|
||||
name = "rust-argon2"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783"
|
||||
checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017"
|
||||
dependencies = [
|
||||
"base64",
|
||||
"blake2b_simd",
|
||||
"constant_time_eq",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustjail"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"caps",
|
||||
"error-chain",
|
||||
"cgroups",
|
||||
"dirs",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"nix 0.17.0",
|
||||
@@ -590,8 +701,10 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"serial_test",
|
||||
"slog",
|
||||
"slog-scope",
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -643,6 +756,28 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serial_test"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b15f74add9a9d4a3eb2bf739c9a427d266d3895b53d992c3a7c234fec2ff1f1"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"parking_lot",
|
||||
"serial_test_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serial_test_derive"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "65f59259be9fc1bf677d06cc1456e97756004a1a5a577480f71430bd7c17ba33"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook"
|
||||
version = "0.1.15"
|
||||
@@ -710,6 +845,12 @@ dependencies = [
|
||||
"slog",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
|
||||
|
||||
[[package]]
|
||||
name = "spin"
|
||||
version = "0.5.2"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <bo@hyper.sh>"]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
@@ -11,7 +11,6 @@ rustjail = { path = "rustjail" }
|
||||
protocols = { path = "protocols" }
|
||||
netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] }
|
||||
lazy_static = "1.3.0"
|
||||
error-chain = "0.12.1"
|
||||
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
|
||||
protobuf = "=2.14.0"
|
||||
libc = "0.2.58"
|
||||
@@ -32,6 +31,8 @@ slog-scope = "4.1.2"
|
||||
tempfile = "3.1.0"
|
||||
prometheus = { version = "0.9.0", features = ["process"] }
|
||||
procfs = "0.7.9"
|
||||
anyhow = "1.0.32"
|
||||
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
|
||||
@@ -42,6 +42,8 @@ endif
|
||||
|
||||
ifeq ($(ARCH), ppc64le)
|
||||
override ARCH = powerpc64le
|
||||
override LIBC = gnu
|
||||
$(warning "WARNING: powerpc64le-unknown-linux-musl target is unavailable")
|
||||
endif
|
||||
|
||||
TRIPLE = $(ARCH)-unknown-linux-$(LIBC)
|
||||
@@ -57,12 +59,27 @@ INIT := no
|
||||
# Path to systemd unit directory if installed as not init.
|
||||
UNIT_DIR := /usr/lib/systemd/system
|
||||
|
||||
GENERATED_CODE = src/version.rs
|
||||
|
||||
AGENT_NAME=$(TARGET)
|
||||
API_VERSION=0.0.1
|
||||
AGENT_VERSION=$(VERSION)
|
||||
|
||||
GENERATED_REPLACEMENTS= \
|
||||
AGENT_NAME \
|
||||
AGENT_VERSION \
|
||||
API_VERSION \
|
||||
BINDIR \
|
||||
COMMIT \
|
||||
VERSION_COMMIT
|
||||
GENERATED_FILES :=
|
||||
|
||||
GENERATED_FILES += $(GENERATED_CODE)
|
||||
|
||||
ifeq ($(INIT),no)
|
||||
# Unit file to start kata agent in systemd systems
|
||||
UNIT_FILES = kata-agent.service
|
||||
GENERATED_FILES := $(UNIT_FILES)
|
||||
GENERATED_FILES += $(UNIT_FILES)
|
||||
# Target to be reached in systemd services
|
||||
UNIT_FILES += kata-containers.target
|
||||
endif
|
||||
@@ -86,25 +103,26 @@ endef
|
||||
|
||||
default: $(TARGET) show-header
|
||||
|
||||
$(TARGET): $(TARGET_PATH)
|
||||
$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
|
||||
|
||||
$(TARGET_PATH): $(SOURCES) | show-summary
|
||||
@cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
optimize: $(SOURCES) | show-summary show-header
|
||||
@RUSTFLAGS='-C link-arg=-s' cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
show-header:
|
||||
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
|
||||
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed \
|
||||
-e 's|[@]bindir[@]|$(BINDIR)|g' \
|
||||
-e 's|[@]kata-agent[@]|$(TARGET)|g' \
|
||||
"$<" > "$@"
|
||||
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
|
||||
|
||||
install: build-service
|
||||
@install -D $(TARGET_PATH) $(DESTDIR)/$(BINDIR)/$(TARGET)
|
||||
|
||||
clean:
|
||||
@cargo clean
|
||||
@rm -f $(GENERATED_FILES)
|
||||
|
||||
test:
|
||||
@cargo test --all --target $(TRIPLE)
|
||||
@@ -140,7 +158,8 @@ help: show-summary
|
||||
.PHONY: \
|
||||
help \
|
||||
show-header \
|
||||
show-summary
|
||||
show-summary \
|
||||
optimize
|
||||
|
||||
generate-protocols:
|
||||
protocols/hack/update-generated-proto.sh "${PROTO_FILE}"
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# Kata Agent in Rust
|
||||
|
||||
This is a rust version of the [`kata-agent`](https://github.com/kata-containers/kata-agent).
|
||||
This is a rust version of the [`kata-agent`](https://github.com/kata-containers/agent).
|
||||
|
||||
In Denver PTG, [we discussed about re-writing agent in rust](https://etherpad.openstack.org/p/katacontainers-2019-ptg-denver-agenda):
|
||||
|
||||
> In general, we all think about re-write agent in rust to reduce the footprint of agent. Moreover, Eric mentioned the possibility to stop using gRPC, which may have some impact on footprint. We may begin to do some PoC to show how much we could save by re-writing agent in rust.
|
||||
> In general, we all think about re-write agent in rust to reduce the footprint of agent. Moreover, Eric mentioned the possibility to stop using gRPC, which may have some impact on footprint. We may begin to do some POC to show how much we could save by re-writing agent in rust.
|
||||
|
||||
After that, we drafted the initial code here, and any contributions are welcome.
|
||||
|
||||
@@ -18,7 +18,7 @@ After that, we drafted the initial code here, and any contributions are welcome.
|
||||
| exec/list process | :white_check_mark: |
|
||||
| I/O stream | :white_check_mark: |
|
||||
| Cgroups | :white_check_mark: |
|
||||
| Capabilities, rlimit, readonly path, masked path, users | :white_check_mark: |
|
||||
| Capabilities, `rlimit`, readonly path, masked path, users | :white_check_mark: |
|
||||
| container stats (`stats_container`) | :white_check_mark: |
|
||||
| Hooks | :white_check_mark: |
|
||||
| **Agent Features & APIs** |
|
||||
@@ -28,7 +28,7 @@ After that, we drafted the initial code here, and any contributions are welcome.
|
||||
| network, interface/routes (`update_container`) | :white_check_mark: |
|
||||
| File transfer API (`copy_file`) | :white_check_mark: |
|
||||
| Device APIs (`reseed_random_device`, , `online_cpu_memory`, `mem_hotplug_probe`, `set_guet_data_time`) | :white_check_mark: |
|
||||
| vsock support | :white_check_mark: |
|
||||
| VSOCK support | :white_check_mark: |
|
||||
| virtio-serial support | :heavy_multiplication_x: |
|
||||
| OCI Spec validator | :white_check_mark: |
|
||||
| **Infrastructures**|
|
||||
@@ -38,30 +38,25 @@ After that, we drafted the initial code here, and any contributions are welcome.
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Dependencies
|
||||
The `rust-agent` depends on [`grpc-rs`](https://github.com/pingcap/grpc-rs) by PingCAP. However, the upstream `grpc-rs` and [gRPC](https://github.com/grpc/grpc) need some changes to be used here, which may take some time to be landed. Therefore, we created a temporary fork or `grpc-rs` here:
|
||||
- https://github.com/alipay/grpc-rs/tree/rust_agent
|
||||
|
||||
### Build from Source
|
||||
The rust-agent need to be built with rust nightly, and static linked with musl.
|
||||
The rust-agent need to be built with rust newer than 1.37, and static linked with `musl`.
|
||||
```bash
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
git submodule update --init --recursive
|
||||
sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
cargo build --target x86_64-unknown-linux-musl --release
|
||||
```
|
||||
|
||||
## Run Kata CI with rust-agent
|
||||
* Firstly, install kata as noted by ["how to install Kata"](../../docs/install/README.md)
|
||||
* Secondly, build your own kata initrd/image following the steps in ["how to build your own initrd/image"](../../docs/Developer-Guide.md#create-and-install-rootfs-and-initrd-image).
|
||||
* Firstly, install Kata as noted by ["how to install Kata"](../../docs/install/README.md)
|
||||
* Secondly, build your own Kata initrd/image following the steps in ["how to build your own initrd/image"](../../docs/Developer-Guide.md#create-and-install-rootfs-and-initrd-image).
|
||||
notes: Please use your rust agent instead of the go agent when building your initrd/image.
|
||||
* Clone the kata ci test cases from: https://github.com/kata-containers/tests.git, and then run the cri test with:
|
||||
* Clone the Kata CI test cases from: https://github.com/kata-containers/tests.git, and then run the CRI test with:
|
||||
|
||||
```bash
|
||||
$sudo -E PATH=$PATH -E GOPATH=$GOPATH integration/containerd/shimv2/shimv2-tests.sh
|
||||
```
|
||||
|
||||
## Mini Benchmark
|
||||
The memory of 'RssAnon' consumed by the go-agent and rust-agent as below:
|
||||
The memory of `RssAnon` consumed by the go-agent and rust-agent as below:
|
||||
go-agent: about 11M
|
||||
rust-agent: about 1.1M
|
||||
|
||||
@@ -14,7 +14,7 @@ Wants=kata-containers.target
|
||||
# from a VM vsock port
|
||||
StandardOutput=tty
|
||||
Type=simple
|
||||
ExecStart=@bindir@/@kata-agent@
|
||||
ExecStart=@BINDIR@/@AGENT_NAME@
|
||||
LimitNOFILE=infinity
|
||||
# ExecStop is required for static agent tracing; in all other scenarios
|
||||
# the runtime handles shutting down the VM.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "netlink"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <yb203166@antfin.com>"]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "oci"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <bo@hyper.sh>"]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
|
||||
@@ -653,7 +653,7 @@ pub struct WindowsNetwork {
|
||||
#[serde(
|
||||
default,
|
||||
skip_serializing_if = "String::is_empty",
|
||||
rename = "nwtworkSharedContainerName"
|
||||
rename = "networkSharedContainerName"
|
||||
)]
|
||||
pub network_shared_container_name: String,
|
||||
}
|
||||
|
||||
@@ -29,13 +29,6 @@ impl Display for Error {
|
||||
}
|
||||
|
||||
impl error::Error for Error {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
Error::Io(ref e) => e.description(),
|
||||
Error::Json(ref e) => e.description(),
|
||||
}
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn error::Error> {
|
||||
match *self {
|
||||
Error::Io(ref e) => Some(e),
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "protocols"
|
||||
version = "0.1.0"
|
||||
authors = ["Hui Zhu <teawater@hyper.sh>"]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
|
||||
@@ -6,11 +6,11 @@
|
||||
|
||||
pub mod agent;
|
||||
pub mod agent_ttrpc;
|
||||
pub mod empty;
|
||||
pub mod health;
|
||||
pub mod health_ttrpc;
|
||||
pub mod oci;
|
||||
pub mod types;
|
||||
pub mod empty;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
[package]
|
||||
name = "rustjail"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <bo@hyper.sh>"]
|
||||
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
error-chain = "0.12.1"
|
||||
serde = "1.0.91"
|
||||
serde_json = "1.0.39"
|
||||
serde_derive = "1.0.91"
|
||||
@@ -23,3 +22,10 @@ slog-scope = "4.1.2"
|
||||
scan_fmt = "0.2"
|
||||
regex = "1.1"
|
||||
path-absolutize = "1.2.0"
|
||||
dirs = "3.0.1"
|
||||
anyhow = "1.0.32"
|
||||
cgroups = { git = "https://github.com/kata-containers/cgroups-rs", branch = "stable-0.1.1"}
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.5.0"
|
||||
|
||||
@@ -8,9 +8,9 @@
|
||||
|
||||
use lazy_static;
|
||||
|
||||
use crate::errors::*;
|
||||
use crate::log_child;
|
||||
use crate::sync::write_count;
|
||||
use anyhow::{anyhow, Result};
|
||||
use caps::{self, CapSet, Capability, CapsHashSet};
|
||||
use oci::LinuxCapabilities;
|
||||
use std::collections::HashMap;
|
||||
@@ -96,32 +96,35 @@ fn to_capshashset(cfd_log: RawFd, caps: &[String]) -> CapsHashSet {
|
||||
}
|
||||
|
||||
pub fn reset_effective() -> Result<()> {
|
||||
caps::set(None, CapSet::Effective, caps::all())?;
|
||||
caps::set(None, CapSet::Effective, caps::all()).map_err(|e| anyhow!(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn drop_priviledges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
|
||||
pub fn drop_privileges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
|
||||
let all = caps::all();
|
||||
|
||||
for c in all.difference(&to_capshashset(cfd_log, caps.bounding.as_ref())) {
|
||||
caps::drop(None, CapSet::Bounding, *c)?;
|
||||
caps::drop(None, CapSet::Bounding, *c).map_err(|e| anyhow!(e.to_string()))?;
|
||||
}
|
||||
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Effective,
|
||||
to_capshashset(cfd_log, caps.effective.as_ref()),
|
||||
)?;
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Permitted,
|
||||
to_capshashset(cfd_log, caps.permitted.as_ref()),
|
||||
)?;
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Inheritable,
|
||||
to_capshashset(cfd_log, caps.inheritable.as_ref()),
|
||||
)?;
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
|
||||
if let Err(_) = caps::set(
|
||||
None,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,49 +1,42 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2019,2020 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::errors::*;
|
||||
// use crate::configs::{FreezerState, Config};
|
||||
use anyhow::{anyhow, Result};
|
||||
use oci::LinuxResources;
|
||||
use protocols::agent::CgroupStats;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub mod fs;
|
||||
pub mod systemd;
|
||||
use cgroups::freezer::FreezerState;
|
||||
|
||||
pub type FreezerState = &'static str;
|
||||
pub mod fs;
|
||||
pub mod notifier;
|
||||
pub mod systemd;
|
||||
|
||||
pub trait Manager {
|
||||
fn apply(&self, _pid: i32) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
Err(anyhow!("not supported!".to_string()))
|
||||
}
|
||||
|
||||
fn get_pids(&self) -> Result<Vec<i32>> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn get_all_pids(&self) -> Result<Vec<i32>> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
Err(anyhow!("not supported!"))
|
||||
}
|
||||
|
||||
fn get_stats(&self) -> Result<CgroupStats> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
Err(anyhow!("not supported!"))
|
||||
}
|
||||
|
||||
fn freeze(&self, _state: FreezerState) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
Err(anyhow!("not supported!"))
|
||||
}
|
||||
|
||||
fn destroy(&mut self) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn get_paths(&self) -> Result<HashMap<String, String>> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
Err(anyhow!("not supported!"))
|
||||
}
|
||||
|
||||
fn set(&self, _container: &LinuxResources, _update: bool) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
Err(anyhow!("not supported!"))
|
||||
}
|
||||
}
|
||||
|
||||
206
src/agent/rustjail/src/cgroups/notifier.rs
Normal file
206
src/agent/rustjail/src/cgroups/notifier.rs
Normal file
@@ -0,0 +1,206 @@
|
||||
// Copyright (c) 2020 Ant Group
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use eventfd::{eventfd, EfdFlags};
|
||||
use nix::sys::eventfd;
|
||||
use nix::sys::inotify::{AddWatchFlags, InitFlags, Inotify};
|
||||
use std::fs::{self, File};
|
||||
use std::io::Read;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::mpsc::{self, Receiver};
|
||||
use std::thread;
|
||||
|
||||
// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
() => {
|
||||
slog_scope::logger().new(o!("subsystem" => "cgroups_notifier"))
|
||||
};
|
||||
}
|
||||
|
||||
pub fn notify_oom(cid: &str, cg_dir: String) -> Result<Receiver<String>> {
|
||||
if cgroups::hierarchies::is_cgroup2_unified_mode() {
|
||||
return notify_on_oom_v2(cid, cg_dir);
|
||||
}
|
||||
notify_on_oom(cid, cg_dir)
|
||||
}
|
||||
|
||||
// get_value_from_cgroup parse cgroup file with `Flat keyed`
|
||||
// and get the value of `key`.
|
||||
// Flat keyed file format:
|
||||
// KEY0 VAL0\n
|
||||
// KEY1 VAL1\n
|
||||
fn get_value_from_cgroup(path: &PathBuf, key: &str) -> Result<i64> {
|
||||
let content = fs::read_to_string(path)?;
|
||||
info!(
|
||||
sl!(),
|
||||
"get_value_from_cgroup file: {:?}, content: {}", &path, &content
|
||||
);
|
||||
|
||||
for line in content.lines() {
|
||||
let arr: Vec<&str> = line.split(" ").collect();
|
||||
if arr.len() == 2 && arr[0] == key {
|
||||
let r = arr[1].parse::<i64>()?;
|
||||
return Ok(r);
|
||||
}
|
||||
}
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
// notify_on_oom returns channel on which you can expect event about OOM,
|
||||
// if process died without OOM this channel will be closed.
|
||||
pub fn notify_on_oom_v2(containere_id: &str, cg_dir: String) -> Result<Receiver<String>> {
|
||||
register_memory_event_v2(containere_id, cg_dir, "memory.events", "cgroup.events")
|
||||
}
|
||||
|
||||
fn register_memory_event_v2(
|
||||
containere_id: &str,
|
||||
cg_dir: String,
|
||||
memory_event_name: &str,
|
||||
cgroup_event_name: &str,
|
||||
) -> Result<Receiver<String>> {
|
||||
let event_control_path = Path::new(&cg_dir).join(memory_event_name);
|
||||
let cgroup_event_control_path = Path::new(&cg_dir).join(cgroup_event_name);
|
||||
info!(
|
||||
sl!(),
|
||||
"register_memory_event_v2 event_control_path: {:?}", &event_control_path
|
||||
);
|
||||
info!(
|
||||
sl!(),
|
||||
"register_memory_event_v2 cgroup_event_control_path: {:?}", &cgroup_event_control_path
|
||||
);
|
||||
|
||||
let fd = Inotify::init(InitFlags::empty()).unwrap();
|
||||
|
||||
// watching oom kill
|
||||
let ev_fd = fd
|
||||
.add_watch(&event_control_path, AddWatchFlags::IN_MODIFY)
|
||||
.unwrap();
|
||||
// Because no `unix.IN_DELETE|unix.IN_DELETE_SELF` event for cgroup file system, so watching all process exited
|
||||
let cg_fd = fd
|
||||
.add_watch(&cgroup_event_control_path, AddWatchFlags::IN_MODIFY)
|
||||
.unwrap();
|
||||
info!(sl!(), "ev_fd: {:?}", ev_fd);
|
||||
info!(sl!(), "cg_fd: {:?}", cg_fd);
|
||||
|
||||
let (sender, receiver) = mpsc::channel();
|
||||
let containere_id = containere_id.to_string();
|
||||
|
||||
thread::spawn(move || {
|
||||
loop {
|
||||
let events = fd.read_events().unwrap();
|
||||
info!(
|
||||
sl!(),
|
||||
"container[{}] get events for container: {:?}", &containere_id, &events
|
||||
);
|
||||
|
||||
for event in events {
|
||||
if event.mask & AddWatchFlags::IN_MODIFY != AddWatchFlags::IN_MODIFY {
|
||||
continue;
|
||||
}
|
||||
info!(sl!(), "event.wd: {:?}", event.wd);
|
||||
|
||||
if event.wd == ev_fd {
|
||||
let oom = get_value_from_cgroup(&event_control_path, "oom_kill");
|
||||
if oom.unwrap_or(0) > 0 {
|
||||
sender.send(containere_id.clone()).unwrap();
|
||||
return;
|
||||
}
|
||||
} else if event.wd == cg_fd {
|
||||
let pids = get_value_from_cgroup(&cgroup_event_control_path, "populated");
|
||||
if pids.unwrap_or(-1) == 0 {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
// When a cgroup is destroyed, an event is sent to eventfd.
|
||||
// So if the control path is gone, return instead of notifying.
|
||||
if !Path::new(&event_control_path).exists() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(receiver)
|
||||
}
|
||||
|
||||
// notify_on_oom returns channel on which you can expect event about OOM,
|
||||
// if process died without OOM this channel will be closed.
|
||||
fn notify_on_oom(cid: &str, dir: String) -> Result<Receiver<String>> {
|
||||
if dir == "" {
|
||||
return Err(anyhow!("memory controller missing"));
|
||||
}
|
||||
|
||||
register_memory_event(cid, dir, "memory.oom_control", "")
|
||||
}
|
||||
|
||||
// level is one of "low", "medium", or "critical"
|
||||
fn notify_memory_pressure(cid: &str, dir: String, level: &str) -> Result<Receiver<String>> {
|
||||
if dir == "" {
|
||||
return Err(anyhow!("memory controller missing"));
|
||||
}
|
||||
|
||||
if level != "low" && level != "medium" && level != "critical" {
|
||||
return Err(anyhow!("invalid pressure level {}", level));
|
||||
}
|
||||
|
||||
register_memory_event(cid, dir, "memory.pressure_level", level)
|
||||
}
|
||||
|
||||
fn register_memory_event(
|
||||
cid: &str,
|
||||
cg_dir: String,
|
||||
event_name: &str,
|
||||
arg: &str,
|
||||
) -> Result<Receiver<String>> {
|
||||
let path = Path::new(&cg_dir).join(event_name);
|
||||
let event_file = File::open(path.clone())?;
|
||||
|
||||
let eventfd = eventfd(0, EfdFlags::EFD_CLOEXEC)?;
|
||||
|
||||
let event_control_path = Path::new(&cg_dir).join("cgroup.event_control");
|
||||
let data;
|
||||
if arg == "" {
|
||||
data = format!("{} {}", eventfd, event_file.as_raw_fd());
|
||||
} else {
|
||||
data = format!("{} {} {}", eventfd, event_file.as_raw_fd(), arg);
|
||||
}
|
||||
|
||||
fs::write(&event_control_path, data)?;
|
||||
|
||||
let mut eventfd_file = unsafe { File::from_raw_fd(eventfd) };
|
||||
|
||||
let (sender, receiver) = mpsc::channel();
|
||||
let containere_id = cid.to_string();
|
||||
|
||||
thread::spawn(move || {
|
||||
loop {
|
||||
let mut buf = [0; 8];
|
||||
match eventfd_file.read(&mut buf) {
|
||||
Err(err) => {
|
||||
warn!(sl!(), "failed to read from eventfd: {:?}", err);
|
||||
return;
|
||||
}
|
||||
Ok(_) => {
|
||||
let content = fs::read_to_string(path.clone());
|
||||
info!(
|
||||
sl!(),
|
||||
"OOM event for container: {}, content: {:?}", &containere_id, content
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// When a cgroup is destroyed, an event is sent to eventfd.
|
||||
// So if the control path is gone, return instead of notifying.
|
||||
if !Path::new(&event_control_path).exists() {
|
||||
return;
|
||||
}
|
||||
sender.send(containere_id.clone()).unwrap();
|
||||
}
|
||||
});
|
||||
|
||||
Ok(receiver)
|
||||
}
|
||||
@@ -10,7 +10,6 @@ use serde_json;
|
||||
|
||||
use protocols::oci::State as OCIState;
|
||||
|
||||
use crate::errors::*;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2019, 2020 Ant Group
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use dirs;
|
||||
use lazy_static;
|
||||
use oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
|
||||
use serde_json;
|
||||
@@ -13,23 +14,24 @@ use std::os::unix::io::RawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::SystemTime;
|
||||
// use crate::sync::Cond;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use libc::pid_t;
|
||||
use oci::{LinuxDevice, LinuxIDMapping};
|
||||
use std::clone::Clone;
|
||||
use std::fmt::Display;
|
||||
use std::process::{Child, Command};
|
||||
|
||||
// use crate::configs::namespaces::{NamespaceType};
|
||||
use crate::cgroups::Manager as CgroupManager;
|
||||
use cgroups::freezer::FreezerState;
|
||||
|
||||
use crate::process::Process;
|
||||
// use crate::intelrdt::Manager as RdtManager;
|
||||
use crate::errors::*;
|
||||
use crate::log_child;
|
||||
use crate::specconv::CreateOpts;
|
||||
use crate::sync::*;
|
||||
// use crate::stats::Stats;
|
||||
use crate::capabilities::{self, CAPSMAP};
|
||||
use crate::cgroups::fs::{self as fscgroup, Manager as FsManager};
|
||||
use crate::cgroups::Manager;
|
||||
use crate::{mount, validator};
|
||||
|
||||
use protocols::agent::StatsContainerResponse;
|
||||
@@ -37,12 +39,12 @@ use protocols::agent::StatsContainerResponse;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::fcntl::{FcntlArg, FdFlag};
|
||||
use nix::mount::MntFlags;
|
||||
use nix::pty;
|
||||
use nix::sched::{self, CloneFlags};
|
||||
use nix::sys::signal::{self, Signal};
|
||||
use nix::sys::stat::{self, Mode};
|
||||
use nix::unistd::{self, ForkResult, Gid, Pid, Uid};
|
||||
use nix::Error;
|
||||
|
||||
use libc;
|
||||
use protobuf::SingularPtrField;
|
||||
@@ -66,6 +68,7 @@ const CRFD_FD: &str = "CRFD_FD";
|
||||
const CWFD_FD: &str = "CWFD_FD";
|
||||
const CLOG_FD: &str = "CLOG_FD";
|
||||
const FIFO_FD: &str = "FIFO_FD";
|
||||
const HOME_ENV_KEY: &str = "HOME";
|
||||
|
||||
#[derive(PartialEq, Clone, Copy)]
|
||||
pub enum Status {
|
||||
@@ -150,7 +153,7 @@ lazy_static! {
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 3,
|
||||
file_mode: Some(0o066),
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
});
|
||||
@@ -159,7 +162,7 @@ lazy_static! {
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 5,
|
||||
file_mode: Some(0o066),
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
});
|
||||
@@ -168,7 +171,7 @@ lazy_static! {
|
||||
r#type: String::from("c"),
|
||||
major: 1,
|
||||
minor: 7,
|
||||
file_mode: Some(0o066),
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
});
|
||||
@@ -177,7 +180,7 @@ lazy_static! {
|
||||
r#type: "c".to_string(),
|
||||
major: 5,
|
||||
minor: 0,
|
||||
file_mode: Some(0o066),
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
});
|
||||
@@ -186,7 +189,7 @@ lazy_static! {
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 9,
|
||||
file_mode: Some(0o066),
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
});
|
||||
@@ -195,7 +198,7 @@ lazy_static! {
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 8,
|
||||
file_mode: Some(0o066),
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
});
|
||||
@@ -240,9 +243,7 @@ pub trait BaseContainer {
|
||||
// Or use Mutex<xx> as a member of struct, like C?
|
||||
// a lot of String in the struct might be &str
|
||||
#[derive(Debug)]
|
||||
pub struct LinuxContainer
|
||||
// where T: CgroupManager
|
||||
{
|
||||
pub struct LinuxContainer {
|
||||
pub id: String,
|
||||
pub root: String,
|
||||
pub config: Config,
|
||||
@@ -291,45 +292,40 @@ impl Container for LinuxContainer {
|
||||
fn pause(&mut self) -> Result<()> {
|
||||
let status = self.status();
|
||||
if status != Status::RUNNING && status != Status::CREATED {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"failed to pause container: current status is: {:?}",
|
||||
status
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
|
||||
if self.cgroup_manager.is_some() {
|
||||
self.cgroup_manager
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.freeze(fscgroup::FROZEN)?;
|
||||
.freeze(FreezerState::Frozen)?;
|
||||
|
||||
self.status.transition(Status::PAUSED);
|
||||
return Ok(());
|
||||
}
|
||||
Err(ErrorKind::ErrorCode(String::from("failed to get container's cgroup manager")).into())
|
||||
Err(anyhow!("failed to get container's cgroup manager"))
|
||||
}
|
||||
|
||||
fn resume(&mut self) -> Result<()> {
|
||||
let status = self.status();
|
||||
if status != Status::PAUSED {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"container status is: {:?}, not paused",
|
||||
status
|
||||
))
|
||||
.into());
|
||||
return Err(anyhow!("container status is: {:?}, not paused", status));
|
||||
}
|
||||
|
||||
if self.cgroup_manager.is_some() {
|
||||
self.cgroup_manager
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.freeze(fscgroup::THAWED)?;
|
||||
.freeze(FreezerState::Thawed)?;
|
||||
|
||||
self.status.transition(Status::RUNNING);
|
||||
return Ok(());
|
||||
}
|
||||
Err(ErrorKind::ErrorCode(String::from("failed to get container's cgroup manager")).into())
|
||||
Err(anyhow!("failed to get container's cgroup manager"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -344,8 +340,6 @@ pub fn init_child() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
std::process::exit(-1);
|
||||
}
|
||||
|
||||
fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
@@ -355,6 +349,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
lazy_static::initialize(&CAPSMAP);
|
||||
|
||||
let init = std::env::var(INIT)?.eq(format!("{}", true).as_str());
|
||||
|
||||
let no_pivot = std::env::var(NO_PIVOT)?.eq(format!("{}", true).as_str());
|
||||
let crfd = std::env::var(CRFD_FD)?.parse::<i32>().unwrap();
|
||||
let cfd_log = std::env::var(CLOG_FD)?.parse::<i32>().unwrap();
|
||||
@@ -375,16 +370,17 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
|
||||
let buf = read_sync(crfd)?;
|
||||
let cm_str = std::str::from_utf8(&buf)?;
|
||||
|
||||
let cm: FsManager = serde_json::from_str(cm_str)?;
|
||||
|
||||
let p = if spec.process.is_some() {
|
||||
spec.process.as_ref().unwrap()
|
||||
} else {
|
||||
return Err(ErrorKind::ErrorCode("didn't find process in Spec".to_string()).into());
|
||||
return Err(anyhow!("didn't find process in Spec"));
|
||||
};
|
||||
|
||||
if spec.linux.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no linux config".to_string()).into());
|
||||
return Err(anyhow!("no linux config"));
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
@@ -398,7 +394,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
for ns in &nses {
|
||||
let s = NAMESPACES.get(&ns.r#type.as_str());
|
||||
if s.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("invalid ns type".to_string()).into());
|
||||
return Err(anyhow!("invalid ns type"));
|
||||
}
|
||||
let s = s.unwrap();
|
||||
|
||||
@@ -566,13 +562,13 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
// NoNewPeiviledges, Drop capabilities
|
||||
if oci_process.no_new_privileges {
|
||||
if let Err(_) = prctl::set_no_new_privileges(true) {
|
||||
return Err(ErrorKind::ErrorCode("cannot set no new privileges".to_string()).into());
|
||||
return Err(anyhow!("cannot set no new privileges"));
|
||||
}
|
||||
}
|
||||
|
||||
if oci_process.capabilities.is_some() {
|
||||
let c = oci_process.capabilities.as_ref().unwrap();
|
||||
capabilities::drop_priviledges(cfd_log, c)?;
|
||||
capabilities::drop_privileges(cfd_log, c)?;
|
||||
}
|
||||
|
||||
if init {
|
||||
@@ -605,15 +601,20 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
env::set_var(v[0], v[1]);
|
||||
}
|
||||
|
||||
// set the "HOME" env getting from "/etc/passwd"
|
||||
if env::var_os(HOME_ENV_KEY).is_none() {
|
||||
if let Some(home_dir) = dirs::home_dir() {
|
||||
env::set_var(HOME_ENV_KEY, home_dir);
|
||||
}
|
||||
}
|
||||
|
||||
let exec_file = Path::new(&args[0]);
|
||||
log_child!(cfd_log, "process command: {:?}", &args);
|
||||
if !exec_file.exists() {
|
||||
match find_file(exec_file) {
|
||||
Some(_) => (),
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(format!("the file {} is not exist", &args[0])).into(),
|
||||
);
|
||||
return Err(anyhow!("the file {} is not exist", &args[0]));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -644,8 +645,6 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
}
|
||||
|
||||
do_exec(&args);
|
||||
|
||||
Err(ErrorKind::ErrorCode("fail to create container".to_string()).into())
|
||||
}
|
||||
|
||||
impl BaseContainer for LinuxContainer {
|
||||
@@ -658,7 +657,7 @@ impl BaseContainer for LinuxContainer {
|
||||
}
|
||||
|
||||
fn state(&self) -> Result<State> {
|
||||
Err(ErrorKind::ErrorCode(String::from("not suppoerted")).into())
|
||||
Err(anyhow!("not suppoerted"))
|
||||
}
|
||||
|
||||
fn oci_state(&self) -> Result<OCIState> {
|
||||
@@ -698,7 +697,7 @@ impl BaseContainer for LinuxContainer {
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::ErrorCode(format!("invalid eid {}", eid)).into())
|
||||
Err(anyhow!("invalid eid {}", eid))
|
||||
}
|
||||
|
||||
fn stats(&self) -> Result<StatsContainerResponse> {
|
||||
@@ -737,7 +736,7 @@ impl BaseContainer for LinuxContainer {
|
||||
let mut fifofd: RawFd = -1;
|
||||
if p.init {
|
||||
if let Ok(_) = stat::stat(fifo_file.as_str()) {
|
||||
return Err(ErrorKind::ErrorCode("exec fifo exists".to_string()).into());
|
||||
return Err(anyhow!("exec fifo exists"));
|
||||
}
|
||||
unistd::mkfifo(fifo_file.as_str(), Mode::from_bits(0o622).unwrap())?;
|
||||
// defer!(fs::remove_file(&fifo_file)?);
|
||||
@@ -750,21 +749,19 @@ impl BaseContainer for LinuxContainer {
|
||||
}
|
||||
info!(logger, "exec fifo opened!");
|
||||
|
||||
fscgroup::init_static();
|
||||
|
||||
if self.config.spec.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no spec".to_string()).into());
|
||||
return Err(anyhow!("no spec"));
|
||||
}
|
||||
|
||||
let spec = self.config.spec.as_ref().unwrap();
|
||||
if spec.linux.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no linux config".to_string()).into());
|
||||
return Err(anyhow!("no linux config"));
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
let st = self.oci_state()?;
|
||||
|
||||
let (pfd_log, cfd_log) = unistd::pipe().chain_err(|| "failed to create pipe")?;
|
||||
let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;
|
||||
fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
|
||||
|
||||
let child_logger = logger.new(o!("action" => "child process log"));
|
||||
@@ -792,8 +789,8 @@ impl BaseContainer for LinuxContainer {
|
||||
});
|
||||
|
||||
info!(logger, "exec fifo opened!");
|
||||
let (prfd, cwfd) = unistd::pipe().chain_err(|| "failed to create pipe")?;
|
||||
let (crfd, pwfd) = unistd::pipe().chain_err(|| "failed to create pipe")?;
|
||||
let (prfd, cwfd) = unistd::pipe().context("failed to create pipe")?;
|
||||
let (crfd, pwfd) = unistd::pipe().context("failed to create pipe")?;
|
||||
fcntl::fcntl(prfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
|
||||
fcntl::fcntl(pwfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
|
||||
|
||||
@@ -848,7 +845,7 @@ impl BaseContainer for LinuxContainer {
|
||||
|
||||
if pidns.is_some() {
|
||||
sched::setns(pidns.unwrap(), CloneFlags::CLONE_NEWPID)
|
||||
.chain_err(|| "failed to join pidns")?;
|
||||
.context("failed to join pidns")?;
|
||||
unistd::close(pidns.unwrap())?;
|
||||
} else {
|
||||
sched::unshare(CloneFlags::CLONE_NEWPID)?;
|
||||
@@ -913,7 +910,7 @@ impl BaseContainer for LinuxContainer {
|
||||
|
||||
// create the pipes for notify process exited
|
||||
let (exit_pipe_r, exit_pipe_w) = unistd::pipe2(OFlag::O_CLOEXEC)
|
||||
.chain_err(|| "failed to create pipe")
|
||||
.context("failed to create pipe")
|
||||
.map_err(|e| {
|
||||
signal::kill(Pid::from_raw(child.id() as i32), Some(Signal::SIGKILL));
|
||||
e
|
||||
@@ -963,6 +960,10 @@ impl BaseContainer for LinuxContainer {
|
||||
}
|
||||
|
||||
self.status.transition(Status::STOPPED);
|
||||
nix::mount::umount2(
|
||||
spec.root.as_ref().unwrap().path.as_str(),
|
||||
MntFlags::MNT_DETACH,
|
||||
)?;
|
||||
fs::remove_dir_all(&self.root)?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1017,7 +1018,7 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
fn do_exec(args: &[String]) -> Result<()> {
|
||||
fn do_exec(args: &[String]) -> ! {
|
||||
let path = &args[0];
|
||||
let p = CString::new(path.to_string()).unwrap();
|
||||
let sa: Vec<CString> = args
|
||||
@@ -1036,17 +1037,13 @@ fn do_exec(args: &[String]) -> Result<()> {
|
||||
_ => std::process::exit(-2),
|
||||
}
|
||||
}
|
||||
// should never reach here
|
||||
Ok(())
|
||||
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Result<()> {
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("Spec didn't container linux field".to_string()).into(),
|
||||
)
|
||||
}
|
||||
None => return Err(anyhow!("Spec didn't container linux field")),
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
@@ -1093,7 +1090,7 @@ fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<Option<RawFd>> {
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::ErrorCode("cannot find the pid ns".to_string()).into())
|
||||
Err(anyhow!("cannot find the pid ns"))
|
||||
}
|
||||
|
||||
fn is_userns_enabled(linux: &Linux) -> bool {
|
||||
@@ -1123,7 +1120,7 @@ fn join_namespaces(
|
||||
p: &Process,
|
||||
cm: &FsManager,
|
||||
st: &OCIState,
|
||||
child: &mut Child,
|
||||
_child: &mut Child,
|
||||
pwfd: RawFd,
|
||||
prfd: RawFd,
|
||||
) -> Result<()> {
|
||||
@@ -1257,7 +1254,7 @@ fn write_mappings(logger: &Logger, path: &str, maps: &[LinuxIDMapping]) -> Resul
|
||||
fn setid(uid: Uid, gid: Gid) -> Result<()> {
|
||||
// set uid/gid
|
||||
if let Err(e) = prctl::set_keep_capabilities(true) {
|
||||
bail!(format!("set keep capabilities returned {}", e));
|
||||
bail!(anyhow!(e).context("set keep capabilities returned"));
|
||||
};
|
||||
{
|
||||
unistd::setresgid(gid, gid, gid)?;
|
||||
@@ -1271,7 +1268,7 @@ fn setid(uid: Uid, gid: Gid) -> Result<()> {
|
||||
}
|
||||
|
||||
if let Err(e) = prctl::set_keep_capabilities(false) {
|
||||
bail!(format!("set keep capabilities returned {}", e));
|
||||
bail!(anyhow!(e).context("set keep capabilities returned"));
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
@@ -1292,10 +1289,10 @@ impl LinuxContainer {
|
||||
|
||||
if let Err(e) = fs::create_dir_all(root.as_str()) {
|
||||
if e.kind() == std::io::ErrorKind::AlreadyExists {
|
||||
return Err(e).chain_err(|| format!("container {} already exists", id.as_str()));
|
||||
return Err(e).context(format!("container {} already exists", id.as_str()));
|
||||
}
|
||||
|
||||
return Err(e).chain_err(|| format!("fail to create container directory {}", root));
|
||||
return Err(e).context(format!("fail to create container directory {}", root));
|
||||
}
|
||||
|
||||
unistd::chown(
|
||||
@@ -1303,7 +1300,7 @@ impl LinuxContainer {
|
||||
Some(unistd::getuid()),
|
||||
Some(unistd::getgid()),
|
||||
)
|
||||
.chain_err(|| format!("cannot change onwer of container {} root", id))?;
|
||||
.context(format!("cannot change onwer of container {} root", id))?;
|
||||
|
||||
if config.spec.is_none() {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
@@ -1324,6 +1321,7 @@ impl LinuxContainer {
|
||||
};
|
||||
|
||||
let cgroup_manager = FsManager::new(cpath.as_str())?;
|
||||
info!(logger, "new cgroup_manager {:?}", &cgroup_manager);
|
||||
|
||||
Ok(LinuxContainer {
|
||||
id: id.clone(),
|
||||
@@ -1345,50 +1343,8 @@ impl LinuxContainer {
|
||||
}
|
||||
|
||||
fn load<T: Into<String>>(_id: T, _base: T) -> Result<Self> {
|
||||
Err(ErrorKind::ErrorCode("not supported".to_string()).into())
|
||||
Err(anyhow!("not supported"))
|
||||
}
|
||||
/*
|
||||
fn new_parent_process(&self, p: &Process) -> Result<Box<ParentProcess>> {
|
||||
let (pfd, cfd) = socket::socketpair(AddressFamily::Unix,
|
||||
SockType::Stream, SockProtocol::Tcp,
|
||||
SockFlag::SOCK_CLOEXEC)?;
|
||||
|
||||
let cmd = Command::new(self.init_path)
|
||||
.args(self.init_args[1..])
|
||||
.env("_LIBCONTAINER_INITPIPE", format!("{}",
|
||||
cfd))
|
||||
.env("_LIBCONTAINER_STATEDIR", self.root)
|
||||
.current_dir(Path::new(self.config.rootfs))
|
||||
.stdin(p.stdin)
|
||||
.stdout(p.stdout)
|
||||
.stderr(p.stderr);
|
||||
|
||||
if p.console_socket.is_some() {
|
||||
cmd.env("_LIBCONTAINER_CONSOLE", format!("{}",
|
||||
unsafe { p.console_socket.unwrap().as_raw_fd() }));
|
||||
}
|
||||
|
||||
if !p.init {
|
||||
return self.new_setns_process(p, cmd, pfd, cfd);
|
||||
}
|
||||
|
||||
let fifo_file = format!("{}/{}", self.root, EXEC_FIFO_FILENAME);
|
||||
let fifofd = fcntl::open(fifo_file,
|
||||
OFlag::O_PATH | OFlag::O_CLOEXEC,
|
||||
Mode::from_bits(0).unwrap())?;
|
||||
|
||||
cmd.env("_LIBCONTAINER_FIFOFD", format!("{}", fifofd));
|
||||
|
||||
self.new_init_process(p, cmd, pfd, cfd)
|
||||
}
|
||||
|
||||
fn new_setns_process(&self, p: &Process, cmd: &mut Command, pfd: Rawfd, cfd: Rawfd) -> Result<SetnsProcess> {
|
||||
}
|
||||
|
||||
fn new_init_process(&self, p: &Process, cmd: &mut Command, pfd: Rawfd, cfd: Rawfd) -> Result<InitProcess> {
|
||||
cmd.env("_LINCONTAINER_INITTYPE", INITSTANDARD);
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
// Handle the differing rlimit types for different targets
|
||||
@@ -1486,7 +1442,7 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
let binary = PathBuf::from(h.path.as_str());
|
||||
let path = binary.canonicalize()?;
|
||||
if !path.exists() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let args = h.args.clone();
|
||||
@@ -1495,24 +1451,33 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
// state.push_str("\n");
|
||||
|
||||
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
|
||||
match unistd::fork()? {
|
||||
ForkResult::Parent { child: _ch } => {
|
||||
let buf = read_sync(rfd)?;
|
||||
let buf_array: [u8; 4] = [buf[0], buf[1], buf[2], buf[3]];
|
||||
let status: i32 = i32::from_be_bytes(buf_array);
|
||||
defer!({
|
||||
let _ = unistd::close(rfd);
|
||||
let _ = unistd::close(wfd);
|
||||
});
|
||||
|
||||
info!(logger, "hook child: {}", _ch);
|
||||
match unistd::fork()? {
|
||||
ForkResult::Parent { child } => {
|
||||
let buf = read_sync(rfd)?;
|
||||
let status = if buf.len() == 4 {
|
||||
let buf_array: [u8; 4] = [buf[0], buf[1], buf[2], buf[3]];
|
||||
i32::from_be_bytes(buf_array)
|
||||
} else {
|
||||
-libc::EPIPE
|
||||
};
|
||||
|
||||
info!(logger, "hook child: {} status: {}", child, status);
|
||||
|
||||
// let _ = wait::waitpid(_ch,
|
||||
// Some(WaitPidFlag::WEXITED | WaitPidFlag::__WALL));
|
||||
|
||||
if status != 0 {
|
||||
if status == -libc::ETIMEDOUT {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::ETIMEDOUT)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::ETIMEDOUT)));
|
||||
} else if status == -libc::EPIPE {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EPIPE)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EPIPE)));
|
||||
} else {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::UnknownErrno)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::UnknownErrno)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1630,7 +1595,11 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
|
||||
};
|
||||
|
||||
handle.join().unwrap();
|
||||
let _ = write_sync(wfd, status, "");
|
||||
let _ = write_sync(
|
||||
wfd,
|
||||
SYNC_DATA,
|
||||
std::str::from_utf8(&status.to_be_bytes()).unwrap_or_default(),
|
||||
);
|
||||
// let _ = wait::waitpid(Pid::from_raw(pid),
|
||||
// Some(WaitPidFlag::WEXITED | WaitPidFlag::__WALL));
|
||||
std::process::exit(0);
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// define errors here
|
||||
|
||||
error_chain! {
|
||||
types {
|
||||
Error, ErrorKind, ResultExt, Result;
|
||||
}
|
||||
// foreign error conv to chain error
|
||||
foreign_links {
|
||||
Io(std::io::Error);
|
||||
Nix(nix::Error);
|
||||
Ffi(std::ffi::NulError);
|
||||
Caps(caps::errors::Error);
|
||||
Serde(serde_json::Error);
|
||||
FromUTF8(std::string::FromUtf8Error);
|
||||
Parse(std::num::ParseIntError);
|
||||
Scanfmt(scan_fmt::parse::ScanError);
|
||||
Ip(std::net::AddrParseError);
|
||||
Regex(regex::Error);
|
||||
EnvVar(std::env::VarError);
|
||||
UTF8(std::str::Utf8Error);
|
||||
}
|
||||
// define new errors
|
||||
errors {
|
||||
ErrorCode(t: String) {
|
||||
description("Error Code")
|
||||
display("Error Code: '{}'", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -13,7 +13,9 @@
|
||||
#![allow(non_upper_case_globals)]
|
||||
// #![allow(unused_comparisons)]
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
#[cfg(test)]
|
||||
extern crate serial_test;
|
||||
#[macro_use]
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
@@ -45,7 +47,6 @@ macro_rules! sl {
|
||||
pub mod capabilities;
|
||||
pub mod cgroups;
|
||||
pub mod container;
|
||||
pub mod errors;
|
||||
pub mod mount;
|
||||
pub mod process;
|
||||
pub mod specconv;
|
||||
@@ -582,4 +583,15 @@ mod tests {
|
||||
fn it_works() {
|
||||
assert_eq!(2 + 2, 4);
|
||||
}
|
||||
|
||||
#[allow(unused_macros)]
|
||||
#[macro_export]
|
||||
macro_rules! skip_if_not_root {
|
||||
() => {
|
||||
if !nix::unistd::Uid::effective().is_root() {
|
||||
println!("INFO: skipping {} which needs root", module_path!());
|
||||
return;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, bail, Context, Error, Result};
|
||||
use libc::uid_t;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
@@ -13,6 +14,7 @@ use nix::NixPath;
|
||||
use oci::{LinuxDevice, Mount, Spec};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::mem::MaybeUninit;
|
||||
use std::os::unix;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
@@ -23,7 +25,6 @@ use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
|
||||
use crate::container::DEFAULT_DEVICES;
|
||||
use crate::errors::*;
|
||||
use crate::sync::write_count;
|
||||
use lazy_static;
|
||||
use std::string::ToString;
|
||||
@@ -32,6 +33,7 @@ use crate::log_child;
|
||||
|
||||
// Info reveals information about a particular mounted filesystem. This
|
||||
// struct is populated from the content in the /proc/<pid>/mountinfo file.
|
||||
#[derive(std::fmt::Debug)]
|
||||
pub struct Info {
|
||||
id: i32,
|
||||
parent: i32,
|
||||
@@ -47,13 +49,23 @@ pub struct Info {
|
||||
}
|
||||
|
||||
const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
|
||||
const PROC_PATH: &str = "/proc";
|
||||
|
||||
// since libc didn't defined this const for musl, thus redefined it here.
|
||||
#[cfg(all(target_os = "linux", target_env = "gnu"))]
|
||||
const PROC_SUPER_MAGIC: libc::c_long = 0x00009fa0;
|
||||
#[cfg(all(target_os = "linux", target_env = "musl"))]
|
||||
const PROC_SUPER_MAGIC: libc::c_ulong = 0x00009fa0;
|
||||
|
||||
lazy_static! {
|
||||
static ref PROPAGATION: HashMap<&'static str, MsFlags> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert("shared", MsFlags::MS_SHARED | MsFlags::MS_REC);
|
||||
m.insert("private", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
|
||||
m.insert("slave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
|
||||
m.insert("shared", MsFlags::MS_SHARED);
|
||||
m.insert("rshared", MsFlags::MS_SHARED | MsFlags::MS_REC);
|
||||
m.insert("private", MsFlags::MS_PRIVATE);
|
||||
m.insert("rprivate", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
|
||||
m.insert("slave", MsFlags::MS_SLAVE);
|
||||
m.insert("rslave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
|
||||
m
|
||||
};
|
||||
static ref OPTIONS: HashMap<&'static str, (bool, MsFlags)> = {
|
||||
@@ -98,6 +110,31 @@ lazy_static! {
|
||||
};
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn mount<P1: ?Sized + NixPath, P2: ?Sized + NixPath, P3: ?Sized + NixPath, P4: ?Sized + NixPath>(
|
||||
source: Option<&P1>,
|
||||
target: &P2,
|
||||
fstype: Option<&P3>,
|
||||
flags: MsFlags,
|
||||
data: Option<&P4>,
|
||||
) -> std::result::Result<(), nix::Error> {
|
||||
#[cfg(not(test))]
|
||||
return mount::mount(source, target, fstype, flags, data);
|
||||
#[cfg(test)]
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn umount2<P: ?Sized + NixPath>(
|
||||
target: &P,
|
||||
flags: MntFlags,
|
||||
) -> std::result::Result<(), nix::Error> {
|
||||
#[cfg(not(test))]
|
||||
return mount::umount2(target, flags);
|
||||
#[cfg(test)]
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
pub fn init_rootfs(
|
||||
cfd_log: RawFd,
|
||||
spec: &Spec,
|
||||
@@ -109,19 +146,34 @@ pub fn init_rootfs(
|
||||
lazy_static::initialize(&PROPAGATION);
|
||||
lazy_static::initialize(&LINUXDEVICETYPE);
|
||||
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
let linux = &spec
|
||||
.linux
|
||||
.as_ref()
|
||||
.ok_or::<Error>(anyhow!("Could not get linux configuration from spec"))?;
|
||||
|
||||
let mut flags = MsFlags::MS_REC;
|
||||
match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
|
||||
Some(fl) => flags |= *fl,
|
||||
None => flags |= MsFlags::MS_SLAVE,
|
||||
}
|
||||
|
||||
let rootfs = spec.root.as_ref().unwrap().path.as_str();
|
||||
let root = fs::canonicalize(rootfs)?;
|
||||
let rootfs = root.to_str().unwrap();
|
||||
let root = spec
|
||||
.root
|
||||
.as_ref()
|
||||
.ok_or(anyhow!("Could not get rootfs path from spec"))
|
||||
.and_then(|r| {
|
||||
fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
|
||||
})?;
|
||||
|
||||
mount::mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
|
||||
mount::mount(
|
||||
let rootfs = (*root)
|
||||
.to_str()
|
||||
.ok_or(anyhow!("Could not convert rootfs path to string"))?;
|
||||
|
||||
mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
|
||||
|
||||
rootfs_parent_mount_private(rootfs)?;
|
||||
|
||||
mount(
|
||||
Some(rootfs),
|
||||
rootfs,
|
||||
None::<&str>,
|
||||
@@ -132,8 +184,12 @@ pub fn init_rootfs(
|
||||
for m in &spec.mounts {
|
||||
let (mut flags, data) = parse_mount(&m);
|
||||
if !m.destination.starts_with("/") || m.destination.contains("..") {
|
||||
return Err(ErrorKind::Nix(nix::Error::Sys(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(
|
||||
"the mount destination {} is invalid",
|
||||
m.destination
|
||||
));
|
||||
}
|
||||
|
||||
if m.r#type == "cgroup" {
|
||||
mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
|
||||
} else {
|
||||
@@ -141,7 +197,23 @@ pub fn init_rootfs(
|
||||
flags &= !MsFlags::MS_RDONLY;
|
||||
}
|
||||
|
||||
if m.r#type == "bind" {
|
||||
check_proc_mount(m)?;
|
||||
}
|
||||
|
||||
mount_from(cfd_log, &m, &rootfs, flags, &data, "")?;
|
||||
// bind mount won't change mount options, we need remount to make mount options
|
||||
// effective.
|
||||
// first check that we have non-default options required before attempting a
|
||||
// remount
|
||||
if m.r#type == "bind" {
|
||||
for o in &m.options {
|
||||
if let Some(fl) = PROPAGATION.get(o.as_str()) {
|
||||
let dest = format!("{}{}", &rootfs, &m.destination);
|
||||
mount(None::<&str>, dest.as_str(), None::<&str>, *fl, None::<&str>)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -157,6 +229,91 @@ pub fn init_rootfs(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_proc_mount(m: &Mount) -> Result<()> {
|
||||
// White list, it should be sub directories of invalid destinations
|
||||
// These entries can be bind mounted by files emulated by fuse,
|
||||
// so commands like top, free displays stats in container.
|
||||
let valid_destinations = [
|
||||
"/proc/cpuinfo",
|
||||
"/proc/diskstats",
|
||||
"/proc/meminfo",
|
||||
"/proc/stat",
|
||||
"/proc/swaps",
|
||||
"/proc/uptime",
|
||||
"/proc/loadavg",
|
||||
"/proc/net/dev",
|
||||
];
|
||||
|
||||
for i in valid_destinations.iter() {
|
||||
if m.destination.as_str() == *i {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
if m.destination == PROC_PATH {
|
||||
// only allow a mount on-top of proc if it's source is "proc"
|
||||
unsafe {
|
||||
let mut stats = MaybeUninit::<libc::statfs>::uninit();
|
||||
if let Ok(_) = m
|
||||
.source
|
||||
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
|
||||
{
|
||||
if stats.assume_init().f_type == PROC_SUPER_MAGIC {
|
||||
return Ok(());
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
return Err(anyhow!(format!(
|
||||
"{} cannot be mounted to {} because it is not of type proc",
|
||||
m.source, m.destination
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
if m.destination.starts_with(PROC_PATH) {
|
||||
return Err(anyhow!(format!(
|
||||
"{} cannot be mounted because it is inside /proc",
|
||||
m.destination
|
||||
)));
|
||||
}
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
|
||||
let olddir = unistd::getcwd()?;
|
||||
unistd::chdir(rootfs)?;
|
||||
|
||||
// https://github.com/opencontainers/runc/blob/09ddc63afdde16d5fb859a1d3ab010bd45f08497/libcontainer/rootfs_linux.go#L287
|
||||
let bm = Mount {
|
||||
source: "cgroup".to_string(),
|
||||
r#type: "cgroup2".to_string(),
|
||||
destination: m.destination.clone(),
|
||||
options: Vec::new(),
|
||||
};
|
||||
|
||||
let mount_flags: MsFlags = flags;
|
||||
|
||||
mount_from(cfd_log, &bm, rootfs, mount_flags, "", "")?;
|
||||
|
||||
unistd::chdir(&olddir)?;
|
||||
|
||||
if flags.contains(MsFlags::MS_RDONLY) {
|
||||
let dest = format!("{}{}", rootfs, m.destination.as_str());
|
||||
mount(
|
||||
Some(dest.as_str()),
|
||||
dest.as_str(),
|
||||
None::<&str>,
|
||||
flags | MsFlags::MS_BIND | MsFlags::MS_REMOUNT,
|
||||
None::<&str>,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn mount_cgroups(
|
||||
cfd_log: RawFd,
|
||||
m: &Mount,
|
||||
@@ -166,6 +323,9 @@ fn mount_cgroups(
|
||||
cpath: &HashMap<String, String>,
|
||||
mounts: &HashMap<String, String>,
|
||||
) -> Result<()> {
|
||||
if cgroups::hierarchies::is_cgroup2_unified_mode() {
|
||||
return mount_cgroups_v2(cfd_log, &m, rootfs, flags);
|
||||
}
|
||||
// mount tmpfs
|
||||
let ctm = Mount {
|
||||
source: "tmpfs".to_string(),
|
||||
@@ -175,7 +335,6 @@ fn mount_cgroups(
|
||||
};
|
||||
|
||||
let cflags = MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
|
||||
// info!(logger, "tmpfs");
|
||||
mount_from(cfd_log, &ctm, rootfs, cflags, "", "")?;
|
||||
let olddir = unistd::getcwd()?;
|
||||
|
||||
@@ -250,7 +409,7 @@ fn mount_cgroups(
|
||||
|
||||
if flags.contains(MsFlags::MS_RDONLY) {
|
||||
let dest = format!("{}{}", rootfs, m.destination.as_str());
|
||||
mount::mount(
|
||||
mount(
|
||||
Some(dest.as_str()),
|
||||
dest.as_str(),
|
||||
None::<&str>,
|
||||
@@ -262,18 +421,81 @@ fn mount_cgroups(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn pivot_rootfs<P: ?Sized + NixPath>(path: &P) -> Result<()> {
|
||||
fn pivot_root<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
|
||||
new_root: &P1,
|
||||
put_old: &P2,
|
||||
) -> anyhow::Result<(), nix::Error> {
|
||||
#[cfg(not(test))]
|
||||
return unistd::pivot_root(new_root, put_old);
|
||||
#[cfg(test)]
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
pub fn pivot_rootfs<P: ?Sized + NixPath + std::fmt::Debug>(path: &P) -> Result<()> {
|
||||
let oldroot = fcntl::open("/", OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
|
||||
defer!(unistd::close(oldroot).unwrap());
|
||||
let newroot = fcntl::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
|
||||
defer!(unistd::close(newroot).unwrap());
|
||||
unistd::pivot_root(path, path)?;
|
||||
mount::umount2("/", MntFlags::MNT_DETACH)?;
|
||||
|
||||
// Change to the new root so that the pivot_root actually acts on it.
|
||||
unistd::fchdir(newroot)?;
|
||||
pivot_root(".", ".").context(format!("failed to pivot_root on {:?}", path))?;
|
||||
|
||||
// Currently our "." is oldroot (according to the current kernel code).
|
||||
// However, purely for safety, we will fchdir(oldroot) since there isn't
|
||||
// really any guarantee from the kernel what /proc/self/cwd will be after a
|
||||
// pivot_root(2).
|
||||
unistd::fchdir(oldroot)?;
|
||||
|
||||
// Make oldroot rslave to make sure our unmounts don't propagate to the
|
||||
// host. We don't use rprivate because this is known to cause issues due
|
||||
// to races where we still have a reference to a mount while a process in
|
||||
// the host namespace are trying to operate on something they think has no
|
||||
// mounts (devicemapper in particular).
|
||||
mount(
|
||||
Some("none"),
|
||||
".",
|
||||
Some(""),
|
||||
MsFlags::MS_SLAVE | MsFlags::MS_REC,
|
||||
Some(""),
|
||||
)?;
|
||||
|
||||
// Preform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd.
|
||||
umount2(".", MntFlags::MNT_DETACH).context("failed to do umount2")?;
|
||||
|
||||
// Switch back to our shiny new root.
|
||||
unistd::chdir("/")?;
|
||||
stat::umask(Mode::from_bits_truncate(0o022));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn rootfs_parent_mount_private(path: &str) -> Result<()> {
|
||||
let mount_infos = parse_mount_table()?;
|
||||
|
||||
let mut max_len = 0;
|
||||
let mut mount_point = String::from("");
|
||||
let mut options = String::from("");
|
||||
for i in mount_infos {
|
||||
if path.starts_with(&i.mount_point) && i.mount_point.len() > max_len {
|
||||
max_len = i.mount_point.len();
|
||||
mount_point = i.mount_point;
|
||||
options = i.optional;
|
||||
}
|
||||
}
|
||||
|
||||
if options.contains("shared:") {
|
||||
mount(
|
||||
None::<&str>,
|
||||
mount_point.as_str(),
|
||||
None::<&str>,
|
||||
MsFlags::MS_PRIVATE,
|
||||
None::<&str>,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
|
||||
// bind mounts
|
||||
fn parse_mount_table() -> Result<Vec<Info>> {
|
||||
@@ -323,33 +545,38 @@ fn parse_mount_table() -> Result<Vec<Info>> {
|
||||
|
||||
infos.push(info);
|
||||
} else {
|
||||
return Err(ErrorKind::ErrorCode("failed to parse mount info file".to_string()).into());
|
||||
return Err(anyhow!("failed to parse mount info file".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(infos)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn chroot<P: ?Sized + NixPath>(path: &P) -> Result<(), nix::Error> {
|
||||
#[cfg(not(test))]
|
||||
return unistd::chroot(path);
|
||||
#[cfg(test)]
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
pub fn ms_move_root(rootfs: &str) -> Result<bool> {
|
||||
unistd::chdir(rootfs)?;
|
||||
let mount_infos = parse_mount_table()?;
|
||||
|
||||
let root_path = Path::new(rootfs);
|
||||
let abs_root_buf = root_path.absolutize()?;
|
||||
let abs_root = abs_root_buf.to_str().ok_or::<Error>(
|
||||
ErrorKind::ErrorCode(format!("failed to parse {} to absolute path", rootfs)).into(),
|
||||
)?;
|
||||
let abs_root = abs_root_buf
|
||||
.to_str()
|
||||
.ok_or::<Error>(anyhow!("failed to parse {} to absolute path", rootfs))?;
|
||||
|
||||
for info in mount_infos.iter() {
|
||||
let mount_point = Path::new(&info.mount_point);
|
||||
let abs_mount_buf = mount_point.absolutize()?;
|
||||
let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(
|
||||
ErrorKind::ErrorCode(format!(
|
||||
"failed to parse {} to absolute path",
|
||||
info.mount_point
|
||||
))
|
||||
.into(),
|
||||
)?;
|
||||
let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(anyhow!(
|
||||
"failed to parse {} to absolute path",
|
||||
info.mount_point
|
||||
))?;
|
||||
let abs_mount_point_string = String::from(abs_mount_point);
|
||||
|
||||
// Umount every syfs and proc file systems, except those under the container rootfs
|
||||
@@ -360,23 +587,23 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
|
||||
}
|
||||
|
||||
// Be sure umount events are not propagated to the host.
|
||||
mount::mount(
|
||||
mount(
|
||||
None::<&str>,
|
||||
abs_mount_point,
|
||||
None::<&str>,
|
||||
MsFlags::MS_SLAVE | MsFlags::MS_REC,
|
||||
None::<&str>,
|
||||
)?;
|
||||
match mount::umount2(abs_mount_point, MntFlags::MNT_DETACH) {
|
||||
match umount2(abs_mount_point, MntFlags::MNT_DETACH) {
|
||||
Ok(_) => (),
|
||||
Err(e) => {
|
||||
if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
|
||||
return Err(ErrorKind::ErrorCode(e.to_string()).into());
|
||||
return Err(anyhow!(e));
|
||||
}
|
||||
|
||||
// If we have not privileges for umounting (e.g. rootless), then
|
||||
// cover the path.
|
||||
mount::mount(
|
||||
mount(
|
||||
Some("tmpfs"),
|
||||
abs_mount_point,
|
||||
Some("tmpfs"),
|
||||
@@ -387,14 +614,14 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
|
||||
}
|
||||
}
|
||||
|
||||
mount::mount(
|
||||
mount(
|
||||
Some(abs_root),
|
||||
"/",
|
||||
None::<&str>,
|
||||
MsFlags::MS_MOVE,
|
||||
None::<&str>,
|
||||
)?;
|
||||
unistd::chroot(".")?;
|
||||
chroot(".")?;
|
||||
unistd::chdir("/")?;
|
||||
|
||||
Ok(true)
|
||||
@@ -458,37 +685,31 @@ fn mount_from(
|
||||
if src.is_file() {
|
||||
let _ = OpenOptions::new().create(true).write(true).open(&dest);
|
||||
}
|
||||
src
|
||||
src.to_str().unwrap().to_string()
|
||||
} else {
|
||||
let _ = fs::create_dir_all(&dest);
|
||||
PathBuf::from(&m.source)
|
||||
};
|
||||
|
||||
// ignore this check since some mount's src didn't been a directory
|
||||
// such as tmpfs.
|
||||
/*
|
||||
match stat::stat(src.to_str().unwrap()) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
info!("{}: {}", src.to_str().unwrap(), e.as_errno().unwrap().desc());
|
||||
}
|
||||
if m.r#type.as_str() == "cgroup2" {
|
||||
"cgroup2".to_string()
|
||||
} else {
|
||||
let tmp = PathBuf::from(&m.source);
|
||||
tmp.to_str().unwrap().to_string()
|
||||
}
|
||||
*/
|
||||
};
|
||||
|
||||
match stat::stat(dest.as_str()) {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"{}: {}",
|
||||
"dest stat error. {}: {}",
|
||||
dest.as_str(),
|
||||
e.as_errno().unwrap().desc()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
match mount::mount(
|
||||
Some(src.to_str().unwrap()),
|
||||
match mount(
|
||||
Some(src.as_str()),
|
||||
dest.as_str(),
|
||||
Some(m.r#type.as_str()),
|
||||
flags,
|
||||
@@ -511,7 +732,7 @@ fn mount_from(
|
||||
| MsFlags::MS_SLAVE),
|
||||
)
|
||||
{
|
||||
match mount::mount(
|
||||
match mount(
|
||||
Some(dest.as_str()),
|
||||
dest.as_str(),
|
||||
None::<&str>,
|
||||
@@ -558,7 +779,7 @@ fn create_devices(devices: &[LinuxDevice], bind: bool) -> Result<()> {
|
||||
for dev in devices {
|
||||
if !dev.path.starts_with("/dev") || dev.path.contains("..") {
|
||||
let msg = format!("{} is not a valid device path", dev.path);
|
||||
bail!(ErrorKind::ErrorCode(msg));
|
||||
bail!(anyhow!(msg));
|
||||
}
|
||||
op(dev)?;
|
||||
}
|
||||
@@ -572,10 +793,6 @@ fn ensure_ptmx() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn makedev(major: u64, minor: u64) -> u64 {
|
||||
(minor & 0xff) | ((major & 0xfff) << 8) | ((minor & !0xff) << 12) | ((major & !0xfff) << 32)
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref LINUXDEVICETYPE: HashMap<&'static str, SFlag> = {
|
||||
let mut m = HashMap::new();
|
||||
@@ -589,14 +806,14 @@ lazy_static! {
|
||||
fn mknod_dev(dev: &LinuxDevice) -> Result<()> {
|
||||
let f = match LINUXDEVICETYPE.get(dev.r#type.as_str()) {
|
||||
Some(v) => v,
|
||||
None => return Err(ErrorKind::ErrorCode("invalid spec".to_string()).into()),
|
||||
None => return Err(anyhow!("invalid spec".to_string())),
|
||||
};
|
||||
|
||||
stat::mknod(
|
||||
&dev.path[1..],
|
||||
*f,
|
||||
Mode::from_bits_truncate(dev.file_mode.unwrap_or(0)),
|
||||
makedev(dev.major as u64, dev.minor as u64),
|
||||
nix::sys::stat::makedev(dev.major as u64, dev.minor as u64),
|
||||
)?;
|
||||
|
||||
unistd::chown(
|
||||
@@ -617,7 +834,7 @@ fn bind_dev(dev: &LinuxDevice) -> Result<()> {
|
||||
|
||||
unistd::close(fd)?;
|
||||
|
||||
mount::mount(
|
||||
mount(
|
||||
Some(&*dev.path),
|
||||
&dev.path[1..],
|
||||
None::<&str>,
|
||||
@@ -647,7 +864,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
|
||||
if m.destination == "/dev" {
|
||||
let (flags, _) = parse_mount(m);
|
||||
if flags.contains(MsFlags::MS_RDONLY) {
|
||||
mount::mount(
|
||||
mount(
|
||||
Some("/dev"),
|
||||
"/dev",
|
||||
None::<&str>,
|
||||
@@ -661,7 +878,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
|
||||
if spec.root.as_ref().unwrap().readonly {
|
||||
let flags = MsFlags::MS_BIND | MsFlags::MS_RDONLY | MsFlags::MS_NODEV | MsFlags::MS_REMOUNT;
|
||||
|
||||
mount::mount(Some("/"), "/", None::<&str>, flags, None::<&str>)?;
|
||||
mount(Some("/"), "/", None::<&str>, flags, None::<&str>)?;
|
||||
}
|
||||
stat::umask(Mode::from_bits_truncate(0o022));
|
||||
unistd::chdir(&olddir)?;
|
||||
@@ -676,7 +893,7 @@ fn mask_path(path: &str) -> Result<()> {
|
||||
|
||||
//info!("{}", path);
|
||||
|
||||
match mount::mount(
|
||||
match mount(
|
||||
Some("/dev/null"),
|
||||
path,
|
||||
None::<&str>,
|
||||
@@ -708,7 +925,7 @@ fn readonly_path(path: &str) -> Result<()> {
|
||||
|
||||
//info!("{}", path);
|
||||
|
||||
match mount::mount(
|
||||
match mount(
|
||||
Some(&path[1..]),
|
||||
path,
|
||||
None::<&str>,
|
||||
@@ -732,7 +949,7 @@ fn readonly_path(path: &str) -> Result<()> {
|
||||
Ok(_) => {}
|
||||
}
|
||||
|
||||
mount::mount(
|
||||
mount(
|
||||
Some(&path[1..]),
|
||||
&path[1..],
|
||||
None::<&str>,
|
||||
@@ -742,3 +959,272 @@ fn readonly_path(path: &str) -> Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::skip_if_not_root;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
#[serial(chdir)]
|
||||
fn test_init_rootfs() {
|
||||
let stdout_fd = std::io::stdout().as_raw_fd();
|
||||
let mut spec = oci::Spec::default();
|
||||
let cpath = HashMap::new();
|
||||
let mounts = HashMap::new();
|
||||
|
||||
// there is no spec.linux, should fail
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
"Should fail: there is no spec.linux. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
|
||||
// there is no spec.Root, should fail
|
||||
spec.linux = Some(oci::Linux::default());
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
"should fail: there is no spec.Root. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
|
||||
let rootfs = tempdir().unwrap();
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
assert!(ret.is_ok(), "Got: {:?}", ret);
|
||||
|
||||
spec.root = Some(oci::Root {
|
||||
path: rootfs.path().to_str().unwrap().to_string(),
|
||||
readonly: false,
|
||||
});
|
||||
|
||||
// there is no spec.mounts, but should pass
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
let ret = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// Adding bad mount point to spec.mounts
|
||||
spec.mounts.push(oci::Mount {
|
||||
destination: "error".into(),
|
||||
r#type: "bind".into(),
|
||||
source: "error".into(),
|
||||
options: vec!["shared".into(), "rw".into(), "dev".into()],
|
||||
});
|
||||
|
||||
// destination doesn't start with /, should fail
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
"Should fail: destination doesn't start with '/'. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
spec.mounts.pop();
|
||||
let ret = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// mounting a cgroup
|
||||
spec.mounts.push(oci::Mount {
|
||||
destination: "/cgroup".into(),
|
||||
r#type: "cgroup".into(),
|
||||
source: "/cgroup".into(),
|
||||
options: vec!["shared".into()],
|
||||
});
|
||||
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
spec.mounts.pop();
|
||||
let ret = fs::remove_dir_all(rootfs.path().join("dev"));
|
||||
let ret = fs::create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// mounting /dev
|
||||
spec.mounts.push(oci::Mount {
|
||||
destination: "/dev".into(),
|
||||
r#type: "bind".into(),
|
||||
source: "/dev".into(),
|
||||
options: vec!["shared".into()],
|
||||
});
|
||||
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial(chdir)]
|
||||
fn test_mount_cgroups() {
|
||||
let stdout_fd = std::io::stdout().as_raw_fd();
|
||||
let mount = oci::Mount {
|
||||
destination: "/cgroups".to_string(),
|
||||
r#type: "cgroup".to_string(),
|
||||
source: "/cgroups".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
let tempdir = tempdir().unwrap();
|
||||
let rootfs = tempdir.path().to_str().unwrap().to_string();
|
||||
let flags = MsFlags::MS_RDONLY;
|
||||
let mut cpath = HashMap::new();
|
||||
let mut cgroup_mounts = HashMap::new();
|
||||
|
||||
cpath.insert("cpu".to_string(), "cpu".to_string());
|
||||
cpath.insert("memory".to_string(), "memory".to_string());
|
||||
|
||||
cgroup_mounts.insert("default".to_string(), "default".to_string());
|
||||
cgroup_mounts.insert("cpu".to_string(), "cpu".to_string());
|
||||
cgroup_mounts.insert("memory".to_string(), "memory".to_string());
|
||||
|
||||
let ret = fs::create_dir_all(tempdir.path().join("cgroups"));
|
||||
assert!(ret.is_ok(), "Should pass. Got {:?}", ret);
|
||||
let ret = fs::create_dir_all(tempdir.path().join("cpu"));
|
||||
assert!(ret.is_ok(), "Should pass. Got {:?}", ret);
|
||||
let ret = fs::create_dir_all(tempdir.path().join("memory"));
|
||||
assert!(ret.is_ok(), "Should pass. Got {:?}", ret);
|
||||
|
||||
let ret = mount_cgroups(
|
||||
stdout_fd,
|
||||
&mount,
|
||||
&rootfs,
|
||||
flags,
|
||||
"",
|
||||
&cpath,
|
||||
&cgroup_mounts,
|
||||
);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial(chdir)]
|
||||
fn test_pivot_root() {
|
||||
let ret = pivot_rootfs("/tmp");
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial(chdir)]
|
||||
fn test_ms_move_rootfs() {
|
||||
let ret = ms_move_root("/abc");
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
"Should fail. path doesn't exist. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
|
||||
let ret = ms_move_root("/tmp");
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mask_path() {
|
||||
let ret = mask_path("abc");
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
"Should fail: path doesn't start with '/'. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
|
||||
let ret = mask_path("abc/../");
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
"Should fail: path contains '..'. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
|
||||
let ret = mask_path("/tmp");
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial(chdir)]
|
||||
fn test_finish_rootfs() {
|
||||
let stdout_fd = std::io::stdout().as_raw_fd();
|
||||
let mut spec = oci::Spec::default();
|
||||
|
||||
spec.linux = Some(oci::Linux::default());
|
||||
spec.linux.as_mut().unwrap().masked_paths = vec!["/tmp".to_string()];
|
||||
spec.linux.as_mut().unwrap().readonly_paths = vec!["/tmp".to_string()];
|
||||
spec.root = Some(oci::Root {
|
||||
path: "/tmp".to_string(),
|
||||
readonly: true,
|
||||
});
|
||||
spec.mounts = vec![oci::Mount {
|
||||
destination: "/dev".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/dev".to_string(),
|
||||
options: vec!["ro".to_string(), "shared".to_string()],
|
||||
}];
|
||||
|
||||
let ret = finish_rootfs(stdout_fd, &spec);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_readonly_path() {
|
||||
let ret = readonly_path("abc");
|
||||
assert!(ret.is_err(), "Should fail. Got: {:?}", ret);
|
||||
|
||||
let ret = readonly_path("../../");
|
||||
assert!(ret.is_err(), "Should fail. Got: {:?}", ret);
|
||||
|
||||
let ret = readonly_path("/tmp");
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial(chdir)]
|
||||
fn test_mknod_dev() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tempdir = tempdir().unwrap();
|
||||
|
||||
let olddir = unistd::getcwd().unwrap();
|
||||
defer!(unistd::chdir(&olddir););
|
||||
unistd::chdir(tempdir.path());
|
||||
|
||||
let dev = oci::LinuxDevice {
|
||||
path: "/fifo".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 0,
|
||||
minor: 0,
|
||||
file_mode: Some(0660),
|
||||
uid: Some(unistd::getuid().as_raw()),
|
||||
gid: Some(unistd::getgid().as_raw()),
|
||||
};
|
||||
|
||||
let ret = mknod_dev(&dev);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
|
||||
let ret = stat::stat("fifo");
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
}
|
||||
#[test]
|
||||
fn test_check_proc_mount() {
|
||||
let mount = oci::Mount {
|
||||
destination: "/proc".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/test".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
|
||||
assert!(check_proc_mount(&mount).is_err());
|
||||
|
||||
let mount = oci::Mount {
|
||||
destination: "/proc/cpuinfo".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/test".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
|
||||
assert!(check_proc_mount(&mount).is_ok());
|
||||
|
||||
let mount = oci::Mount {
|
||||
destination: "/proc/test".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/test".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
|
||||
assert!(check_proc_mount(&mount).is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
use libc::pid_t;
|
||||
use std::fs::File;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::sync::mpsc::Sender;
|
||||
|
||||
// use crate::configs::{Capabilities, Rlimit};
|
||||
// use crate::cgroups::Manager as CgroupManager;
|
||||
@@ -45,6 +46,7 @@ pub struct Process {
|
||||
pub pid: pid_t,
|
||||
|
||||
pub exit_code: i32,
|
||||
pub exit_watchers: Vec<Sender<i32>>,
|
||||
pub oci: OCIProcess,
|
||||
pub logger: Logger,
|
||||
}
|
||||
@@ -95,6 +97,7 @@ impl Process {
|
||||
init,
|
||||
pid: -1,
|
||||
exit_code: 0,
|
||||
exit_watchers: Vec::new(),
|
||||
oci: ocip.clone(),
|
||||
logger: logger.clone(),
|
||||
};
|
||||
@@ -130,10 +133,8 @@ fn create_extended_pipe(flags: OFlag, pipe_size: i32) -> Result<(RawFd, RawFd)>
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::process::create_extended_pipe;
|
||||
use nix::fcntl::{fcntl, FcntlArg, OFlag};
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
fn get_pipe_max_size() -> i32 {
|
||||
fs::read_to_string("/proc/sys/fs/pipe-max-size")
|
||||
@@ -158,4 +159,29 @@ mod tests {
|
||||
let actual_size = get_pipe_size(w);
|
||||
assert_eq!(max_size, actual_size);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process() {
|
||||
let id = "abc123rgb";
|
||||
let init = true;
|
||||
let process = Process::new(
|
||||
&Logger::root(slog::Discard, o!("source" => "unit-test")),
|
||||
&OCIProcess::default(),
|
||||
id,
|
||||
init,
|
||||
32,
|
||||
);
|
||||
|
||||
let mut process = process.unwrap();
|
||||
assert_eq!(process.exec_id, id);
|
||||
assert_eq!(process.init, init);
|
||||
|
||||
// -1 by default
|
||||
assert_eq!(process.pid, -1);
|
||||
assert!(process.wait().is_err());
|
||||
// signal to every process in the process
|
||||
// group of the calling process.
|
||||
process.pid = 0;
|
||||
assert!(process.signal(Signal::SIGCONT).is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::errors::*;
|
||||
use nix::errno::Errno;
|
||||
use nix::unistd;
|
||||
use nix::Error;
|
||||
use std::mem;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
|
||||
pub const SYNC_SUCCESS: i32 = 1;
|
||||
pub const SYNC_FAILED: i32 = 2;
|
||||
pub const SYNC_DATA: i32 = 3;
|
||||
@@ -40,7 +40,7 @@ pub fn write_count(fd: RawFd, buf: &[u8], count: usize) -> Result<usize> {
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
if e != Error::from_errno(Errno::EINTR) {
|
||||
if e != nix::Error::from_errno(Errno::EINTR) {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
@@ -64,7 +64,7 @@ fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
if e != Error::from_errno(Errno::EINTR) {
|
||||
if e != nix::Error::from_errno(Errno::EINTR) {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
@@ -77,13 +77,12 @@ fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
|
||||
pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
|
||||
let buf = read_count(fd, MSG_SIZE)?;
|
||||
if buf.len() != MSG_SIZE {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"process: {} failed to receive sync message from peer: got msg length: {}, expected: {}",
|
||||
std::process::id(),
|
||||
buf.len(),
|
||||
MSG_SIZE
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
|
||||
let msg: i32 = i32::from_be_bytes(buf_array);
|
||||
@@ -111,19 +110,17 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
|
||||
}
|
||||
|
||||
let error_str = match std::str::from_utf8(&error_buf) {
|
||||
Ok(v) => v,
|
||||
Ok(v) => String::from(v),
|
||||
Err(e) => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"receive error message from child process failed: {:?}",
|
||||
e
|
||||
))
|
||||
.into())
|
||||
return Err(
|
||||
anyhow!(e).context("receive error message from child process failed")
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
return Err(ErrorKind::ErrorCode(String::from(error_str)).into());
|
||||
return Err(anyhow!(error_str));
|
||||
}
|
||||
_ => return Err(ErrorKind::ErrorCode("error in receive sync message".to_string()).into()),
|
||||
_ => return Err(anyhow!("error in receive sync message")),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,7 +129,7 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
|
||||
|
||||
let count = write_count(fd, &buf, MSG_SIZE)?;
|
||||
if count != MSG_SIZE {
|
||||
return Err(ErrorKind::ErrorCode("error in send sync message".to_string()).into());
|
||||
return Err(anyhow!("error in send sync message"));
|
||||
}
|
||||
|
||||
match msg_type {
|
||||
@@ -140,9 +137,7 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
|
||||
Ok(_count) => unistd::close(fd)?,
|
||||
Err(e) => {
|
||||
unistd::close(fd)?;
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("error in send message to process".to_string()).into(),
|
||||
);
|
||||
return Err(anyhow!(e).context("error in send message to process"));
|
||||
}
|
||||
},
|
||||
SYNC_DATA => {
|
||||
@@ -151,10 +146,7 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
|
||||
Ok(_count) => (),
|
||||
Err(e) => {
|
||||
unistd::close(fd)?;
|
||||
return Err(ErrorKind::ErrorCode(
|
||||
"error in send message to process".to_string(),
|
||||
)
|
||||
.into());
|
||||
return Err(anyhow!(e).context("error in send message to process"));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,10 +154,7 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
|
||||
Ok(_count) => (),
|
||||
Err(e) => {
|
||||
unistd::close(fd)?;
|
||||
return Err(ErrorKind::ErrorCode(
|
||||
"error in send message to process".to_string(),
|
||||
)
|
||||
.into());
|
||||
return Err(anyhow!(e).context("error in send message to process"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,10 +4,9 @@
|
||||
//
|
||||
|
||||
use crate::container::Config;
|
||||
use crate::errors::*;
|
||||
use anyhow::{anyhow, Result};
|
||||
use lazy_static;
|
||||
use nix::errno::Errno;
|
||||
use nix::Error;
|
||||
use oci::{LinuxIDMapping, LinuxNamespace, Spec};
|
||||
use protobuf::RepeatedField;
|
||||
use std::collections::HashMap;
|
||||
@@ -30,14 +29,14 @@ fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> {
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into())
|
||||
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)))
|
||||
}
|
||||
|
||||
fn rootfs(root: &str) -> Result<()> {
|
||||
let path = PathBuf::from(root);
|
||||
// not absolute path or not exists
|
||||
if !path.exists() || !path.is_absolute() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
// symbolic link? ..?
|
||||
@@ -65,7 +64,7 @@ fn rootfs(root: &str) -> Result<()> {
|
||||
let canon = path.canonicalize()?;
|
||||
if cleaned != canon {
|
||||
// There is symbolic in path
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -81,11 +80,11 @@ fn hostname(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
if oci.linux.is_none() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if !contain_namespace(&linux.namespaces, "uts") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -98,7 +97,7 @@ fn security(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
if !contain_namespace(&linux.namespaces, "mount") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
// don't care about selinux at present
|
||||
@@ -113,7 +112,7 @@ fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into())
|
||||
Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)))
|
||||
}
|
||||
|
||||
fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
@@ -121,7 +120,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
if contain_namespace(&linux.namespaces, "user") {
|
||||
let user_ns = PathBuf::from("/proc/self/ns/user");
|
||||
if !user_ns.exists() {
|
||||
return Err(ErrorKind::ErrorCode("user namespace not supported!".to_string()).into());
|
||||
return Err(anyhow!("user namespace not supported!"));
|
||||
}
|
||||
// check if idmappings is correct, at least I saw idmaps
|
||||
// with zero size was passed to agent
|
||||
@@ -130,7 +129,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
} else {
|
||||
// no user namespace but idmap
|
||||
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,7 +141,7 @@ fn cgroupnamespace(oci: &Spec) -> Result<()> {
|
||||
if contain_namespace(&linux.namespaces, "cgroup") {
|
||||
let path = PathBuf::from("/proc/self/ns/cgroup");
|
||||
if !path.exists() {
|
||||
return Err(ErrorKind::ErrorCode("cgroup unsupported!".to_string()).into());
|
||||
return Err(anyhow!("cgroup unsupported!"));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@@ -176,7 +175,7 @@ fn check_host_ns(path: &str) -> Result<()> {
|
||||
}
|
||||
let real_cpath = cpath.read_link()?;
|
||||
if real_cpath == real_hpath {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -189,13 +188,13 @@ fn sysctl(oci: &Spec) -> Result<()> {
|
||||
if contain_namespace(&linux.namespaces, "ipc") {
|
||||
continue;
|
||||
} else {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
if key.starts_with("net.") {
|
||||
if !contain_namespace(&linux.namespaces, "network") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let net = get_namespace_path(&linux.namespaces, "network")?;
|
||||
@@ -212,11 +211,11 @@ fn sysctl(oci: &Spec) -> Result<()> {
|
||||
}
|
||||
|
||||
if key == "kernel.hostname" {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -224,11 +223,11 @@ fn sysctl(oci: &Spec) -> Result<()> {
|
||||
fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if !contain_namespace(&linux.namespaces, "user") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
if linux.gid_mappings.len() == 0 || linux.gid_mappings.len() == 0 {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -252,20 +251,20 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
|
||||
let fields: Vec<&str> = opt.split('=').collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let id = fields[1].trim().parse::<u32>()?;
|
||||
|
||||
if opt.starts_with("uid=") {
|
||||
if !has_idmapping(&linux.uid_mappings, id) {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
|
||||
if opt.starts_with("gid=") {
|
||||
if !has_idmapping(&linux.gid_mappings, id) {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -285,11 +284,11 @@ pub fn validate(conf: &Config) -> Result<()> {
|
||||
let oci = conf.spec.as_ref().unwrap();
|
||||
|
||||
if oci.linux.is_none() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
|
||||
if oci.root.is_none() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
let root = oci.root.as_ref().unwrap().path.as_str();
|
||||
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
use rustjail::errors::*;
|
||||
use anyhow::{anyhow, Result};
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::time;
|
||||
|
||||
@@ -13,10 +14,14 @@ const HOTPLUG_TIMOUT_OPTION: &str = "agent.hotplug_timeout";
|
||||
const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
|
||||
const LOG_VPORT_OPTION: &str = "agent.log_vport";
|
||||
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
|
||||
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
|
||||
|
||||
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
|
||||
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
|
||||
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
|
||||
const VSOCK_ADDR: &str = "vsock://-1";
|
||||
const VSOCK_PORT: u16 = 1024;
|
||||
const SERVER_ADDR_ENV_VAR: &str = "KATA_AGENT_SERVER_ADDR";
|
||||
|
||||
// FIXME: unused
|
||||
const TRACE_MODE_FLAG: &str = "agent.trace";
|
||||
@@ -31,6 +36,8 @@ pub struct agentConfig {
|
||||
pub debug_console_vport: i32,
|
||||
pub log_vport: i32,
|
||||
pub container_pipe_size: i32,
|
||||
pub server_addr: String,
|
||||
pub unified_cgroup_hierarchy: bool,
|
||||
}
|
||||
|
||||
impl agentConfig {
|
||||
@@ -43,6 +50,8 @@ impl agentConfig {
|
||||
debug_console_vport: 0,
|
||||
log_vport: 0,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
server_addr: format!("{}:{}", VSOCK_ADDR, VSOCK_PORT),
|
||||
unified_cgroup_hierarchy: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,6 +59,7 @@ impl agentConfig {
|
||||
let cmdline = fs::read_to_string(file)?;
|
||||
let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
|
||||
for param in params.iter() {
|
||||
// parse cmdline flags
|
||||
if param.eq(&DEBUG_CONSOLE_FLAG) {
|
||||
self.debug_console = true;
|
||||
}
|
||||
@@ -58,6 +68,7 @@ impl agentConfig {
|
||||
self.dev_mode = true;
|
||||
}
|
||||
|
||||
// parse cmdline options
|
||||
if param.starts_with(format!("{}=", LOG_LEVEL_OPTION).as_str()) {
|
||||
let level = get_log_level(param)?;
|
||||
self.log_level = level;
|
||||
@@ -89,6 +100,15 @@ impl agentConfig {
|
||||
let container_pipe_size = get_container_pipe_size(param)?;
|
||||
self.container_pipe_size = container_pipe_size
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", UNIFIED_CGROUP_HIERARCHY_OPTION).as_str()) {
|
||||
let b = get_bool_value(param, false);
|
||||
self.unified_cgroup_hierarchy = b;
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
|
||||
self.server_addr = addr;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -98,7 +118,7 @@ impl agentConfig {
|
||||
fn get_vsock_port(p: &str) -> Result<i32> {
|
||||
let fields: Vec<&str> = p.split("=").collect();
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode("invalid port parameter".to_string()).into());
|
||||
return Err(anyhow!("invalid port parameter"));
|
||||
}
|
||||
|
||||
Ok(fields[1].parse::<i32>()?)
|
||||
@@ -124,7 +144,7 @@ fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
|
||||
"trace" => slog::Level::Trace,
|
||||
|
||||
_ => {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid log level")).into());
|
||||
return Err(anyhow!("invalid log level"));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -135,11 +155,11 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid log level parameter")).into());
|
||||
return Err(anyhow!("invalid log level parameter"));
|
||||
}
|
||||
|
||||
if fields[0] != LOG_LEVEL_OPTION {
|
||||
Err(ErrorKind::ErrorCode(String::from("invalid log level key name")).into())
|
||||
Err(anyhow!("invalid log level key name"))
|
||||
} else {
|
||||
Ok(logrus_to_slog_level(fields[1])?)
|
||||
}
|
||||
@@ -149,51 +169,70 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid hotplug timeout parameter")).into());
|
||||
return Err(anyhow!("invalid hotplug timeout parameter"));
|
||||
}
|
||||
|
||||
let key = fields[0];
|
||||
if key != HOTPLUG_TIMOUT_OPTION {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid hotplug timeout key name")).into());
|
||||
return Err(anyhow!("invalid hotplug timeout key name"));
|
||||
}
|
||||
|
||||
let value = fields[1].parse::<u64>();
|
||||
if value.is_err() {
|
||||
return Err(ErrorKind::ErrorCode(String::from("unable to parse hotplug timeout")).into());
|
||||
return Err(anyhow!("unable to parse hotplug timeout"));
|
||||
}
|
||||
|
||||
Ok(time::Duration::from_secs(value.unwrap()))
|
||||
}
|
||||
|
||||
fn get_bool_value(param: &str, default: bool) -> bool {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return default;
|
||||
}
|
||||
|
||||
let v = fields[1];
|
||||
|
||||
// bool
|
||||
let t: std::result::Result<bool, std::str::ParseBoolError> = v.parse();
|
||||
if t.is_ok() {
|
||||
return t.unwrap();
|
||||
}
|
||||
|
||||
// integer
|
||||
let i: std::result::Result<u64, std::num::ParseIntError> = v.parse();
|
||||
if i.is_err() {
|
||||
return default;
|
||||
}
|
||||
|
||||
// only `0` returns false, otherwise returns true
|
||||
match i.unwrap() {
|
||||
0 => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_container_pipe_size(param: &str) -> Result<i32> {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(String::from("invalid container pipe size parameter")).into(),
|
||||
);
|
||||
return Err(anyhow!("invalid container pipe size parameter"));
|
||||
}
|
||||
|
||||
let key = fields[0];
|
||||
if key != CONTAINER_PIPE_SIZE_OPTION {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(String::from("invalid container pipe size key name")).into(),
|
||||
);
|
||||
return Err(anyhow!("invalid container pipe size key name"));
|
||||
}
|
||||
|
||||
let res = fields[1].parse::<i32>();
|
||||
if res.is_err() {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(String::from("unable to parse container pipe size")).into(),
|
||||
);
|
||||
return Err(anyhow!("unable to parse container pipe size"));
|
||||
}
|
||||
|
||||
let value = res.unwrap();
|
||||
if value < 0 {
|
||||
return Err(ErrorKind::ErrorCode(String::from(
|
||||
"container pipe size should not be negative",
|
||||
))
|
||||
.into());
|
||||
return Err(anyhow!("container pipe size should not be negative"));
|
||||
}
|
||||
|
||||
Ok(value)
|
||||
@@ -202,6 +241,7 @@ fn get_container_pipe_size(param: &str) -> Result<i32> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use anyhow::Error;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::time;
|
||||
@@ -222,7 +262,7 @@ mod tests {
|
||||
|
||||
// helper function to make errors less crazy-long
|
||||
fn make_err(desc: &str) -> Error {
|
||||
ErrorKind::ErrorCode(desc.to_string()).into()
|
||||
anyhow!(desc.to_string())
|
||||
}
|
||||
|
||||
// Parameters:
|
||||
@@ -267,6 +307,7 @@ mod tests {
|
||||
log_level: slog::Level,
|
||||
hotplug_timeout: time::Duration,
|
||||
container_pipe_size: i32,
|
||||
unified_cgroup_hierarchy: bool,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
@@ -277,6 +318,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.debug_console agent.devmodex",
|
||||
@@ -285,6 +327,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.logx=debug",
|
||||
@@ -293,6 +336,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.log=debug",
|
||||
@@ -301,6 +345,7 @@ mod tests {
|
||||
log_level: slog::Level::Debug,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "",
|
||||
@@ -309,6 +354,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo",
|
||||
@@ -317,6 +363,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo bar",
|
||||
@@ -325,6 +372,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo bar",
|
||||
@@ -333,6 +381,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent bar",
|
||||
@@ -341,6 +390,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo debug_console agent bar devmode",
|
||||
@@ -349,6 +399,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.debug_console",
|
||||
@@ -357,6 +408,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.debug_console ",
|
||||
@@ -365,6 +417,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.debug_console foo",
|
||||
@@ -373,6 +426,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.debug_console foo",
|
||||
@@ -381,6 +435,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.debug_console bar",
|
||||
@@ -389,6 +444,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.debug_console",
|
||||
@@ -397,6 +453,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.debug_console ",
|
||||
@@ -405,6 +462,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode",
|
||||
@@ -413,6 +471,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.devmode ",
|
||||
@@ -421,6 +480,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode foo",
|
||||
@@ -429,6 +489,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: " agent.devmode foo",
|
||||
@@ -437,6 +498,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.devmode bar",
|
||||
@@ -445,6 +507,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.devmode",
|
||||
@@ -453,6 +516,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "foo agent.devmode ",
|
||||
@@ -461,6 +525,7 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console",
|
||||
@@ -469,54 +534,61 @@ mod tests {
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100",
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100 agent.unified_cgroup_hierarchy=a",
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: time::Duration::from_secs(100),
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0",
|
||||
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0 agent.unified_cgroup_hierarchy=11",
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: true,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152",
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152 agent.unified_cgroup_hierarchy=false",
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: 2097152,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=100",
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=100 agent.unified_cgroup_hierarchy=true",
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: 100,
|
||||
unified_cgroup_hierarchy: true,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=0",
|
||||
contents: "agent.devmode agent.debug_console agent.container_pipe_size=0 agent.unified_cgroup_hierarchy=0",
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: false,
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.devmode agent.debug_console agent.container_pip_siz=100",
|
||||
contents: "agent.devmode agent.debug_console agent.container_pip_siz=100 agent.unified_cgroup_hierarchy=1",
|
||||
debug_console: true,
|
||||
dev_mode: true,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
unified_cgroup_hierarchy: true,
|
||||
},
|
||||
];
|
||||
|
||||
@@ -548,6 +620,7 @@ mod tests {
|
||||
let mut config = agentConfig::new();
|
||||
assert_eq!(config.debug_console, false, "{}", msg);
|
||||
assert_eq!(config.dev_mode, false, "{}", msg);
|
||||
assert_eq!(config.unified_cgroup_hierarchy, false, "{}", msg);
|
||||
assert_eq!(
|
||||
config.hotplug_timeout,
|
||||
time::Duration::from_secs(3),
|
||||
@@ -561,6 +634,11 @@ mod tests {
|
||||
|
||||
assert_eq!(d.debug_console, config.debug_console, "{}", msg);
|
||||
assert_eq!(d.dev_mode, config.dev_mode, "{}", msg);
|
||||
assert_eq!(
|
||||
d.unified_cgroup_hierarchy, config.unified_cgroup_hierarchy,
|
||||
"{}",
|
||||
msg
|
||||
);
|
||||
assert_eq!(d.log_level, config.log_level, "{}", msg);
|
||||
assert_eq!(d.hotplug_timeout, config.hotplug_timeout, "{}", msg);
|
||||
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
|
||||
|
||||
@@ -15,9 +15,9 @@ use crate::linux_abi::*;
|
||||
use crate::mount::{DRIVERBLKTYPE, DRIVERMMIOBLKTYPE, DRIVERNVDIMMTYPE, DRIVERSCSITYPE};
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::{AGENT_CONFIG, GLOBAL_DEVICE_WATCHER};
|
||||
use anyhow::{anyhow, Result};
|
||||
use oci::{LinuxDeviceCgroup, LinuxResources, Spec};
|
||||
use protocols::agent::Device;
|
||||
use rustjail::errors::*;
|
||||
|
||||
// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
@@ -61,11 +61,10 @@ fn get_pci_device_address(pci_id: &str) -> Result<String> {
|
||||
let tokens: Vec<&str> = pci_id.split("/").collect();
|
||||
|
||||
if tokens.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"PCI Identifier for device should be of format [bridgeAddr/deviceAddr], got {}",
|
||||
pci_id
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
|
||||
let bridge_id = tokens[0];
|
||||
@@ -85,11 +84,11 @@ fn get_pci_device_address(pci_id: &str) -> Result<String> {
|
||||
let bus_num = files_slice.len();
|
||||
|
||||
if bus_num != 1 {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"Expected an entry for bus in {}, got {} entries instead",
|
||||
bridge_bus_path, bus_num
|
||||
))
|
||||
.into());
|
||||
bridge_bus_path,
|
||||
bus_num
|
||||
));
|
||||
}
|
||||
|
||||
let bus = files_slice[0].file_name().unwrap().to_str().unwrap();
|
||||
@@ -135,11 +134,11 @@ fn get_device_name(sandbox: &Arc<Mutex<Sandbox>>, dev_addr: &str) -> Result<Stri
|
||||
Ok(name) => name,
|
||||
Err(_) => {
|
||||
GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"Timeout reached after {:?} waiting for device {}",
|
||||
hotplug_timeout, dev_addr
|
||||
))
|
||||
.into());
|
||||
hotplug_timeout,
|
||||
dev_addr
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -164,11 +163,10 @@ pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Resul
|
||||
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
|
||||
let tokens: Vec<&str> = scsi_addr.split(":").collect();
|
||||
if tokens.len() != 2 {
|
||||
return Err(ErrorKind::Msg(format!(
|
||||
return Err(anyhow!(
|
||||
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
|
||||
scsi_addr
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
|
||||
// Scan scsi host passing in the channel, SCSI id and LUN.
|
||||
@@ -203,24 +201,19 @@ fn update_spec_device_list(device: &Device, spec: &mut Spec) -> Result<()> {
|
||||
// If no container_path is provided, we won't be able to match and
|
||||
// update the device in the OCI spec device list. This is an error.
|
||||
if device.container_path == "" {
|
||||
return Err(ErrorKind::Msg(format!(
|
||||
return Err(anyhow!(
|
||||
"container_path cannot empty for device {:?}",
|
||||
device
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("Spec didn't container linux field".to_string()).into(),
|
||||
)
|
||||
}
|
||||
None => return Err(anyhow!("Spec didn't container linux field")),
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
if !Path::new(&device.vm_path).exists() {
|
||||
return Err(ErrorKind::Msg(format!("vm_path:{} doesn't exist", device.vm_path)).into());
|
||||
return Err(anyhow!("vm_path:{} doesn't exist", device.vm_path));
|
||||
}
|
||||
|
||||
let meta = fs::metadata(&device.vm_path)?;
|
||||
@@ -283,7 +276,7 @@ fn virtiommio_blk_device_handler(
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
if device.vm_path == "" {
|
||||
return Err(ErrorKind::Msg("Invalid path for virtio mmio blk device".to_string()).into());
|
||||
return Err(anyhow!("Invalid path for virtio mmio blk device"));
|
||||
}
|
||||
|
||||
update_spec_device_list(device, spec)
|
||||
@@ -325,7 +318,7 @@ fn virtio_nvdimm_device_handler(
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
if device.vm_path == "" {
|
||||
return Err(ErrorKind::Msg("Invalid path for nvdimm device".to_string()).into());
|
||||
return Err(anyhow!("Invalid path for nvdimm device"));
|
||||
}
|
||||
|
||||
update_spec_device_list(device, spec)
|
||||
@@ -349,23 +342,19 @@ fn add_device(device: &Device, spec: &mut Spec, sandbox: &Arc<Mutex<Sandbox>>) -
|
||||
device.id, device.field_type, device.vm_path, device.container_path, device.options);
|
||||
|
||||
if device.field_type == "" {
|
||||
return Err(ErrorKind::Msg(format!("invalid type for device {:?}", device)).into());
|
||||
return Err(anyhow!("invalid type for device {:?}", device));
|
||||
}
|
||||
|
||||
if device.id == "" && device.vm_path == "" {
|
||||
return Err(
|
||||
ErrorKind::Msg(format!("invalid ID and VM path for device {:?}", device)).into(),
|
||||
);
|
||||
return Err(anyhow!("invalid ID and VM path for device {:?}", device));
|
||||
}
|
||||
|
||||
if device.container_path == "" {
|
||||
return Err(
|
||||
ErrorKind::Msg(format!("invalid container path for device {:?}", device)).into(),
|
||||
);
|
||||
return Err(anyhow!("invalid container path for device {:?}", device));
|
||||
}
|
||||
|
||||
match DEVICEHANDLERLIST.get(device.field_type.as_str()) {
|
||||
None => Err(ErrorKind::Msg(format!("Unknown device type {}", device.field_type)).into()),
|
||||
None => Err(anyhow!("Unknown device type {}", device.field_type)),
|
||||
Some(dev_handler) => dev_handler(device, spec, sandbox),
|
||||
}
|
||||
}
|
||||
@@ -380,11 +369,7 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
|
||||
let minor = stat::minor(rdev) as i64;
|
||||
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("Spec didn't container linux field".to_string()).into(),
|
||||
)
|
||||
}
|
||||
None => return Err(anyhow!("Spec didn't container linux field")),
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
|
||||
@@ -28,25 +28,30 @@ extern crate slog;
|
||||
#[macro_use]
|
||||
extern crate netlink;
|
||||
|
||||
use crate::netlink::{RtnlHandle, NETLINK_ROUTE};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::fcntl::{FcntlArg, FdFlag};
|
||||
use nix::libc::{STDERR_FILENO, STDIN_FILENO, STDOUT_FILENO};
|
||||
use nix::pty;
|
||||
use nix::sys::select::{select, FdSet};
|
||||
use nix::sys::socket::{self, AddressFamily, SockAddr, SockFlag, SockType};
|
||||
use nix::sys::wait::{self, WaitStatus};
|
||||
use nix::unistd;
|
||||
use nix::unistd::dup;
|
||||
use nix::unistd::{self, close, dup, dup2, fork, setsid, ForkResult};
|
||||
use prctl::set_child_subreaper;
|
||||
use rustjail::errors::*;
|
||||
use signal_hook::{iterator::Signals, SIGCHLD};
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::ffi::OsStr;
|
||||
use std::ffi::{CStr, CString, OsStr};
|
||||
use std::fs::{self, File};
|
||||
use std::io::{Read, Write};
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::fs as unixfs;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::{self, Sender};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::{io, thread};
|
||||
use std::{io, thread, thread::JoinHandle};
|
||||
use unistd::Pid;
|
||||
|
||||
mod config;
|
||||
@@ -71,11 +76,11 @@ use uevent::watch_uevents;
|
||||
mod rpc;
|
||||
|
||||
const NAME: &str = "kata-agent";
|
||||
const VSOCK_ADDR: &str = "vsock://-1";
|
||||
const VSOCK_PORT: u16 = 1024;
|
||||
const KERNEL_CMDLINE_FILE: &str = "/proc/cmdline";
|
||||
const CONSOLE_PATH: &str = "/dev/console";
|
||||
|
||||
const DEFAULT_BUF_SIZE: usize = 8 * 1024;
|
||||
|
||||
lazy_static! {
|
||||
static ref GLOBAL_DEVICE_WATCHER: Arc<Mutex<HashMap<String, Sender<String>>>> =
|
||||
Arc::new(Mutex::new(HashMap::new()));
|
||||
@@ -83,27 +88,34 @@ lazy_static! {
|
||||
Arc::new(RwLock::new(config::agentConfig::new()));
|
||||
}
|
||||
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
fn announce(logger: &Logger) {
|
||||
let commit = match env::var("VERSION_COMMIT") {
|
||||
Ok(s) => s,
|
||||
Err(_) => String::from(""),
|
||||
};
|
||||
|
||||
fn announce(logger: &Logger, config: &agentConfig) {
|
||||
info!(logger, "announce";
|
||||
"agent-commit" => commit.as_str(),
|
||||
"agent-commit" => version::VERSION_COMMIT,
|
||||
|
||||
// Avoid any possibility of confusion with the old agent
|
||||
"agent-type" => "rust",
|
||||
|
||||
"agent-version" => version::AGENT_VERSION,
|
||||
"api-version" => version::API_VERSION,
|
||||
"config" => format!("{:?}", config),
|
||||
);
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
|
||||
if args.len() == 2 && args[1] == "--version" {
|
||||
println!(
|
||||
"{} version {} (api version: {}, commit version: {}, type: rust)",
|
||||
NAME,
|
||||
version::AGENT_VERSION,
|
||||
version::API_VERSION,
|
||||
version::VERSION_COMMIT,
|
||||
);
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
if args.len() == 2 && args[1] == "init" {
|
||||
rustjail::container::init_child();
|
||||
exit(0);
|
||||
@@ -121,7 +133,8 @@ fn main() -> Result<()> {
|
||||
|
||||
let agentConfig = AGENT_CONFIG.clone();
|
||||
|
||||
if unistd::getpid() == Pid::from_raw(1) {
|
||||
let init_mode = unistd::getpid() == Pid::from_raw(1);
|
||||
if init_mode {
|
||||
// dup a new file descriptor for this temporary logger writer,
|
||||
// since this logger would be dropped and it's writer would
|
||||
// be closed out of this code block.
|
||||
@@ -132,18 +145,26 @@ fn main() -> Result<()> {
|
||||
// since before do the base mount, it wouldn't access "/proc/cmdline"
|
||||
// to get the customzied debug level.
|
||||
let logger = logging::create_logger(NAME, "agent", slog::Level::Debug, writer);
|
||||
init_agent_as_init(&logger)?;
|
||||
}
|
||||
|
||||
// once parsed cmdline and set the config, release the write lock
|
||||
// as soon as possible in case other thread would get read lock on
|
||||
// it.
|
||||
{
|
||||
// Must mount proc fs before parsing kernel command line
|
||||
general_mount(&logger).map_err(|e| {
|
||||
error!(logger, "fail general mount: {}", e);
|
||||
e
|
||||
})?;
|
||||
|
||||
let mut config = agentConfig.write().unwrap();
|
||||
config.parse_cmdline(KERNEL_CMDLINE_FILE)?;
|
||||
|
||||
init_agent_as_init(&logger, config.unified_cgroup_hierarchy)?;
|
||||
} else {
|
||||
// once parsed cmdline and set the config, release the write lock
|
||||
// as soon as possible in case other thread would get read lock on
|
||||
// it.
|
||||
let mut config = agentConfig.write().unwrap();
|
||||
config.parse_cmdline(KERNEL_CMDLINE_FILE)?;
|
||||
}
|
||||
|
||||
let config = agentConfig.read().unwrap();
|
||||
|
||||
let log_vport = config.log_vport as u32;
|
||||
let log_handle = thread::spawn(move || -> Result<()> {
|
||||
let mut reader = unsafe { File::from_raw_fd(rfd) };
|
||||
@@ -173,43 +194,51 @@ fn main() -> Result<()> {
|
||||
// Recreate a logger with the log level get from "/proc/cmdline".
|
||||
let logger = logging::create_logger(NAME, "agent", config.log_level, writer);
|
||||
|
||||
announce(&logger);
|
||||
|
||||
if args.len() == 2 && args[1] == "--version" {
|
||||
// force logger to flush
|
||||
drop(logger);
|
||||
|
||||
exit(0);
|
||||
}
|
||||
announce(&logger, &config);
|
||||
|
||||
// This "unused" variable is required as it enables the global (and crucially static) logger,
|
||||
// which is required to satisfy the the lifetime constraints of the auto-generated gRPC code.
|
||||
let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "rpc")));
|
||||
|
||||
start_sandbox(&logger, &config, init_mode)?;
|
||||
|
||||
let _ = log_handle.join();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn start_sandbox(logger: &Logger, config: &agentConfig, init_mode: bool) -> Result<()> {
|
||||
let shells = SHELLS.clone();
|
||||
let debug_console_vport = config.debug_console_vport as u32;
|
||||
|
||||
let shell_handle = if config.debug_console {
|
||||
let mut shell_handle: Option<JoinHandle<()>> = None;
|
||||
if config.debug_console {
|
||||
let thread_logger = logger.clone();
|
||||
|
||||
thread::spawn(move || {
|
||||
let builder = thread::Builder::new();
|
||||
|
||||
let handle = builder.spawn(move || {
|
||||
let shells = shells.lock().unwrap();
|
||||
let result = setup_debug_console(shells.to_vec(), debug_console_vport);
|
||||
let result = setup_debug_console(&thread_logger, shells.to_vec(), debug_console_vport);
|
||||
if result.is_err() {
|
||||
// Report error, but don't fail
|
||||
warn!(thread_logger, "failed to setup debug console";
|
||||
"error" => format!("{}", result.unwrap_err()));
|
||||
}
|
||||
})
|
||||
} else {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
};
|
||||
})?;
|
||||
|
||||
shell_handle = Some(handle);
|
||||
}
|
||||
|
||||
// Initialize unique sandbox structure.
|
||||
let s = Sandbox::new(&logger).map_err(|e| {
|
||||
error!(logger, "Failed to create sandbox with error: {:?}", e);
|
||||
e
|
||||
})?;
|
||||
let mut s = Sandbox::new(&logger).context("Failed to create sandbox")?;
|
||||
|
||||
if init_mode {
|
||||
let mut rtnl = RtnlHandle::new(NETLINK_ROUTE, 0).unwrap();
|
||||
rtnl.handle_localhost()?;
|
||||
|
||||
s.rtnl = Some(rtnl);
|
||||
}
|
||||
|
||||
let sandbox = Arc::new(Mutex::new(s));
|
||||
|
||||
@@ -220,40 +249,18 @@ fn main() -> Result<()> {
|
||||
sandbox.lock().unwrap().sender = Some(tx);
|
||||
|
||||
//vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(sandbox.clone(), VSOCK_ADDR, VSOCK_PORT);
|
||||
|
||||
/*
|
||||
let _ = fs::remove_file("/tmp/testagent");
|
||||
let _ = fs::remove_dir_all("/run/agent");
|
||||
let mut server = grpc::start(sandbox.clone(), "unix:///tmp/testagent", 1);
|
||||
*/
|
||||
|
||||
let handle = thread::spawn(move || {
|
||||
// info!("Press ENTER to exit...");
|
||||
// let _ = io::stdin().read(&mut [0]).unwrap();
|
||||
// thread::sleep(Duration::from_secs(3000));
|
||||
|
||||
let _ = rx.recv().unwrap();
|
||||
});
|
||||
// receive something from destroy_sandbox here?
|
||||
// or in the thread above? It depneds whether grpc request
|
||||
// are run in another thread or in the main thead?
|
||||
// let _ = rx.wait();
|
||||
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str());
|
||||
|
||||
let _ = server.start().unwrap();
|
||||
|
||||
handle.join().unwrap();
|
||||
let _ = rx.recv()?;
|
||||
|
||||
server.shutdown();
|
||||
|
||||
let _ = log_handle.join();
|
||||
|
||||
if config.debug_console {
|
||||
shell_handle.join().unwrap();
|
||||
if let Some(handle) = shell_handle {
|
||||
handle.join().map_err(|e| anyhow!("{:?}", e))?;
|
||||
}
|
||||
|
||||
let _ = fs::remove_file("/tmp/testagent");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -262,12 +269,8 @@ use nix::sys::wait::WaitPidFlag;
|
||||
fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result<()> {
|
||||
let logger = logger.new(o!("subsystem" => "signals"));
|
||||
|
||||
set_child_subreaper(true).map_err(|err| {
|
||||
format!(
|
||||
"failed to setup agent as a child subreaper, failed with {}",
|
||||
err
|
||||
)
|
||||
})?;
|
||||
set_child_subreaper(true)
|
||||
.map_err(|err| anyhow!(err).context("failed to setup agent as a child subreaper"))?;
|
||||
|
||||
let signals = Signals::new(&[SIGCHLD])?;
|
||||
|
||||
@@ -345,9 +348,14 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
|
||||
|
||||
// init_agent_as_init will do the initializations such as setting up the rootfs
|
||||
// when this agent has been run as the init process.
|
||||
fn init_agent_as_init(logger: &Logger) -> Result<()> {
|
||||
general_mount(logger)?;
|
||||
cgroups_mount(logger)?;
|
||||
fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<()> {
|
||||
cgroups_mount(logger, unified_cgroup_hierarchy).map_err(|e| {
|
||||
error!(
|
||||
logger,
|
||||
"fail cgroups mount, unified_cgroup_hierarchy {}: {}", unified_cgroup_hierarchy, e
|
||||
);
|
||||
e
|
||||
})?;
|
||||
|
||||
fs::remove_file(Path::new("/dev/ptmx"))?;
|
||||
unixfs::symlink(Path::new("/dev/pts/ptmx"), Path::new("/dev/ptmx"))?;
|
||||
@@ -378,7 +386,7 @@ fn sethostname(hostname: &OsStr) -> Result<()> {
|
||||
unsafe { libc::sethostname(hostname.as_bytes().as_ptr() as *const libc::c_char, size) };
|
||||
|
||||
if result != 0 {
|
||||
Err(ErrorKind::ErrorCode("failed to set hostname".to_string()).into())
|
||||
Err(anyhow!("failed to set hostname"))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
@@ -404,9 +412,9 @@ use crate::config::agentConfig;
|
||||
use nix::sys::stat::Mode;
|
||||
use std::os::unix::io::{FromRawFd, RawFd};
|
||||
use std::path::PathBuf;
|
||||
use std::process::{exit, Command, Stdio};
|
||||
use std::process::exit;
|
||||
|
||||
fn setup_debug_console(shells: Vec<String>, port: u32) -> Result<()> {
|
||||
fn setup_debug_console(logger: &Logger, shells: Vec<String>, port: u32) -> Result<()> {
|
||||
let mut shell: &str = "";
|
||||
for sh in shells.iter() {
|
||||
let binary = PathBuf::from(sh);
|
||||
@@ -417,12 +425,10 @@ fn setup_debug_console(shells: Vec<String>, port: u32) -> Result<()> {
|
||||
}
|
||||
|
||||
if shell == "" {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("no shell found to launch debug console".to_string()).into(),
|
||||
);
|
||||
return Err(anyhow!("no shell found to launch debug console"));
|
||||
}
|
||||
|
||||
let f: RawFd = if port > 0 {
|
||||
if port > 0 {
|
||||
let listenfd = socket::socket(
|
||||
AddressFamily::Vsock,
|
||||
SockType::Stream,
|
||||
@@ -432,29 +438,201 @@ fn setup_debug_console(shells: Vec<String>, port: u32) -> Result<()> {
|
||||
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, port);
|
||||
socket::bind(listenfd, &addr)?;
|
||||
socket::listen(listenfd, 1)?;
|
||||
socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?
|
||||
loop {
|
||||
let f: RawFd = socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?;
|
||||
match run_debug_console_shell(logger, shell, f) {
|
||||
Ok(_) => {
|
||||
info!(logger, "run_debug_console_shell session finished");
|
||||
}
|
||||
Err(err) => {
|
||||
error!(logger, "run_debug_console_shell failed: {:?}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut flags = OFlag::empty();
|
||||
flags.insert(OFlag::O_RDWR);
|
||||
flags.insert(OFlag::O_CLOEXEC);
|
||||
fcntl::open(CONSOLE_PATH, flags, Mode::empty())?
|
||||
loop {
|
||||
let f: RawFd = fcntl::open(CONSOLE_PATH, flags, Mode::empty())?;
|
||||
match run_debug_console_shell(logger, shell, f) {
|
||||
Ok(_) => {
|
||||
info!(logger, "run_debug_console_shell session finished");
|
||||
}
|
||||
Err(err) => {
|
||||
error!(logger, "run_debug_console_shell failed: {:?}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
fn io_copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> io::Result<u64>
|
||||
where
|
||||
R: Read,
|
||||
W: Write,
|
||||
{
|
||||
let mut buf = [0; DEFAULT_BUF_SIZE];
|
||||
let buf_len;
|
||||
|
||||
match reader.read(&mut buf) {
|
||||
Ok(0) => return Ok(0),
|
||||
Ok(len) => buf_len = len,
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
|
||||
let cmd = Command::new(shell)
|
||||
.arg("-i")
|
||||
.stdin(unsafe { Stdio::from_raw_fd(f) })
|
||||
.stdout(unsafe { Stdio::from_raw_fd(f) })
|
||||
.stderr(unsafe { Stdio::from_raw_fd(f) })
|
||||
.spawn();
|
||||
// write and return
|
||||
match writer.write_all(&buf[..buf_len]) {
|
||||
Ok(_) => return Ok(buf_len as u64),
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
let mut cmd = match cmd {
|
||||
Ok(c) => c,
|
||||
Err(_) => return Err(ErrorKind::ErrorCode("failed to spawn shell".to_string()).into()),
|
||||
};
|
||||
fn run_debug_console_shell(logger: &Logger, shell: &str, socket_fd: RawFd) -> Result<()> {
|
||||
let pseduo = pty::openpty(None, None)?;
|
||||
let _ = fcntl::fcntl(pseduo.master, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
|
||||
let _ = fcntl::fcntl(pseduo.slave, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
|
||||
|
||||
cmd.wait()?;
|
||||
let slave_fd = pseduo.slave;
|
||||
|
||||
return Ok(());
|
||||
match fork() {
|
||||
Ok(ForkResult::Child) => {
|
||||
// create new session with child as session leader
|
||||
setsid()?;
|
||||
|
||||
// dup stdin, stdout, stderr to let child act as a terminal
|
||||
dup2(slave_fd, STDIN_FILENO)?;
|
||||
dup2(slave_fd, STDOUT_FILENO)?;
|
||||
dup2(slave_fd, STDERR_FILENO)?;
|
||||
|
||||
// set tty
|
||||
unsafe {
|
||||
libc::ioctl(0, libc::TIOCSCTTY);
|
||||
}
|
||||
|
||||
let cmd = CString::new(shell).unwrap();
|
||||
let args: Vec<&CStr> = vec![];
|
||||
|
||||
// run shell
|
||||
if let Err(e) = unistd::execvp(cmd.as_c_str(), args.as_slice()) {
|
||||
match e {
|
||||
nix::Error::Sys(errno) => {
|
||||
std::process::exit(errno as i32);
|
||||
}
|
||||
_ => std::process::exit(-2),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ForkResult::Parent { child: child_pid }) => {
|
||||
info!(logger, "get debug shell pid {:?}", child_pid);
|
||||
|
||||
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
|
||||
let master_fd = pseduo.master;
|
||||
let debug_shell_logger = logger.clone();
|
||||
|
||||
// channel that used to sync between thread and main process
|
||||
let (tx, rx) = mpsc::channel::<i32>();
|
||||
|
||||
// start a thread to do IO copy between socket and pseduo.master
|
||||
thread::spawn(move || {
|
||||
let mut master_reader = unsafe { File::from_raw_fd(master_fd) };
|
||||
let mut master_writer = unsafe { File::from_raw_fd(master_fd) };
|
||||
let mut socket_reader = unsafe { File::from_raw_fd(socket_fd) };
|
||||
let mut socket_writer = unsafe { File::from_raw_fd(socket_fd) };
|
||||
|
||||
loop {
|
||||
let mut fd_set = FdSet::new();
|
||||
fd_set.insert(rfd);
|
||||
fd_set.insert(master_fd);
|
||||
fd_set.insert(socket_fd);
|
||||
|
||||
match select(
|
||||
Some(fd_set.highest().unwrap() + 1),
|
||||
&mut fd_set,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
) {
|
||||
Ok(_) => (),
|
||||
Err(e) => {
|
||||
if e == nix::Error::from(nix::errno::Errno::EINTR) {
|
||||
continue;
|
||||
} else {
|
||||
error!(debug_shell_logger, "select error {:?}", e);
|
||||
tx.send(1).unwrap();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if fd_set.contains(rfd) {
|
||||
info!(
|
||||
debug_shell_logger,
|
||||
"debug shell process {} exited", child_pid
|
||||
);
|
||||
tx.send(1).unwrap();
|
||||
break;
|
||||
}
|
||||
|
||||
if fd_set.contains(master_fd) {
|
||||
match io_copy(&mut master_reader, &mut socket_writer) {
|
||||
Ok(0) => {
|
||||
debug!(debug_shell_logger, "master fd closed");
|
||||
tx.send(1).unwrap();
|
||||
break;
|
||||
}
|
||||
Ok(_) => {}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
|
||||
Err(e) => {
|
||||
error!(debug_shell_logger, "read master fd error {:?}", e);
|
||||
tx.send(1).unwrap();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if fd_set.contains(socket_fd) {
|
||||
match io_copy(&mut socket_reader, &mut master_writer) {
|
||||
Ok(0) => {
|
||||
debug!(debug_shell_logger, "socket fd closed");
|
||||
tx.send(1).unwrap();
|
||||
break;
|
||||
}
|
||||
Ok(_) => {}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
|
||||
Err(e) => {
|
||||
error!(debug_shell_logger, "read socket fd error {:?}", e);
|
||||
tx.send(1).unwrap();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let wait_status = wait::waitpid(child_pid, None);
|
||||
info!(logger, "debug console process exit code: {:?}", wait_status);
|
||||
|
||||
info!(logger, "notify debug monitor thread to exit");
|
||||
// close pipe to exit select loop
|
||||
let _ = close(wfd);
|
||||
|
||||
// wait for thread exit.
|
||||
let _ = rx.recv().unwrap();
|
||||
info!(logger, "debug monitor thread has exited");
|
||||
|
||||
// close files
|
||||
let _ = close(rfd);
|
||||
let _ = close(master_fd);
|
||||
let _ = close(slave_fd);
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(anyhow!("fork error: {:?}", err));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -472,13 +650,14 @@ mod tests {
|
||||
let shells_ref = SHELLS.clone();
|
||||
let mut shells = shells_ref.lock().unwrap();
|
||||
shells.clear();
|
||||
let logger = slog_scope::logger();
|
||||
|
||||
let result = setup_debug_console(shells.to_vec(), 0);
|
||||
let result = setup_debug_console(&logger, shells.to_vec(), 0);
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(
|
||||
result.unwrap_err().to_string(),
|
||||
"Error Code: 'no shell found to launch debug console'"
|
||||
"no shell found to launch debug console"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -498,13 +677,14 @@ mod tests {
|
||||
.to_string();
|
||||
|
||||
shells.push(shell);
|
||||
let logger = slog_scope::logger();
|
||||
|
||||
let result = setup_debug_console(shells.to_vec(), 0);
|
||||
let result = setup_debug_console(&logger, shells.to_vec(), 0);
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(
|
||||
result.unwrap_err().to_string(),
|
||||
"Error Code: 'no shell found to launch debug console'"
|
||||
"no shell found to launch debug console"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@ extern crate procfs;
|
||||
|
||||
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};
|
||||
|
||||
use anyhow::Result;
|
||||
use protocols;
|
||||
use rustjail::errors::*;
|
||||
|
||||
const NAMESPACE_KATA_AGENT: &str = "kata_agent";
|
||||
const NAMESPACE_KATA_GUEST: &str = "kata_guest";
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use rustjail::errors::*;
|
||||
use std::collections::HashMap;
|
||||
use std::ffi::CString;
|
||||
use std::fs;
|
||||
@@ -26,6 +25,7 @@ use crate::device::{get_pci_device_name, get_scsi_device_name, online_device};
|
||||
use crate::linux_abi::*;
|
||||
use crate::protocols::agent::Storage;
|
||||
use crate::Sandbox;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use slog::Logger;
|
||||
|
||||
pub const DRIVER9PTYPE: &str = "9p";
|
||||
@@ -191,11 +191,11 @@ impl<'a> BareMount<'a> {
|
||||
let cstr_fs_type: CString;
|
||||
|
||||
if self.source.len() == 0 {
|
||||
return Err(ErrorKind::ErrorCode("need mount source".to_string()).into());
|
||||
return Err(anyhow!("need mount source"));
|
||||
}
|
||||
|
||||
if self.destination.len() == 0 {
|
||||
return Err(ErrorKind::ErrorCode("need mount destination".to_string()).into());
|
||||
return Err(anyhow!("need mount destination"));
|
||||
}
|
||||
|
||||
cstr_source = CString::new(self.source)?;
|
||||
@@ -205,7 +205,7 @@ impl<'a> BareMount<'a> {
|
||||
dest = cstr_dest.as_ptr();
|
||||
|
||||
if self.fs_type.len() == 0 {
|
||||
return Err(ErrorKind::ErrorCode("need mount FS type".to_string()).into());
|
||||
return Err(anyhow!("need mount FS type"));
|
||||
}
|
||||
|
||||
cstr_fs_type = CString::new(self.fs_type)?;
|
||||
@@ -227,13 +227,12 @@ impl<'a> BareMount<'a> {
|
||||
let rc = unsafe { mount(source, dest, fs_type, self.flags.bits(), options) };
|
||||
|
||||
if rc < 0 {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"failed to mount {:?} to {:?}, with error: {}",
|
||||
self.source,
|
||||
self.destination,
|
||||
io::Error::last_os_error()
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -256,7 +255,9 @@ fn ephemeral_storage_handler(
|
||||
return Err(err.into());
|
||||
}
|
||||
|
||||
common_storage_handler(logger, storage)
|
||||
common_storage_handler(logger, storage)?;
|
||||
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
fn local_storage_handler(
|
||||
@@ -272,7 +273,10 @@ fn local_storage_handler(
|
||||
return Ok("".to_string());
|
||||
}
|
||||
|
||||
fs::create_dir_all(&storage.mount_point)?;
|
||||
fs::create_dir_all(&storage.mount_point).context(format!(
|
||||
"failed to create dir all {:?}",
|
||||
&storage.mount_point
|
||||
))?;
|
||||
|
||||
let opts_vec: Vec<String> = storage.options.to_vec();
|
||||
|
||||
@@ -328,11 +332,12 @@ fn virtio_blk_storage_handler(
|
||||
// If hot-plugged, get the device node path based on the PCI address else
|
||||
// use the virt path provided in Storage Source
|
||||
if storage.source.starts_with("/dev") {
|
||||
let metadata = fs::metadata(&storage.source)?;
|
||||
let metadata = fs::metadata(&storage.source)
|
||||
.context(format!("get metadata on file {:?}", &storage.source))?;
|
||||
|
||||
let mode = metadata.permissions().mode();
|
||||
if mode & libc::S_IFBLK == 0 {
|
||||
return Err(ErrorKind::ErrorCode(format!("Invalid device {}", &storage.source)).into());
|
||||
return Err(anyhow!("Invalid device {}", &storage.source));
|
||||
}
|
||||
} else {
|
||||
let dev_path = get_pci_device_name(&sandbox, &storage.source)?;
|
||||
@@ -372,7 +377,7 @@ fn mount_storage(logger: &Logger, storage: &Storage) -> Result<()> {
|
||||
DRIVER9PTYPE | DRIVERVIRTIOFSTYPE => {
|
||||
let dest_path = Path::new(storage.mount_point.as_str());
|
||||
if !dest_path.exists() {
|
||||
fs::create_dir_all(dest_path).chain_err(|| "Create mount destination failed")?;
|
||||
fs::create_dir_all(dest_path).context("Create mount destination failed")?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
@@ -446,11 +451,10 @@ pub fn add_storages(
|
||||
|
||||
let handler = match STORAGEHANDLERLIST.get(&handler_name.as_str()) {
|
||||
None => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
return Err(anyhow!(
|
||||
"Failed to find the storage handler {}",
|
||||
storage.driver.to_owned()
|
||||
))
|
||||
.into());
|
||||
));
|
||||
}
|
||||
Some(f) => f,
|
||||
};
|
||||
@@ -476,7 +480,7 @@ fn mount_to_rootfs(logger: &Logger, m: &INIT_MOUNT) -> Result<()> {
|
||||
|
||||
let bare_mount = BareMount::new(m.src, m.dest, m.fstype, flags, options.as_str(), logger);
|
||||
|
||||
fs::create_dir_all(Path::new(m.dest)).chain_err(|| "could not create directory")?;
|
||||
fs::create_dir_all(Path::new(m.dest)).context("could not create directory")?;
|
||||
|
||||
if let Err(err) = bare_mount.mount() {
|
||||
if m.src != "dev" {
|
||||
@@ -510,7 +514,7 @@ pub fn get_mount_fs_type(mount_point: &str) -> Result<String> {
|
||||
// any error ecountered.
|
||||
pub fn get_mount_fs_type_from_file(mount_file: &str, mount_point: &str) -> Result<String> {
|
||||
if mount_point == "" {
|
||||
return Err(ErrorKind::ErrorCode(format!("Invalid mount point {}", mount_point)).into());
|
||||
return Err(anyhow!("Invalid mount point {}", mount_point));
|
||||
}
|
||||
|
||||
let file = File::open(mount_file)?;
|
||||
@@ -532,14 +536,28 @@ pub fn get_mount_fs_type_from_file(mount_file: &str, mount_point: &str) -> Resul
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::ErrorCode(format!(
|
||||
Err(anyhow!(
|
||||
"failed to find FS type for mount point {}",
|
||||
mount_point
|
||||
))
|
||||
.into())
|
||||
}
|
||||
|
||||
pub fn get_cgroup_mounts(logger: &Logger, cg_path: &str) -> Result<Vec<INIT_MOUNT>> {
|
||||
pub fn get_cgroup_mounts(
|
||||
logger: &Logger,
|
||||
cg_path: &str,
|
||||
unified_cgroup_hierarchy: bool,
|
||||
) -> Result<Vec<INIT_MOUNT>> {
|
||||
// cgroup v2
|
||||
// https://github.com/kata-containers/agent/blob/8c9bbadcd448c9a67690fbe11a860aaacc69813c/agent.go#L1249
|
||||
if unified_cgroup_hierarchy {
|
||||
return Ok(vec![INIT_MOUNT {
|
||||
fstype: "cgroup2",
|
||||
src: "cgroup2",
|
||||
dest: "/sys/fs/cgroup",
|
||||
options: vec!["nosuid", "nodev", "noexec", "relatime", "nsdelegate"],
|
||||
}]);
|
||||
}
|
||||
|
||||
let file = File::open(&cg_path)?;
|
||||
let reader = BufReader::new(file);
|
||||
|
||||
@@ -614,10 +632,10 @@ pub fn get_cgroup_mounts(logger: &Logger, cg_path: &str) -> Result<Vec<INIT_MOUN
|
||||
Ok(cg_mounts)
|
||||
}
|
||||
|
||||
pub fn cgroups_mount(logger: &Logger) -> Result<()> {
|
||||
pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<()> {
|
||||
let logger = logger.new(o!("subsystem" => "mount"));
|
||||
|
||||
let cgroups = get_cgroup_mounts(&logger, PROC_CGROUPS)?;
|
||||
let cgroups = get_cgroup_mounts(&logger, PROC_CGROUPS, unified_cgroup_hierarchy)?;
|
||||
|
||||
for cg in cgroups.iter() {
|
||||
mount_to_rootfs(&logger, cg)?;
|
||||
@@ -631,7 +649,7 @@ pub fn cgroups_mount(logger: &Logger) -> Result<()> {
|
||||
|
||||
pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
|
||||
for m in mounts.iter() {
|
||||
mount::umount(m.as_str())?;
|
||||
mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -643,21 +661,15 @@ fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
|
||||
if !d.exists() {
|
||||
let dir = match d.parent() {
|
||||
Some(d) => d,
|
||||
None => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"mount destination {} doesn't exist",
|
||||
destination
|
||||
))
|
||||
.into())
|
||||
}
|
||||
None => return Err(anyhow!("mount destination {} doesn't exist", destination)),
|
||||
};
|
||||
if !dir.exists() {
|
||||
fs::create_dir_all(dir)?;
|
||||
fs::create_dir_all(dir).context(format!("create dir all failed on {:?}", dir))?;
|
||||
}
|
||||
}
|
||||
|
||||
if fs_type != "bind" || d.is_dir() {
|
||||
fs::create_dir_all(d)?;
|
||||
fs::create_dir_all(d).context(format!("create dir all failed on {:?}", d))?;
|
||||
} else {
|
||||
fs::OpenOptions::new().create(true).open(d)?;
|
||||
}
|
||||
@@ -958,7 +970,7 @@ mod tests {
|
||||
continue;
|
||||
}
|
||||
|
||||
let error_msg = format!("{}", result.unwrap_err());
|
||||
let error_msg = format!("{:#}", result.unwrap_err());
|
||||
|
||||
assert!(error_msg.contains(d.error_contains), msg);
|
||||
}
|
||||
@@ -1074,6 +1086,20 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_cgroup_v2_mounts() {
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
let drain = slog::Discard;
|
||||
let logger = slog::Logger::root(drain, o!());
|
||||
let result = get_cgroup_mounts(&logger, "", true);
|
||||
|
||||
assert_eq!(true, result.is_ok());
|
||||
let result = result.unwrap();
|
||||
assert_eq!(1, result.len());
|
||||
assert_eq!(result[0].fstype, "cgroup2");
|
||||
assert_eq!(result[0].src, "cgroup2");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_cgroup_mounts() {
|
||||
#[derive(Debug)]
|
||||
@@ -1178,7 +1204,7 @@ mod tests {
|
||||
];
|
||||
|
||||
// First, test a missing file
|
||||
let result = get_cgroup_mounts(&logger, enoent_filename);
|
||||
let result = get_cgroup_mounts(&logger, enoent_filename, false);
|
||||
|
||||
assert!(result.is_err());
|
||||
let error_msg = format!("{}", result.unwrap_err());
|
||||
@@ -1201,7 +1227,7 @@ mod tests {
|
||||
file.write_all(d.contents.as_bytes())
|
||||
.expect(&format!("{}: failed to write file contents", msg));
|
||||
|
||||
let result = get_cgroup_mounts(&logger, filename);
|
||||
let result = get_cgroup_mounts(&logger, filename, false);
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
|
||||
if d.error_contains != "" {
|
||||
|
||||
@@ -3,8 +3,17 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use nix::mount::{self, MntFlags, MsFlags};
|
||||
use protocols::types::{Interface, Route};
|
||||
use slog::Logger;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
|
||||
use crate::Sandbox;
|
||||
|
||||
const KATA_GUEST_SANDBOX_DNS_FILE: &str = "/run/kata-containers/sandbox/resolv.conf";
|
||||
const GUEST_DNS_FILE: &str = "/etc/resolv.conf";
|
||||
|
||||
// Network fully describes a sandbox network with its interfaces, routes and dns
|
||||
// related information.
|
||||
@@ -28,3 +37,116 @@ impl Network {
|
||||
self.dns.push(dns);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
|
||||
do_setup_guest_dns(
|
||||
logger,
|
||||
dns_list,
|
||||
KATA_GUEST_SANDBOX_DNS_FILE,
|
||||
GUEST_DNS_FILE,
|
||||
)
|
||||
}
|
||||
|
||||
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
|
||||
let logger = logger.new(o!( "subsystem" => "network"));
|
||||
|
||||
if dns_list.len() == 0 {
|
||||
info!(
|
||||
logger,
|
||||
"Did not set sandbox DNS as DNS not received as part of request."
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let attr = fs::metadata(dst);
|
||||
if attr.is_err() {
|
||||
// not exists or other errors that we could not use it anymore.
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if attr.unwrap().is_dir() {
|
||||
return Err(anyhow!("{} is a directory", GUEST_DNS_FILE));
|
||||
}
|
||||
|
||||
// write DNS to file
|
||||
let content = dns_list
|
||||
.iter()
|
||||
.map(|x| x.trim())
|
||||
.collect::<Vec<&str>>()
|
||||
.join("\n");
|
||||
fs::write(src, &content)?;
|
||||
|
||||
// bind mount to /etc/resolv.conf
|
||||
mount::mount(Some(src), dst, Some("bind"), MsFlags::MS_BIND, None::<&str>)
|
||||
.map_err(|err| anyhow!(err).context("failed to setup guest DNS"))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::skip_if_not_root;
|
||||
use nix::mount;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn test_setup_guest_dns() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let drain = slog::Discard;
|
||||
let logger = slog::Logger::root(drain, o!());
|
||||
|
||||
// create temp for /run/kata-containers/sandbox/resolv.conf
|
||||
let src_dir = tempdir().expect("failed to create tmpdir");
|
||||
let tmp = src_dir.path().join("resolv.conf");
|
||||
let src_filename = tmp.to_str().expect("failed to get resolv file filename");
|
||||
|
||||
// create temp for /etc/resolv.conf
|
||||
let dst_dir = tempdir().expect("failed to create tmpdir");
|
||||
let tmp = dst_dir.path().join("resolv.conf");
|
||||
let dst_filename = tmp.to_str().expect("failed to get resolv file filename");
|
||||
{
|
||||
let _file = File::create(dst_filename).unwrap();
|
||||
}
|
||||
|
||||
// test DNS
|
||||
let dns = vec![
|
||||
"nameserver 1.2.3.4".to_string(),
|
||||
"nameserver 5.6.7.8".to_string(),
|
||||
];
|
||||
|
||||
// write to /run/kata-containers/sandbox/resolv.conf
|
||||
let mut src_file =
|
||||
File::create(src_filename).expect(&format!("failed to create file {:?}", src_filename));
|
||||
let content = dns.join("\n");
|
||||
src_file
|
||||
.write_all(content.as_bytes())
|
||||
.expect(&format!("failed to write file contents"));
|
||||
|
||||
// call do_setup_guest_dns
|
||||
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
|
||||
|
||||
assert_eq!(
|
||||
true,
|
||||
result.is_ok(),
|
||||
"result should be ok, but {:?}",
|
||||
result
|
||||
);
|
||||
|
||||
// get content of /etc/resolv.conf
|
||||
let content = fs::read_to_string(dst_filename);
|
||||
assert_eq!(true, content.is_ok());
|
||||
let content = content.unwrap();
|
||||
|
||||
let expected_DNS: Vec<&str> = content.split('\n').collect();
|
||||
|
||||
// assert the data are the same as /run/kata-containers/sandbox/resolv.conf
|
||||
assert_eq!(dns, expected_DNS);
|
||||
|
||||
// umount /etc/resolv.conf
|
||||
let _ = mount::umount(dst_filename);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::Result;
|
||||
use libc;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::sys::stat::Mode;
|
||||
use rustjail::errors::*;
|
||||
use std::fs;
|
||||
|
||||
pub const RNGDEV: &str = "/dev/random";
|
||||
|
||||
@@ -3,14 +3,17 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::{channel, Sender};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use ttrpc;
|
||||
|
||||
use oci::{LinuxNamespace, Spec};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use oci::{LinuxNamespace, Root, Spec};
|
||||
use protobuf::{RepeatedField, SingularPtrField};
|
||||
use protocols::agent::{
|
||||
AgentDetails, CopyFileRequest, GuestDetailsResponse, Interfaces, ListProcessesResponse,
|
||||
Metrics, ReadStreamResponse, Routes, StatsContainerResponse, WaitProcessResponse,
|
||||
Metrics, OOMEvent, ReadStreamResponse, Routes, StatsContainerResponse, WaitProcessResponse,
|
||||
WriteStreamResponse,
|
||||
};
|
||||
use protocols::empty::Empty;
|
||||
@@ -19,12 +22,13 @@ use protocols::health::{
|
||||
};
|
||||
use protocols::types::Interface;
|
||||
use rustjail;
|
||||
use rustjail::cgroups::notifier;
|
||||
use rustjail::container::{BaseContainer, Container, LinuxContainer};
|
||||
use rustjail::errors::*;
|
||||
use rustjail::process::Process;
|
||||
use rustjail::specconv::CreateOpts;
|
||||
|
||||
use nix::errno::Errno;
|
||||
use nix::mount::MsFlags;
|
||||
use nix::sys::signal::Signal;
|
||||
use nix::sys::stat;
|
||||
use nix::unistd::{self, Pid};
|
||||
@@ -33,8 +37,10 @@ use rustjail::process::ProcessOperations;
|
||||
use crate::device::{add_devices, rescan_pci_bus, update_device_cgroup};
|
||||
use crate::linux_abi::*;
|
||||
use crate::metrics::get_metrics;
|
||||
use crate::mount::{add_storages, remove_mounts, STORAGEHANDLERLIST};
|
||||
use crate::mount::{add_storages, remove_mounts, BareMount, STORAGEHANDLERLIST};
|
||||
use crate::namespace::{NSTYPEIPC, NSTYPEPID, NSTYPEUTS};
|
||||
use crate::network::setup_guest_dns;
|
||||
use crate::network::Network;
|
||||
use crate::random;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::version::{AGENT_VERSION, API_VERSION};
|
||||
@@ -88,18 +94,16 @@ impl agentService {
|
||||
Some(spec) => rustjail::grpc_to_oci(spec),
|
||||
None => {
|
||||
error!(sl!(), "no oci spec in the create container request!");
|
||||
return Err(
|
||||
ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into(),
|
||||
);
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
|
||||
}
|
||||
};
|
||||
|
||||
info!(sl!(), "receive createcontainer {}", &cid);
|
||||
info!(sl!(), "receive createcontainer, spec: {:?}", &oci);
|
||||
|
||||
// re-scan PCI bus
|
||||
// looking for hidden devices
|
||||
|
||||
rescan_pci_bus().chain_err(|| "Could not rescan PCI bus")?;
|
||||
rescan_pci_bus().context("Could not rescan PCI bus")?;
|
||||
|
||||
// Some devices need some extra processing (the ones invoked with
|
||||
// --device for instance), and that's what this call is doing. It
|
||||
@@ -127,9 +131,12 @@ impl agentService {
|
||||
// Add the root partition to the device cgroup to prevent access
|
||||
update_device_cgroup(&mut oci)?;
|
||||
|
||||
// Append guest hooks
|
||||
append_guest_hooks(&s, &mut oci);
|
||||
|
||||
// write spec to bundle path, hooks might
|
||||
// read ocispec
|
||||
let olddir = setup_bundle(&oci)?;
|
||||
let olddir = setup_bundle(&cid, &mut oci)?;
|
||||
// restore the cwd for kata-agent process.
|
||||
defer!(unistd::chdir(&olddir).unwrap());
|
||||
|
||||
@@ -158,7 +165,7 @@ impl agentService {
|
||||
tp
|
||||
} else {
|
||||
info!(sl!(), "no process configurations!");
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
|
||||
};
|
||||
|
||||
ctr.start(p)?;
|
||||
@@ -175,16 +182,26 @@ impl agentService {
|
||||
|
||||
let sandbox = self.sandbox.clone();
|
||||
let mut s = sandbox.lock().unwrap();
|
||||
let sid = s.id.clone();
|
||||
|
||||
let ctr: &mut LinuxContainer = match s.get_container(cid.as_str()) {
|
||||
Some(cr) => cr,
|
||||
None => {
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
};
|
||||
|
||||
ctr.exec()?;
|
||||
|
||||
// start oom event loop
|
||||
if sid != cid && ctr.cgroup_manager.is_some() {
|
||||
let cg_path = ctr.cgroup_manager.as_ref().unwrap().get_cg_path("memory");
|
||||
if cg_path.is_some() {
|
||||
let rx = notifier::notify_oom(cid.as_str(), cg_path.unwrap())?;
|
||||
s.run_oom_event_monitor(rx, cid);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -198,7 +215,7 @@ impl agentService {
|
||||
let ctr: &mut LinuxContainer = match sandbox.get_container(cid.as_str()) {
|
||||
Some(cr) => cr,
|
||||
None => {
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -247,13 +264,13 @@ impl agentService {
|
||||
});
|
||||
|
||||
if let Err(_) = rx.recv_timeout(Duration::from_secs(req.timeout as u64)) {
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::ETIME)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::ETIME)));
|
||||
}
|
||||
|
||||
if let Err(_) = handle.join() {
|
||||
return Err(
|
||||
ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::UnknownErrno)).into(),
|
||||
);
|
||||
return Err(anyhow!(nix::Error::from_errno(
|
||||
nix::errno::Errno::UnknownErrno
|
||||
)));
|
||||
}
|
||||
|
||||
let s = self.sandbox.clone();
|
||||
@@ -296,7 +313,7 @@ impl agentService {
|
||||
let process = if req.process.is_some() {
|
||||
req.process.as_ref().unwrap()
|
||||
} else {
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
|
||||
};
|
||||
|
||||
let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
|
||||
@@ -306,9 +323,7 @@ impl agentService {
|
||||
let ctr = match sandbox.get_container(cid.as_str()) {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into(),
|
||||
);
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -328,7 +343,7 @@ impl agentService {
|
||||
sl!(),
|
||||
"signal process";
|
||||
"container-id" => cid.clone(),
|
||||
"exec-id" => eid.clone()
|
||||
"exec-id" => eid.clone(),
|
||||
);
|
||||
|
||||
if eid == "" {
|
||||
@@ -362,6 +377,7 @@ impl agentService {
|
||||
let pid: pid_t;
|
||||
let mut exit_pipe_r: RawFd = -1;
|
||||
let mut buf: Vec<u8> = vec![0, 1];
|
||||
let (exit_send, exit_recv) = channel();
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
@@ -379,6 +395,7 @@ impl agentService {
|
||||
exit_pipe_r = p.exit_pipe_r.unwrap();
|
||||
}
|
||||
|
||||
p.exit_watchers.push(exit_send);
|
||||
pid = p.pid;
|
||||
}
|
||||
|
||||
@@ -391,13 +408,20 @@ impl agentService {
|
||||
let ctr: &mut LinuxContainer = match sandbox.get_container(cid.as_str()) {
|
||||
Some(cr) => cr,
|
||||
None => {
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut p = match ctr.processes.get_mut(&pid) {
|
||||
Some(p) => p,
|
||||
None => {
|
||||
// Lost race, pick up exit code from channel
|
||||
resp.status = exit_recv.recv().unwrap();
|
||||
return Ok(resp);
|
||||
}
|
||||
};
|
||||
|
||||
// need to close all fds
|
||||
let mut p = ctr.processes.get_mut(&pid).unwrap();
|
||||
|
||||
if p.parent_stdin.is_some() {
|
||||
let _ = unistd::close(p.parent_stdin.unwrap());
|
||||
}
|
||||
@@ -424,6 +448,11 @@ impl agentService {
|
||||
p.term_master = None;
|
||||
|
||||
resp.status = p.exit_code;
|
||||
// broadcast exit code to all parallel watchers
|
||||
for s in p.exit_watchers.iter() {
|
||||
// Just ignore errors in case any watcher quits unexpectedly
|
||||
let _ = s.send(p.exit_code);
|
||||
}
|
||||
|
||||
ctr.processes.remove(&pid);
|
||||
|
||||
@@ -467,9 +496,7 @@ impl agentService {
|
||||
Err(e) => match e {
|
||||
nix::Error::Sys(nix::errno::Errno::EAGAIN) => l = 0,
|
||||
_ => {
|
||||
return Err(
|
||||
ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EIO)).into(),
|
||||
);
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EIO)));
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -489,7 +516,6 @@ impl agentService {
|
||||
let eid = req.exec_id;
|
||||
|
||||
let mut fd: RawFd = -1;
|
||||
info!(sl!(), "read stdout for {}/{}", cid.clone(), eid.clone());
|
||||
{
|
||||
let s = self.sandbox.clone();
|
||||
let mut sandbox = s.lock().unwrap();
|
||||
@@ -508,7 +534,7 @@ impl agentService {
|
||||
}
|
||||
|
||||
if fd == -1 {
|
||||
return Err(ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into());
|
||||
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
|
||||
}
|
||||
|
||||
let vector = read_stream(fd, req.len as usize)?;
|
||||
@@ -865,10 +891,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
|
||||
let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INVALID_ARGUMENT,
|
||||
"invalid argument".to_string(),
|
||||
format!("invalid argument: {:?}", e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
@@ -897,10 +923,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
let mut sandbox = s.lock().unwrap();
|
||||
let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::UNAVAILABLE,
|
||||
"cannot find the process".to_string(),
|
||||
format!("invalid argument: {:?}", e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
@@ -922,10 +948,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
};
|
||||
|
||||
let err = libc::ioctl(fd, TIOCSWINSZ, &win);
|
||||
if let Err(_) = Errno::result(err).map(drop) {
|
||||
if let Err(e) = Errno::result(err).map(drop) {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
"ioctl error".to_string(),
|
||||
format!("ioctl error: {:?}", e),
|
||||
)));
|
||||
}
|
||||
}
|
||||
@@ -950,10 +976,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
|
||||
let iface = match rtnl.update_interface(interface.as_ref().unwrap()) {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
"update interface".to_string(),
|
||||
format!("update interface: {:?}", e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
@@ -979,10 +1005,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
// get current routes to return when error out
|
||||
let crs = match rtnl.list_routes() {
|
||||
Ok(routes) => routes,
|
||||
Err(_) => {
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
"update routes".to_string(),
|
||||
format!("update routes: {:?}", e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
@@ -1011,10 +1037,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
let rtnl = sandbox.rtnl.as_mut().unwrap();
|
||||
let v = match rtnl.list_interfaces() {
|
||||
Ok(value) => value,
|
||||
Err(_) => {
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
"list interface".to_string(),
|
||||
format!("list interface: {:?}", e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
@@ -1040,10 +1066,10 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
|
||||
let v = match rtnl.list_routes() {
|
||||
Ok(value) => value,
|
||||
Err(_) => {
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
"list routes".to_string(),
|
||||
format!("list routes: {:?}", e),
|
||||
)));
|
||||
}
|
||||
};
|
||||
@@ -1067,6 +1093,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
) -> ttrpc::Result<Empty> {
|
||||
Ok(Empty::new())
|
||||
}
|
||||
|
||||
fn create_sandbox(
|
||||
&self,
|
||||
_ctx: &ttrpc::TtrpcContext,
|
||||
@@ -1082,6 +1109,15 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
s.hostname = req.hostname.clone();
|
||||
s.running = true;
|
||||
|
||||
if !req.guest_hook_path.is_empty() {
|
||||
if let Err(e) = s.add_hooks(&req.guest_hook_path) {
|
||||
error!(
|
||||
sl!(),
|
||||
"add guest hook {} failed: {:?}", req.guest_hook_path, e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if req.sandbox_id.len() > 0 {
|
||||
s.id = req.sandbox_id.clone();
|
||||
}
|
||||
@@ -1123,8 +1159,27 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
}
|
||||
};
|
||||
|
||||
match setup_guest_dns(sl!(), req.dns.to_vec()) {
|
||||
Ok(dns_list) => {
|
||||
let sandbox = self.sandbox.clone();
|
||||
let mut s = sandbox.lock().unwrap();
|
||||
let _ = req
|
||||
.dns
|
||||
.to_vec()
|
||||
.iter()
|
||||
.map(|dns| s.network.set_dns(dns.to_string()));
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
e.to_string(),
|
||||
)))
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Empty::new())
|
||||
}
|
||||
|
||||
fn destroy_sandbox(
|
||||
&self,
|
||||
_ctx: &ttrpc::TtrpcContext,
|
||||
@@ -1171,8 +1226,6 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
_ctx: &ttrpc::TtrpcContext,
|
||||
req: protocols::agent::OnlineCPUMemRequest,
|
||||
) -> ttrpc::Result<Empty> {
|
||||
// sleep 5 seconds for debug
|
||||
// thread::sleep(Duration::new(5, 0));
|
||||
let s = Arc::clone(&self.sandbox);
|
||||
let sandbox = s.lock().unwrap();
|
||||
|
||||
@@ -1287,6 +1340,34 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_oom_event(
|
||||
&self,
|
||||
_ctx: &ttrpc::TtrpcContext,
|
||||
_req: protocols::agent::GetOOMEventRequest,
|
||||
) -> ttrpc::Result<OOMEvent> {
|
||||
let sandbox = self.sandbox.clone();
|
||||
let s = sandbox.lock().unwrap();
|
||||
let event_rx = &s.event_rx.clone();
|
||||
let event_rx = event_rx.lock().unwrap();
|
||||
drop(s);
|
||||
drop(sandbox);
|
||||
|
||||
match event_rx.recv() {
|
||||
Err(err) => {
|
||||
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
|
||||
ttrpc::Code::INTERNAL,
|
||||
err.to_string(),
|
||||
)))
|
||||
}
|
||||
Ok(container_id) => {
|
||||
info!(sl!(), "get_oom_event return {}", &container_id);
|
||||
let mut resp = OOMEvent::new();
|
||||
resp.container_id = container_id;
|
||||
return Ok(resp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -1324,7 +1405,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
|
||||
Ok(v) => {
|
||||
if v.len() == 0 {
|
||||
info!(sl!(), "string in empty???");
|
||||
return Err(ErrorKind::ErrorCode("Invalid block size".to_string()).into());
|
||||
return Err(anyhow!("Invalid block size"));
|
||||
}
|
||||
|
||||
size = v.trim().parse::<u64>()?;
|
||||
@@ -1332,7 +1413,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
|
||||
Err(e) => {
|
||||
info!(sl!(), "memory block size error: {:?}", e.kind());
|
||||
if e.kind() != std::io::ErrorKind::NotFound {
|
||||
return Err(ErrorKind::Io(e).into());
|
||||
return Err(anyhow!(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1350,9 +1431,9 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
|
||||
match e {
|
||||
nix::Error::Sys(errno) => match errno {
|
||||
Errno::ENOENT => plug = false,
|
||||
_ => return Err(ErrorKind::Nix(e).into()),
|
||||
_ => return Err(anyhow!(e)),
|
||||
},
|
||||
_ => return Err(ErrorKind::Nix(e).into()),
|
||||
_ => return Err(anyhow!(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1393,15 +1474,15 @@ fn read_stream(fd: RawFd, l: usize) -> Result<Vec<u8>> {
|
||||
// was closed, instead it would return a 0 reading length, please
|
||||
// see https://github.com/rust-lang/rfcs/blob/master/text/0517-io-os-reform.md#errors
|
||||
if len == 0 {
|
||||
return Err(ErrorKind::ErrorCode("read meet eof".to_string()).into());
|
||||
return Err(anyhow!("read meet eof"));
|
||||
}
|
||||
}
|
||||
Err(e) => match e {
|
||||
nix::Error::Sys(errno) => match errno {
|
||||
Errno::EAGAIN => v.resize(0, 0),
|
||||
_ => return Err(ErrorKind::Nix(nix::Error::Sys(errno)).into()),
|
||||
_ => return Err(anyhow!(nix::Error::Sys(errno))),
|
||||
},
|
||||
_ => return Err(ErrorKind::ErrorCode("read error".to_string()).into()),
|
||||
_ => return Err(anyhow!("read error")),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1416,15 +1497,13 @@ fn find_process<'a>(
|
||||
) -> Result<&'a mut Process> {
|
||||
let ctr = match sandbox.get_container(cid) {
|
||||
Some(v) => v,
|
||||
None => return Err(ErrorKind::ErrorCode(String::from("Invalid container id")).into()),
|
||||
None => return Err(anyhow!("Invalid container id")),
|
||||
};
|
||||
|
||||
if init || eid == "" {
|
||||
let p = match ctr.processes.get_mut(&ctr.init_process_pid) {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Err(ErrorKind::ErrorCode(String::from("cannot find init process!")).into())
|
||||
}
|
||||
None => return Err(anyhow!("cannot find init process!")),
|
||||
};
|
||||
|
||||
return Ok(p);
|
||||
@@ -1432,13 +1511,13 @@ fn find_process<'a>(
|
||||
|
||||
let p = match ctr.get_process(eid) {
|
||||
Ok(v) => v,
|
||||
Err(_) => return Err(ErrorKind::ErrorCode("Invalid exec id".to_string()).into()),
|
||||
Err(_) => return Err(anyhow!("Invalid exec id")),
|
||||
};
|
||||
|
||||
Ok(p)
|
||||
}
|
||||
|
||||
pub fn start<S: Into<String>>(s: Arc<Mutex<Sandbox>>, host: S, port: u16) -> ttrpc::Server {
|
||||
pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> ttrpc::Server {
|
||||
let agent_service = Box::new(agentService {
|
||||
sandbox: s,
|
||||
test: 1,
|
||||
@@ -1454,17 +1533,13 @@ pub fn start<S: Into<String>>(s: Arc<Mutex<Sandbox>>, host: S, port: u16) -> ttr
|
||||
|
||||
let hservice = protocols::health_ttrpc::create_health(health_worker);
|
||||
|
||||
let mut addr: String = host.into();
|
||||
addr.push_str(":");
|
||||
addr.push_str(&port.to_string());
|
||||
|
||||
let server = ttrpc::Server::new()
|
||||
.bind(addr.as_str())
|
||||
.bind(server_address)
|
||||
.unwrap()
|
||||
.register_service(aservice)
|
||||
.register_service(hservice);
|
||||
|
||||
info!(sl!(), "ttRPC server started");
|
||||
info!(sl!(), "ttRPC server started"; "address" => server_address);
|
||||
|
||||
server
|
||||
}
|
||||
@@ -1486,11 +1561,7 @@ fn update_container_namespaces(
|
||||
sandbox_pidns: bool,
|
||||
) -> Result<()> {
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("Spec didn't container linux field".to_string()).into(),
|
||||
)
|
||||
}
|
||||
None => return Err(anyhow!("Spec didn't container linux field")),
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
@@ -1521,6 +1592,18 @@ fn update_container_namespaces(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn append_guest_hooks(s: &Sandbox, oci: &mut Spec) {
|
||||
if s.hooks.is_none() {
|
||||
return;
|
||||
}
|
||||
let guest_hooks = s.hooks.as_ref().unwrap();
|
||||
let mut hooks = oci.hooks.take().unwrap_or_default();
|
||||
hooks.prestart.append(&mut guest_hooks.prestart.clone());
|
||||
hooks.poststart.append(&mut guest_hooks.poststart.clone());
|
||||
hooks.poststop.append(&mut guest_hooks.poststop.clone());
|
||||
oci.hooks = Some(hooks);
|
||||
}
|
||||
|
||||
// Check is the container process installed the
|
||||
// handler for specific signal.
|
||||
fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
|
||||
@@ -1644,33 +1727,53 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn setup_bundle(spec: &Spec) -> Result<PathBuf> {
|
||||
// Setup container bundle under CONTAINER_BASE, which is cleaned up
|
||||
// before removing a container.
|
||||
// - bundle path is /<CONTAINER_BASE>/<cid>/
|
||||
// - config.json at /<CONTAINER_BASE>/<cid>/config.json
|
||||
// - container rootfs bind mounted at /<CONTAINER_BASE>/<cid>/rootfs
|
||||
// - modify container spec root to point to /<CONTAINER_BASE>/<cid>/rootfs
|
||||
fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
|
||||
if spec.root.is_none() {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
}
|
||||
let root = spec.root.as_ref().unwrap().path.as_str();
|
||||
let spec_root = spec.root.as_ref().unwrap();
|
||||
|
||||
let rootfs = fs::canonicalize(root)?;
|
||||
let bundle_path = rootfs.parent().unwrap().to_str().unwrap();
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(cid);
|
||||
let config_path = bundle_path.clone().join("config.json");
|
||||
let rootfs_path = bundle_path.clone().join("rootfs");
|
||||
|
||||
let config = format!("{}/{}", bundle_path, "config.json");
|
||||
fs::create_dir_all(&rootfs_path)?;
|
||||
BareMount::new(
|
||||
&spec_root.path,
|
||||
rootfs_path.to_str().unwrap(),
|
||||
"bind",
|
||||
MsFlags::MS_BIND,
|
||||
"",
|
||||
&sl!(),
|
||||
)
|
||||
.mount()?;
|
||||
spec.root = Some(Root {
|
||||
path: rootfs_path.to_str().unwrap().to_owned(),
|
||||
readonly: spec_root.readonly,
|
||||
});
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"{:?}",
|
||||
spec.process.as_ref().unwrap().console_size.as_ref()
|
||||
);
|
||||
let _ = spec.save(config.as_str());
|
||||
let _ = spec.save(config_path.to_str().unwrap());
|
||||
|
||||
let olddir = unistd::getcwd().chain_err(|| "cannot getcwd")?;
|
||||
unistd::chdir(bundle_path)?;
|
||||
let olddir = unistd::getcwd().context("cannot getcwd")?;
|
||||
unistd::chdir(bundle_path.to_str().unwrap())?;
|
||||
|
||||
Ok(olddir)
|
||||
}
|
||||
|
||||
fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
|
||||
if module.name == "" {
|
||||
return Err(ErrorKind::ErrorCode("Kernel module name is empty".to_string()).into());
|
||||
return Err(anyhow!("Kernel module name is empty"));
|
||||
}
|
||||
|
||||
info!(
|
||||
@@ -1702,17 +1805,16 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
|
||||
"load_kernel_module return code: {} stdout:{} stderr:{}",
|
||||
code, std_out, std_err
|
||||
);
|
||||
return Err(ErrorKind::ErrorCode(msg).into());
|
||||
}
|
||||
None => {
|
||||
return Err(ErrorKind::ErrorCode("Process terminated by signal".to_string()).into())
|
||||
return Err(anyhow!(msg));
|
||||
}
|
||||
None => return Err(anyhow!("Process terminated by signal")),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use oci::{Hook, Hooks};
|
||||
|
||||
#[test]
|
||||
fn test_load_kernel_module() {
|
||||
@@ -1734,4 +1836,22 @@ mod tests {
|
||||
let result = load_kernel_module(&m);
|
||||
assert!(result.is_ok(), "load module should success");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_append_guest_hooks() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
s.hooks = Some(Hooks {
|
||||
prestart: vec![Hook {
|
||||
path: "foo".to_string(),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
});
|
||||
let mut oci = Spec {
|
||||
..Default::default()
|
||||
};
|
||||
append_guest_hooks(&s, &mut oci);
|
||||
assert_eq!(s.hooks, oci.hooks);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,20 +9,25 @@ use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS};
|
||||
use crate::namespace::Namespace;
|
||||
use crate::namespace::NSTYPEPID;
|
||||
use crate::network::Network;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use cgroups;
|
||||
use libc::pid_t;
|
||||
use netlink::{RtnlHandle, NETLINK_ROUTE};
|
||||
use oci::LinuxNamespace;
|
||||
use oci::{Hook, Hooks};
|
||||
use protocols::agent::OnlineCPUMemRequest;
|
||||
use regex::Regex;
|
||||
use rustjail::cgroups;
|
||||
use rustjail::cgroups as rustjail_cgroups;
|
||||
use rustjail::container::BaseContainer;
|
||||
use rustjail::container::LinuxContainer;
|
||||
use rustjail::errors::*;
|
||||
use rustjail::process::Process;
|
||||
use slog::Logger;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::sync::mpsc::Sender;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::{self, Receiver, Sender};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::{thread, time};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Sandbox {
|
||||
@@ -42,12 +47,17 @@ pub struct Sandbox {
|
||||
pub no_pivot_root: bool,
|
||||
pub sender: Option<Sender<i32>>,
|
||||
pub rtnl: Option<RtnlHandle>,
|
||||
pub hooks: Option<Hooks>,
|
||||
pub event_rx: Arc<Mutex<Receiver<String>>>,
|
||||
pub event_tx: Sender<String>,
|
||||
}
|
||||
|
||||
impl Sandbox {
|
||||
pub fn new(logger: &Logger) -> Result<Self> {
|
||||
let fs_type = get_mount_fs_type("/")?;
|
||||
let logger = logger.new(o!("subsystem" => "sandbox"));
|
||||
let (tx, rx) = mpsc::channel::<String>();
|
||||
let event_rx = Arc::new(Mutex::new(rx));
|
||||
|
||||
Ok(Sandbox {
|
||||
logger: logger.clone(),
|
||||
@@ -66,6 +76,9 @@ impl Sandbox {
|
||||
no_pivot_root: fs_type.eq(TYPEROOTFS),
|
||||
sender: None,
|
||||
rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
|
||||
hooks: None,
|
||||
event_rx: event_rx,
|
||||
event_tx: tx,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -101,13 +114,7 @@ impl Sandbox {
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
|
||||
match self.storages.get_mut(path) {
|
||||
None => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Sandbox storage with path {} not found",
|
||||
path
|
||||
))
|
||||
.into())
|
||||
}
|
||||
None => return Err(anyhow!("Sandbox storage with path {} not found", path)),
|
||||
Some(count) => {
|
||||
*count -= 1;
|
||||
if *count < 1 {
|
||||
@@ -127,7 +134,7 @@ impl Sandbox {
|
||||
pub fn remove_sandbox_storage(&self, path: &str) -> Result<()> {
|
||||
let mounts = vec![path.to_string()];
|
||||
remove_mounts(&mounts)?;
|
||||
fs::remove_dir_all(path)?;
|
||||
fs::remove_dir_all(path).context(format!("failed to remove dir {:?}", path))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -164,11 +171,7 @@ impl Sandbox {
|
||||
self.shared_ipcns = match Namespace::new(&self.logger).as_ipc().setup() {
|
||||
Ok(ns) => ns,
|
||||
Err(err) => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Failed to setup persistent IPC namespace with error: {}",
|
||||
err
|
||||
))
|
||||
.into())
|
||||
return Err(anyhow!(err).context("Failed to setup persistent IPC namespace"));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -179,11 +182,7 @@ impl Sandbox {
|
||||
{
|
||||
Ok(ns) => ns,
|
||||
Err(err) => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Failed to setup persistent UTS namespace with error: {}",
|
||||
err
|
||||
))
|
||||
.into())
|
||||
return Err(anyhow!(err).context("Failed to setup persistent UTS namespace"));
|
||||
}
|
||||
};
|
||||
Ok(true)
|
||||
@@ -202,10 +201,9 @@ impl Sandbox {
|
||||
if self.sandbox_pidns.is_none() && self.containers.len() == 0 {
|
||||
let init_pid = c.init_process_pid;
|
||||
if init_pid == -1 {
|
||||
return Err(ErrorKind::ErrorCode(String::from(
|
||||
"Failed to setup pid namespace: init container pid is -1",
|
||||
))
|
||||
.into());
|
||||
return Err(anyhow!(
|
||||
"Failed to setup pid namespace: init container pid is -1"
|
||||
));
|
||||
}
|
||||
|
||||
let mut pid_ns = Namespace::new(&self.logger).as_pid();
|
||||
@@ -249,7 +247,7 @@ impl Sandbox {
|
||||
online_memory(&self.logger)?;
|
||||
}
|
||||
|
||||
let cpuset = cgroups::fs::get_guest_cpuset()?;
|
||||
let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
|
||||
|
||||
for (_, ctr) in self.containers.iter() {
|
||||
info!(self.logger, "updating {}", ctr.id.as_str());
|
||||
@@ -261,6 +259,72 @@ impl Sandbox {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn add_hooks(&mut self, dir: &str) -> Result<()> {
|
||||
let mut hooks = Hooks::default();
|
||||
if let Ok(hook) = self.find_hooks(dir, "prestart") {
|
||||
hooks.prestart = hook;
|
||||
}
|
||||
if let Ok(hook) = self.find_hooks(dir, "poststart") {
|
||||
hooks.poststart = hook;
|
||||
}
|
||||
if let Ok(hook) = self.find_hooks(dir, "poststop") {
|
||||
hooks.poststop = hook;
|
||||
}
|
||||
self.hooks = Some(hooks);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_hooks(&self, hook_path: &str, hook_type: &str) -> Result<Vec<Hook>> {
|
||||
let mut hooks = Vec::new();
|
||||
for entry in fs::read_dir(Path::new(hook_path).join(hook_type))? {
|
||||
let entry = entry?;
|
||||
// Reject non-file, symlinks and non-executable files
|
||||
if !entry.file_type()?.is_file()
|
||||
|| entry.file_type()?.is_symlink()
|
||||
|| entry.metadata()?.permissions().mode() & 0o777 & 0o111 == 0
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
let name = entry.file_name();
|
||||
let hook = Hook {
|
||||
path: Path::new(hook_path)
|
||||
.join(hook_type)
|
||||
.join(&name)
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_owned(),
|
||||
args: vec![name.to_str().unwrap().to_owned(), hook_type.to_owned()],
|
||||
..Default::default()
|
||||
};
|
||||
info!(
|
||||
self.logger,
|
||||
"found {} hook {:?} mode {:o}",
|
||||
hook_type,
|
||||
hook,
|
||||
entry.metadata()?.permissions().mode()
|
||||
);
|
||||
hooks.push(hook);
|
||||
}
|
||||
|
||||
Ok(hooks)
|
||||
}
|
||||
|
||||
pub fn run_oom_event_monitor(&self, rx: Receiver<String>, container_id: String) {
|
||||
let tx = self.event_tx.clone();
|
||||
let logger = self.logger.clone();
|
||||
|
||||
thread::spawn(move || {
|
||||
for event in rx {
|
||||
info!(logger, "got an OOM event {:?}", event);
|
||||
match tx.send(container_id.clone()) {
|
||||
Err(err) => error!(logger, "failed to send message: {:?}", err),
|
||||
Ok(_) => {}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Result<i32> {
|
||||
@@ -276,10 +340,18 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
|
||||
if re.is_match(name) {
|
||||
let file = format!("{}/{}", p.to_str().unwrap(), SYSFS_ONLINE_FILE);
|
||||
info!(logger, "{}", file.as_str());
|
||||
let c = fs::read_to_string(file.as_str())?;
|
||||
|
||||
let c = fs::read_to_string(file.as_str());
|
||||
if c.is_err() {
|
||||
continue;
|
||||
}
|
||||
let c = c.unwrap();
|
||||
|
||||
if c.trim().contains("0") {
|
||||
fs::write(file.as_str(), "1")?;
|
||||
let r = fs::write(file.as_str(), "1");
|
||||
if r.is_err() {
|
||||
continue;
|
||||
}
|
||||
count += 1;
|
||||
|
||||
if num > 0 && count == num {
|
||||
@@ -296,8 +368,37 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
// max wait for all CPUs to online will use 50 * 100 = 5 seconds.
|
||||
const ONLINE_CPUMEM_WATI_MILLIS: u64 = 50;
|
||||
const ONLINE_CPUMEM_MAX_RETRIES: u32 = 100;
|
||||
|
||||
fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
|
||||
online_resources(logger, SYSFS_CPU_ONLINE_PATH, r"cpu[0-9]+", num)
|
||||
let mut onlined_count: i32 = 0;
|
||||
|
||||
for i in 0..ONLINE_CPUMEM_MAX_RETRIES {
|
||||
let r = online_resources(
|
||||
logger,
|
||||
SYSFS_CPU_ONLINE_PATH,
|
||||
r"cpu[0-9]+",
|
||||
(num - onlined_count),
|
||||
);
|
||||
if r.is_err() {
|
||||
return r;
|
||||
}
|
||||
|
||||
onlined_count += r.unwrap();
|
||||
if onlined_count == num {
|
||||
info!(logger, "online {} CPU(s) after {} retries", num, i);
|
||||
return Ok(num);
|
||||
}
|
||||
thread::sleep(time::Duration::from_millis(ONLINE_CPUMEM_WATI_MILLIS));
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
"failed to online {} CPU(s) after {} retries",
|
||||
num,
|
||||
ONLINE_CPUMEM_MAX_RETRIES
|
||||
))
|
||||
}
|
||||
|
||||
fn online_memory(logger: &Logger) -> Result<()> {
|
||||
@@ -310,14 +411,17 @@ mod tests {
|
||||
//use rustjail::Error;
|
||||
use super::Sandbox;
|
||||
use crate::{mount::BareMount, skip_if_not_root};
|
||||
use anyhow::Error;
|
||||
use nix::mount::MsFlags;
|
||||
use oci::{Linux, Root, Spec};
|
||||
use rustjail::container::LinuxContainer;
|
||||
use rustjail::specconv::CreateOpts;
|
||||
use slog::Logger;
|
||||
use std::fs::{self, File};
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use tempfile::Builder;
|
||||
|
||||
fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), rustjail::errors::Error> {
|
||||
fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), Error> {
|
||||
let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", &logger);
|
||||
baremount.mount()
|
||||
}
|
||||
@@ -596,4 +700,26 @@ mod tests {
|
||||
let ns_path = format!("/proc/{}/ns/pid", test_pid);
|
||||
assert_eq!(s.sandbox_pidns.unwrap().path, ns_path);
|
||||
}
|
||||
#[test]
|
||||
fn add_guest_hooks() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
let tmpdir = Builder::new().tempdir().unwrap();
|
||||
let tmpdir_path = tmpdir.path().to_str().unwrap();
|
||||
|
||||
assert!(fs::create_dir_all(tmpdir.path().join("prestart")).is_ok());
|
||||
assert!(fs::create_dir_all(tmpdir.path().join("poststop")).is_ok());
|
||||
|
||||
let file = File::create(tmpdir.path().join("prestart").join("prestart.sh")).unwrap();
|
||||
let mut perm = file.metadata().unwrap().permissions();
|
||||
perm.set_mode(0o777);
|
||||
assert!(file.set_permissions(perm).is_ok());
|
||||
assert!(File::create(tmpdir.path().join("poststop").join("poststop.sh")).is_ok());
|
||||
|
||||
assert!(s.add_hooks(tmpdir_path).is_ok());
|
||||
assert!(s.hooks.is_some());
|
||||
assert!(s.hooks.as_ref().unwrap().prestart.len() == 1);
|
||||
assert!(s.hooks.as_ref().unwrap().poststart.is_empty());
|
||||
assert!(s.hooks.as_ref().unwrap().poststop.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
pub const AGENT_VERSION: &str = "1.4.5";
|
||||
pub const API_VERSION: &str = "0.0.1";
|
||||
16
src/agent/src/version.rs.in
Normal file
16
src/agent/src/version.rs.in
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (c) 2020 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
//
|
||||
// WARNING: This file is auto-generated - DO NOT EDIT!
|
||||
//
|
||||
|
||||
pub const AGENT_VERSION: &str = "@AGENT_VERSION@";
|
||||
pub const API_VERSION: &str = "@API_VERSION@";
|
||||
pub const VERSION_COMMIT: &str = "@VERSION_COMMIT@";
|
||||
pub const GIT_COMMIT: &str = "@COMMIT@";
|
||||
pub const AGENT_NAME: &str = "@AGENT_NAME@";
|
||||
pub const AGENT_DIR: &str = "@BINDIR@";
|
||||
pub const AGENT_PATH: &str = "@BINDIR@/@AGENT_NAME@";
|
||||
@@ -1,5 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
## This repo is part of [Kata Containers](https://katacontainers.io)
|
||||
|
||||
For details on how to contribute to the Kata Containers project, please see the main [contributing document](https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md).
|
||||
@@ -43,7 +43,8 @@ include $(ARCH_FILE)
|
||||
PROJECT_TYPE = kata
|
||||
PROJECT_NAME = Kata Containers
|
||||
PROJECT_TAG = kata-containers
|
||||
PROJECT_URL = https://github.com/kata-containers
|
||||
PROJECT_ORG = $(PROJECT_TAG)
|
||||
PROJECT_URL = https://github.com/$(PROJECT_ORG)
|
||||
PROJECT_BUG_URL = $(PROJECT_URL)/kata-containers/issues/new
|
||||
|
||||
# list of scripts to install
|
||||
@@ -239,6 +240,7 @@ ifneq (,$(QEMUCMD))
|
||||
# qemu-specific options (all should be suffixed by "_QEMU")
|
||||
DEFBLOCKSTORAGEDRIVER_QEMU := virtio-scsi
|
||||
DEFNETWORKMODEL_QEMU := tcfilter
|
||||
KERNELTYPE = uncompressed
|
||||
KERNELNAME = $(call MAKE_KERNEL_NAME,$(KERNELTYPE))
|
||||
KERNELPATH = $(KERNELDIR)/$(KERNELNAME)
|
||||
endif
|
||||
@@ -580,7 +582,6 @@ $(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
|
||||
.PHONY: \
|
||||
check \
|
||||
check-go-static \
|
||||
check-go-test \
|
||||
coverage \
|
||||
default \
|
||||
install \
|
||||
@@ -627,6 +628,7 @@ $(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit
|
||||
-e "s|@PKGRUNDIR@|$(PKGRUNDIR)|g" \
|
||||
-e "s|@NETMONPATH@|$(NETMONPATH)|g" \
|
||||
-e "s|@PROJECT_BUG_URL@|$(PROJECT_BUG_URL)|g" \
|
||||
-e "s|@PROJECT_ORG@|$(PROJECT_ORG)|g" \
|
||||
-e "s|@PROJECT_URL@|$(PROJECT_URL)|g" \
|
||||
-e "s|@PROJECT_NAME@|$(PROJECT_NAME)|g" \
|
||||
-e "s|@PROJECT_TAG@|$(PROJECT_TAG)|g" \
|
||||
@@ -685,9 +687,9 @@ go-test: $(GENERATED_FILES)
|
||||
go test -v -mod=vendor ./...
|
||||
|
||||
check-go-static:
|
||||
$(QUIET_CHECK).ci/static-checks.sh
|
||||
$(QUIET_CHECK).ci/go-no-os-exit.sh ./cli
|
||||
$(QUIET_CHECK).ci/go-no-os-exit.sh ./virtcontainers
|
||||
$(QUIET_CHECK)../../ci/static-checks.sh
|
||||
$(QUIET_CHECK)../../ci/go-no-os-exit.sh ./cli
|
||||
$(QUIET_CHECK)../../ci/go-no-os-exit.sh ./virtcontainers
|
||||
|
||||
coverage:
|
||||
go test -v -mod=vendor -covermode=atomic -coverprofile=coverage.txt ./...
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user