Mirror of https://github.com/kata-containers/kata-containers.git (synced 2026-02-22 14:54:23 +00:00)

Compare commits: 520 commits
| SHA1 |
|---|
| 5b7009f2f9 |
| b4d3a79b56 |
| 38b5818813 |
| 4d0658e3fa |
| 45857859ec |
| 542b42ad58 |
| d45f7e54c4 |
| 62c5e4e9a9 |
| 5f122a0760 |
| 98121472da |
| 656a3e06a7 |
| 1b93cd1661 |
| 411482bf19 |
| 3dd655d60d |
| 1f799d6a29 |
| b14921937a |
| c72fdaf916 |
| 05db886e27 |
| af75ce04c1 |
| 68956ad127 |
| 6f16071359 |
| 95fbe46891 |
| 360e01c0f4 |
| b6bf1c3f2c |
| 76ef07a22d |
| 9aa4afee63 |
| 46a6c52ef4 |
| 5b1df532da |
| d71e7bbd59 |
| 5f5b4f7da9 |
| 3623c033c7 |
| fe9ea1351f |
| 61ec234b6a |
| 8f56ad57ad |
| aa4d803e35 |
| 04f0fcc5eb |
| 72a13f6064 |
| 68c962601c |
| 112a3d2bae |
| a1571721dd |
| 5885f005f1 |
| 30990872f1 |
| 27affb2a63 |
| 1b8b2f9dce |
| 63e6abfa9d |
| 423778aec7 |
| b30deca617 |
| 64009be3d7 |
| 2ac01cac0d |
| 6cd021ce86 |
| 855e63a121 |
| 86905cdcdf |
| c720869eef |
| c3cb65d0bb |
| 959dc3226b |
| 44414e1708 |
| 712177a337 |
| 25a961f5f3 |
| c916c98ab5 |
| 111ad87828 |
| ebf6c83839 |
| 94807e73e7 |
| d4db7ed3c8 |
| 9e1df04e66 |
| b26cd250c8 |
| a036584ed9 |
| 29f64d6181 |
| 9ba01f36de |
| 42fd229f26 |
| 55cdd92b57 |
| ca8abc6cae |
| 1c1034255a |
| 92d5dbb20c |
| 5cef4d9837 |
| 31a13e8081 |
| 3cd900da6d |
| fe8b246ae4 |
| c1aac0cdea |
| a6c0bf8823 |
| b1454dbcaa |
| fa1bf8f75c |
| fb89a83c89 |
| 18283fd65a |
| 5ddbce0746 |
| a5dd0cd3ab |
| 53bcaf0547 |
| 299829aec0 |
| 2ca6319f18 |
| 323fb9cfe8 |
| b3bd4e432c |
| 65de96e774 |
| 74b2ab001d |
| 4648d8bec7 |
| c57f8ff669 |
| f4437980b4 |
| 54544dd617 |
| a06c6dd861 |
| f087380180 |
| 411888796b |
| 07e76c7392 |
| 821dd63710 |
| 8797a87bfd |
| babd9924c6 |
| f0041f01ed |
| 842d278206 |
| af4f3cdfbd |
| bfc3c45854 |
| e42fce6ece |
| 938de13c50 |
| c1fa5d60b7 |
| 325bafa7d8 |
| cde438ceb7 |
| c7e4548fc2 |
| 2e90c62c31 |
| 7dd8b78d44 |
| d70fe49b28 |
| 7de2cecfff |
| 760c899efd |
| c1fff85805 |
| cc0561bcff |
| 8e34807d49 |
| 4906228701 |
| f476470c37 |
| 8c3846d431 |
| f8ad25e875 |
| e410c04622 |
| 8ad86e2ec9 |
| c9af89d094 |
| fb56efd658 |
| 77176cd7b9 |
| f80723a483 |
| 63f931b719 |
| ad49a11761 |
| 59537ceb2d |
| 50a959e6cf |
| c53f063acd |
| 6233fa95d1 |
| 31164eeedf |
| ef381b084d |
| 812818d381 |
| 216c066795 |
| 3aebead189 |
| 9f643ac9c8 |
| 8225457dd5 |
| 056d85de2c |
| 30460044a5 |
| 895150e383 |
| 69230fac97 |
| fa11294b0f |
| 9b49a6ddc6 |
| 65b4261aa8 |
| 502a78730b |
| 93a1780bdb |
| 9373ec7d80 |
| 5422a056f2 |
| de232b8030 |
| c3e6b66051 |
| f9278f22c3 |
| 150e8aba6d |
| 55c8c7226d |
| 7849c7977c |
| 61fd408594 |
| b11b6e3756 |
| 2e7e81b8d8 |
| 9ad37bfd90 |
| c17a6f1b53 |
| f5a6522398 |
| 9cad7fb045 |
| 124c0e7af4 |
| 3c7fe93997 |
| 01e29fc1fd |
| fb54dfd648 |
| 012a76d098 |
| 48c0cf5b5d |
| 518137f781 |
| fb711e0e8e |
| a43f95d01b |
| f684d00d50 |
| 1f610ea5cc |
| abe89586c6 |
| a8feee68a8 |
| a2d9633dad |
| b6873f9581 |
| b9c0f7fb09 |
| 981c0b1646 |
| 656d72bd74 |
| 683755483b |
| e46364ad98 |
| 9931d4cbf0 |
| 20b999c479 |
| 2e77eb4bdb |
| 512a92a543 |
| e528b63f4f |
| c362257142 |
| 0f4b5c08fe |
| 433ee7c92a |
| 4d1c0a3235 |
| 20129dea87 |
| 6125587750 |
| cfece9b796 |
| f454bcdef1 |
| 0364184f90 |
| c61d075cf7 |
| 132d0e9927 |
| 4e9972fb50 |
| ccf21299cc |
| c6c8018730 |
| 2611779255 |
| 4960f43ef6 |
| 80a831e537 |
| 4831193bde |
| 813e36e615 |
| ad4a811c39 |
| 96c47df5d8 |
| 73566bb4b9 |
| acb7a16522 |
| bb9bbc7523 |
| bb196d56ca |
| 5df9cadc63 |
| 59566c0f69 |
| dc2e8cd317 |
| edf3cba463 |
| 75b9f3fa3c |
| 72691ed6af |
| d08bb20e98 |
| 81e065f7bd |
| 668672643f |
| df7529ee18 |
| a87698fe56 |
| 5ade87c16e |
| bff9f90d24 |
| 0b34a8a186 |
| 89a5faef7a |
| 4f49423c91 |
| 4cf502fb20 |
| 9d0d5b9361 |
| 04be5521d0 |
| 554dff20c3 |
| 9259646235 |
| 91cae52fe3 |
| a924faeead |
| a51164f314 |
| e672401f7f |
| a19321f5ad |
| 7f71cdc290 |
| c13380ba69 |
| 6a77af527f |
| 401e69eab0 |
| c8d783e5ef |
| 69b1a072f2 |
| da7ba2ef71 |
| 8488d02c23 |
| 64a9363925 |
| f4979a9aa5 |
| 1485634e28 |
| be165c40f9 |
| 6d9d8e0660 |
| d2f17ee55a |
| 6f79928df7 |
| 516ed240f4 |
| 423162d2aa |
| 965c0b1ad2 |
| e8902bb373 |
| df0cc78e57 |
| 441399df1f |
| ec20089c1b |
| 9d524b29ad |
| 889ed4f14b |
| 097fe823e5 |
| 07bdf75913 |
| 1ba29c3e0c |
| 226abc4a47 |
| dc5f0c7d0c |
| 186cec6889 |
| b307531c29 |
| c4cc16efcd |
| caabd54b6e |
| 8f6eca517a |
| df486533fa |
| 7ceeeba9a2 |
| d4d178359b |
| e23322b95c |
| 3a655c4198 |
| 995c14d429 |
| a438d6114b |
| 335ddd5876 |
| 6c2b9f67d7 |
| eb9836ff8e |
| bda68b16f1 |
| 03170c2651 |
| 38b61bb743 |
| 74a748f36e |
| c6a5814a91 |
| 761786324e |
| d6924182f3 |
| 94a6edcfa3 |
| e52d6b1d0b |
| 54f47cceaa |
| ebacd986bb |
| 305532db02 |
| 20ef9e9f5c |
| 40b1c79c97 |
| 62864b5041 |
| 404515f568 |
| d21c3c340d |
| 386af028be |
| 0782f4a43b |
| 676b1d6048 |
| 2e5c4a9245 |
| 8f8c2215f4 |
| 4f80ea1962 |
| cfa3e1e933 |
| a1e16ff6e0 |
| dc92e134aa |
| 4af8f0a999 |
| 9c73babdb2 |
| 0b065444fc |
| 245fa7caf5 |
| eeff63375f |
| 94695869b0 |
| d3fe110765 |
| 2300521c1b |
| aa9d875a8d |
| 6e399dcb61 |
| be223b1db5 |
| 7eb74e51be |
| b772cc6b45 |
| b08ea1fd99 |
| 5eb109c6da |
| 92a7b2f5f0 |
| 6a51c6615a |
| d9e7966714 |
| e708ef3c7d |
| 44c6d5bcea |
| e68cb28129 |
| 322c6dab66 |
| 4d5e446643 |
| 7040b297c5 |
| a48d13f68d |
| 756a07537c |
| 060fed814c |
| 5453128159 |
| 79a060ac68 |
| c84be3c6cd |
| 69122d2a05 |
| 01c878e293 |
| dd78e4915c |
| 1cda87bea7 |
| d50f98b603 |
| e64c2244f2 |
| e318023ed9 |
| b89af0b373 |
| cc560cb85a |
| a259b1360d |
| 6cd4497b66 |
| ef1ae5bc93 |
| 9b27329281 |
| 67015ac1d7 |
| 2d67b1ee1d |
| 738ae8c60e |
| e231501558 |
| a36e9ba87f |
| e4bae434de |
| 8add48d759 |
| 1f22f9ca38 |
| c95dd8f57e |
| bdb0f6b471 |
| c5b39c5686 |
| 5fdbdaafd3 |
| 8fe5b97c2b |
| 1e78f5e66a |
| 252044613d |
| 51383243b7 |
| 4e7b6306b4 |
| fd20824a00 |
| 8fbf6c4e14 |
| 35360d4ad6 |
| 578678e051 |
| 527d741c07 |
| e167237b13 |
| 77ea087ae7 |
| 207e325a0d |
| 6d5a329535 |
| da10350d9b |
| 480c4d9716 |
| ef8ba4bbec |
| fa3aced1ac |
| 3b7955a02d |
| ead111abf7 |
| 039a15efff |
| a5b72720d2 |
| f921688c8c |
| fc7ffe8cfc |
| ff36e6a72e |
| b34374d554 |
| f00724a1aa |
| d4041f1a62 |
| 1d47e893cb |
| 64fbf8435c |
| d42bc8c76f |
| 307e30fd1b |
| f54d999c3a |
| 91af844497 |
| 1e79f7c9d9 |
| ac65feeae3 |
| fbb2f0afd0 |
| 5e05de2a51 |
| fd8e162f25 |
| e3adbf95a5 |
| 341e098329 |
| deee3cf4a2 |
| c7a7fc1267 |
| 6fdafd47ef |
| 8327fcd5c2 |
| 7b4fd200ca |
| 51a9de8079 |
| 75e2e5ab46 |
| 6637d92cb2 |
| 6da2eac059 |
| 035f539bbf |
| 5451c8da4a |
| c9cbdd085f |
| 39d6b826c1 |
| fe52465bdb |
| 4decf30b3e |
| 433a5de354 |
| 2656b466b9 |
| fca91c4fa7 |
| e7e4ba9fc4 |
| 1559e5390c |
| 45e1268c70 |
| e636e67e92 |
| 8cc68970ed |
| 7159a35d20 |
| 9f3b2aaf6a |
| aae311caaf |
| b185e6e704 |
| 4cbcc23a55 |
| 79a7da4e72 |
| 46522a3e46 |
| 9d4cd77560 |
| e71592d5da |
| f68f73a849 |
| f19811df43 |
| d5a351877d |
| c68f0360f0 |
| a3d8bc3346 |
| d85ef3f6a5 |
| 5f7115eca8 |
| 39d438763c |
| 18e6267730 |
| bb7a722ce0 |
| f5e6961dcb |
| a570b6a0a6 |
| 3c79630b87 |
| a2926324f5 |
| 637b519c5a |
| 822efa344d |
| bf6b1102db |
| 85bb1e5e45 |
| 934f1f6a8b |
| e1ba87408c |
| a7b6d16c88 |
| ff572fa219 |
| 15494d7e06 |
| 6f2d89ef6f |
| bb66dbdccc |
| 95ab38ae54 |
| 389bbcb183 |
| af91084887 |
| 365acda643 |
| 263bbe937f |
| 378a4ce7a2 |
| 3454785c9b |
| 77ba9dcdbb |
| 48ecac3c98 |
| 7a1085fb74 |
| 6237b9991c |
| 855878b803 |
| f740f97bed |
| 69d157f78b |
| 0214eac4c5 |
| 8962cce365 |
| 7c41af4082 |
| 27c0dc260c |
| 7152448f21 |
| d17aaba475 |
| 6f1bdd7079 |
| 7670792f97 |
| e3c8c9023f |
| ea34b30839 |
| 3f68265d60 |
| ab6b1cbfe9 |
| 5691e66e1b |
| e502fb23ea |
| e1fcc2529c |
| d22706c060 |
| 429ab089f7 |
| 0a1d7893ff |
| 6c3e03958a |
| 08c34f2ac2 |
| dfb8c965e6 |
| 23bd6fe5da |
| 13f6418c46 |
| 4762da105c |
| 9b34595ad0 |
| d57648eb13 |
| 02f6db595c |
| 18834810e6 |
| 7854cf007a |
| f2bdd846fd |
| 85d455791c |
| 03cf3aa168 |
| c624e7fd97 |
| 522b9e33c3 |
| 40c406506b |
| 500356ace1 |
| 87c632baf2 |
| 76b70a7a82 |
| af44b7a591 |
**.github/workflows/cargo-deny-runner.yaml** (vendored, 9 lines)

```diff
@@ -1,12 +1,5 @@
 name: Cargo Crates Check Runner
-on:
-  pull_request:
-    types:
-      - opened
-      - edited
-      - reopened
-      - synchronize
-    paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
+on: [pull_request]
 jobs:
   cargo-deny-runner:
     runs-on: ubuntu-latest
```
**.github/workflows/cc-payload-after-push.yaml** (vendored, new file, 98 lines)

```diff
@@ -0,0 +1,98 @@
+name: CI | Publish Kata Containers payload for Confidential Containers
+on:
+  push:
+    branches:
+      - CCv0
+
+jobs:
+  build-asset:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        asset:
+          - cc-cloud-hypervisor
+          - cc-kernel
+          - cc-qemu
+          - cc-rootfs-image
+          - cc-shim-v2
+          - cc-virtiofsd
+          - cc-sev-kernel
+          - cc-sev-ovmf
+          - cc-sev-rootfs-initrd
+          - cc-tdx-kernel
+          - cc-tdx-rootfs-image
+          - cc-tdx-qemu
+          - cc-tdx-td-shim
+          - cc-tdx-tdvf
+    steps:
+      - name: Login to Kata Containers quay.io
+        uses: docker/login-action@v2
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_DEPLOYER_USERNAME }}
+          password: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
+
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # This is needed in order to keep the commit ids history
+      - name: Build ${{ matrix.asset }}
+        run: |
+          make "${KATA_ASSET}-tarball"
+          build_dir=$(readlink -f build)
+          # store-artifact does not work with symlink
+          sudo cp -r "${build_dir}" "kata-build"
+        env:
+          KATA_ASSET: ${{ matrix.asset }}
+          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
+          PUSH_TO_REGISTRY: yes
+
+      - name: store-artifact ${{ matrix.asset }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: kata-artifacts
+          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
+          retention-days: 1
+          if-no-files-found: error
+
+  create-kata-tarball:
+    runs-on: ubuntu-latest
+    needs: build-asset
+    steps:
+      - uses: actions/checkout@v3
+      - name: get-artifacts
+        uses: actions/download-artifact@v3
+        with:
+          name: kata-artifacts
+          path: kata-artifacts
+      - name: merge-artifacts
+        run: |
+          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+      - name: store-artifacts
+        uses: actions/upload-artifact@v3
+        with:
+          name: kata-static-tarball
+          path: kata-static.tar.xz
+          retention-days: 1
+          if-no-files-found: error
+
+  kata-payload:
+    needs: create-kata-tarball
+    runs-on: ubuntu-latest
+    steps:
+      - name: Login to Confidential Containers quay.io
+        uses: docker/login-action@v2
+        with:
+          registry: quay.io
+          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
+          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}
+
+      - uses: actions/checkout@v3
+      - name: get-kata-tarball
+        uses: actions/download-artifact@v3
+        with:
+          name: kata-static-tarball
+
+      - name: build-and-push-kata-payload
+        id: build-and-push-kata-payload
+        run: |
+          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh $(pwd)/kata-static.tar.xz "quay.io/confidential-containers/runtime-payload-ci" "kata-containers-latest"
```
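Because each `build-asset` job is a plain `make` invocation, the payload build can be reproduced outside of Actions. A minimal local sketch of what one matrix entry does (assumes a checked-out CCv0 tree with Docker available; the `cc-kernel` choice is just one entry from the matrix above):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Build one confidential-containers asset tarball, as the build-asset job does.
export KATA_ASSET="cc-kernel"                 # any entry from the workflow matrix
export TAR_OUTPUT="${KATA_ASSET}.tar.gz"

make "${KATA_ASSET}-tarball"

# The workflow copies the build dir because upload-artifact cannot follow symlinks.
build_dir=$(readlink -f build)
sudo cp -r "${build_dir}" kata-build
ls kata-build/kata-static-"${KATA_ASSET}".tar.xz
```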
**.github/workflows/cc-payload.yaml** (vendored, new file, 88 lines)

```diff
@@ -0,0 +1,88 @@
+name: Publish Kata Containers payload for Confidential Containers
+on:
+  push:
+    tags:
+      - 'CC\-[0-9]+.[0-9]+.[0-9]+'
+
+jobs:
+  build-asset:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        asset:
+          - cc-cloud-hypervisor
+          - cc-kernel
+          - cc-qemu
+          - cc-rootfs-image
+          - cc-shim-v2
+          - cc-virtiofsd
+          - cc-sev-kernel
+          - cc-sev-ovmf
+          - cc-sev-rootfs-initrd
+          - cc-tdx-kernel
+          - cc-tdx-rootfs-image
+          - cc-tdx-qemu
+          - cc-tdx-td-shim
+          - cc-tdx-tdvf
+    steps:
+      - uses: actions/checkout@v3
+      - name: Build ${{ matrix.asset }}
+        run: |
+          make "${KATA_ASSET}-tarball"
+          build_dir=$(readlink -f build)
+          # store-artifact does not work with symlink
+          sudo cp -r "${build_dir}" "kata-build"
+        env:
+          KATA_ASSET: ${{ matrix.asset }}
+          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
+
+      - name: store-artifact ${{ matrix.asset }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: kata-artifacts
+          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
+          retention-days: 1
+          if-no-files-found: error
+
+  create-kata-tarball:
+    runs-on: ubuntu-latest
+    needs: build-asset
+    steps:
+      - uses: actions/checkout@v3
+      - name: get-artifacts
+        uses: actions/download-artifact@v3
+        with:
+          name: kata-artifacts
+          path: kata-artifacts
+      - name: merge-artifacts
+        run: |
+          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+      - name: store-artifacts
+        uses: actions/upload-artifact@v3
+        with:
+          name: kata-static-tarball
+          path: kata-static.tar.xz
+          retention-days: 1
+          if-no-files-found: error
+
+  kata-payload:
+    needs: create-kata-tarball
+    runs-on: ubuntu-latest
+    steps:
+      - name: Login to quay.io
+        uses: docker/login-action@v2
+        with:
+          registry: quay.io
+          username: ${{ secrets.COCO_QUAY_DEPLOYER_USERNAME }}
+          password: ${{ secrets.COCO_QUAY_DEPLOYER_PASSWORD }}
+
+      - uses: actions/checkout@v3
+      - name: get-kata-tarball
+        uses: actions/download-artifact@v3
+        with:
+          name: kata-static-tarball
+
+      - name: build-and-push-kata-payload
+        id: build-and-push-kata-payload
+        run: |
+          ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh $(pwd)/kata-static.tar.xz
```
**.github/workflows/commit-message-check.yaml** (vendored, 4 lines)

```diff
@@ -47,7 +47,7 @@ jobs:
         uses: tim-actions/commit-message-checker-with-regex@v0.3.1
         with:
           commits: ${{ steps.get-pr-commits.outputs.commits }}
-          pattern: '^.{0,75}(\n.*)*$'
+          pattern: '^.{0,75}(\n.*)*$|^Merge pull request (?:kata-containers)?#[\d]+ from.*'
           error: 'Subject too long (max 75)'
           post_error: ${{ env.error_msg }}

@@ -95,6 +95,6 @@ jobs:
         uses: tim-actions/commit-message-checker-with-regex@v0.3.1
         with:
           commits: ${{ steps.get-pr-commits.outputs.commits }}
-          pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:'
+          pattern: '^[\s\t]*[^:\s\t]+[\s\t]*:|^Merge pull request (?:kata-containers)?#[\d]+ from.*'
           error: 'Failed to find subsystem in subject'
           post_error: ${{ env.error_msg }}
```
||||
10
.github/workflows/darwin-tests.yaml
vendored
10
.github/workflows/darwin-tests.yaml
vendored
@@ -5,16 +5,20 @@ on:
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
|
||||
|
||||
name: Darwin tests
|
||||
jobs:
|
||||
test:
|
||||
runs-on: macos-latest
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: [1.16.x, 1.17.x]
|
||||
os: [macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.19.3
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
- name: Build utils
|
||||
|
||||
**.github/workflows/deploy-ccv0-demo.yaml** (vendored, new file, 129 lines)

```diff
@@ -0,0 +1,129 @@
+on:
+  issue_comment:
+    types: [created, edited]
+
+name: deploy-ccv0-demo
+
+jobs:
+  check-comment-and-membership:
+    runs-on: ubuntu-latest
+    if: |
+      github.event.issue.pull_request
+      && github.event_name == 'issue_comment'
+      && github.event.action == 'created'
+      && startsWith(github.event.comment.body, '/deploy-ccv0-demo')
+    steps:
+      - name: Check membership
+        uses: kata-containers/is-organization-member@1.0.1
+        id: is_organization_member
+        with:
+          organization: kata-containers
+          username: ${{ github.event.comment.user.login }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Fail if not member
+        run: |
+          result=${{ steps.is_organization_member.outputs.result }}
+          if [ $result == false ]; then
+            user=${{ github.event.comment.user.login }}
+            echo Either ${user} is not part of the kata-containers organization
+            echo or ${user} has its Organization Visibility set to Private at
+            echo https://github.com/orgs/kata-containers/people?query=${user}
+            echo
+            echo Ensure you change your Organization Visibility to Public and
+            echo trigger the test again.
+            exit 1
+          fi
+
+  build-asset:
+    runs-on: ubuntu-latest
+    needs: check-comment-and-membership
+    strategy:
+      matrix:
+        asset:
+          - cloud-hypervisor
+          - firecracker
+          - kernel
+          - qemu
+          - rootfs-image
+          - rootfs-initrd
+          - shim-v2
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install docker
+        run: |
+          curl -fsSL https://test.docker.com -o test-docker.sh
+          sh test-docker.sh
+
+      - name: Prepare confidential container rootfs
+        if: ${{ matrix.asset == 'rootfs-initrd' }}
+        run: |
+          pushd include_rootfs/etc
+          curl -LO https://raw.githubusercontent.com/confidential-containers/documentation/main/demos/ssh-demo/aa-offline_fs_kbc-keys.json
+          mkdir kata-containers
+          envsubst < docs/how-to/data/confidential-agent-config.toml.in > kata-containers/agent.toml
+          popd
+        env:
+          AA_KBC_PARAMS: offline_fs_kbc::null
+
+      - name: Build ${{ matrix.asset }}
+        run: |
+          make "${KATA_ASSET}-tarball"
+          build_dir=$(readlink -f build)
+          # store-artifact does not work with symlink
+          sudo cp -r "${build_dir}" "kata-build"
+        env:
+          AA_KBC: offline_fs_kbc
+          INCLUDE_ROOTFS: include_rootfs
+          KATA_ASSET: ${{ matrix.asset }}
+          TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
+
+      - name: store-artifact ${{ matrix.asset }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: kata-artifacts
+          path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
+          if-no-files-found: error
+
+  create-kata-tarball:
+    runs-on: ubuntu-latest
+    needs: build-asset
+    steps:
+      - uses: actions/checkout@v2
+      - name: get-artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: kata-artifacts
+          path: kata-artifacts
+      - name: merge-artifacts
+        run: |
+          ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
+      - name: store-artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: kata-static-tarball
+          path: kata-static.tar.xz
+
+  kata-deploy:
+    needs: create-kata-tarball
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: get-kata-tarball
+        uses: actions/download-artifact@v2
+        with:
+          name: kata-static-tarball
+      - name: build-and-push-kata-deploy-ci
+        id: build-and-push-kata-deploy-ci
+        run: |
+          tag=$(echo $GITHUB_REF | cut -d/ -f3-)
+          pushd $GITHUB_WORKSPACE
+          git checkout $tag
+          pkg_sha=$(git rev-parse HEAD)
+          popd
+          mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
+          docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/confidential-containers/runtime-payload:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
+          docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
+          docker push quay.io/confidential-containers/runtime-payload:$pkg_sha
+          mkdir -p packaging/kata-deploy
+          ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
+          echo "::set-output name=PKG_SHA::${pkg_sha}"
```
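One modernization note: the final `::set-output` workflow command has since been deprecated by GitHub; on current runners the supported mechanism is appending to the `GITHUB_OUTPUT` file. A sketch of the drop-in replacement for that last line:

```bash
# Deprecated workflow command, as used above:
#   echo "::set-output name=PKG_SHA::${pkg_sha}"
# Current equivalent on GitHub-hosted runners:
echo "PKG_SHA=${pkg_sha}" >> "$GITHUB_OUTPUT"
```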
**.github/workflows/docs-url-alive-check.yaml** (vendored, 8 lines)

```diff
@@ -5,7 +5,11 @@ on:
 name: Docs URL Alive Check
 jobs:
   test:
-    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        go-version: [1.17.x]
+        os: [ubuntu-20.04]
+    runs-on: ${{ matrix.os }}
     # don't run this action on forks
     if: github.repository_owner == 'kata-containers'
     env:
@@ -14,7 +18,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.19.3
+          go-version: ${{ matrix.go-version }}
         env:
           GOPATH: ${{ runner.workspace }}/kata-containers
       - name: Set env
```
**.github/workflows/kata-deploy-push.yaml** (vendored, 8 lines)

```diff
@@ -18,7 +18,6 @@ jobs:
       matrix:
         asset:
           - kernel
-          - kernel-dragonball-experimental
           - shim-v2
           - qemu
           - cloud-hypervisor
@@ -26,9 +25,14 @@ jobs:
           - rootfs-image
           - rootfs-initrd
           - virtiofsd
-          - nydus
     steps:
       - uses: actions/checkout@v2
+      - name: Install docker
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          curl -fsSL https://test.docker.com -o test-docker.sh
+          sh test-docker.sh
+
       - name: Build ${{ matrix.asset }}
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
```
**.github/workflows/kata-deploy-test.yaml** (vendored, 7 lines)

```diff
@@ -50,8 +50,6 @@ jobs:
           - cloud-hypervisor
           - firecracker
           - kernel
-          - kernel-dragonball-experimental
-          - nydus
           - qemu
           - rootfs-image
           - rootfs-initrd
@@ -72,6 +70,11 @@ jobs:
         with:
           ref: ${{ steps.get-PR-ref.outputs.pr-ref }}

+      - name: Install docker
+        run: |
+          curl -fsSL https://test.docker.com -o test-docker.sh
+          sh test-docker.sh
+
       - name: Build ${{ matrix.asset }}
         run: |
           make "${KATA_ASSET}-tarball"
```
**.github/workflows/release.yaml** (vendored, 7 lines)

```diff
@@ -13,8 +13,6 @@ jobs:
           - cloud-hypervisor
           - firecracker
           - kernel
-          - kernel-dragonball-experimental
-          - nydus
           - qemu
           - rootfs-image
           - rootfs-initrd
@@ -22,6 +20,11 @@ jobs:
           - virtiofsd
     steps:
       - uses: actions/checkout@v2
+      - name: Install docker
+        run: |
+          curl -fsSL https://test.docker.com -o test-docker.sh
+          sh test-docker.sh
+
       - name: Build ${{ matrix.asset }}
         run: |
           ./tools/packaging/kata-deploy/local-build/kata-deploy-copy-yq-installer.sh
```
**.github/workflows/snap-release.yaml** (vendored, 16 lines)

```diff
@@ -4,9 +4,6 @@ on:
     tags:
       - '[0-9]+.[0-9]+.[0-9]+*'

-env:
-  SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.snapcraft_token }}
-
 jobs:
   release-snap:
     runs-on: ubuntu-20.04
@@ -17,16 +14,9 @@ jobs:
           fetch-depth: 0

       - name: Install Snapcraft
-        run: |
-          # Required to avoid snapcraft install failure
-          sudo chown root:root /
-
-          # "--classic" is needed for the GitHub action runner
-          # environment.
-          sudo snap install snapcraft --classic
-
-          # Allow other parts to access snap binaries
-          echo /snap/bin >> "$GITHUB_PATH"
+        uses: samuelmeuli/action-snapcraft@v1
+        with:
+          snapcraft_token: ${{ secrets.snapcraft_token }}

       - name: Build snap
         run: |
```
**.github/workflows/snap.yaml** (vendored, 12 lines)

```diff
@@ -6,7 +6,6 @@ on:
       - synchronize
       - reopened
       - edited
-    paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]

 jobs:
   test:
@@ -20,16 +19,7 @@ jobs:

       - name: Install Snapcraft
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          # Required to avoid snapcraft install failure
-          sudo chown root:root /
-
-          # "--classic" is needed for the GitHub action runner
-          # environment.
-          sudo snap install snapcraft --classic
-
-          # Allow other parts to access snap binaries
-          echo /snap/bin >> "$GITHUB_PATH"
+        uses: samuelmeuli/action-snapcraft@v1

       - name: Build snap
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
```
**.github/workflows/static-checks-dragonball.yaml** (vendored, deleted, 33 lines)

```diff
@@ -1,33 +0,0 @@
-on:
-  pull_request:
-    types:
-      - opened
-      - edited
-      - reopened
-      - synchronize
-    paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
-
-name: Static checks dragonball
-jobs:
-  test-dragonball:
-    runs-on: self-hosted
-    env:
-      RUST_BACKTRACE: "1"
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set env
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
-      - name: Install Rust
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          ./ci/install_rust.sh
-          PATH=$PATH:"$HOME/.cargo/bin"
-      - name: Run Unit Test
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
-        run: |
-          cd src/dragonball
-          cargo version
-          rustc --version
-          sudo -E env PATH=$PATH LIBC=gnu SUPPORT_VIRTUALIZATION=true make test
```
**.github/workflows/static-checks.yaml** (vendored, 328 lines)

```diff
@@ -8,16 +8,12 @@ on:

 name: Static checks
 jobs:
-  static-checks:
-    runs-on: ubuntu-20.04
+  check-vendored-code:
     strategy:
       matrix:
-        cmd:
-          - "make vendor"
-          - "make static-checks"
-          - "make check"
-          - "make test"
-          - "sudo -E PATH=\"$PATH\" make test"
+        go-version: [1.16.x, 1.17.x]
+        os: [ubuntu-20.04]
+    runs-on: ${{ matrix.os }}
     env:
       TRAVIS: "true"
       TRAVIS_BRANCH: ${{ github.base_ref }}
@@ -26,33 +22,13 @@
       RUST_BACKTRACE: "1"
       target_branch: ${{ github.base_ref }}
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-          path: ./src/github.com/${{ github.repository }}
       - name: Install Go
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
        uses: actions/setup-go@v3
        with:
-          go-version: 1.19.3
+          go-version: ${{ matrix.go-version }}
        env:
          GOPATH: ${{ runner.workspace }}/kata-containers
-      - name: Check kernel config version
-        run: |
-          cd "${{ github.workspace }}/src/github.com/${{ github.repository }}"
-          kernel_dir="tools/packaging/kernel/"
-          kernel_version_file="${kernel_dir}kata_config_version"
-          modified_files=$(git diff --name-only origin/main..HEAD)
-          result=$(git whatchanged origin/main..HEAD "${kernel_dir}" >>"/dev/null")
-          if git whatchanged origin/main..HEAD "${kernel_dir}" >>"/dev/null"; then
-            echo "Kernel directory has changed, checking if $kernel_version_file has been updated"
-            if echo "$modified_files" | grep -v "README.md" | grep "${kernel_dir}" >>"/dev/null"; then
-              echo "$modified_files" | grep "$kernel_version_file" >>/dev/null || ( echo "Please bump version in $kernel_version_file" && exit 1)
-            else
-              echo "Readme file changed, no need for kernel config version update."
-            fi
-            echo "Check passed"
-          fi
       - name: Setup GOPATH
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
@@ -65,6 +41,68 @@
         run: |
           echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
           echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+      - name: Checkout code
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          path: ./src/github.com/${{ github.repository }}
+      - name: Setup travis references
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
+          target_branch=${TRAVIS_BRANCH}
+      - name: Setup
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      # Check whether the vendored code is up-to-date & working as the first thing
+      - name: Check vendored code
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make vendor
+
+  static-checks:
+    strategy:
+      matrix:
+        go-version: [1.16.x, 1.17.x]
+        os: [ubuntu-20.04]
+    runs-on: ${{ matrix.os }}
+    env:
+      TRAVIS: "true"
+      TRAVIS_BRANCH: ${{ github.base_ref }}
+      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
+      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
+      RUST_BACKTRACE: "1"
+      target_branch: ${{ github.base_ref }}
+    steps:
+      - name: Install Go
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go-version }}
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Setup GOPATH
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
+          echo "TRAVIS: ${TRAVIS}"
+      - name: Set env
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
+          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+      - name: Checkout code
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          path: ./src/github.com/${{ github.repository }}
       - name: Setup travis references
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
@@ -84,7 +122,6 @@
           rustup target add x86_64-unknown-linux-musl
           rustup component add rustfmt clippy
       - name: Setup seccomp
-        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
           libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
           gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
@@ -92,7 +129,232 @@
           echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
           echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
           echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
-      - name: Run check
+      - name: Static Checks
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
         run: |
-          cd ${GOPATH}/src/github.com/${{ github.repository }} && ${{ matrix.cmd }}
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make static-checks
+
+  compiler-checks:
+    strategy:
+      matrix:
+        go-version: [1.16.x, 1.17.x]
+        os: [ubuntu-20.04]
+    runs-on: ${{ matrix.os }}
+    env:
+      TRAVIS: "true"
+      TRAVIS_BRANCH: ${{ github.base_ref }}
+      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
+      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
+      RUST_BACKTRACE: "1"
+      target_branch: ${{ github.base_ref }}
+    steps:
+      - name: Install Go
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go-version }}
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Setup GOPATH
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
+          echo "TRAVIS: ${TRAVIS}"
+      - name: Set env
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
+          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+      - name: Checkout code
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          path: ./src/github.com/${{ github.repository }}
+      - name: Setup travis references
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
+          target_branch=${TRAVIS_BRANCH}
+      - name: Setup
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Installing rust
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
+          PATH=$PATH:"$HOME/.cargo/bin"
+          rustup target add x86_64-unknown-linux-musl
+          rustup component add rustfmt clippy
+      - name: Setup seccomp
+        run: |
+          libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
+          gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
+          echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
+          echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
+          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
+      - name: Run Compiler Checks
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make check
+
+  unit-tests:
+    runs-on: ubuntu-20.04
+    env:
+      TRAVIS: "true"
+      TRAVIS_BRANCH: ${{ github.base_ref }}
+      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
+      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
+      RUST_BACKTRACE: "1"
+      target_branch: ${{ github.base_ref }}
+    steps:
+      - name: Install Go
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.17.x
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Setup GOPATH
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
+          echo "TRAVIS: ${TRAVIS}"
+      - name: Set env
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
+          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+      - name: Checkout code
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          path: ./src/github.com/${{ github.repository }}
+      - name: Setup travis references
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
+          target_branch=${TRAVIS_BRANCH}
+      - name: Setup
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Installing rust
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
+          PATH=$PATH:"$HOME/.cargo/bin"
+          rustup target add x86_64-unknown-linux-musl
+          rustup component add rustfmt clippy
+      - name: Setup seccomp
+        run: |
+          libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
+          gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
+          echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
+          echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
+          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
+      - name: Run Unit Tests
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && make test
+
+  unit-tests-as-root:
+    runs-on: ubuntu-20.04
+    env:
+      TRAVIS: "true"
+      TRAVIS_BRANCH: ${{ github.base_ref }}
+      TRAVIS_PULL_REQUEST_BRANCH: ${{ github.head_ref }}
+      TRAVIS_PULL_REQUEST_SHA : ${{ github.event.pull_request.head.sha }}
+      RUST_BACKTRACE: "1"
+      target_branch: ${{ github.base_ref }}
+    steps:
+      - name: Install Go
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.17.x
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Setup GOPATH
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH: ${TRAVIS_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_BRANCH: ${TRAVIS_PULL_REQUEST_BRANCH}"
+          echo "TRAVIS_PULL_REQUEST_SHA: ${TRAVIS_PULL_REQUEST_SHA}"
+          echo "TRAVIS: ${TRAVIS}"
+      - name: Set env
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
+          echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
+      - name: Checkout code
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          path: ./src/github.com/${{ github.repository }}
+      - name: Setup travis references
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "TRAVIS_BRANCH=${TRAVIS_BRANCH:-$(echo $GITHUB_REF | awk 'BEGIN { FS = \"/\" } ; { print $3 }')}"
+          target_branch=${TRAVIS_BRANCH}
+      - name: Setup
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
+        env:
+          GOPATH: ${{ runner.workspace }}/kata-containers
+      - name: Installing rust
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
+          PATH=$PATH:"$HOME/.cargo/bin"
+          rustup target add x86_64-unknown-linux-musl
+          rustup component add rustfmt clippy
+      - name: Setup seccomp
+        run: |
+          libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
+          gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
+          echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
+          echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
+          echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
+      - name: Run Unit Tests As Root User
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd ${GOPATH}/src/github.com/${{ github.repository }} && sudo -E PATH="$PATH" make test
+
+  test-dragonball:
+    runs-on: self-hosted
+    env:
+      RUST_BACKTRACE: "1"
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set env
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          echo "GOPATH=${{ github.workspace }}" >> $GITHUB_ENV
+      - name: Install Rust
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          ./ci/install_rust.sh
+          PATH=$PATH:"$HOME/.cargo/bin"
+      - name: Run Unit Test
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
+        run: |
+          cd src/dragonball
+          /root/.cargo/bin/cargo version
+          rustc --version
+          sudo -E env PATH=$PATH LIBC=gnu SUPPORT_VIRTUALIZATION=true make test
```
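The net effect of this rewrite is that the old single job, which ran one `cmd` per matrix entry, becomes separately named jobs. Locally, the same five checks can still be driven straight from the Makefile (a sketch; it assumes a checkout prepared the same way as the workflow's Setup steps):

```bash
#!/usr/bin/env bash
set -euo pipefail

# One command per former matrix entry, now one CI job each:
make vendor                      # check-vendored-code
make static-checks               # static-checks
make check                       # compiler-checks
make test                        # unit-tests
sudo -E PATH="$PATH" make test   # unit-tests-as-root
```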
**.gitignore** (vendored, 3 lines)

```diff
@@ -4,8 +4,6 @@
 **/*.rej
 **/target
 **/.vscode
-**/.idea
-**/.fleet
 pkg/logging/Cargo.lock
 src/agent/src/version.rs
 src/agent/kata-agent.service
@@ -13,3 +11,4 @@ src/agent/protocols/src/*.rs
 !src/agent/protocols/src/lib.rs
 build
+src/tools/log-parser/kata-log-parser
```
**Makefile** (10 lines)

```diff
@@ -21,7 +21,7 @@ TOOLS += log-parser
 TOOLS += runk
 TOOLS += trace-forwarder

-STANDARD_TARGETS = build check clean install static-checks-build test vendor
+STANDARD_TARGETS = build check clean install test vendor

 default: all

@@ -37,7 +37,7 @@ generate-protocols:
 	make -C src/agent generate-protocols

 # Some static checks rely on generated source files of components.
-static-checks: static-checks-build
+static-checks: build
 	bash ci/static-checks.sh

 docs-url-alive-check:
@@ -45,8 +45,10 @@ docs-url-alive-check:

 .PHONY: \
 	all \
+	kata-tarball \
+	install-tarball \
 	binary-tarball \
 	default \
 	install-binary-tarball \
 	static-checks \
 	docs-url-alive-check
```
```diff
@@ -72,7 +72,8 @@ build_and_install_gperf() {
 	curl -sLO "${gperf_tarball_url}"
 	tar -xf "${gperf_tarball}"
 	pushd "gperf-${gperf_version}"
-	# Unset $CC for configure, we will always use native for gperf
+	# gperf is a build time dependency of libseccomp and not to be used in the target.
+	# Unset $CC since that might point to a cross compiler.
 	CC= ./configure --prefix="${gperf_install_dir}"
 	make
 	make install
```
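The `CC=` prefix clears the compiler variable for the `./configure` invocation only, so a cross-compile `$CC` exported by an outer build never leaks into the gperf build while remaining in effect for everything else. A small sketch of the same pattern in isolation (assumes a directory containing a configure script):

```bash
#!/usr/bin/env bash
export CC=aarch64-linux-gnu-gcc   # cross compiler set by an outer build

# One-shot override: configure sees an empty CC and falls back to the
# native default compiler, while the exported $CC is left untouched.
CC= ./configure --prefix=/tmp/gperf-native

echo "$CC"   # still prints aarch64-linux-gnu-gcc
```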
```diff
@@ -43,16 +43,6 @@ function install_yq() {
 	"aarch64")
 		goarch=arm64
 		;;
-	"arm64")
-		# If we're on an apple silicon machine, just assign amd64.
-		# The version of yq we use doesn't have a darwin arm build,
-		# but Rosetta can come to the rescue here.
-		if [ $goos == "Darwin" ]; then
-			goarch=amd64
-		else
-			goarch=arm64
-		fi
-		;;
 	"ppc64le")
 		goarch=ppc64le
 		;;
@@ -74,7 +64,7 @@ function install_yq() {
 	fi

 	## NOTE: ${var,,} => gives lowercase value of var
-	local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
+	local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos,,}_${goarch}"
 	curl -o "${yq_path}" -LSsf "${yq_url}"
 	[ $? -ne 0 ] && die "Download ${yq_url} failed"
 	chmod +x "${yq_path}"
```
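The `${goos,,}` change matters because yq's release assets use lowercase OS names while `uname -s` reports capitalized ones. A quick bash illustration of the difference:

```bash
#!/usr/bin/env bash
goos=$(uname -s)        # e.g. "Linux" or "Darwin"
goarch=amd64

echo "yq_${goos}_${goarch}"    # yq_Linux_amd64 - does not match the published asset
echo "yq_${goos,,}_${goarch}"  # yq_linux_amd64 - matches the release file name
```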
````diff
@@ -86,27 +86,6 @@ $ sudo sed -i '/^disable_guest_seccomp/ s/true/false/' /etc/kata-containers/conf

 This will pass container seccomp profiles to the kata agent.

-## Enable SELinux on the guest
-
-> **Note:**
->
-> - To enable SELinux on the guest, SELinux MUST be also enabled on the host.
-> - You MUST create and build a rootfs image for SELinux in advance.
->   See [Create a rootfs image](#create-a-rootfs-image) and [Build a rootfs image](#build-a-rootfs-image).
-> - SELinux on the guest is supported in only a rootfs image currently, so
->   you cannot enable SELinux with the agent init (`AGENT_INIT=yes`) yet.
-
-Enable guest SELinux in Enforcing mode as follows:
-
-```
-$ sudo sed -i '/^disable_guest_selinux/ s/true/false/g' /etc/kata-containers/configuration.toml
-```
-
-The runtime automatically will set `selinux=1` to the kernel parameters and `xattr` option to
-`virtiofsd` when `disable_guest_selinux` is set to `false`.
-
-If you want to enable SELinux in Permissive mode, add `enforcing=0` to the kernel parameters.
-
 ## Enable full debug

 Enable full debug as follows:
@@ -232,6 +211,10 @@ $ rustup target add "${ARCH}-unknown-linux-${LIBC}"

 To build the agent:

+```bash
+$ make -C kata-containers/src/agent
+```
+
 The agent is built with seccomp capability by default.
 If you want to build the agent without the seccomp capability, you need to run `make` with `SECCOMP=no` as follows.
@@ -239,31 +222,6 @@
 $ make -C kata-containers/src/agent SECCOMP=no
 ```

-For building the agent with seccomp support using `musl`, set the environment
-variables for the [`libseccomp` crate](https://github.com/libseccomp-rs/libseccomp-rs).
-
-```bash
-$ export LIBSECCOMP_LINK_TYPE=static
-$ export LIBSECCOMP_LIB_PATH="the path of the directory containing libseccomp.a"
-$ make -C kata-containers/src/agent
-```
-
-If the compilation fails when the agent tries to link the `libseccomp` library statically
-against `musl`, you will need to build `libseccomp` manually with `-U_FORTIFY_SOURCE`.
-You can use [our script](https://github.com/kata-containers/kata-containers/blob/main/ci/install_libseccomp.sh)
-to install `libseccomp` for the agent.
-
-```bash
-$ mkdir -p ${seccomp_install_path} ${gperf_install_path}
-$ kata-containers/ci/install_libseccomp.sh ${seccomp_install_path} ${gperf_install_path}
-$ export LIBSECCOMP_LIB_PATH="${seccomp_install_path}/lib"
-```
-
-On `ppc64le` and `s390x`, `glibc` is used. You will need to install the `libseccomp` library
-provided by your distribution.
-
-> e.g. `libseccomp-dev` for Ubuntu, or `libseccomp-devel` for CentOS
-
 > **Note:**
 >
 > - If you enable seccomp in the main configuration file but build the agent without seccomp capability,
@@ -298,12 +256,6 @@
 $ script -fec 'sudo -E AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh "${distro}"'
 ```

-If you want to enable SELinux on the guest, you MUST choose `centos` and run the `rootfs.sh` script with `SELINUX=yes` as follows.
-
-```
-$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SELINUX=yes ./rootfs.sh centos'
-```
-
 > **Note:**
 >
 > - Check the [compatibility matrix](../tools/osbuilder/README.md#platform-distro-compatibility-matrix) before creating rootfs.
@@ -331,19 +283,6 @@ $ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh "${ROOTFS_DIR}"'
 $ popd
 ```

-If you want to enable SELinux on the guest, you MUST run the `image_builder.sh` script with `SELINUX=yes`
-to label the guest image as follows.
-To label the image on the host, you need to make sure that SELinux is enabled (`selinuxfs` is mounted) on the host
-and the rootfs MUST be created by running the `rootfs.sh` with `SELINUX=yes`.
-
-```
-$ script -fec 'sudo -E USE_DOCKER=true SELINUX=yes ./image_builder.sh ${ROOTFS_DIR}'
-```
-
-Currently, the `image_builder.sh` uses `chcon` as an interim solution in order to apply `container_runtime_exec_t`
-to the `kata-agent`. Hence, if you run `restorecon` to the guest image after running the `image_builder.sh`,
-the `kata-agent` needs to be labeled `container_runtime_exec_t` again by yourself.
-
 > **Notes:**
 >
 > - You must ensure that the *default Docker runtime* is `runc` to make use of
@@ -353,7 +292,6 @@
 >   variable in the previous command and ensure the `qemu-img` command is
 >   available on your system.
 > - If `qemu-img` is not installed, you will likely see errors such as `ERROR: File /dev/loop19p1 is not a block device` and `losetup: /tmp/tmp.bHz11oY851: Warning: file is smaller than 512 bytes; the loop device may be useless or invisible for system tools`. These can be mitigated by installing the `qemu-img` command (available in the `qemu-img` package on Fedora or the `qemu-utils` package on Debian).
-> - If `loop` module is not probed, you will likely see errors such as `losetup: cannot find an unused loop device`. Execute `modprobe loop` could resolve it.


 ### Install the rootfs image
@@ -503,7 +441,7 @@ When using the file system type virtio-fs (default), `virtiofsd` is required

 ```bash
 $ pushd kata-containers/tools/packaging/static-build/virtiofsd
-$ ./build.sh
+$ ./build-static-virtiofsd.sh
 $ popd
 ```
````
```diff
@@ -7,9 +7,7 @@ Kata Containers design documents:
 - [Design requirements for Kata Containers](kata-design-requirements.md)
 - [VSocks](VSocks.md)
 - [VCPU handling](vcpu-handling.md)
-- [VCPU threads pinning](vcpu-threads-pinning.md)
 - [Host cgroups](host-cgroups.md)
-- [Agent systemd cgroup](agent-systemd-cgroup.md)
 - [`Inotify` support](inotify.md)
 - [Metrics(Kata 2.0)](kata-2-0-metrics.md)
 - [Design for Kata Containers `Lazyload` ability with `nydus`](kata-nydus-design.md)
```
@@ -1,84 +0,0 @@
|
||||
# Systemd Cgroup for Agent
|
||||
|
||||
As we know, we can interact with cgroups in two ways, **`cgroupfs`** and **`systemd`**. The former is achieved by reading and writing cgroup `tmpfs` files under `/sys/fs/cgroup` while the latter is done by configuring a transient unit by requesting systemd. Kata agent uses **`cgroupfs`** by default, unless you pass the parameter `--systemd-cgroup`.
|
||||
|
||||
## usage
|
||||
|
||||
For systemd, kata agent configures cgroups according to the following `linux.cgroupsPath` format standard provided by `runc` (`[slice]:[prefix]:[name]`). If you don't provide a valid `linux.cgroupsPath`, kata agent will treat it as `"system.slice:kata_agent:<container-id>"`.
|
||||
|
||||
> Here slice is a systemd slice under which the container is placed. If empty, it defaults to system.slice, except when cgroup v2 is used and rootless container is created, in which case it defaults to user.slice.
|
||||
>
|
||||
> Note that slice can contain dashes to denote a sub-slice (e.g. user-1000.slice is a correct notation, meaning a `subslice` of user.slice), but it must not contain slashes (e.g. user.slice/user-1000.slice is invalid).
|
||||
>
|
||||
> A slice of `-` represents a root slice.
|
||||
>
|
||||
> Next, prefix and name are used to compose the unit name, which is `<prefix>-<name>.scope`, unless name has `.slice` suffix, in which case prefix is ignored and the name is used as is.
|
||||
|
||||
## Supported properties

The Kata agent translates the parameters in the `linux.resources` section of `config.json` into systemd unit properties and sends them to systemd for configuration. Since systemd supports only a limited set of properties, only the following parameters in `linux.resources` are applied. Note that hybrid mode is simply treated as legacy (v1) mode. A host-side illustration follows the tables.

- CPU

  - v1

    | runtime spec resource | systemd property name |
    | --------------------- | --------------------- |
    | `cpu.shares`          | `CPUShares`           |

  - v2

    | runtime spec resource      | systemd property name      |
    | -------------------------- | -------------------------- |
    | `cpu.shares`               | `CPUShares`                |
    | `cpu.period`               | `CPUQuotaPeriodUSec`(v242) |
    | `cpu.period` & `cpu.quota` | `CPUQuotaPerSecUSec`       |

- MEMORY

  - v1

    | runtime spec resource | systemd property name |
    | --------------------- | --------------------- |
    | `memory.limit`        | `MemoryLimit`         |

  - v2

    | runtime spec resource          | systemd property name |
    | ------------------------------ | --------------------- |
    | `memory.low`                   | `MemoryLow`           |
    | `memory.max`                   | `MemoryMax`           |
    | `memory.swap` & `memory.limit` | `MemorySwapMax`       |

- PIDS

  | runtime spec resource | systemd property name |
  | --------------------- | --------------------- |
  | `pids.limit`          | `TasksMax`            |

- CPUSET

  | runtime spec resource | systemd property name      |
  | --------------------- | -------------------------- |
  | `cpuset.cpus`         | `AllowedCPUs`(v244)        |
  | `cpuset.mems`         | `AllowedMemoryNodes`(v244) |
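For intuition (an illustration, not the agent's code path), the same kinds of properties can be set on an existing transient unit with `systemctl`; the scope name is illustrative:

```bash
# Apply a memory ceiling and a task limit to a scope unit, mirroring the
# v2 rows for memory.max and pids.limit above.
sudo systemctl set-property --runtime kata_agent-abcd1234.scope \
    MemoryMax=268435456 TasksMax=256
```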
## Systemd Interface

`session.rs` and `system.rs` in `src/agent/rustjail/src/cgroups/systemd/interface` are automatically generated by `zbus-xmlgen`, an accompanying tool provided by `zbus` that generates Rust code from D-Bus XML interface descriptions. The commands used to generate these two files are as follows:

```shell
# system.rs
zbus-xmlgen --system org.freedesktop.systemd1 /org/freedesktop/systemd1
# session.rs
zbus-xmlgen --session org.freedesktop.systemd1 /org/freedesktop/systemd1
```

The current implementation of `cgroups/systemd` uses `system.rs`, while `session.rs` could be used to build rootless containers in the future.

## References

- [runc - systemd cgroup driver](https://github.com/opencontainers/runc/blob/main/docs/systemd.md)

- [systemd.resource-control — Resource control unit settings](https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html)
Binary file not shown. (image, 193 KiB)
@@ -81,7 +81,7 @@ Notes: given that the `mountInfo` is persisted to the disk by the Kata runtime,

Instead of the CSI node driver writing the mount info into a `csiPlugin.json` file under the volume root,
as described in the original proposal, here we propose that the CSI node driver passes the mount information to
the Kata Containers runtime through a new `kata-runtime` command-line command. The `kata-runtime` then writes the mount
information to a `mountInfo.json` file in a predefined location (`/run/kata-containers/shared/direct-volumes/[volume_path]/`).
information to a `mount-info.json` file in a predefined location (`/run/kata-containers/shared/direct-volumes/[volume_path]/`).

When the Kata Containers runtime starts a container, it verifies whether a volume mount is a direct-assigned volume by checking
whether there is a `mountInfo` file under the computed Kata `direct-volumes` directory. If it is, the runtime parses the `mountInfo` file,
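A hedged sketch of the flow this describes (the subcommand shape and flag names are assumptions for illustration): the CSI node driver hands the mount information to the Kata runtime, which persists it under the `direct-volumes` directory.

```bash
# Illustrative invocation: register mount info for a direct-assigned volume.
kata-runtime direct-volume add \
    --volume-path "/my/volume/path" \
    --mount-info '{"volume-type":"block","device":"/dev/sdb","fs-type":"ext4"}'
```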
@@ -1,37 +0,0 @@

# Design Doc for Kata Containers' VCPUs Pinning Feature

## Background

Currently, vCPU threads of Kata Containers are scheduled to CPUs at random, and each pod requests a specific set of CPUs, which we call its CPU set (in the Linux cgroups sense of the term).

If the number of vCPU threads is equal to the number of CPUs claimed in the CPU set, we can pin each vCPU thread to one specified CPU, to reduce the cost of random scheduling.

## Detailed Design

### Passing Config Parameters

Two ways are provided to use this vCPU thread pinning feature: through the `QEMU` configuration file and through annotations (a sketch follows). In either case, the pinning parameter is finally passed to `HypervisorConfig`.
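A minimal sketch of those two ways; the option and annotation names follow the runtime's usual conventions but are assumptions for illustration:

```bash
# 1) Via the hypervisor section of the Kata configuration file:
sudo sed -i 's/^#\?enable_vcpus_pinning.*/enable_vcpus_pinning = true/' \
    /etc/kata-containers/configuration.toml

# 2) Via a per-pod annotation (e.g. in a Kubernetes pod spec):
#      io.katacontainers.config.hypervisor.enable_vcpus_pinning: "true"
```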
### Related Linux Thread Scheduling API

| API Info          | Value                                                     |
|-------------------|-----------------------------------------------------------|
| Package           | `golang.org/x/sys/unix`                                   |
| Method            | `unix.SchedSetaffinity(thread_id, &unixCPUSet)`           |
| Official Doc Page | https://pkg.go.dev/golang.org/x/sys/unix#SchedSetaffinity |
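A rough command-line equivalent of what `SchedSetaffinity` does programmatically (assuming util-linux's `taskset` is installed; the TID is illustrative):

```bash
# Pin the thread with TID 12345 to CPU 3.
taskset -pc 3 12345
```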
### When is VCPUs Pinning Checked?

As shown in the Background section, when `num(vCPU threads) == num(CPUs in CPU set)`, we pin each vCPU thread to a specified CPU; when this condition no longer holds, we restore the original random scheduling pattern.
So when may `num(CPUs in CPU set)` change? There are five possible scenarios:

| Possible scenarios                | Related Code                                    |
|-----------------------------------|-------------------------------------------------|
| when creating a container         | File `Sandbox.go`, in method `CreateContainer`  |
| when starting a container         | File `Sandbox.go`, in method `StartContainer`   |
| when deleting a container         | File `Sandbox.go`, in method `DeleteContainer`  |
| when updating a container         | File `Sandbox.go`, in method `UpdateContainer`  |
| when creating multiple containers | File `Sandbox.go`, in method `createContainers` |

### Core Pinning Logics

We can split the whole process into the following steps. The related methods are `checkVCPUsPinning` and `resetVCPUsPinning`, in file `Sandbox.go`.

@@ -44,4 +44,7 @@

- [How to run Docker with Kata Containers](how-to-run-docker-with-kata.md)
- [How to run Kata Containers with `nydus`](how-to-use-virtio-fs-nydus-with-kata.md)
- [How to run Kata Containers with AMD SEV-SNP](how-to-run-kata-containers-with-SNP-VMs.md)
- [How to use EROFS to build rootfs in Kata Containers](how-to-use-erofs-build-rootfs.md)

## Confidential Containers
- [How to build and test the Confidential Containers `CCv0` proof of concept](how-to-build-and-test-ccv0.md)
- [How to generate a Kata Containers payload for the Confidential Containers Operator](how-to-generate-a-kata-containers-payload-for-the-confidential-containers-operator.md)

640 docs/how-to/ccv0.sh (new executable file)
@@ -0,0 +1,640 @@

#!/bin/bash -e
#
# Copyright (c) 2021, 2022 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# Disclaimer: This script is work in progress for supporting the CCv0 prototype
# It shouldn't be considered supported by the Kata Containers community, or anyone else

# Based on https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md,
# but with elements of the tests/.ci scripts used

readonly script_name="$(basename "${BASH_SOURCE[0]}")"

# By default in Golang >= 1.16 GO111MODULE is set to "on", but not all modules support it, so overwrite to "auto"
export GO111MODULE="auto"

# Set up the kata containers environment if not set - we default to using containerd
export CRI_CONTAINERD=${CRI_CONTAINERD:-"yes"}
export CRI_RUNTIME=${CRI_RUNTIME:-"containerd"}
export CRIO=${CRIO:-"no"}
export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
export KUBERNETES=${KUBERNETES:-"no"}
export AGENT_INIT="${AGENT_INIT:-${TEST_INITRD:-no}}"
export AA_KBC="${AA_KBC:-offline_fs_kbc}"

# Allow the user to overwrite the default repo and branch names if they want to build from a fork
export katacontainers_repo="${katacontainers_repo:-github.com/kata-containers/kata-containers}"
export katacontainers_branch="${katacontainers_branch:-CCv0}"
export kata_default_branch=${katacontainers_branch}
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
export tests_branch="${tests_branch:-CCv0}"
export target_branch=${tests_branch} # kata-containers/ci/lib.sh uses the target branch var to check out the tests repo

# If .bash_profile exists then use it, otherwise fall back to .profile
export PROFILE="${HOME}/.profile"
if [ -r "${HOME}/.bash_profile" ]; then
    export PROFILE="${HOME}/.bash_profile"
fi
# Prevent a 'PS1: unbound variable' error
export PS1=${PS1:-}

# Create a bunch of common, derived values up front so we don't need to create them in all the different functions
. ${PROFILE}
if [ -z "${GOPATH:-}" ]; then
    export GOPATH=${HOME}/go
fi
export tests_repo_dir="${GOPATH}/src/${tests_repo}"
export katacontainers_repo_dir="${GOPATH}/src/${katacontainers_repo}"
export ROOTFS_DIR="${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder/rootfs"
export PULL_IMAGE="${PULL_IMAGE:-quay.io/kata-containers/confidential-containers:signed}" # Doesn't need authentication
export CONTAINER_ID="${CONTAINER_ID:-0123456789}"
source /etc/os-release || source /usr/lib/os-release
grep -Eq "\<fedora\>" /etc/os-release 2> /dev/null && export USE_PODMAN=true


# If we've already checked out the test repo then source the confidential scripts
if [ "${KUBERNETES}" == "yes" ]; then
    export BATS_TEST_DIRNAME="${tests_repo_dir}/integration/kubernetes/confidential"
    [ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/lib.sh"
else
    export BATS_TEST_DIRNAME="${tests_repo_dir}/integration/containerd/confidential"
    [ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/lib.sh"
fi

[ -d "${BATS_TEST_DIRNAME}" ] && source "${BATS_TEST_DIRNAME}/../../confidential/lib.sh"

export RUNTIME_CONFIG_PATH=/etc/kata-containers/configuration.toml

usage() {
    exit_code="$1"
    cat <<EOF
Overview:
    Build and test kata containers from source
    Optionally set kata-containers and tests repo and branch as exported variables before running
    e.g. export katacontainers_repo=github.com/stevenhorsman/kata-containers && export katacontainers_branch=kata-ci-from-fork && export tests_repo=github.com/stevenhorsman/tests && export tests_branch=kata-ci-from-fork && ~/${script_name} build_and_install_all
Usage:
    ${script_name} [options] <command>
Commands:
- agent_create_container:         Run CreateContainer command against the agent with agent-ctl
- agent_pull_image:               Run PullImage command against the agent with agent-ctl
- all:                            Build and install everything, test kata with containerd and capture the logs
- build_and_add_agent_to_rootfs:  Builds the kata-agent and adds it to the rootfs
- build_and_install_all:          Build and install everything
- build_and_install_rootfs:       Builds and installs the rootfs image
- build_kata_runtime:             Build and install the kata runtime
- build_cloud_hypervisor:         Checkout, patch, build and install Cloud Hypervisor
- build_qemu:                     Checkout, patch, build and install QEMU
- configure:                      Configure Kata to use rootfs and enable debug
- connect_to_ssh_demo_pod:        Ssh into the ssh demo pod, showing that the decryption succeeded
- copy_signature_files_to_guest:  Copies signature verification files to guest
- create_rootfs:                  Create a local rootfs
- crictl_create_cc_container:     Use crictl to create a new busybox container in the kata cc pod
- crictl_create_cc_pod:           Use crictl to create a new kata cc pod
- crictl_delete_cc:               Use crictl to delete the kata cc pod sandbox and container in it
- help:                           Display this help
- init_kubernetes:                Initialize a Kubernetes cluster on this system
- initialize:                     Install dependencies and check out kata-containers source
- install_guest_kernel:           Setup, build and install the guest kernel
- kubernetes_create_cc_pod:       Create a Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_create_ssh_demo_pod: Create a Kata CC runtime pod based on the ssh demo
- kubernetes_delete_cc_pod:       Delete the Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_delete_ssh_demo_pod: Delete the Kata CC runtime pod based on the ssh demo
- open_kata_shell:                Open a shell into the kata runtime
- rebuild_and_install_kata:       Rebuild the kata runtime and agent and build and install the image
- shim_pull_image:                Run PullImage command against the shim with ctr
- test_capture_logs:              Test using kata with containerd and capture the logs in the user's home directory
- test:                           Test using kata with containerd

Options:
    -d: Enable debug
    -h: Display this help
EOF
    # If the script was sourced don't exit as this would exit the main shell, just return instead
    [[ $_ != $0 ]] && return "$exit_code" || exit "$exit_code"
}

build_and_install_all() {
    initialize
    build_and_install_kata_runtime
    configure
    create_a_local_rootfs
    build_and_install_rootfs
    install_guest_kernel_image
    case "$KATA_HYPERVISOR" in
        "qemu")
            build_qemu
            ;;
        "cloud-hypervisor")
            build_cloud_hypervisor
            ;;
        *)
            echo "Invalid option: $KATA_HYPERVISOR is not supported." >&2
            ;;
    esac

    check_kata_runtime
    if [ "${KUBERNETES}" == "yes" ]; then
        init_kubernetes
    fi
}

rebuild_and_install_kata() {
    checkout_tests_repo
    checkout_kata_containers_repo
    build_and_install_kata_runtime
    build_and_add_agent_to_rootfs
    build_and_install_rootfs
    check_kata_runtime
}

# Based on the jenkins_job_build.sh script in kata-containers/tests/.ci - checks out source code and installs dependencies
initialize() {
    # We need git to checkout and bootstrap the ci scripts and some other packages used in testing
    sudo apt-get update && sudo apt-get install -y curl git qemu-utils

    grep -qxF "export GOPATH=\${HOME}/go" "${PROFILE}" || echo "export GOPATH=\${HOME}/go" >> "${PROFILE}"
    grep -qxF "export GOROOT=/usr/local/go" "${PROFILE}" || echo "export GOROOT=/usr/local/go" >> "${PROFILE}"
    grep -qxF "export PATH=\${GOPATH}/bin:/usr/local/go/bin:\${PATH}" "${PROFILE}" || echo "export PATH=\${GOPATH}/bin:/usr/local/go/bin:\${PATH}" >> "${PROFILE}"

    # Load the new go and PATH parameters from the profile
    . ${PROFILE}
    mkdir -p "${GOPATH}"

    checkout_tests_repo

    pushd "${tests_repo_dir}"
    local ci_dir_name=".ci"
    sudo -E PATH=$PATH -s "${ci_dir_name}/install_go.sh" -p -f
    sudo -E PATH=$PATH -s "${ci_dir_name}/install_rust.sh"
    # Need to change ownership of rustup so later processes can create temp files there
    sudo chown -R ${USER}:${USER} "${HOME}/.rustup"

    checkout_kata_containers_repo

    # Run setup, but don't install kata as we will build it ourselves in locations matching the developer guide
    export INSTALL_KATA="no"
    sudo -E PATH=$PATH -s ${ci_dir_name}/setup.sh
    # Reload the profile to pick up installed dependencies
    . ${PROFILE}
    popd
}

checkout_tests_repo() {
    echo "Creating repo: ${tests_repo} and branch ${tests_branch} into ${tests_repo_dir}..."
    # Due to the git security fix https://github.blog/2022-04-12-git-security-vulnerability-announced/ the tests repo needs
    # to be owned by root as it is re-checked out in rootfs.sh
    mkdir -p $(dirname "${tests_repo_dir}")
    [ -d "${tests_repo_dir}" ] || sudo -E git clone "https://${tests_repo}.git" "${tests_repo_dir}"
    sudo -E chown -R root:root "${tests_repo_dir}"
    pushd "${tests_repo_dir}"
    sudo -E git fetch
    if [ -n "${tests_branch}" ]; then
        sudo -E git checkout ${tests_branch}
    fi
    sudo -E git reset --hard origin/${tests_branch}
    popd

    source "${BATS_TEST_DIRNAME}/lib.sh"
    source "${BATS_TEST_DIRNAME}/../../confidential/lib.sh"
}

# Note: clone_katacontainers_repo uses go, so that needs to be installed first
checkout_kata_containers_repo() {
    source "${tests_repo_dir}/.ci/lib.sh"
    echo "Creating repo: ${katacontainers_repo} and branch ${kata_default_branch} into ${katacontainers_repo_dir}..."
    clone_katacontainers_repo
    sudo -E chown -R ${USER}:${USER} "${katacontainers_repo_dir}"
}

build_and_install_kata_runtime() {
    pushd ${katacontainers_repo_dir}/src/runtime
    make clean && make DEFAULT_HYPERVISOR=${KATA_HYPERVISOR} && sudo -E PATH=$PATH make DEFAULT_HYPERVISOR=${KATA_HYPERVISOR} install
    popd
}

configure() {
    configure_kata_to_use_rootfs
    enable_full_debug
    enable_agent_console

    # Switch image offload to true in the kata config
    switch_image_service_offload "on"

    configure_cc_containerd
    # From crictl v1.24.1 the default timeout leads to the pod creation failing, so update it
    sudo crictl config --set timeout=10
}

configure_kata_to_use_rootfs() {
    sudo mkdir -p /etc/kata-containers/
    sudo install -o root -g root -m 0640 /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers
    sudo sed -i 's/^\(initrd =.*\)/# \1/g' ${RUNTIME_CONFIG_PATH}
}

build_and_add_agent_to_rootfs() {
    build_a_custom_kata_agent
    add_custom_agent_to_rootfs
}

build_a_custom_kata_agent() {
    # Install libseccomp for static linking
    sudo -E PATH=$PATH GOPATH=$GOPATH ${katacontainers_repo_dir}/ci/install_libseccomp.sh /tmp/kata-libseccomp /tmp/kata-gperf
    export LIBSECCOMP_LINK_TYPE=static
    export LIBSECCOMP_LIB_PATH=/tmp/kata-libseccomp/lib

    . "$HOME/.cargo/env"
    pushd ${katacontainers_repo_dir}/src/agent
    sudo -E PATH=$PATH make

    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le

    # Run a make install into the rootfs directory in order to create the kata-agent.service file which is required when we add to the rootfs
    sudo -E PATH=$PATH make install DESTDIR="${ROOTFS_DIR}"
    popd
}

create_a_local_rootfs() {
    sudo rm -rf "${ROOTFS_DIR}"
    pushd ${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder
    export distro="ubuntu"
    [[ -z "${USE_PODMAN:-}" ]] && use_docker="${use_docker:-1}"
    sudo -E OS_VERSION="${OS_VERSION:-}" GOPATH=$GOPATH EXTRA_PKGS="vim iputils-ping net-tools" DEBUG="${DEBUG:-}" USE_DOCKER="${use_docker:-}" SKOPEO=${SKOPEO:-} AA_KBC=${AA_KBC:-} UMOCI=yes SECCOMP=yes ./rootfs.sh -r ${ROOTFS_DIR} ${distro}

    # install_rust.sh (run during rootfs.sh) switches us to the main branch of the tests repo, so switch back now
    pushd "${tests_repo_dir}"
    sudo -E git checkout ${tests_branch}
    popd
    # During the ./rootfs.sh call the kata agent is built as root, so we need to update the permissions so we can rebuild it
    sudo chown -R ${USER}:${USER} "${katacontainers_repo_dir}/src/agent/"

    popd
}

add_custom_agent_to_rootfs() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/rootfs-builder

    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le

    sudo install -o root -g root -m 0550 -t ${ROOTFS_DIR}/usr/bin ${katacontainers_repo_dir}/src/agent/target/${ARCH}-unknown-linux-${LIBC}/release/kata-agent
    sudo install -o root -g root -m 0440 ../../../src/agent/kata-agent.service ${ROOTFS_DIR}/usr/lib/systemd/system/
    sudo install -o root -g root -m 0440 ../../../src/agent/kata-containers.target ${ROOTFS_DIR}/usr/lib/systemd/system/
    popd
}

build_and_install_rootfs() {
    build_rootfs_image
    install_rootfs_image
}

build_rootfs_image() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/image-builder
    # Logic from install_kata_image.sh - if we aren't using podman (i.e. not on a Fedora-like distro), then use docker
    [[ -z "${USE_PODMAN:-}" ]] && use_docker="${use_docker:-1}"
    sudo -E USE_DOCKER="${use_docker:-}" ./image_builder.sh ${ROOTFS_DIR}
    popd
}

install_rootfs_image() {
    pushd ${katacontainers_repo_dir}/tools/osbuilder/image-builder
    local commit=$(git log --format=%h -1 HEAD)
    local date=$(date +%Y-%m-%d-%T.%N%z)
    local image="kata-containers-${date}-${commit}"
    sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
    (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
    echo "Built rootfs from ${ROOTFS_DIR} to /usr/share/kata-containers/${image}"
    ls -al /usr/share/kata-containers/
    popd
}

install_guest_kernel_image() {
    pushd ${katacontainers_repo_dir}/tools/packaging/kernel
    sudo -E PATH=$PATH ./build-kernel.sh setup
    sudo -E PATH=$PATH ./build-kernel.sh build
    sudo chmod u+wrx /usr/share/kata-containers/ # Give the user permission to install the kernel
    sudo -E PATH=$PATH ./build-kernel.sh install
    popd
}

build_qemu() {
    ${tests_repo_dir}/.ci/install_virtiofsd.sh
    ${tests_repo_dir}/.ci/install_qemu.sh
}

build_cloud_hypervisor() {
    ${tests_repo_dir}/.ci/install_virtiofsd.sh
    ${tests_repo_dir}/.ci/install_cloud_hypervisor.sh
}

check_kata_runtime() {
    sudo kata-runtime check
}

k8s_pod_file="${HOME}/busybox-cc.yaml"
init_kubernetes() {
    # Check whether kubeadm is installed and install it otherwise
    if ! [ -x "$(command -v kubeadm)" ]; then
        pushd "${tests_repo_dir}/.ci"
        sudo -E PATH=$PATH -s install_kubernetes.sh
        if [ "${CRI_CONTAINERD}" == "yes" ]; then
            sudo -E PATH=$PATH -s "configure_containerd_for_kubernetes.sh"
        fi
        popd
    fi

    # If kubernetes init has previously run we need to clean it by removing the image and resetting k8s
    local cid=$(sudo docker ps -a -q -f name=^/kata-registry$)
    if [ -n "${cid}" ]; then
        sudo docker stop ${cid} && sudo docker rm ${cid}
    fi
    local k8s_nodes=$(kubectl get nodes -o name 2>/dev/null || true)
    if [ -n "${k8s_nodes}" ]; then
        sudo kubeadm reset -f
    fi

    export CI="true" && sudo -E PATH=$PATH -s ${tests_repo_dir}/integration/kubernetes/init.sh
    sudo chown ${USER}:$(id -g -n ${USER}) "$HOME/.kube/config"
    cat << EOF > ${k8s_pod_file}
apiVersion: v1
kind: Pod
metadata:
  name: busybox-cc
spec:
  runtimeClassName: kata
  containers:
  - name: nginx
    image: quay.io/kata-containers/confidential-containers:signed
    imagePullPolicy: Always
EOF
}

call_kubernetes_create_cc_pod() {
    kubernetes_create_cc_pod ${k8s_pod_file}
}

call_kubernetes_delete_cc_pod() {
    pod_name=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
    kubernetes_delete_cc_pod $pod_name
}

call_kubernetes_create_ssh_demo_pod() {
    setup_decryption_files_in_guest
    kubernetes_create_ssh_demo_pod
}

call_connect_to_ssh_demo_pod() {
    connect_to_ssh_demo_pod
}

call_kubernetes_delete_ssh_demo_pod() {
    pod=$(kubectl get pods -o jsonpath='{.items..metadata.name}')
    kubernetes_delete_ssh_demo_pod $pod
}

crictl_sandbox_name=kata-cc-busybox-sandbox
call_crictl_create_cc_pod() {
    # Update iptables to allow forwarding to the cni0 bridge, avoiding issues caused by the docker0 bridge
    sudo iptables -P FORWARD ACCEPT

    # get_pod_config in tests_common exports `pod_config` that points to the prepared pod config yaml
    get_pod_config

    crictl_delete_cc_pod_if_exists "${crictl_sandbox_name}"
    crictl_create_cc_pod "${pod_config}"
    sudo crictl pods
}

call_crictl_create_cc_container() {
    # Create the container configuration yaml based on our test copy of busybox
    # get_pod_config in tests_common exports `pod_config` that points to the prepared pod config yaml
    get_pod_config

    local container_config="${FIXTURES_DIR}/${CONTAINER_CONFIG_FILE:-container-config.yaml}"
    local pod_name=${crictl_sandbox_name}
    crictl_create_cc_container ${pod_name} ${pod_config} ${container_config}
    sudo crictl ps -a
}

crictl_delete_cc() {
    crictl_delete_cc_pod ${crictl_sandbox_name}
}

test_kata_runtime() {
    echo "Running ctr with the kata runtime..."
    local test_image="quay.io/kata-containers/confidential-containers:signed"
    if [ -z "$(ctr images ls -q name=="${test_image}")" ]; then
        sudo ctr image pull "${test_image}"
    fi
    sudo ctr run --runtime "io.containerd.kata.v2" --rm -t "${test_image}" test-kata uname -a
}

run_kata_and_capture_logs() {
    echo "Clearing systemd journal..."
    sudo systemctl stop systemd-journald
    sudo rm -f /var/log/journal/*/* /run/log/journal/*/*
    sudo systemctl start systemd-journald
    test_kata_runtime
    echo "Collecting logs..."
    sudo journalctl -q -o cat -a -t kata-runtime > ${HOME}/kata-runtime.log
    sudo journalctl -q -o cat -a -t kata > ${HOME}/shimv2.log
    echo "Logs output to ${HOME}/kata-runtime.log and ${HOME}/shimv2.log"
}

get_ids() {
    guest_cid=$(sudo ss -H --vsock | awk '{print $6}' | cut -d: -f1)
    sandbox_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "id [^,][^,].* " | awk '{print $2}')
}

open_kata_shell() {
    get_ids
    sudo -E "PATH=$PATH" kata-runtime exec ${sandbox_id}
}

build_bundle_dir_if_necessary() {
    bundle_dir="/tmp/bundle"
    if [ ! -d "${bundle_dir}" ]; then
        rootfs_dir="$bundle_dir/rootfs"
        image="quay.io/kata-containers/confidential-containers:signed"
        mkdir -p "$rootfs_dir" && (cd "$bundle_dir" && runc spec)
        sudo docker export $(sudo docker create "$image") | tar -C "$rootfs_dir" -xvf -
    fi
    # There were errors in the create container agent-ctl command due to /bin/ seemingly not being on the path, so hardcode it
    sudo sed -i -e 's%^\(\t*\)"sh"$%\1"/bin/sh"%g' "${bundle_dir}/config.json"
}

build_agent_ctl() {
    cd ${GOPATH}/src/${katacontainers_repo}/src/tools/agent-ctl/
    if [ -e "${HOME}/.cargo/registry" ]; then
        sudo chown -R ${USER}:${USER} "${HOME}/.cargo/registry"
    fi
    sudo -E PATH=$PATH -s make
    ARCH=$(uname -m)
    [ ${ARCH} == "ppc64le" ] || [ ${ARCH} == "s390x" ] && export LIBC=gnu || export LIBC=musl
    [ ${ARCH} == "ppc64le" ] && export ARCH=powerpc64le
    cd "./target/${ARCH}-unknown-linux-${LIBC}/release/"
}

run_agent_ctl_command() {
    get_ids
    build_bundle_dir_if_necessary
    command=$1
    # If kata-agent-ctl is pre-built in this directory, use it directly, otherwise build it first and switch to release
    if [ ! -x kata-agent-ctl ]; then
        build_agent_ctl
    fi
    ./kata-agent-ctl -l debug connect --bundle-dir "${bundle_dir}" --server-address "vsock://${guest_cid}:1024" -c "${command}"
}

agent_pull_image() {
    run_agent_ctl_command "PullImage image=${PULL_IMAGE} cid=${CONTAINER_ID} source_creds=${SOURCE_CREDS}"
}

agent_create_container() {
    run_agent_ctl_command "CreateContainer cid=${CONTAINER_ID}"
}

shim_pull_image() {
    get_ids
    local ctr_shim_command="sudo ctr --namespace k8s.io shim --id ${sandbox_id} pull-image ${PULL_IMAGE} ${CONTAINER_ID}"
    echo "Issuing command '${ctr_shim_command}'"
    ${ctr_shim_command}
}

call_copy_signature_files_to_guest() {
    # TODO #5173 - remove this once the kernel_params aren't ignored by the agent config
    export DEBUG_CONSOLE="true"

    if [ "${SKOPEO:-}" = "yes" ]; then
        add_kernel_params "agent.container_policy_file=/etc/containers/quay_verification/quay_policy.json"
        setup_skopeo_signature_files_in_guest
    else
        # TODO #4888 - set config to specifically enable signature verification to be on in ImageClient
        setup_offline_fs_kbc_signature_files_in_guest
    fi
}

main() {
    while getopts "dh" opt; do
        case "$opt" in
            d)
                export DEBUG="-d"
                set -x
                ;;
            h)
                usage 0
                ;;
            \?)
                echo "Invalid option: -$OPTARG" >&2
                usage 1
                ;;
        esac
    done

    shift $((OPTIND - 1))

    subcmd="${1:-}"

    [ -z "${subcmd}" ] && usage 1

    case "${subcmd}" in
        all)
            build_and_install_all
            run_kata_and_capture_logs
            ;;
        build_and_install_all)
            build_and_install_all
            ;;
        rebuild_and_install_kata)
            rebuild_and_install_kata
            ;;
        initialize)
            initialize
            ;;
        build_kata_runtime)
            build_and_install_kata_runtime
            ;;
        configure)
            configure
            ;;
        create_rootfs)
            create_a_local_rootfs
            ;;
        build_and_add_agent_to_rootfs)
            build_and_add_agent_to_rootfs
            ;;
        build_and_install_rootfs)
            build_and_install_rootfs
            ;;
        install_guest_kernel)
            install_guest_kernel_image
            ;;
        build_cloud_hypervisor)
            build_cloud_hypervisor
            ;;
        build_qemu)
            build_qemu
            ;;
        init_kubernetes)
            init_kubernetes
            ;;
        crictl_create_cc_pod)
            call_crictl_create_cc_pod
            ;;
        crictl_create_cc_container)
            call_crictl_create_cc_container
            ;;
        crictl_delete_cc)
            crictl_delete_cc
            ;;
        kubernetes_create_cc_pod)
            call_kubernetes_create_cc_pod
            ;;
        kubernetes_delete_cc_pod)
            call_kubernetes_delete_cc_pod
            ;;
        kubernetes_create_ssh_demo_pod)
            call_kubernetes_create_ssh_demo_pod
            ;;
        connect_to_ssh_demo_pod)
            call_connect_to_ssh_demo_pod
            ;;
        kubernetes_delete_ssh_demo_pod)
            call_kubernetes_delete_ssh_demo_pod
            ;;
        test)
            test_kata_runtime
            ;;
        test_capture_logs)
            run_kata_and_capture_logs
            ;;
        open_kata_console)
            open_kata_console
            ;;
        open_kata_shell)
            open_kata_shell
            ;;
        agent_pull_image)
            agent_pull_image
            ;;
        shim_pull_image)
            shim_pull_image
            ;;
        agent_create_container)
            agent_create_container
            ;;
        copy_signature_files_to_guest)
            call_copy_signature_files_to_guest
            ;;
        *)
            usage 1
            ;;
    esac
}

main "$@"
@@ -257,48 +257,6 @@ This launches a BusyBox container named `hello`, and it will be removed by `--rm`

The `--cni` flag enables CNI networking for the container. Without this flag, a container with just a
loopback interface is created.

### Launch containers using `ctr` command line with rootfs bundle

#### Get rootfs
Use the following script to create a rootfs:
```bash
ctr i pull quay.io/prometheus/busybox:latest
ctr i export rootfs.tar quay.io/prometheus/busybox:latest

rootfs_tar=rootfs.tar
bundle_dir="./bundle"
mkdir -p "${bundle_dir}"

# extract busybox rootfs
rootfs_dir="${bundle_dir}/rootfs"
mkdir -p "${rootfs_dir}"
layers_dir="$(mktemp -d)"
tar -C "${layers_dir}" -pxf "${rootfs_tar}"
for ((i=0;i<$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers | length");i++)); do
    tar -C ${rootfs_dir} -xf ${layers_dir}/$(cat ${layers_dir}/manifest.json | jq -r ".[].Layers[${i}]")
done
```
#### Get `config.json`
Use `runc spec` to generate `config.json`:
```bash
cd ./bundle/rootfs
runc spec
mv config.json ../
```
Change the root `path` in `config.json` to the absolute path of the rootfs:

```JSON
"root":{
    "path":"/root/test/bundle/rootfs",
    "readonly": false
},
```

#### Run container
```bash
sudo ctr run -d --runtime io.containerd.run.kata.v2 --config bundle/config.json hello
sudo ctr t exec --exec-id ${ID} -t hello sh
```
### Launch Pods with `crictl` command line

With the `crictl` command line of `cri-tools`, you can specify the runtime class with the `-r` or `--runtime` flag; a short example follows.
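For instance (mirroring the CCv0 walkthrough later in this document, and assuming a pod config such as `~/pod-config.yaml` exists):

```bash
# Create a pod sandbox using the "kata" runtime class, then list pods.
sudo crictl runp -r kata ~/pod-config.yaml
sudo crictl pods
```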
45 docs/how-to/data/confidential-agent-config.toml.in (new file)
@@ -0,0 +1,45 @@

# Copyright (c) 2021 IBM Corp.
#
# SPDX-License-Identifier: Apache-2.0
#

aa_kbc_params = "$AA_KBC_PARAMS"
https_proxy = "$HTTPS_PROXY"
[endpoints]
allowed = [
    "AddARPNeighborsRequest",
    "AddSwapRequest",
    "CloseStdinRequest",
    "CopyFileRequest",
    "CreateContainerRequest",
    "CreateSandboxRequest",
    "DestroySandboxRequest",
    #"ExecProcessRequest",
    "GetMetricsRequest",
    "GetOOMEventRequest",
    "GuestDetailsRequest",
    "ListInterfacesRequest",
    "ListRoutesRequest",
    "MemHotplugByProbeRequest",
    "OnlineCPUMemRequest",
    "PauseContainerRequest",
    "PullImageRequest",
    "ReadStreamRequest",
    "RemoveContainerRequest",
    #"ReseedRandomDevRequest",
    "ResizeVolumeRequest",
    "ResumeContainerRequest",
    "SetGuestDateTimeRequest",
    "SignalProcessRequest",
    "StartContainerRequest",
    "StartTracingRequest",
    "StatsContainerRequest",
    "StopTracingRequest",
    "TtyWinResizeRequest",
    "UpdateContainerRequest",
    "UpdateInterfaceRequest",
    "UpdateRoutesRequest",
    "VolumeStatsRequest",
    "WaitProcessRequest",
    "WriteStreamRequest"
]
479 docs/how-to/how-to-build-and-test-ccv0.md (new file)
@@ -0,0 +1,479 @@

# How to build, run and test Kata CCv0

## Introduction and Background

In order to try and make building (locally) and demoing the Kata Containers `CCv0` code base as simple as possible I've
shared a script [`ccv0.sh`](./ccv0.sh). This script was originally my attempt to automate the steps of the
[Developer Guide](https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md) so that I could do
different sections of them repeatedly and reliably as I was making changes to different parts of the
Kata code base. I then tried to weave in some of the [`tests/.ci`](https://github.com/kata-containers/tests/tree/main/.ci)
scripts in order to have less duplicated code.
As we progress on the confidential containers journey I hope to add more features to demonstrate the functionality
we have working.

*Disclaimer: This script has mostly just been used and tested by me ([@stevenhorsman](https://github.com/stevenhorsman)),*
*so there might be issues with it. I'm happy to try and help solve these if possible, but this shouldn't be considered a*
*fully supported process by the Kata Containers community.*

### Basic script set-up and optional environment variables

In order to build, configure and demo the CCv0 functionality, these are the set-up steps I take:
- Provision a new VM
    - *I chose an Ubuntu 20.04 8GB VM for this as I had one available. There are some dependencies on apt-get installed*
    *packages, so these will need re-working to be compatible with other platforms.*
- Copy the script over to your VM *(I put it in the home directory)* and ensure it has execute permission by running
    ```bash
    $ chmod u+x ccv0.sh
    ```
- Optionally set up some environment variables
    - By default the script checks out the `CCv0` branches of the `kata-containers/kata-containers` and
    `kata-containers/tests` repositories, but it is designed to be usable to test personal forks and branches as well.
    If you want to build and run these you can export the `katacontainers_repo`, `katacontainers_branch`, `tests_repo`
    and `tests_branch` variables e.g.
    ```bash
    $ export katacontainers_repo=github.com/stevenhorsman/kata-containers
    $ export katacontainers_branch=stevenh/agent-pull-image-endpoint
    $ export tests_repo=github.com/stevenhorsman/tests
    $ export tests_branch=stevenh/add-ccv0-changes-to-build
    ```
    before running the script.
    - By default the build and configuration use `QEMU` as the hypervisor. In order to use `Cloud Hypervisor` instead
    set:
    ```
    $ export KATA_HYPERVISOR="cloud-hypervisor"
    ```
    before running the build.

- At this point you can provision a Kata confidential containers pod and container with either
[`crictl`](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image),
or [Kubernetes](#using-kubernetes-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
and then test and use it.
### Using crictl for end-to-end provisioning of a Kata confidential containers pod with an unencrypted image

- Run the full build process with Kubernetes turned off, so its configuration doesn't interfere with `crictl`, using:
    ```bash
    $ export KUBERNETES="no"
    $ export KATA_HYPERVISOR="qemu"
    $ ~/ccv0.sh -d build_and_install_all
    ```
    > **Note**: Much of this script has to be run as `sudo`, so you are likely to get prompted for your password.
    - *I run this script sourced just so that the required installed components are accessible on the `PATH` to the rest*
    *of the process without having to reload the session.*
    - The steps that `build_and_install_all` takes are:
        - Check out the git repos for the `tests` and `kata-containers` repos as specified by the environment variables
        (defaulting to the `CCv0` branches if they are not supplied)
        - Use the `tests/.ci` scripts to install the build dependencies
        - Build and install the Kata runtime
        - Configure Kata to use containerd and for debug and confidential containers features to be enabled (including
        enabling console access to the Kata guest shell, which should only be done in development)
        - Create, build and install a rootfs for the Kata hypervisor to use. For 'CCv0' this is currently based on Ubuntu
        20.04 and has extra packages like `umoci` added.
        - Build the Kata guest kernel
        - Install the hypervisor (in order to select which hypervisor will be used, the `KATA_HYPERVISOR` environment
        variable can be used to select between `qemu` or `cloud-hypervisor`)
    > **Note**: Depending on where your VMs are hosted and how IPs are shared you might get an error from docker
    matching `ERROR: toomanyrequests: Too Many Requests`. To get past
    this, log in to Docker Hub and pull the images used with:
    > ```bash
    > $ sudo docker login
    > $ sudo docker pull ubuntu
    > ```
    > then re-run the command.
    - The first time this runs it may take a while, but subsequent runs will be quicker as more things are already
    installed, and they can be further cut down by not running all the above steps
    [see "Additional script usage" below](#additional-script-usage)

- Create a new Kata sandbox pod using `crictl` with:
    ```bash
    $ ~/ccv0.sh crictl_create_cc_pod
    ```
    - This creates a pod configuration file, creates the pod from this using
    `sudo crictl runp -r kata ~/pod-config.yaml` and runs `sudo crictl pods` to show the pod
- Create a new Kata confidential container with:
    ```bash
    $ ~/ccv0.sh crictl_create_cc_container
    ```
    - This creates a container (based on `busybox:1.33.1`) in the Kata cc sandbox and prints a list of containers.
    This will have been created based on an image pulled in the Kata pod sandbox/guest, not on the host machine.

At this point you should have a `crictl` pod and container that is using the Kata confidential containers runtime.
You can [validate that the container image was pulled on the guest](#validate-that-the-container-image-was-pulled-on-the-guest)
or [use the Kata pod sandbox for testing with `agent-ctl` or `ctr shim`](#using-a-kata-pod-sandbox-for-testing-with-agent-ctl-or-ctr-shim)

#### Clean up the `crictl` pod sandbox and container
- When the testing is complete you can delete the container and pod by running:
    ```bash
    $ ~/ccv0.sh crictl_delete_cc
    ```
### Using Kubernetes for end-to-end provisioning of a Kata confidential containers pod with an unencrypted image

- Run the full build process with the Kubernetes environment variable set to `"yes"`, so the Kubernetes cluster is
configured and created using the VM
as a single node cluster:
    ```bash
    $ export KUBERNETES="yes"
    $ ~/ccv0.sh build_and_install_all
    ```
    > **Note**: Depending on where your VMs are hosted and how IPs are shared you might get an error from docker
    matching `ERROR: toomanyrequests: Too Many Requests`. To get past
    this, log in to Docker Hub and pull the images used with:
    > ```bash
    > $ sudo docker login
    > $ sudo docker pull registry:2
    > $ sudo docker pull ubuntu:20.04
    > ```
    > then re-run the command.
- Check that your Kubernetes cluster has been correctly set up by running:
    ```bash
    $ kubectl get nodes
    ```
    and checking that you see a single node e.g.
    ```text
    NAME                             STATUS   ROLES                  AGE   VERSION
    stevenh-ccv0-k8s1.fyre.ibm.com   Ready    control-plane,master   43s   v1.22.0
    ```
- Create a Kata confidential containers pod by running:
    ```bash
    $ ~/ccv0.sh kubernetes_create_cc_pod
    ```
- Wait a few seconds for the pod to start then check that the pod's status is `Running` with
    ```bash
    $ kubectl get pods
    ```
    which should show something like:
    ```text
    NAME         READY   STATUS    RESTARTS   AGE
    busybox-cc   1/1     Running   0          54s
    ```

- At this point you should have a Kubernetes pod and container running that is using the Kata
confidential containers runtime.
You can [validate that the container image was pulled on the guest](#validate-that-the-container-image-was-pulled-on-the-guest)
or [use the Kata pod sandbox for testing with `agent-ctl` or `ctr shim`](#using-a-kata-pod-sandbox-for-testing-with-agent-ctl-or-ctr-shim)

#### Clean up the Kubernetes pod sandbox and container
- When the testing is complete you can delete the container and pod by running:
    ```bash
    $ ~/ccv0.sh kubernetes_delete_cc_pod
    ```
### Validate that the container image was pulled on the guest

There are a couple of ways we can check that the container image pull was offloaded to the guest: by checking
the guest's file system for the unpacked bundle, and by checking the host's directories to ensure it wasn't also pulled
there.
- To check the guest's file system:
    - Open a shell into the Kata guest with:
    ```bash
    $ ~/ccv0.sh open_kata_shell
    ```
    - List the files in the directory that the container image bundle should have been unpacked to with:
    ```bash
    $ ls -ltr /run/kata-containers/confidential-containers_signed/
    ```
    - This should give something like
    ```
    total 72
    -rw-r--r--  1 root root  2977 Jan 20 10:03 config.json
    -rw-r--r--  1 root root   372 Jan 20 10:03 umoci.json
    -rw-r--r--  1 root root 63584 Jan 20 10:03 sha256_be9faa75035c20288cde7d2cdeb6cd1f5f4dbcd845d3f86f7feab61c4eff9eb5.mtree
    drwxr-xr-x 12 root root   240 Jan 20 10:03 rootfs
    ```
    which shows how the image has been pulled and then unpacked on the guest.
    - Leave the Kata guest shell by running:
    ```bash
    $ exit
    ```
- To verify that the image wasn't pulled on the host system we can look at the shared sandbox on the host, where we
should only see a single bundle for the pause container, as the `busybox` based container image should have been
pulled on the guest:
    - Find all the `rootfs` directories under the pod's shared directory with:
    ```bash
    $ pod_id=$(ps -ef | grep containerd-shim-kata-v2 | egrep -o "id [^,][^,].* " | awk '{print $2}')
    $ sudo find /run/kata-containers/shared/sandboxes/${pod_id}/shared -name rootfs
    ```
    which should only show a single `rootfs` directory if the container image was pulled on the guest, not the host
    - Looking at that `rootfs` directory with
    ```bash
    $ sudo ls -ltr $(sudo find /run/kata-containers/shared/sandboxes/${pod_id}/shared -name rootfs)
    ```
    shows something similar to
    ```
    total 668
    -rwxr-xr-x 1 root root 682696 Aug 25 13:58 pause
    drwxr-xr-x 2 root root      6 Jan 20 02:01 proc
    drwxr-xr-x 2 root root      6 Jan 20 02:01 dev
    drwxr-xr-x 2 root root      6 Jan 20 02:01 sys
    drwxr-xr-x 2 root root     25 Jan 20 02:01 etc
    ```
    which is clearly the pause container, indicating that the `busybox` based container image is not exposed to the host.
### Using a Kata pod sandbox for testing with `agent-ctl` or `ctr shim`

Once you have a Kata pod sandbox created as described above, either using
[`crictl`](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image), or [Kubernetes](#using-kubernetes-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image),
you can use it to test specific components of the Kata confidential
containers architecture. This can be useful for development and debugging, to isolate and test features
that aren't broadly supported end-to-end. Here are some examples:

- In the first terminal run the pull image on guest command against the Kata agent, via the shim (`containerd-shim-kata-v2`).
This can be achieved using the [containerd](https://github.com/containerd/containerd) CLI tool, `ctr`, which can be used to
interact with the shim directly. The command takes the form
`ctr --namespace k8s.io shim --id <sandbox-id> pull-image <image> <new-container-id>` and can be run directly, or through
the `ccv0.sh` script to automatically fill in the variables (a filled-in sketch follows this list):
    - Optionally, set up some environment variables to set the image and credentials used:
        - By default the shim pull image test in `ccv0.sh` will use the `busybox:1.33.1` based test image
        `quay.io/kata-containers/confidential-containers:signed` which requires no authentication. To use a different
        image, set the `PULL_IMAGE` environment variable e.g.
        ```bash
        $ export PULL_IMAGE="docker.io/library/busybox:latest"
        ```
        Currently the containerd shim pull image
        code doesn't support using a container registry that requires authentication; if this is required, see the
        below steps to run the pull image command against the agent directly.
    - Run the pull image agent endpoint with:
        ```bash
        $ ~/ccv0.sh shim_pull_image
        ```
        which will print the `ctr shim` command for reference
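A filled-in sketch of that direct shim call (the sandbox id is an illustrative value; the `get_ids` function in `ccv0.sh` shows one way to discover the real one):

```bash
# Pull the test image on the guest via the shim, using container id 0123456789.
sandbox_id="7a0a5dca2a66"   # illustrative value
sudo ctr --namespace k8s.io shim --id "${sandbox_id}" \
    pull-image quay.io/kata-containers/confidential-containers:signed 0123456789
```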
- Alternatively you can issue the command directly to the `kata-agent` pull image endpoint, which also supports
credentials in order to pull from an authenticated registry:
    - Optionally set up some environment variables to set the image and credentials used:
        - Set the `PULL_IMAGE` environment variable e.g. `export PULL_IMAGE="docker.io/library/busybox:latest"`
        if a specific container image is required.
        - If the container registry for the image requires authentication then this can be set with an environment
        variable `SOURCE_CREDS`. For example to use Docker Hub (`docker.io`) as an authenticated user first run
        `export SOURCE_CREDS="<dockerhub username>:<dockerhub api key>"`
        > **Note**: the credentials support on the agent request is a tactical solution for the short-term
        proof of concept to allow more images to be pulled and tested. Once we have support for getting
        keys into the Kata guest image using the attestation-agent and/or KBS I'd expect container registry
        credentials to be looked up using that mechanism.
    - Run the pull image agent endpoint with
        ```bash
        $ ~/ccv0.sh agent_pull_image
        ```
        and you should see output which includes `Command PullImage (1 of 1) returned (Ok(()), false)` to indicate
        that the `PullImage` request was successful e.g.
        ```
        Finished release [optimized] target(s) in 0.21s
        {"msg":"announce","level":"INFO","ts":"2021-09-15T08:40:14.189360410-07:00","subsystem":"rpc","name":"kata-agent-ctl","pid":"830920","version":"0.1.0","source":"kata-agent-ctl","config":"Config { server_address: \"vsock://1970354082:1024\", bundle_dir: \"/tmp/bundle\", timeout_nano: 0, interactive: false, ignore_errors: false }"}
        {"msg":"client setup complete","level":"INFO","ts":"2021-09-15T08:40:14.193639057-07:00","pid":"830920","source":"kata-agent-ctl","name":"kata-agent-ctl","subsystem":"rpc","version":"0.1.0","server-address":"vsock://1970354082:1024"}
        {"msg":"Run command PullImage (1 of 1)","level":"INFO","ts":"2021-09-15T08:40:14.196643765-07:00","pid":"830920","source":"kata-agent-ctl","subsystem":"rpc","name":"kata-agent-ctl","version":"0.1.0"}
        {"msg":"response received","level":"INFO","ts":"2021-09-15T08:40:43.828200633-07:00","source":"kata-agent-ctl","name":"kata-agent-ctl","subsystem":"rpc","version":"0.1.0","pid":"830920","response":""}
        {"msg":"Command PullImage (1 of 1) returned (Ok(()), false)","level":"INFO","ts":"2021-09-15T08:40:43.828261708-07:00","subsystem":"rpc","pid":"830920","source":"kata-agent-ctl","version":"0.1.0","name":"kata-agent-ctl"}
        ```
        > **Note**: The first time that `~/ccv0.sh agent_pull_image` is run, the `agent-ctl` tool will be built,
        which may take a few minutes.
- To validate that the image pull was successful, you can open a shell into the Kata guest with:
    ```bash
    $ ~/ccv0.sh open_kata_shell
    ```
- Check the `/run/kata-containers/` directory to verify that the container image bundle has been created in a directory
named either `0123456789` (for the container id), or the container image name, e.g.
    ```bash
    $ ls -ltr /run/kata-containers/confidential-containers_signed/
    ```
    which should show something like
    ```
    total 72
    drwxr-xr-x 10 root root   200 Jan  1  1970 rootfs
    -rw-r--r--  1 root root  2977 Jan 20 16:45 config.json
    -rw-r--r--  1 root root   372 Jan 20 16:45 umoci.json
    -rw-r--r--  1 root root 63584 Jan 20 16:45 sha256_be9faa75035c20288cde7d2cdeb6cd1f5f4dbcd845d3f86f7feab61c4eff9eb5.mtree
    ```
- Leave the Kata shell by running:
    ```bash
    $ exit
    ```
## Verifying signed images

For this sample demo, we use local attestation to pass through the required
configuration to do container image signature verification. Because of this, the ability to verify images is limited
to a pre-created selection of test images in our test
repository [`quay.io/kata-containers/confidential-containers`](https://quay.io/repository/kata-containers/confidential-containers?tab=tags).
For pulling images not in this test repository (called an *unprotected* registry below), we fall back to the behaviour
of not enforcing signatures. More documentation on how to customise this to match your own containers through local
or remote attestation will be available in the future.

In our test repository there are three tagged images:

| Test Image | Base Image used | Signature status | GPG key status |
| --- | --- | --- | --- |
| `quay.io/kata-containers/confidential-containers:signed` | `busybox:1.33.1` | [signature](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/signatures.tar) embedded in kata rootfs | [public key](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/public.gpg) embedded in kata rootfs |
| `quay.io/kata-containers/confidential-containers:unsigned` | `busybox:1.33.1` | not signed | not signed |
| `quay.io/kata-containers/confidential-containers:other_signed` | `nginx:1.21.3` | [signature](https://github.com/kata-containers/tests/tree/CCv0/integration/confidential/fixtures/quay_verification/signatures.tar) embedded in kata rootfs | GPG key not kept |

Using a standard unsigned `busybox` image that can be pulled from another, *unprotected*, `quay.io` repository we can
test a few scenarios.

In this sample, with local attestation, we pass the public GPG key and signature files, and the [`offline_fs_kbc`
configuration](https://github.com/confidential-containers/attestation-agent/blob/main/src/kbc_modules/offline_fs_kbc/README.md)
into the guest image, which specifies that any container image from `quay.io/kata-containers`
must be signed with the embedded GPG key, and the agent configuration needs updating to enable this. A sketch of the kind of policy this expresses is shown below.
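A hedged sketch of such a policy in the standard `containers-policy.json` format (the paths are illustrative; the real files live in the tests repo fixtures linked above):

```bash
# Images under quay.io/kata-containers must be signed with the embedded GPG
# key; everything else falls back to being accepted unsigned.
cat <<'EOF' > quay_policy.json
{
  "default": [{ "type": "insecureAcceptAnything" }],
  "transports": {
    "docker": {
      "quay.io/kata-containers": [{
        "type": "signedBy",
        "keyType": "GPGKeys",
        "keyPath": "/etc/containers/quay_verification/public.gpg"
      }]
    }
  }
}
EOF
```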
With this policy set a few tests of image verification can be done to test different scenarios by attempting
|
||||
to create containers from these images using `crictl`:
|
||||
|
||||
- If you don't already have the Kata Containers CC code built and configured for `crictl`, then follow the
  [instructions above](#using-crictl-for-end-to-end-provisioning-of-a-kata-confidential-containers-pod-with-an-unencrypted-image)
  up to the `~/ccv0.sh crictl_create_cc_pod` command.

- To enable the guest image, set up the required configuration, policy and signature files by running
  `~/ccv0.sh copy_signature_files_to_guest`, then run `~/ccv0.sh crictl_create_cc_pod`, which will delete and recreate
  your pod, adding in the new files.

- To test that the fallback behaviour works using an unsigned image from an *unprotected* registry, we can pull the `busybox`
  image by running:
  ```bash
  $ export CONTAINER_CONFIG_FILE=container-config_unsigned-unprotected.yaml
  $ ~/ccv0.sh crictl_create_cc_container
  ```
- This finishes by showing the running container, e.g.:
  ```text
  CONTAINER        IMAGE                               CREATED                  STATE     NAME                        ATTEMPT   POD ID
  98c70fefe997a    quay.io/prometheus/busybox:latest   Less than a second ago   Running   prometheus-busybox-signed   0         70119e0539238
  ```
- To test that an unsigned image from our *protected* test container registry is rejected, we can run:
  ```bash
  $ export CONTAINER_CONFIG_FILE=container-config_unsigned-protected.yaml
  $ ~/ccv0.sh crictl_create_cc_container
  ```
- This correctly results in an error message from `crictl`:
  `PullImage from image service failed" err="rpc error: code = Internal desc = Security validate failed: Validate image failed: The signatures do not satisfied! Reject reason: [Match reference failed.]" image="quay.io/kata-containers/confidential-containers:unsigned"`
- To test that the signed image from our *protected* test container registry is accepted, we can run:
  ```bash
  $ export CONTAINER_CONFIG_FILE=container-config.yaml
  $ ~/ccv0.sh crictl_create_cc_container
  ```
- This finishes by showing a new `kata-cc-busybox-signed` running container, e.g.:
  ```text
  CONTAINER        IMAGE                                                    CREATED                  STATE     NAME                     ATTEMPT   POD ID
  b4d85c2132ed9    quay.io/kata-containers/confidential-containers:signed   Less than a second ago   Running   kata-cc-busybox-signed   0         70119e0539238
  ...
  ```
- Finally, to check that the image with a valid signature but no matching GPG key (the GPG key being the piece of trusted
  information we ultimately want to protect with the attestation agent) is rejected, we can run:
  ```bash
  $ export CONTAINER_CONFIG_FILE=container-config_signed-protected-other.yaml
  $ ~/ccv0.sh crictl_create_cc_container
  ```
- Again this results in an error message from `crictl`:
  `"PullImage from image service failed" err="rpc error: code = Internal desc = Security validate failed: Validate image failed: The signatures do not satisfied! Reject reason: [signature verify failed! There is no pubkey can verify the signature!]" image="quay.io/kata-containers/confidential-containers:other_signed"`

### Using Kubernetes to create a Kata confidential containers pod from the encrypted ssh demo sample image

The [ssh-demo](https://github.com/confidential-containers/documentation/tree/main/demos/ssh-demo) explains how to
demonstrate creating a Kata confidential containers pod from an encrypted image with the runtime created by the
[confidential-containers operator](https://github.com/confidential-containers/documentation/blob/main/demos/operator-demo).
To be fully confidential, this should be run on a Trusted Execution Environment, but it can be tested on generic
hardware as well.

If you wish to build the Kata confidential containers runtime to do this yourself, then you can do so using the following
steps:

- Run the full build process with the Kubernetes environment variable set to `"yes"`, so the Kubernetes cluster is
  configured and created using the VM as a single node cluster, and with `AA_KBC` set to `offline_fs_kbc`:
  ```bash
  $ export KUBERNETES="yes"
  $ export AA_KBC=offline_fs_kbc
  $ ~/ccv0.sh build_and_install_all
  ```
- The `AA_KBC=offline_fs_kbc` mode will ensure that, when creating the rootfs of the Kata guest, the
  [attestation-agent](https://github.com/confidential-containers/attestation-agent) will be added along with the
  [sample offline KBC](https://github.com/confidential-containers/documentation/blob/main/demos/ssh-demo/aa-offline_fs_kbc-keys.json)
  and an agent configuration file (a sketch of the key file's format is shown below).
  > **Note**: Depending on where your VMs are hosted and how IPs are shared, you might get an error from Docker
  > matching `ERROR: toomanyrequests: Too Many Requests`. To get past
  > this, log in to Docker Hub and pull the images used with:
  > ```bash
  > $ sudo docker login
  > $ sudo docker pull registry:2
  > $ sudo docker pull ubuntu:20.04
  > ```
  > then re-run the command.
- Check that your Kubernetes cluster has been correctly set up by running:
  ```bash
  $ kubectl get nodes
  ```
  and checking that you see a single node, e.g.:
  ```text
  NAME                             STATUS   ROLES                  AGE   VERSION
  stevenh-ccv0-k8s1.fyre.ibm.com   Ready    control-plane,master   43s   v1.22.0
  ```
- Create a sample Kata confidential containers ssh pod by running:
  ```bash
  $ ~/ccv0.sh kubernetes_create_ssh_demo_pod
  ```
- At this point you should have a Kubernetes pod running the Kata confidential containers runtime that has pulled
  the [sample image](https://hub.docker.com/r/katadocker/ccv0-ssh), which was encrypted by the key file that we included
  in the rootfs.
  During the pod deployment the image was pulled and then decrypted, using the key file, on the Kata guest, without
  the key ever being available to the host.
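  For reference, the sample offline KBC key file baked into the rootfs is just a JSON map from a key ID to a
  base64-encoded key. A hedged sketch of its shape (the key material below is a placeholder, not the real demo key):
  ```json
  {
      "default/key/ssh-demo": "<base64-encoded-32-byte-KEK>"
  }
  ```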

- To validate that the container is working, you can connect to the image via SSH by running:
  ```bash
  $ ~/ccv0.sh connect_to_ssh_demo_pod
  ```
- During this connection the host key fingerprint is shown and should match:
  `ED25519 key fingerprint is SHA256:wK7uOpqpYQczcgV00fGCh+X97sJL3f6G1Ku4rvlwtR0.`
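  One hedged way to double-check this fingerprint independently (the pod name `ccv0-ssh` and its reachability from the
  node are assumptions here):
  ```bash
  # Hash the host key that the pod actually presents
  $ POD_IP=$(kubectl get pod ccv0-ssh -o jsonpath='{.status.podIP}')
  $ ssh-keyscan -t ed25519 "${POD_IP}" 2>/dev/null | ssh-keygen -l -E sha256 -f -
  ```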
- After you have finished, close the connection by running:
  ```bash
  $ exit
  ```

- To delete the sample SSH demo pod run:
  ```bash
  $ ~/ccv0.sh kubernetes_delete_ssh_demo_pod
  ```

## Additional script usage

As well as being able to use the script as above to build all of `kata-containers` from scratch, it can be used to
re-build just parts of it by running the script with different parameters. For example, after the first build you will
often not need to re-install the dependencies, the hypervisor or the guest kernel, but just test code changes made to the
runtime and agent. This can be done by running `~/ccv0.sh rebuild_and_install_kata`. (*Note: this does a hard checkout
from git, so if your changes are only made locally it is better to run the individual steps, e.g.*
`~/ccv0.sh build_kata_runtime && ~/ccv0.sh build_and_add_agent_to_rootfs && ~/ccv0.sh build_and_install_rootfs`.)
There are commands for many of the build, setup and test steps; the full list can be seen by running
`~/ccv0.sh help`:
```
$ ~/ccv0.sh help
Overview:
    Build and test kata containers from source
    Optionally set kata-containers and tests repo and branch as exported variables before running
    e.g. export katacontainers_repo=github.com/stevenhorsman/kata-containers && export katacontainers_branch=kata-ci-from-fork && export tests_repo=github.com/stevenhorsman/tests && export tests_branch=kata-ci-from-fork && ~/ccv0.sh build_and_install_all
Usage:
    ccv0.sh [options] <command>
Commands:
- help:                          Display this help
- all:                           Build and install everything, test kata with containerd and capture the logs
- build_and_install_all:         Build and install everything
- initialize:                    Install dependencies and check out kata-containers source
- rebuild_and_install_kata:      Rebuild the kata runtime and agent and build and install the image
- build_kata_runtime:            Build and install the kata runtime
- configure:                     Configure Kata to use rootfs and enable debug
- create_rootfs:                 Create a local rootfs
- build_and_add_agent_to_rootfs: Builds the kata-agent and adds it to the rootfs
- build_and_install_rootfs:      Builds and installs the rootfs image
- install_guest_kernel:          Setup, build and install the guest kernel
- build_cloud_hypervisor         Checkout, patch, build and install Cloud Hypervisor
- build_qemu:                    Checkout, patch, build and install QEMU
- init_kubernetes:               initialize a Kubernetes cluster on this system
- crictl_create_cc_pod           Use crictl to create a new kata cc pod
- crictl_create_cc_container     Use crictl to create a new busybox container in the kata cc pod
- crictl_delete_cc               Use crictl to delete the kata cc pod sandbox and container in it
- kubernetes_create_cc_pod:      Create a Kata CC runtime busybox-based pod in Kubernetes
- kubernetes_delete_cc_pod:      Delete the Kata CC runtime busybox-based pod in Kubernetes
- open_kata_shell:               Open a shell into the kata runtime
- agent_pull_image:              Run PullImage command against the agent with agent-ctl
- shim_pull_image:               Run PullImage command against the shim with ctr
- agent_create_container:        Run CreateContainer command against the agent with agent-ctl
- test:                          Test using kata with containerd
- test_capture_logs:             Test using kata with containerd and capture the logs in the user's home directory

Options:
    -d: Enable debug
    -h: Display this help
```
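
As a hedged example of combining these, the following rebuilds only the runtime with debug enabled; pick the step that matches what you changed:

```bash
$ ~/ccv0.sh -d build_kata_runtime
```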

@@ -0,0 +1,44 @@
# Generating a Kata Containers payload for the Confidential Containers Operator

The [Confidential Containers
Operator](https://github.com/confidential-containers/operator) consumes a Kata
Containers payload, generated from the `CCv0` branch, and here you can find all
the necessary info on how to build such a payload.

## Requirements

* `make` installed on the machine
* Docker installed on the machine
* `sudo` access to the machine

## Process

* Clone [Kata Containers](https://github.com/kata-containers/kata-containers)
  ```sh
  git clone --branch CCv0 https://github.com/kata-containers/kata-containers
  ```
* In case you've already cloned the repo, make sure to switch to the `CCv0` branch
  ```sh
  git checkout CCv0
  ```
* Ensure your tree is clean and in sync with upstream `CCv0`
  ```sh
  git clean -xfd
  git reset --hard <upstream>/CCv0
  ```
* Make sure you're authenticated to `quay.io`
  ```sh
  sudo docker login quay.io
  ```
* From the top repo directory, run:
  ```sh
  sudo make cc-payload
  ```
* Make sure the image was uploaded to the [Confidential Containers
  runtime-payload
  registry](https://quay.io/repository/confidential-containers/runtime-payload?tab=tags)

## Notes

The payload build is slow: make sure to run it on a machine that's not the one you're hacking on, prepare a
cup of tea, and get back to it an hour later (at least).

@@ -15,18 +15,6 @@ $ sudo .ci/aarch64/install_rom_aarch64.sh
$ popd
```

## Config KATA QEMU

After executing the above script, two files will be generated under the directory `/usr/share/kata-containers/` by default: `kata-flash0.img` and `kata-flash1.img`. Next we need to edit the `kata qemu` configuration file, located at `/opt/kata/share/defaults/kata-containers/configuration-qemu.toml` by default, and specify that it should use the UEFI ROM installed above. That path applies to a `kata-deploy` installation; for a package-managed installation, please use `kata-runtime env` to find the location of the configuration file. Refer to the following configuration.

```
[hypervisor.qemu]

# -pflash can add image file to VM. The arguments of it should be in format
# of ["/path/to/flash0.img", "/path/to/flash1.img"]
pflashes = ["/usr/share/kata-containers/kata-flash0.img", "/usr/share/kata-containers/kata-flash1.img"]
```

## Run for test

Let's test whether memory hotplug is ready for Kata after installing the UEFI ROM. Make sure containerd is ready to run Kata before testing.

@@ -57,7 +57,6 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
| `io.katacontainers.config.hypervisor.vhost_user_reconnect_timeout_sec` | `string` | the timeout for reconnecting vhost user socket (QEMU) |
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
| `io.katacontainers.config.hypervisor.entropy_source` (R) | string | the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |
@@ -88,7 +87,7 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.use_vsock` | `boolean` | specify use of `vsock` for agent communication |
| `io.katacontainers.config.hypervisor.vhost_user_store_path` (R) | `string` | specify the directory path where vhost-user devices related folders, sockets and device nodes should be (QEMU) |
| `io.katacontainers.config.hypervisor.virtio_fs_cache_size` | uint32 | virtio-fs DAX cache size in `MiB` |
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `never` |
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to `virtiofs` daemon |
| `io.katacontainers.config.hypervisor.enable_guest_swap` | `boolean` | enable swap in the guest |
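
For context, these keys are consumed as pod annotations. A hedged sketch of setting one of them on a pod (the runtime class name `kata` and the value shown are illustrative assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: kata-annotated
  annotations:
    io.katacontainers.config.hypervisor.virtio_fs_cache: "auto"
spec:
  runtimeClassName: kata
  containers:
    - name: app
      image: quay.io/prometheus/busybox:latest
```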

@@ -17,9 +17,9 @@ Enable setup swap device in guest kernel as follows:
$ sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' /etc/kata-containers/configuration.toml
```

## Run a Kata Containers utilizing swap device
## Run a Kata Container utilizing swap device

Use following command to start a Kata Containers with swappiness 60 and 1GB swap device (swap_in_bytes - memory_limit_in_bytes).
Use following command to start a Kata Container with swappiness 60 and 1GB swap device (swap_in_bytes - memory_limit_in_bytes).
```
$ pod_yaml=pod.yaml
$ container_yaml=container.yaml
@@ -27,6 +27,8 @@ $ image="quay.io/prometheus/busybox:latest"
$ cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
  uid: $(uuidgen)
  namespace: default
EOF
$ cat << EOF > "${container_yaml}"
metadata:
@@ -43,12 +45,12 @@ command:
- top
EOF
$ sudo crictl pull $image
$ podid=$(sudo crictl runp --runtime kata $pod_yaml)
$ podid=$(sudo crictl runp $pod_yaml)
$ cid=$(sudo crictl create $podid $container_yaml $pod_yaml)
$ sudo crictl start $cid
```
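
The swap behaviour is driven by annotations in the container config. A hedged sketch of the annotation block that the elided middle of `container.yaml` needs to carry (values are illustrative; here `swap_in_bytes` minus `memory_limit_in_bytes` yields the 1GB swap device mentioned above):

```yaml
annotations:
  io.katacontainers.container.resource.swappiness: "60"
  io.katacontainers.container.resource.swap_in_bytes: "2147483648"
linux:
  resources:
    memory_limit_in_bytes: 1073741824
```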

Kata Containers setups swap device for this container only when `io.katacontainers.container.resource.swappiness` is set.
Kata Container setups swap device for this container only when `io.katacontainers.container.resource.swappiness` is set.

The following table shows how the swap size is decided when `io.katacontainers.container.resource.swappiness` is set.
|`io.katacontainers.container.resource.swap_in_bytes`|`memory_limit_in_bytes`|swap size|

@@ -1,90 +0,0 @@
# Configure Kata Containers to use EROFS build rootfs

## Introduction
For Kata Containers, the rootfs is used read-only. EROFS can noticeably decrease metadata overhead.

`mkfs.erofs` can generate compressed and uncompressed EROFS images.

For uncompressed images, no files are compressed. However, it is optional to inline the data blocks at the end of a file with the metadata.

For compressed images, each file is compressed using the lz4 or lz4hc algorithm, and it is first confirmed whether compression actually saves space; a file is stored uncompressed if compression does not save space.
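
As a hedged illustration of the two modes (image and directory names are placeholders), both flavours can be produced from the same directory tree:

```shell
# Uncompressed image: metadata plus (optionally inlined) data blocks
$ mkfs.erofs kata-rootfs.erofs ./rootfs
# lz4hc-compressed image: files that would not shrink are stored uncompressed
$ mkfs.erofs -zlz4hc kata-rootfs-lz4hc.erofs ./rootfs
```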

## Performance comparison
|                 | EROFS              | EXT4 | XFS |
|-----------------|--------------------|------|-----|
| Image Size [MB] | 106 (uncompressed) | 256  | 126 |

## Guidance
### Install the `erofs-utils`
#### `apt/dnf` install
On newer `Ubuntu/Debian` systems, it can be installed directly using the `apt` command, and on `Fedora` it can be installed directly using the `dnf` command.

```shell
# Debian/Ubuntu
$ apt install erofs-utils
# Fedora
$ dnf install erofs-utils
```

#### Source install
[https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git](https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git)

##### Compile dependencies
If you need to enable the `Lz4` compression feature, `Lz4 1.8.0+` is required, and `Lz4 1.9.3+` is strongly recommended.

##### Compilation process
For some old lz4 versions (lz4-1.8.0~1.8.3), the lz4hc algorithm is not supported unless lz4-static is installed (e.g. via `apt install lz4-static.x86_64`). However, these versions have some compression bugs, so using them is not recommended.
If you use `lz4 1.9.0+`, you can compile directly with the following commands.

```shell
$ ./autogen.sh
$ ./configure
$ make
```

The compiled `mkfs.erofs` program will be saved in the `mkfs` directory. Afterwards, the generated tools can be installed to a system directory using `make install` (requires root privileges).

### Create a local rootfs
```shell
$ export distro="ubuntu"
$ export FS_TYPE="erofs"
$ export ROOTFS_DIR="$(realpath kata-containers/tools/osbuilder/rootfs-builder/rootfs)"
$ sudo rm -rf "${ROOTFS_DIR}"
$ pushd kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E SECCOMP=no ./rootfs.sh "${distro}"'
$ popd
```

### Add a custom agent to the image - OPTIONAL
> Note:
> - You should only do this step if you are testing with the latest version of the agent.
```shell
$ sudo install -o root -g root -m 0550 -t "${ROOTFS_DIR}/usr/bin" "${ROOTFS_DIR}/../../../../src/agent/target/x86_64-unknown-linux-musl/release/kata-agent"
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-agent.service" "${ROOTFS_DIR}/usr/lib/systemd/system/"
$ sudo install -o root -g root -m 0440 "${ROOTFS_DIR}/../../../../src/agent/kata-containers.target" "${ROOTFS_DIR}/usr/lib/systemd/system/"
```

### Build a root image
```shell
$ pushd kata-containers/tools/osbuilder/image-builder
$ script -fec 'sudo -E ./image_builder.sh "${ROOTFS_DIR}"'
$ popd
```

### Install the rootfs image
```shell
$ pushd kata-containers/tools/osbuilder/image-builder
$ commit="$(git log --format=%h -1 HEAD)"
$ date="$(date +%Y-%m-%d-%T.%N%z)"
$ rootfs="erofs"
$ image="kata-containers-${rootfs}-${date}-${commit}"
$ sudo install -o root -g root -m 0640 -D kata-containers.img "/usr/share/kata-containers/${image}"
$ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
$ popd
```

### Use `EROFS` in the runtime
```shell
$ sudo sed -i -e 's/^# *\(rootfs_type\).*=.*$/\1 = erofs/g' /etc/kata-containers/configuration.toml
```

@@ -104,7 +104,7 @@ sudo dmsetup create "${POOL_NAME}" \

cat << EOF
#
# Add this to your config.toml configuration file and restart containerd daemon
# Add this to your config.toml configuration file and restart `containerd` daemon
#
[plugins]
  [plugins.devmapper]
@@ -212,7 +212,7 @@ Next, we need to configure containerd. Add a file in your path (e.g. `/usr/local

```
#!/bin/bash
KATA_CONF_FILE=/etc/kata-containers/configuration-fc.toml /usr/local/bin/containerd-shim-kata-v2 $@
KATA_CONF_FILE=/etc/containers/configuration-fc.toml /usr/local/bin/containerd-shim-kata-v2 $@
```
> **Note:** You may need to edit the paths of the configuration file and the `containerd-shim-kata-v2` to correspond to your setup.

@@ -32,6 +32,7 @@ The `nydus-sandbox.yaml` looks like below:
metadata:
  attempt: 1
  name: nydus-sandbox
  uid: nydus-uid
  namespace: default
log_directory: /tmp
linux:

@@ -42,6 +42,8 @@ $ image="quay.io/prometheus/busybox:latest"
$ cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
  uid: $(uuidgen)
  namespace: default
EOF
$ cat << EOF > "${container_yaml}"
metadata:

@@ -49,14 +49,14 @@ Follow the [`kata-deploy`](../../tools/packaging/kata-deploy/README.md).

* Download `Rustup` and install `Rust`
  > **Notes:**
  > Rust version 1.62.0 is needed
  > Rust version 1.58 is needed

  Example for `x86_64`
  ```
  $ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
  $ source $HOME/.cargo/env
  $ rustup install 1.62.0
  $ rustup default 1.62.0-x86_64-unknown-linux-gnu
  $ rustup install 1.58
  $ rustup default 1.58-x86_64-unknown-linux-gnu
  ```

* Musl support for fully static binary

@@ -71,6 +71,12 @@ To use containerd, modify the `--container-runtime` argument:
> **Notes:**
> - Adjust the `--memory 6144` line to suit your environment and requirements. Kata Containers defaults to
>   requesting 2048MB per container. We recommend you supply more than that to the Minikube node.
> - Prior to Minikube/Kubernetes v1.14, the beta `RuntimeClass` feature also needed enabling with
>   the following.
>
>   | what | why |
>   | ---- | --- |
>   | `--feature-gates=RuntimeClass=true` | Kata needs to use the `RuntimeClass` Kubernetes feature |

The full command is therefore:

@@ -132,9 +138,17 @@ $ kubectl -n kube-system exec ${podname} -- ps -ef | fgrep infinity

## Enabling Kata Containers

> **Note:** Only Minikube/Kubernetes versions <= 1.13 require this step. Since version
> v1.14, the `RuntimeClass` is enabled by default. Performing this step on Kubernetes > v1.14 is
> however benign.

Now you have installed the Kata Containers components in the Minikube node. Next, you need to configure
Kubernetes `RuntimeClass` to know when to use Kata Containers to run a pod.

```sh
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/node-api/master/manifests/runtimeclass_crd.yaml
```

### Register the runtime

Now register the `kata qemu` runtime with that class. This should result in no errors:
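
The manifest itself is elided from this hunk; as a hedged sketch only (the API version and the `kata-qemu` names are assumptions based on the beta `RuntimeClass` API this guide targets), the registration is roughly:

```yaml
apiVersion: node.k8s.io/v1beta1
kind: RuntimeClass
metadata:
  name: kata-qemu
handler: kata-qemu
```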

@@ -545,12 +545,6 @@ Create the hook execution file for Kata:
/usr/bin/nvidia-container-toolkit -debug $@
```

Make sure the hook shell is executable:

```sh
chmod +x $ROOTFS_DIR/usr/share/oci/hooks/prestart/nvidia-container-toolkit.sh
```

As the last step one can do some cleanup of files or package caches. Build the
rootfs and configure it for use with Kata according to the development guide.


@@ -49,7 +49,7 @@ the latest driver.
$ export QAT_DRIVER_VER=qat1.7.l.4.14.0-00031.tar.gz
$ export QAT_DRIVER_URL=https://downloadmirror.intel.com/30178/eng/${QAT_DRIVER_VER}
$ export QAT_CONF_LOCATION=~/QAT_conf
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/main/demo/openssl-qat-engine/Dockerfile
$ export QAT_DOCKERFILE=https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/master/demo/openssl-qat-engine/Dockerfile
$ export QAT_SRC=~/src/QAT
$ export GOPATH=~/src/go
$ export KATA_KERNEL_LOCATION=~/kata

@@ -197,6 +197,11 @@ vhost_user_store_path = "<Path of the base directory for vhost-user device>"
> under `[hypervisor.qemu]` section.


For the subdirectories of `vhost_user_store_path`: `block` is used for block
device; `block/sockets` is where we expect UNIX domain sockets for vhost-user
block devices to live; `block/devices` is where simulated block device nodes
for vhost-user block devices are created.

For the subdirectories of `vhost_user_store_path`:
- `block` is used for block device;
- `block/sockets` is where we expect UNIX domain sockets for vhost-user

src/agent/Cargo.lock (generated; 3,951 lines changed. File diff suppressed because it is too large.)
@@ -23,6 +23,8 @@ regex = "1.5.6"
serial_test = "0.5.1"
kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
sysinfo = "0.23.0"
url = "2.2.2"

# Async helpers
async-trait = "0.1.42"
@@ -30,7 +32,7 @@ async-recursion = "0.3.2"
futures = "0.3.17"

# Async runtime
tokio = { version = "1.14.0", features = ["full"] }
tokio = { version = "1.21.2", features = ["full"] }
tokio-vsock = "0.3.1"

netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
@@ -51,7 +53,7 @@ log = "0.4.11"
prometheus = { version = "0.13.0", features = ["process"] }
procfs = "0.12.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.3.2" }
cgroups = { package = "cgroups-rs", version = "0.2.10" }

# Tracing
tracing = "0.1.26"
@@ -65,10 +67,14 @@ serde = { version = "1.0.129", features = ["derive"] }
toml = "0.5.8"
clap = { version = "3.0.1", features = ["derive"] }

# Image pull/decrypt
image-rs = { git = "https://github.com/confidential-containers/image-rs", tag = "v0.2.0" }
# "vendored" feature for openssl is required by musl build
openssl = { version = "0.10.38", features = ["vendored"] }

[dev-dependencies]
tempfile = "3.1.0"
test-utils = { path = "../libs/test-utils" }
which = "4.3.0"

[workspace]
members = [

@@ -85,6 +85,14 @@ ifeq ($(INIT),no)
UNIT_FILES += kata-containers.target
endif

# The following will be reverted, after
# https://github.com/kata-containers/kata-containers/issues/5582
# is resolved.
IMAGE_RS_COMMIT = a1d7ba31201d9d7a575d05c5fed1f2cb2142a842
ifeq ($(ARCH),s390x)
    $(shell sed -i -e "s/^\(image-rs.*\)tag\(.*\)/\1rev\2/" -e "s/^\(image-rs.*rev = \"\).*\(\".*\)/\1$(IMAGE_RS_COMMIT)\2/" Cargo.toml)
endif

# Display name of command and it's version (or a message if not available).
#
# Arguments:
@@ -107,8 +115,6 @@ endef
##TARGET default: build code
default: $(TARGET) show-header

static-checks-build: $(GENERATED_CODE)

$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)

$(TARGET_PATH): show-summary

@@ -11,7 +11,6 @@ serde_json = "1.0.39"
serde_derive = "1.0.91"
oci = { path = "../../libs/oci" }
protocols = { path ="../../libs/protocols" }
kata-sys-util = { path = "../../libs/kata-sys-util" }
caps = "0.5.0"
nix = "0.24.2"
scopeguard = "1.0.0"
@@ -25,7 +24,7 @@ scan_fmt = "0.2.6"
regex = "1.5.6"
path-absolutize = "1.2.0"
anyhow = "1.0.32"
cgroups = { package = "cgroups-rs", version = "0.3.2" }
cgroups = { package = "cgroups-rs", version = "0.2.10" }
rlimit = "0.5.3"
cfg-if = "0.1.0"

@@ -34,9 +33,6 @@ futures = "0.3.17"
async-trait = "0.1.31"
inotify = "0.9.2"
libseccomp = { version = "0.3.0", optional = true }
zbus = "2.3.0"
bit-vec= "0.6.3"
xattr = "0.2.3"

[dev-dependencies]
serial_test = "0.5.0"

@@ -32,7 +32,6 @@ use protocols::agent::{
    BlkioStats, BlkioStatsEntry, CgroupStats, CpuStats, CpuUsage, HugetlbStats, MemoryData,
    MemoryStats, PidsStats, ThrottlingData,
};
use std::any::Any;
use std::collections::HashMap;
use std::fs;
use std::path::Path;

@@ -76,7 +75,7 @@ macro_rules! set_resource {

impl CgroupManager for Manager {
    fn apply(&self, pid: pid_t) -> Result<()> {
        self.cgroup.add_task_by_tgid(CgroupPid::from(pid as u64))?;
        self.cgroup.add_task(CgroupPid::from(pid as u64))?;
        Ok(())
    }

@@ -194,83 +193,6 @@ impl CgroupManager for Manager {

        Ok(result)
    }

    fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
        if guest_cpuset.is_empty() {
            return Ok(());
        }
        info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);

        let h = cgroups::hierarchies::auto();
        let root_cg = h.root_control_group();

        let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
        let path = root_cpuset_controller.path();
        let root_path = Path::new(path);
        info!(sl!(), "root cpuset path: {:?}", &path);

        let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
        let path = container_cpuset_controller.path();
        let container_path = Path::new(path);
        info!(sl!(), "container cpuset path: {:?}", &path);

        let mut paths = vec![];
        for ancestor in container_path.ancestors() {
            if ancestor == root_path {
                break;
            }
            paths.push(ancestor);
        }
        info!(sl!(), "parent paths to update cpuset: {:?}", &paths);

        let mut i = paths.len();
        loop {
            if i == 0 {
                break;
            }
            i -= 1;

            // remove cgroup root from path
            let r_path = &paths[i]
                .to_str()
                .unwrap()
                .trim_start_matches(root_path.to_str().unwrap());
            info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
            let cg = new_cgroup(cgroups::hierarchies::auto(), r_path)?;
            let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
            cpuset_controller.set_cpus(guest_cpuset)?;
        }

        if !container_cpuset.is_empty() {
            info!(
                sl!(),
                "updating cpuset for container path: {:?} cpuset: {}",
                &container_path,
                container_cpuset
            );
            container_cpuset_controller.set_cpus(container_cpuset)?;
        }

        Ok(())
    }

    fn get_cgroup_path(&self, cg: &str) -> Result<String> {
        if cgroups::hierarchies::is_cgroup2_unified_mode() {
            let cg_path = format!("/sys/fs/cgroup/{}", self.cpath);
            return Ok(cg_path);
        }

        // for cgroup v1
        Ok(self.paths.get(cg).map(|s| s.to_string()).unwrap())
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Ok(self)
    }

    fn name(&self) -> &str {
        "cgroupfs"
    }
}

fn set_network_resources(
@@ -545,8 +467,11 @@ fn linux_device_to_cgroup_device(d: &LinuxDevice) -> Option<DeviceResource> {
}

fn linux_device_group_to_cgroup_device(d: &LinuxDeviceCgroup) -> Option<DeviceResource> {
    let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
        Some(t) => t,
    let dev_type = match &d.r#type {
        Some(t_s) => match DeviceType::from_char(t_s.chars().next()) {
            Some(t_c) => t_c,
            None => return None,
        },
        None => return None,
    };

@@ -603,7 +528,7 @@ lazy_static! {
        // all mknod to all char devices
        LinuxDeviceCgroup {
            allow: true,
            r#type: "c".to_string(),
            r#type: Some("c".to_string()),
            major: Some(WILDCARD),
            minor: Some(WILDCARD),
            access: "m".to_string(),
@@ -612,7 +537,7 @@ lazy_static! {
        // all mknod to all block devices
        LinuxDeviceCgroup {
            allow: true,
            r#type: "b".to_string(),
            r#type: Some("b".to_string()),
            major: Some(WILDCARD),
            minor: Some(WILDCARD),
            access: "m".to_string(),
@@ -621,7 +546,7 @@ lazy_static! {
        // all read/write/mknod to char device /dev/console
        LinuxDeviceCgroup {
            allow: true,
            r#type: "c".to_string(),
            r#type: Some("c".to_string()),
            major: Some(5),
            minor: Some(1),
            access: "rwm".to_string(),
@@ -630,7 +555,7 @@ lazy_static! {
        // all read/write/mknod to char device /dev/pts/<N>
        LinuxDeviceCgroup {
            allow: true,
            r#type: "c".to_string(),
            r#type: Some("c".to_string()),
            major: Some(136),
            minor: Some(WILDCARD),
            access: "rwm".to_string(),
@@ -639,7 +564,7 @@ lazy_static! {
        // all read/write/mknod to char device /dev/ptmx
        LinuxDeviceCgroup {
            allow: true,
            r#type: "c".to_string(),
            r#type: Some("c".to_string()),
            major: Some(5),
            minor: Some(2),
            access: "rwm".to_string(),
@@ -648,7 +573,7 @@ lazy_static! {
        // all read/write/mknod to char device /dev/net/tun
        LinuxDeviceCgroup {
            allow: true,
            r#type: "c".to_string(),
            r#type: Some("c".to_string()),
            major: Some(10),
            minor: Some(200),
            access: "rwm".to_string(),
@@ -695,6 +620,17 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
        });
    }

    if cg.v2() {
        return SingularPtrField::some(CpuUsage {
            total_usage: 0,
            percpu_usage: vec![],
            usage_in_kernelmode: 0,
            usage_in_usermode: 0,
            unknown_fields: UnknownFields::default(),
            cached_size: CachedSize::default(),
        });
    }

    // try to get from cpu controller
    let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
    let stat = cpu_controller.cpu().stat;
@@ -725,7 +661,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
    let value = memory.use_hierarchy;
    let use_hierarchy = value == 1;

    // get memory data
    // gte memory datas
    let usage = SingularPtrField::some(MemoryData {
        usage: memory.usage_in_bytes,
        max_usage: memory.max_usage_in_bytes,
@@ -1016,9 +952,9 @@ pub fn get_mounts(paths: &HashMap<String, String>) -> Result<HashMap<String, Str
    Ok(m)
}

fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Result<Cgroup> {
fn new_cgroup(h: Box<dyn cgroups::Hierarchy>, path: &str) -> Cgroup {
    let valid_path = path.trim_start_matches('/').to_string();
    cgroups::Cgroup::new(h, valid_path.as_str()).map_err(anyhow::Error::from)
    cgroups::Cgroup::new(h, valid_path.as_str())
}

impl Manager {
@@ -1040,16 +976,83 @@ impl Manager {
            m.insert(key.to_string(), p);
        }

        let cg = new_cgroup(cgroups::hierarchies::auto(), cpath)?;

        Ok(Self {
            paths: m,
            mounts,
            // rels: paths,
            cpath: cpath.to_string(),
            cgroup: cg,
            cgroup: new_cgroup(cgroups::hierarchies::auto(), cpath),
        })
    }

    pub fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
        if guest_cpuset.is_empty() {
            return Ok(());
        }
        info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);

        let h = cgroups::hierarchies::auto();
        let root_cg = h.root_control_group();

        let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
        let path = root_cpuset_controller.path();
        let root_path = Path::new(path);
        info!(sl!(), "root cpuset path: {:?}", &path);

        let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
        let path = container_cpuset_controller.path();
        let container_path = Path::new(path);
        info!(sl!(), "container cpuset path: {:?}", &path);

        let mut paths = vec![];
        for ancestor in container_path.ancestors() {
            if ancestor == root_path {
                break;
            }
            paths.push(ancestor);
        }
        info!(sl!(), "parent paths to update cpuset: {:?}", &paths);

        let mut i = paths.len();
        loop {
            if i == 0 {
                break;
            }
            i -= 1;

            // remove cgroup root from path
            let r_path = &paths[i]
                .to_str()
                .unwrap()
                .trim_start_matches(root_path.to_str().unwrap());
            info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
            let cg = new_cgroup(cgroups::hierarchies::auto(), r_path);
            let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
            cpuset_controller.set_cpus(guest_cpuset)?;
        }

        if !container_cpuset.is_empty() {
            info!(
                sl!(),
                "updating cpuset for container path: {:?} cpuset: {}",
                &container_path,
                container_cpuset
            );
            container_cpuset_controller.set_cpus(container_cpuset)?;
        }

        Ok(())
    }

    pub fn get_cg_path(&self, cg: &str) -> Option<String> {
        if cgroups::hierarchies::is_cgroup2_unified_mode() {
            let cg_path = format!("/sys/fs/cgroup/{}", self.cpath);
            return Some(cg_path);
        }

        // for cgroup v1
        self.paths.get(cg).map(|s| s.to_string())
    }
}

// get the guest's online cpus.

@@ -11,7 +11,6 @@ use anyhow::Result;
use cgroups::freezer::FreezerState;
use libc::{self, pid_t};
use oci::LinuxResources;
use std::any::Any;
use std::collections::HashMap;
use std::string::String;

@@ -54,22 +53,6 @@ impl CgroupManager for Manager {
    fn get_pids(&self) -> Result<Vec<pid_t>> {
        Ok(Vec::new())
    }

    fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
        Ok(())
    }

    fn get_cgroup_path(&self, _: &str) -> Result<String> {
        Ok("".to_string())
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Ok(self)
    }

    fn name(&self) -> &str {
        "mock"
    }
}

impl Manager {
@@ -80,4 +63,12 @@ impl Manager {
            cpath: cpath.to_string(),
        })
    }

    pub fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
        Ok(())
    }

    pub fn get_cg_path(&self, _: &str) -> Option<String> {
        Some("".to_string())
    }
}

@@ -4,10 +4,8 @@
//

use anyhow::{anyhow, Result};
use core::fmt::Debug;
use oci::LinuxResources;
use protocols::agent::CgroupStats;
use std::any::Any;

use cgroups::freezer::FreezerState;

@@ -40,24 +38,4 @@ pub trait Manager {
    fn set(&self, _container: &LinuxResources, _update: bool) -> Result<()> {
        Err(anyhow!("not supported!"))
    }

    fn update_cpuset_path(&self, _: &str, _: &str) -> Result<()> {
        Err(anyhow!("not supported!"))
    }

    fn get_cgroup_path(&self, _: &str) -> Result<String> {
        Err(anyhow!("not supported!"))
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Err(anyhow!("not supported!"))
    }

    fn name(&self) -> &str;
}

impl Debug for dyn Manager + Send + Sync {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "{}", self.name())
    }
}

src/agent/rustjail/src/cgroups/systemd.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::cgroups::Manager as CgroupManager;

pub struct Manager {}

impl CgroupManager for Manager {}

@@ -1,95 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::{anyhow, Result};

use super::common::{DEFAULT_SLICE, SCOPE_SUFFIX, SLICE_SUFFIX};
use std::string::String;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CgroupsPath {
    pub slice: String,
    pub prefix: String,
    pub name: String,
}

impl CgroupsPath {
    pub fn new(cgroups_path_str: &str) -> Result<Self> {
        let path_vec: Vec<&str> = cgroups_path_str.split(':').collect();
        if path_vec.len() != 3 {
            return Err(anyhow!("invalid cpath: {:?}", cgroups_path_str));
        }

        Ok(CgroupsPath {
            slice: if path_vec[0].is_empty() {
                DEFAULT_SLICE.to_string()
            } else {
                path_vec[0].to_owned()
            },
            prefix: path_vec[1].to_owned(),
            name: path_vec[2].to_owned(),
        })
    }

    // ref: https://github.com/opencontainers/runc/blob/main/docs/systemd.md
    // return: (parent_slice, unit_name)
    pub fn parse(&self) -> Result<(String, String)> {
        Ok((
            parse_parent(self.slice.to_owned())?,
            get_unit_name(self.prefix.to_owned(), self.name.to_owned()),
        ))
    }
}

fn parse_parent(slice: String) -> Result<String> {
    if !slice.ends_with(SLICE_SUFFIX) || slice.contains('/') {
        return Err(anyhow!("invalid slice name: {}", slice));
    } else if slice == "-.slice" {
        return Ok(String::new());
    }

    let mut slice_path = String::new();
    let mut prefix = String::new();
    for subslice in slice.trim_end_matches(SLICE_SUFFIX).split('-') {
        if subslice.is_empty() {
            return Err(anyhow!("invalid slice name: {}", slice));
        }
        slice_path = format!("{}/{}{}{}", slice_path, prefix, subslice, SLICE_SUFFIX);
        prefix = format!("{}{}-", prefix, subslice);
    }
    slice_path.remove(0);
    Ok(slice_path)
}

fn get_unit_name(prefix: String, name: String) -> String {
    if name.ends_with(SLICE_SUFFIX) {
        name
    } else if prefix.is_empty() {
        format!("{}{}", name, SCOPE_SUFFIX)
    } else {
        format!("{}-{}{}", prefix, name, SCOPE_SUFFIX)
    }
}

#[cfg(test)]
mod tests {
    use super::CgroupsPath;

    #[test]
    fn test_cgroup_path_parse() {
        let slice = "system.slice";
        let prefix = "kata_agent";
        let name = "123";
        let cgroups_path =
            CgroupsPath::new(format!("{}:{}:{}", slice, prefix, name).as_str()).unwrap();
        assert_eq!(slice, cgroups_path.slice.as_str());
        assert_eq!(prefix, cgroups_path.prefix.as_str());
        assert_eq!(name, cgroups_path.name.as_str());

        let (parent_slice, unit_name) = cgroups_path.parse().unwrap();
        assert_eq!(format!("{}", slice), parent_slice);
        assert_eq!(format!("{}-{}.scope", prefix, name), unit_name);
    }
}

@@ -1,17 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

pub const DEFAULT_SLICE: &str = "system.slice";
pub const SLICE_SUFFIX: &str = ".slice";
pub const SCOPE_SUFFIX: &str = ".scope";
pub const UNIT_MODE: &str = "replace";

pub type Properties<'a> = Vec<(&'a str, zbus::zvariant::Value<'a>)>;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum CgroupHierarchy {
    Legacy,
    Unified,
}

@@ -1,126 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

use std::vec;

use super::common::CgroupHierarchy;
use super::common::{Properties, SLICE_SUFFIX, UNIT_MODE};
use super::interface::system::ManagerProxyBlocking as SystemManager;
use anyhow::{Context, Result};
use zbus::zvariant::Value;

pub trait SystemdInterface {
    fn start_unit(
        &self,
        pid: i32,
        parent: &str,
        unit_name: &str,
        cg_hierarchy: &CgroupHierarchy,
    ) -> Result<()>;

    fn set_properties(&self, unit_name: &str, properties: &Properties) -> Result<()>;

    fn stop_unit(&self, unit_name: &str) -> Result<()>;

    fn get_version(&self) -> Result<String>;

    fn unit_exist(&self, unit_name: &str) -> Result<bool>;

    fn add_process(&self, pid: i32, unit_name: &str) -> Result<()>;
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DBusClient {}

impl DBusClient {
    fn build_proxy(&self) -> Result<SystemManager<'static>> {
        let connection = zbus::blocking::Connection::system()?;
        let proxy = SystemManager::new(&connection)?;
        Ok(proxy)
    }
}

impl SystemdInterface for DBusClient {
    fn start_unit(
        &self,
        pid: i32,
        parent: &str,
        unit_name: &str,
        cg_hierarchy: &CgroupHierarchy,
    ) -> Result<()> {
        let proxy = self.build_proxy()?;

        // enable CPUAccounting & MemoryAccounting & (Block)IOAccounting by default
        let mut properties: Properties = vec![
            ("CPUAccounting", Value::Bool(true)),
            ("DefaultDependencies", Value::Bool(false)),
            ("MemoryAccounting", Value::Bool(true)),
            ("TasksAccounting", Value::Bool(true)),
            ("Description", Value::Str("kata-agent container".into())),
            ("PIDs", Value::Array(vec![pid as u32].into())),
        ];

        match *cg_hierarchy {
            CgroupHierarchy::Legacy => properties.push(("IOAccounting", Value::Bool(true))),
            CgroupHierarchy::Unified => properties.push(("BlockIOAccounting", Value::Bool(true))),
        }

        if unit_name.ends_with(SLICE_SUFFIX) {
            properties.push(("Wants", Value::Str(parent.into())));
        } else {
            properties.push(("Slice", Value::Str(parent.into())));
            properties.push(("Delegate", Value::Bool(true)));
        }

        proxy
            .start_transient_unit(unit_name, UNIT_MODE, &properties, &[])
            .with_context(|| format!("failed to start transient unit {}", unit_name))?;
        Ok(())
    }

    fn set_properties(&self, unit_name: &str, properties: &Properties) -> Result<()> {
        let proxy = self.build_proxy()?;

        proxy
            .set_unit_properties(unit_name, true, properties)
            .with_context(|| format!("failed to set unit properties {}", unit_name))?;

        Ok(())
    }

    fn stop_unit(&self, unit_name: &str) -> Result<()> {
        let proxy = self.build_proxy()?;

        proxy
            .stop_unit(unit_name, UNIT_MODE)
            .with_context(|| format!("failed to stop unit {}", unit_name))?;
        Ok(())
    }

    fn get_version(&self) -> Result<String> {
        let proxy = self.build_proxy()?;

        let systemd_version = proxy
            .version()
            .with_context(|| "failed to get systemd version".to_string())?;
        Ok(systemd_version)
    }

    fn unit_exist(&self, unit_name: &str) -> Result<bool> {
        let proxy = self.build_proxy()?;

        Ok(proxy.get_unit(unit_name).is_ok())
    }

    fn add_process(&self, pid: i32, unit_name: &str) -> Result<()> {
        let proxy = self.build_proxy()?;

        proxy
            .attach_processes_to_unit(unit_name, "/", &[pid as u32])
            .with_context(|| format!("failed to add process {}", unit_name))?;

        Ok(())
    }
}

@@ -1,7 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

pub(crate) mod session;
pub(crate) mod system;

(two further file diffs suppressed because they are too large)
@@ -1,133 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::cgroups::Manager as CgroupManager;
use crate::protocols::agent::CgroupStats;
use anyhow::Result;
use cgroups::freezer::FreezerState;
use libc::{self, pid_t};
use oci::LinuxResources;
use std::any::Any;
use std::collections::HashMap;
use std::convert::TryInto;
use std::string::String;
use std::vec;

use super::super::fs::Manager as FsManager;

use super::cgroups_path::CgroupsPath;
use super::common::{CgroupHierarchy, Properties};
use super::dbus_client::{DBusClient, SystemdInterface};
use super::subsystem::transformer::Transformer;
use super::subsystem::{cpu::Cpu, cpuset::CpuSet, memory::Memory, pids::Pids};

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Manager {
    pub paths: HashMap<String, String>,
    pub mounts: HashMap<String, String>,
    pub cgroups_path: CgroupsPath,
    pub cpath: String,
    pub unit_name: String,
    // dbus client for set properties
    dbus_client: DBusClient,
    // fs manager for get properties
    fs_manager: FsManager,
    // cgroup version for different dbus properties
    cg_hierarchy: CgroupHierarchy,
}

impl CgroupManager for Manager {
    fn apply(&self, pid: pid_t) -> Result<()> {
        let unit_name = self.unit_name.as_str();
        if self.dbus_client.unit_exist(unit_name).unwrap() {
            self.dbus_client.add_process(pid, self.unit_name.as_str())?;
        } else {
            self.dbus_client.start_unit(
                (pid as u32).try_into().unwrap(),
                self.cgroups_path.slice.as_str(),
                self.unit_name.as_str(),
                &self.cg_hierarchy,
            )?;
        }

        Ok(())
    }

    fn set(&self, r: &LinuxResources, _: bool) -> Result<()> {
        let mut properties: Properties = vec![];

        let systemd_version = self.dbus_client.get_version()?;
        let systemd_version_str = systemd_version.as_str();

        Cpu::apply(r, &mut properties, &self.cg_hierarchy, systemd_version_str)?;
        Memory::apply(r, &mut properties, &self.cg_hierarchy, systemd_version_str)?;
        Pids::apply(r, &mut properties, &self.cg_hierarchy, systemd_version_str)?;
        CpuSet::apply(r, &mut properties, &self.cg_hierarchy, systemd_version_str)?;

        self.dbus_client
            .set_properties(self.unit_name.as_str(), &properties)?;

        Ok(())
    }

    fn get_stats(&self) -> Result<CgroupStats> {
        self.fs_manager.get_stats()
    }

    fn freeze(&self, state: FreezerState) -> Result<()> {
        self.fs_manager.freeze(state)
    }

    fn destroy(&mut self) -> Result<()> {
        self.dbus_client.stop_unit(self.unit_name.as_str())?;
        self.fs_manager.destroy()
    }

    fn get_pids(&self) -> Result<Vec<pid_t>> {
        self.fs_manager.get_pids()
    }

    fn update_cpuset_path(&self, guest_cpuset: &str, container_cpuset: &str) -> Result<()> {
        self.fs_manager
            .update_cpuset_path(guest_cpuset, container_cpuset)
    }

    fn get_cgroup_path(&self, cg: &str) -> Result<String> {
        self.fs_manager.get_cgroup_path(cg)
    }

    fn as_any(&self) -> Result<&dyn Any> {
        Ok(self)
    }

    fn name(&self) -> &str {
        "systemd"
    }
}

impl Manager {
    pub fn new(cgroups_path_str: &str) -> Result<Self> {
        let cgroups_path = CgroupsPath::new(cgroups_path_str)?;
        let (parent_slice, unit_name) = cgroups_path.parse()?;
        let cpath = parent_slice + "/" + &unit_name;

        let fs_manager = FsManager::new(cpath.as_str())?;

        Ok(Manager {
            paths: fs_manager.paths.clone(),
            mounts: fs_manager.mounts.clone(),
            cgroups_path,
            cpath,
            unit_name,
            dbus_client: DBusClient {},
            fs_manager,
            cg_hierarchy: if cgroups::hierarchies::is_cgroup2_unified_mode() {
                CgroupHierarchy::Unified
            } else {
                CgroupHierarchy::Legacy
            },
        })
    }
}

@@ -1,12 +0,0 @@
// Copyright 2021-2022 Kata Contributors
//
// SPDX-License-Identifier: Apache-2.0
//

pub mod manager;

mod cgroups_path;
mod common;
mod dbus_client;
mod interface;
mod subsystem;

@@ -1,139 +0,0 @@
|
||||
// Copyright 2021-2022 Kata Contributors
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use super::super::common::{CgroupHierarchy, Properties};
|
||||
use super::transformer::Transformer;
|
||||
|
||||
use anyhow::Result;
|
||||
use oci::{LinuxCpu, LinuxResources};
|
||||
use zbus::zvariant::Value;
|
||||
|
||||
const BASIC_SYSTEMD_VERSION: &str = "242";
|
||||
const DEFAULT_CPUQUOTAPERIOD: u64 = 100 * 1000;
|
||||
const SEC2MICROSEC: u64 = 1000 * 1000;
|
||||
const BASIC_INTERVAL: u64 = 10 * 1000;
|
||||
|
||||
pub struct Cpu {}
|
||||
|
||||
impl Transformer for Cpu {
|
||||
fn apply(
|
||||
r: &LinuxResources,
|
||||
properties: &mut Properties,
|
||||
cgroup_hierarchy: &CgroupHierarchy,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(cpu_resources) = &r.cpu {
|
||||
match cgroup_hierarchy {
|
||||
CgroupHierarchy::Legacy => {
|
||||
Self::legacy_apply(cpu_resources, properties, systemd_version)?
|
||||
}
|
||||
CgroupHierarchy::Unified => {
|
||||
Self::unified_apply(cpu_resources, properties, systemd_version)?
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Cpu {
|
||||
// v1:
|
||||
// cpu.shares <-> CPUShares
|
||||
// cpu.period <-> CPUQuotaPeriodUSec
|
||||
// cpu.period & cpu.quota <-> CPUQuotaPerSecUSec
|
||||
fn legacy_apply(
|
||||
cpu_resources: &LinuxCpu,
|
||||
properties: &mut Properties,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(shares) = cpu_resources.shares {
|
||||
properties.push(("CPUShares", Value::U64(shares)));
|
||||
}
|
||||
|
||||
if let Some(period) = cpu_resources.period {
|
||||
if period != 0 && systemd_version >= BASIC_SYSTEMD_VERSION {
|
||||
properties.push(("CPUQuotaPeriodUSec", Value::U64(period)));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(quota) = cpu_resources.quota {
|
||||
let period = cpu_resources.period.unwrap_or(DEFAULT_CPUQUOTAPERIOD);
|
||||
if period != 0 {
|
||||
let cpu_quota_per_sec_usec = resolve_cpuquota(quota, period);
|
||||
properties.push(("CPUQuotaPerSecUSec", Value::U64(cpu_quota_per_sec_usec)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// v2:
|
||||
// cpu.shares <-> CPUShares
|
||||
// cpu.period <-> CPUQuotaPeriodUSec
|
||||
// cpu.period & cpu.quota <-> CPUQuotaPerSecUSec
|
||||
fn unified_apply(
|
||||
cpu_resources: &LinuxCpu,
|
||||
properties: &mut Properties,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(shares) = cpu_resources.shares {
|
||||
let unified_shares = get_unified_cpushares(shares);
|
||||
properties.push(("CPUShares", Value::U64(unified_shares)));
|
||||
}
|
||||
|
||||
if let Some(period) = cpu_resources.period {
|
||||
if period != 0 && systemd_version >= BASIC_SYSTEMD_VERSION {
|
||||
properties.push(("CPUQuotaPeriodUSec", Value::U64(period)));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(quota) = cpu_resources.quota {
|
||||
let period = cpu_resources.period.unwrap_or(DEFAULT_CPUQUOTAPERIOD);
|
||||
if period != 0 {
|
||||
let cpu_quota_per_sec_usec = resolve_cpuquota(quota, period);
|
||||
properties.push(("CPUQuotaPerSecUSec", Value::U64(cpu_quota_per_sec_usec)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ref: https://github.com/containers/crun/blob/main/crun.1.md#cgroup-v2
|
||||
// [2-262144] to [1-10000]
|
||||
fn get_unified_cpushares(shares: u64) -> u64 {
|
||||
if shares == 0 {
|
||||
return 100;
|
||||
}
|
||||
|
||||
1 + ((shares - 2) * 9999) / 262142
|
||||
}
|
||||
|
||||
fn resolve_cpuquota(quota: i64, period: u64) -> u64 {
|
||||
let mut cpu_quota_per_sec_usec = u64::MAX;
|
||||
if quota > 0 {
|
||||
cpu_quota_per_sec_usec = (quota as u64) * SEC2MICROSEC / period;
|
||||
if cpu_quota_per_sec_usec % BASIC_INTERVAL != 0 {
|
||||
cpu_quota_per_sec_usec =
|
||||
((cpu_quota_per_sec_usec / BASIC_INTERVAL) + 1) * BASIC_INTERVAL;
|
||||
}
|
||||
}
|
||||
cpu_quota_per_sec_usec
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::cgroups::systemd::subsystem::cpu::resolve_cpuquota;
|
||||
|
||||
#[test]
|
||||
fn test_unified_cpuquota() {
|
||||
let quota: i64 = 1000000;
|
||||
let period: u64 = 500000;
|
||||
let cpu_quota_per_sec_usec = resolve_cpuquota(quota, period);
|
||||
|
||||
assert_eq!(2000000, cpu_quota_per_sec_usec);
|
||||
}
|
||||
}
|
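The deleted cpu subsystem above converts an OCI quota/period pair into systemd's `CPUQuotaPerSecUSec`, rounding up to the 10ms granularity systemd uses. A self-contained sketch of that arithmetic follows; the constants mirror the ones above, and the values in `main` are hypothetical, chosen to show the round-up:

```rust
// Standalone sketch of the rounding in resolve_cpuquota (example values are hypothetical).
const SEC2MICROSEC: u64 = 1000 * 1000;
const BASIC_INTERVAL: u64 = 10 * 1000;

fn resolve_cpuquota(quota: i64, period: u64) -> u64 {
    let mut per_sec = u64::MAX; // quota <= 0 means "unlimited"
    if quota > 0 {
        per_sec = (quota as u64) * SEC2MICROSEC / period;
        // Round up to the next multiple of BASIC_INTERVAL (10ms).
        if per_sec % BASIC_INTERVAL != 0 {
            per_sec = ((per_sec / BASIC_INTERVAL) + 1) * BASIC_INTERVAL;
        }
    }
    per_sec
}

fn main() {
    // 10500us quota per 100000us period -> 105000us/s, rounded up to 110000.
    assert_eq!(resolve_cpuquota(10_500, 100_000), 110_000);
    // Negative quota -> unlimited.
    assert_eq!(resolve_cpuquota(-1, 100_000), u64::MAX);
}
```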
@@ -1,124 +0,0 @@
-// Copyright 2021-2022 Kata Contributors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-use super::super::common::{CgroupHierarchy, Properties};
-
-use super::transformer::Transformer;
-
-use anyhow::{bail, Result};
-use bit_vec::BitVec;
-use oci::{LinuxCpu, LinuxResources};
-use std::convert::{TryFrom, TryInto};
-use zbus::zvariant::Value;
-
-const BASIC_SYSTEMD_VERSION: &str = "244";
-
-pub struct CpuSet {}
-
-impl Transformer for CpuSet {
-    fn apply(
-        r: &LinuxResources,
-        properties: &mut Properties,
-        _: &CgroupHierarchy,
-        systemd_version: &str,
-    ) -> Result<()> {
-        if let Some(cpuset_resources) = &r.cpu {
-            Self::apply(cpuset_resources, properties, systemd_version)?;
-        }
-
-        Ok(())
-    }
-}
-
-// v1 & v2:
-// cpuset.cpus <-> AllowedCPUs (v244)
-// cpuset.mems <-> AllowedMemoryNodes (v244)
-impl CpuSet {
-    fn apply(
-        cpuset_resources: &LinuxCpu,
-        properties: &mut Properties,
-        systemd_version: &str,
-    ) -> Result<()> {
-        if systemd_version < BASIC_SYSTEMD_VERSION {
-            return Ok(());
-        }
-
-        let cpus = cpuset_resources.cpus.as_str();
-        if !cpus.is_empty() {
-            let cpus_vec: BitMask = cpus.try_into()?;
-            properties.push(("AllowedCPUs", Value::Array(cpus_vec.0.into())));
-        }
-
-        let mems = cpuset_resources.mems.as_str();
-        if !mems.is_empty() {
-            let mems_vec: BitMask = mems.try_into()?;
-            properties.push(("AllowedMemoryNodes", Value::Array(mems_vec.0.into())));
-        }
-
-        Ok(())
-    }
-}
-
-struct BitMask(Vec<u8>);
-
-impl TryFrom<&str> for BitMask {
-    type Error = anyhow::Error;
-
-    fn try_from(bitmask_str: &str) -> Result<Self, Self::Error> {
-        let mut bitmask_vec = BitVec::from_elem(8, false);
-        let bitmask_str_vec: Vec<&str> = bitmask_str.split(',').collect();
-        for bitmask in bitmask_str_vec.iter() {
-            let range: Vec<&str> = bitmask.split('-').collect();
-            match range.len() {
-                1 => {
-                    let idx: usize = range[0].parse()?;
-                    while idx >= bitmask_vec.len() {
-                        bitmask_vec.grow(8, false);
-                    }
-                    bitmask_vec.set(adjust_index(idx), true);
-                }
-                2 => {
-                    let left_index = range[0].parse()?;
-                    let right_index = range[1].parse()?;
-                    while right_index >= bitmask_vec.len() {
-                        bitmask_vec.grow(8, false);
-                    }
-                    for idx in left_index..=right_index {
-                        bitmask_vec.set(adjust_index(idx), true);
-                    }
-                }
-                _ => bail!("invalid bitmask str {}", bitmask_str),
-            }
-        }
-        let mut result_vec = bitmask_vec.to_bytes();
-        result_vec.reverse();
-
-        Ok(BitMask(result_vec))
-    }
-}
-
-#[inline(always)]
-fn adjust_index(idx: usize) -> usize {
-    idx / 8 * 8 + 7 - idx % 8
-}
-
-#[cfg(test)]
-mod tests {
-    use std::convert::TryInto;
-
-    use crate::cgroups::systemd::subsystem::cpuset::BitMask;
-
-    #[test]
-    fn test_bitmask_conversion() {
-        let cpus_vec: BitMask = "2-4".try_into().unwrap();
-        assert_eq!(vec![0b11100 as u8], cpus_vec.0);
-
-        let cpus_vec: BitMask = "1,7".try_into().unwrap();
-        assert_eq!(vec![0b10000010 as u8], cpus_vec.0);
-
-        let cpus_vec: BitMask = "0,2-3,7".try_into().unwrap();
-        assert_eq!(vec![0b10001101 as u8], cpus_vec.0);
-    }
-}
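The `BitMask` conversion above turns a kernel-style cpulist ("0,2-3,7") into the byte mask systemd expects for `AllowedCPUs`. A minimal sketch of the same parsing without the `bit-vec` crate (the packing semantics — bit i of CPU i in byte i/8, LSB-first, highest byte first — are inferred from the tests above):

```rust
// Cpulist -> bitmask sketch mirroring BitMask::try_from's observable behavior.
fn cpulist_to_mask(list: &str) -> Result<Vec<u8>, std::num::ParseIntError> {
    let mut bits: Vec<bool> = Vec::new();
    for part in list.split(',') {
        let mut ends = part.splitn(2, '-');
        let lo: usize = ends.next().unwrap().parse()?;
        let hi: usize = match ends.next() {
            Some(s) => s.parse()?,
            None => lo, // single index, e.g. "7"
        };
        if hi >= bits.len() {
            bits.resize(hi + 1, false);
        }
        for i in lo..=hi {
            bits[i] = true;
        }
    }
    // Pack CPU i into byte i/8, bit i%8 (LSB-first), then most-significant byte first.
    let nbytes = (bits.len() + 7) / 8;
    let mut mask = vec![0u8; nbytes];
    for (i, &b) in bits.iter().enumerate() {
        if b {
            mask[i / 8] |= 1 << (i % 8);
        }
    }
    mask.reverse();
    Ok(mask)
}

fn main() {
    assert_eq!(cpulist_to_mask("2-4").unwrap(), vec![0b0001_1100]);
    assert_eq!(cpulist_to_mask("0,2-3,7").unwrap(), vec![0b1000_1101]);
}
```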
@@ -1,117 +0,0 @@
-// Copyright 2021-2022 Kata Contributors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-use super::super::common::{CgroupHierarchy, Properties};
-
-use super::transformer::Transformer;
-
-use anyhow::{bail, Result};
-use oci::{LinuxMemory, LinuxResources};
-use zbus::zvariant::Value;
-
-pub struct Memory {}
-
-impl Transformer for Memory {
-    fn apply(
-        r: &LinuxResources,
-        properties: &mut Properties,
-        cgroup_hierarchy: &CgroupHierarchy,
-        _: &str,
-    ) -> Result<()> {
-        if let Some(memory_resources) = &r.memory {
-            match cgroup_hierarchy {
-                CgroupHierarchy::Legacy => Self::legacy_apply(memory_resources, properties)?,
-                CgroupHierarchy::Unified => Self::unified_apply(memory_resources, properties)?,
-            }
-        }
-
-        Ok(())
-    }
-}
-
-impl Memory {
-    // v1:
-    // memory.limit <-> MemoryLimit
-    fn legacy_apply(memory_resources: &LinuxMemory, properties: &mut Properties) -> Result<()> {
-        if let Some(limit) = memory_resources.limit {
-            let limit = match limit {
-                1..=i64::MAX => limit as u64,
-                0 => u64::MAX,
-                _ => bail!("invalid memory.limit"),
-            };
-            properties.push(("MemoryLimit", Value::U64(limit)));
-        }
-
-        Ok(())
-    }
-
-    // v2:
-    // memory.low <-> MemoryLow
-    // memory.max <-> MemoryMax
-    // memory.swap & memory.limit <-> MemorySwapMax
-    fn unified_apply(memory_resources: &LinuxMemory, properties: &mut Properties) -> Result<()> {
-        if let Some(limit) = memory_resources.limit {
-            let limit = match limit {
-                1..=i64::MAX => limit as u64,
-                0 => u64::MAX,
-                _ => bail!("invalid memory.limit: {}", limit),
-            };
-            properties.push(("MemoryMax", Value::U64(limit)));
-        }
-
-        if let Some(reservation) = memory_resources.reservation {
-            let reservation = match reservation {
-                1..=i64::MAX => reservation as u64,
-                0 => u64::MAX,
-                _ => bail!("invalid memory.reservation: {}", reservation),
-            };
-            properties.push(("MemoryLow", Value::U64(reservation)));
-        }
-
-        let swap = match memory_resources.swap {
-            Some(0) => u64::MAX,
-            Some(1..=i64::MAX) => match memory_resources.limit {
-                Some(1..=i64::MAX) => {
-                    (memory_resources.limit.unwrap() - memory_resources.swap.unwrap()) as u64
-                }
-                _ => bail!("invalid memory.limit when memory.swap specified"),
-            },
-            None => u64::MAX,
-            _ => bail!("invalid memory.swap"),
-        };
-
-        properties.push(("MemorySwapMax", Value::U64(swap)));
-
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::Memory;
-    use super::Properties;
-    use super::Value;
-
-    #[test]
-    fn test_unified_memory() {
-        let memory_resources = oci::LinuxMemory {
-            limit: Some(736870912),
-            reservation: Some(536870912),
-            swap: Some(536870912),
-            kernel: Some(0),
-            kernel_tcp: Some(0),
-            swappiness: Some(0),
-            disable_oom_killer: Some(false),
-        };
-        let mut properties: Properties = vec![];
-
-        assert_eq!(
-            true,
-            Memory::unified_apply(&memory_resources, &mut properties).is_ok()
-        );
-
-        assert_eq!(Value::U64(200000000), properties[2].1);
-    }
-}
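The `MemorySwapMax` derivation above is easiest to see with the test's values. A standalone sketch of the same match, with the semantics exactly as written in the deleted code (it computes `limit - swap`, as the test's 736870912 - 536870912 = 200000000 confirms), not as a statement of the OCI spec:

```rust
// Sketch of the MemorySwapMax derivation used above (semantics copied from the code).
fn memory_swap_max(limit: Option<i64>, swap: Option<i64>) -> Result<u64, String> {
    match swap {
        Some(0) | None => Ok(u64::MAX), // unset or zero means "unlimited"
        Some(s @ 1..=i64::MAX) => match limit {
            Some(l @ 1..=i64::MAX) => Ok((l - s) as u64),
            _ => Err("invalid memory.limit when memory.swap specified".into()),
        },
        _ => Err("invalid memory.swap".into()),
    }
}

fn main() {
    assert_eq!(memory_swap_max(Some(736870912), Some(536870912)), Ok(200000000));
    assert_eq!(memory_swap_max(Some(736870912), None), Ok(u64::MAX));
}
```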
@@ -1,10 +0,0 @@
-// Copyright 2021-2022 Kata Contributors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-pub mod cpu;
-pub mod cpuset;
-pub mod memory;
-pub mod pids;
-pub mod transformer;
@@ -1,60 +0,0 @@
-// Copyright 2021-2022 Kata Contributors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-use super::super::common::{CgroupHierarchy, Properties};
-
-use super::transformer::Transformer;
-
-use anyhow::Result;
-use oci::{LinuxPids, LinuxResources};
-use zbus::zvariant::Value;
-
-pub struct Pids {}
-
-impl Transformer for Pids {
-    fn apply(
-        r: &LinuxResources,
-        properties: &mut Properties,
-        _: &CgroupHierarchy,
-        _: &str,
-    ) -> Result<()> {
-        if let Some(pids_resources) = &r.pids {
-            Self::apply(pids_resources, properties)?;
-        }
-
-        Ok(())
-    }
-}
-
-// pids.limit <-> TasksMax
-impl Pids {
-    fn apply(pids_resources: &LinuxPids, properties: &mut Properties) -> Result<()> {
-        let limit = if pids_resources.limit > 0 {
-            pids_resources.limit as u64
-        } else {
-            u64::MAX
-        };
-
-        properties.push(("TasksMax", Value::U64(limit)));
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::Pids;
-    use super::Properties;
-    use super::Value;
-
-    #[test]
-    fn test_subsystem_workflow() {
-        let pids_resources = oci::LinuxPids { limit: 0 };
-        let mut properties: Properties = vec![];
-
-        assert_eq!(true, Pids::apply(&pids_resources, &mut properties).is_ok());
-
-        assert_eq!(Value::U64(u64::MAX), properties[0].1);
-    }
-}
@@ -1,17 +0,0 @@
-// Copyright 2021-2022 Kata Contributors
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-use super::super::common::{CgroupHierarchy, Properties};
-use anyhow::Result;
-use oci::LinuxResources;
-
-pub trait Transformer {
-    fn apply(
-        r: &LinuxResources,
-        properties: &mut Properties,
-        cgroup_hierarchy: &CgroupHierarchy,
-        systemd_version: &str,
-    ) -> Result<()>;
-}
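Each (now-deleted) subsystem above implemented this same `Transformer` trait, so a manager could fan one resource struct out across all subsystems into a single systemd property list. A self-contained sketch of that fan-out pattern, with simplified stand-in types rather than the real `Properties`/`LinuxResources`:

```rust
// Simplified fan-out sketch; the names and Properties alias are stand-ins.
type Properties = Vec<(&'static str, u64)>;

trait Transformer {
    fn apply(input: Option<u64>, properties: &mut Properties);
}

struct Cpu;
impl Transformer for Cpu {
    fn apply(shares: Option<u64>, properties: &mut Properties) {
        if let Some(s) = shares {
            properties.push(("CPUShares", s));
        }
    }
}

struct Pids;
impl Transformer for Pids {
    fn apply(limit: Option<u64>, properties: &mut Properties) {
        // 0/None means unlimited, mirroring the pids subsystem above.
        properties.push(("TasksMax", limit.unwrap_or(u64::MAX)));
    }
}

fn main() {
    // A manager would call each subsystem in turn against one property list.
    let mut properties: Properties = vec![];
    Cpu::apply(Some(1024), &mut properties);
    Pids::apply(None, &mut properties);
    assert_eq!(properties, vec![("CPUShares", 1024), ("TasksMax", u64::MAX)]);
}
```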
@@ -6,7 +6,7 @@
 use anyhow::{anyhow, Context, Result};
 use libc::pid_t;
 use oci::{ContainerState, LinuxDevice, LinuxIdMapping};
-use oci::{Linux, LinuxNamespace, LinuxResources, Spec};
+use oci::{Hook, Linux, LinuxNamespace, LinuxResources, Spec};
 use std::clone::Clone;
 use std::ffi::CString;
 use std::fmt::Display;
@@ -22,7 +22,6 @@ use crate::capabilities;
 use crate::cgroups::fs::Manager as FsManager;
 #[cfg(test)]
 use crate::cgroups::mock::Manager as FsManager;
-use crate::cgroups::systemd::manager::Manager as SystemdManager;
 use crate::cgroups::Manager;
 #[cfg(feature = "standard-oci-runtime")]
 use crate::console;
@@ -30,7 +29,6 @@ use crate::log_child;
 use crate::process::Process;
 #[cfg(feature = "seccomp")]
 use crate::seccomp;
-use crate::selinux;
 use crate::specconv::CreateOpts;
 use crate::{mount, validator};
 
@@ -51,7 +49,6 @@ use std::os::unix::io::AsRawFd;
 use protobuf::SingularPtrField;
 
 use oci::State as OCIState;
-use regex::Regex;
 use std::collections::HashMap;
 use std::os::unix::io::FromRawFd;
 use std::str::FromStr;
@@ -67,9 +64,6 @@ use rlimit::{setrlimit, Resource, Rlim};
 use tokio::io::AsyncBufReadExt;
 use tokio::sync::Mutex;
 
-use kata_sys_util::hooks::HookStates;
-use kata_sys_util::validate::valid_env;
-
 pub const EXEC_FIFO_FILENAME: &str = "exec.fifo";
 
 const INIT: &str = "INIT";
@@ -113,6 +107,7 @@ impl Default for ContainerStatus {
 }
 
 // We might want to change this to thiserror in the future
+const MissingCGroupManager: &str = "failed to get container's cgroup Manager";
 const MissingLinux: &str = "no linux config";
 const InvalidNamespace: &str = "invalid namespace type";
 
@@ -206,8 +201,6 @@ lazy_static! {
         },
     ]
     };
-
-    pub static ref SYSTEMD_CGROUP_PATH_FORMAT:Regex = Regex::new(r"^[\w\-.]*:[\w\-.]*:[\w\-.]*$").unwrap();
 }
 
 #[derive(Serialize, Deserialize, Debug)]
@@ -246,7 +239,7 @@ pub struct LinuxContainer {
     pub id: String,
     pub root: String,
    pub config: Config,
-    pub cgroup_manager: Box<dyn Manager + Send + Sync>,
+    pub cgroup_manager: Option<FsManager>,
     pub init_process_pid: pid_t,
     pub init_process_start_time: u64,
    pub uid_map_path: String,
@@ -295,11 +288,16 @@ impl Container for LinuxContainer {
             ));
         }
 
-        self.cgroup_manager.as_ref().freeze(FreezerState::Frozen)?;
+        if self.cgroup_manager.is_some() {
+            self.cgroup_manager
+                .as_ref()
+                .unwrap()
+                .freeze(FreezerState::Frozen)?;
 
-        self.status.transition(ContainerState::Paused);
-
-        Ok(())
+            self.status.transition(ContainerState::Paused);
+            return Ok(());
+        }
+        Err(anyhow!(MissingCGroupManager))
     }
 
     fn resume(&mut self) -> Result<()> {
@@ -308,11 +306,16 @@ impl Container for LinuxContainer {
            return Err(anyhow!("container status is: {:?}, not paused", status));
         }
 
-        self.cgroup_manager.as_ref().freeze(FreezerState::Thawed)?;
+        if self.cgroup_manager.is_some() {
+            self.cgroup_manager
+                .as_ref()
+                .unwrap()
+                .freeze(FreezerState::Thawed)?;
 
-        self.status.transition(ContainerState::Running);
-
-        Ok(())
+            self.status.transition(ContainerState::Running);
+            return Ok(());
+        }
+        Err(anyhow!(MissingCGroupManager))
    }
 }
 
@@ -387,9 +390,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
     let buf = read_sync(crfd)?;
     let cm_str = std::str::from_utf8(&buf)?;
 
-    // deserialize cm_str into FsManager and SystemdManager separately
-    let fs_cm: Result<FsManager, serde_json::Error> = serde_json::from_str(cm_str);
-    let systemd_cm: Result<SystemdManager, serde_json::Error> = serde_json::from_str(cm_str);
+    let cm: FsManager = serde_json::from_str(cm_str)?;
 
     #[cfg(feature = "standard-oci-runtime")]
     let csocket_fd = console::setup_console_socket(&std::env::var(CONSOLE_SOCKET_FD)?)?;
@@ -530,8 +531,6 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
         }
     }
 
-    let selinux_enabled = selinux::is_enabled()?;
-
    sched::unshare(to_new & !CloneFlags::CLONE_NEWUSER)?;
 
     if userns {
@@ -549,18 +548,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
 
     if to_new.contains(CloneFlags::CLONE_NEWNS) {
         // setup rootfs
-        if let Ok(systemd_cm) = systemd_cm {
-            mount::init_rootfs(
-                cfd_log,
-                &spec,
-                &systemd_cm.paths,
-                &systemd_cm.mounts,
-                bind_device,
-            )?;
-        } else {
-            let fs_cm = fs_cm.unwrap();
-            mount::init_rootfs(cfd_log, &spec, &fs_cm.paths, &fs_cm.mounts, bind_device)?;
-        }
+        mount::init_rootfs(cfd_log, &spec, &cm.paths, &cm.mounts, bind_device)?;
     }
 
     if init {
@@ -633,18 +621,6 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
         capctl::prctl::set_no_new_privs().map_err(|_| anyhow!("cannot set no new privileges"))?;
     }
 
-    // Set SELinux label
-    if !oci_process.selinux_label.is_empty() {
-        if !selinux_enabled {
-            return Err(anyhow!(
-                "SELinux label for the process is provided but SELinux is not enabled on the running kernel"
-            ));
-        }
-
-        log_child!(cfd_log, "Set SELinux label to the container process");
-        selinux::set_exec_label(&oci_process.selinux_label)?;
-    }
-
     // Log unknown seccomp system calls in advance before the log file descriptor closes.
     #[cfg(feature = "seccomp")]
     if let Some(ref scmp) = linux.seccomp {
@@ -854,17 +830,22 @@ impl BaseContainer for LinuxContainer {
     }
 
     fn stats(&self) -> Result<StatsContainerResponse> {
+        let mut r = StatsContainerResponse::default();
+
+        if self.cgroup_manager.is_some() {
+            r.cgroup_stats =
+                SingularPtrField::some(self.cgroup_manager.as_ref().unwrap().get_stats()?);
+        }
+
         // what about network interface stats?
 
-        Ok(StatsContainerResponse {
-            cgroup_stats: SingularPtrField::some(self.cgroup_manager.as_ref().get_stats()?),
-            ..Default::default()
-        })
+        Ok(r)
     }
 
     fn set(&mut self, r: LinuxResources) -> Result<()> {
-        self.cgroup_manager.as_ref().set(&r, true)?;
-
+        if self.cgroup_manager.is_some() {
+            self.cgroup_manager.as_ref().unwrap().set(&r, true)?;
+        }
         self.config
             .spec
             .as_mut()
@@ -1037,8 +1018,7 @@ impl BaseContainer for LinuxContainer {
             &logger,
             spec,
             &p,
-            self.cgroup_manager.as_ref(),
-            self.config.use_systemd_cgroup,
+            self.cgroup_manager.as_ref().unwrap(),
             &st,
             &mut pipe_w,
             &mut pipe_r,
@@ -1101,14 +1081,12 @@ impl BaseContainer for LinuxContainer {
             }
         }
 
-        // guest Poststop hook
-        // * should be executed after the container is deleted but before the delete operation returns
-        // * the executable file is in agent namespace
-        // * should also be executed in agent namespace.
-        if let Some(hooks) = spec.hooks.as_ref() {
-            info!(self.logger, "guest Poststop hook");
-            let mut hook_states = HookStates::new();
-            hook_states.execute_hooks(&hooks.poststop, Some(st))?;
+        if spec.hooks.is_some() {
+            info!(self.logger, "poststop");
+            let hooks = spec.hooks.as_ref().unwrap();
+            for h in hooks.poststop.iter() {
+                execute_hook(&self.logger, h, &st).await?;
+            }
         }
 
         self.status.transition(ContainerState::Stopped);
@@ -1118,19 +1096,19 @@ impl BaseContainer for LinuxContainer {
         )?;
         fs::remove_dir_all(&self.root)?;
 
-        let cgm = self.cgroup_manager.as_mut();
-        // Kill all of the processes created in this container to prevent
-        // the leak of some daemon process when this container shared pidns
-        // with the sandbox.
-        let pids = cgm.get_pids().context("get cgroup pids")?;
-        for i in pids {
-            if let Err(e) = signal::kill(Pid::from_raw(i), Signal::SIGKILL) {
-                warn!(self.logger, "kill the process {} error: {:?}", i, e);
+        if let Some(cgm) = self.cgroup_manager.as_mut() {
+            // Kill all of the processes created in this container to prevent
+            // the leak of some daemon process when this container shared pidns
+            // with the sandbox.
+            let pids = cgm.get_pids().context("get cgroup pids")?;
+            for i in pids {
+                if let Err(e) = signal::kill(Pid::from_raw(i), Signal::SIGKILL) {
+                    warn!(self.logger, "kill the process {} error: {:?}", i, e);
+                }
             }
+
+            cgm.destroy().context("destroy cgroups")?;
         }
 
-        cgm.destroy().context("destroy cgroups")?;
-
         Ok(())
     }
 
@@ -1154,14 +1132,16 @@ impl BaseContainer for LinuxContainer {
             .ok_or_else(|| anyhow!("OCI spec was not found"))?;
         let st = self.oci_state()?;
 
-        // guest Poststart hook
-        // * should be executed after the container is started but before the delete operation returns
-        // * the executable file is in agent namespace
-        // * should also be executed in agent namespace.
-        if let Some(hooks) = spec.hooks.as_ref() {
-            info!(self.logger, "guest Poststart hook");
-            let mut hook_states = HookStates::new();
-            hook_states.execute_hooks(&hooks.poststart, Some(st))?;
+        // run poststart hook
+        if spec.hooks.is_some() {
+            info!(self.logger, "poststart hook");
+            let hooks = spec
+                .hooks
+                .as_ref()
+                .ok_or_else(|| anyhow!("OCI hooks were not found"))?;
+            for h in hooks.poststart.iter() {
+                execute_hook(&self.logger, h, &st).await?;
+            }
         }
 
         unistd::close(fd)?;
@@ -1300,13 +1280,11 @@ pub fn setup_child_logger(fd: RawFd, child_logger: Logger) -> tokio::task::JoinH
     })
 }
 
-#[allow(clippy::too_many_arguments)]
 async fn join_namespaces(
     logger: &Logger,
     spec: &Spec,
     p: &Process,
-    cm: &(dyn Manager + Send + Sync),
-    use_systemd_cgroup: bool,
+    cm: &FsManager,
     st: &OCIState,
     pipe_w: &mut PipeStream,
     pipe_r: &mut PipeStream,
@@ -1333,11 +1311,7 @@ async fn join_namespaces(
     info!(logger, "wait child received oci process");
     read_async(pipe_r).await?;
 
-    let cm_str = if use_systemd_cgroup {
-        serde_json::to_string(cm.as_any()?.downcast_ref::<SystemdManager>().unwrap())
-    } else {
-        serde_json::to_string(cm.as_any()?.downcast_ref::<FsManager>().unwrap())
-    }?;
+    let cm_str = serde_json::to_string(cm)?;
     write_async(pipe_w, SYNC_DATA, cm_str.as_str()).await?;
 
     // wait child setup user namespace
@@ -1360,16 +1334,13 @@ async fn join_namespaces(
     }
 
     // apply cgroups
-    // For FsManager, the order of apply and set does not matter.
-    // For SystemdManager, apply must precede set because we can only create a systemd unit with specific processes (pids).
-    if res.is_some() {
-        info!(logger, "apply processes to cgroups!");
-        cm.apply(p.pid)?;
+    if p.init && res.is_some() {
+        info!(logger, "apply cgroups!");
+        cm.set(res.unwrap(), false)?;
    }
 
-    if p.init && res.is_some() {
-        info!(logger, "set properties to cgroups!");
-        cm.set(res.unwrap(), false)?;
+    if res.is_some() {
+        cm.apply(p.pid)?;
     }
 
     info!(logger, "notify child to continue");
@@ -1382,14 +1353,13 @@ async fn join_namespaces(
 
     info!(logger, "get ready to run prestart hook!");
 
-    // guest Prestart hook
-    // * should be executed during the start operation, and before the container command is executed
-    // * the executable file is in agent namespace
-    // * should also be executed in agent namespace.
-    if let Some(hooks) = spec.hooks.as_ref() {
-        info!(logger, "guest Prestart hook");
-        let mut hook_states = HookStates::new();
-        hook_states.execute_hooks(&hooks.prestart, Some(st.clone()))?;
+    // run prestart hook
+    if spec.hooks.is_some() {
+        info!(logger, "prestart hook");
+        let hooks = spec.hooks.as_ref().unwrap();
+        for h in hooks.prestart.iter() {
+            execute_hook(&logger, h, st).await?;
+        }
     }
 
     // notify child run prestart hooks completed
@@ -1475,41 +1445,27 @@ impl LinuxContainer {
             .context(format!("Cannot change owner of container {} root", id))?;
 
         let spec = config.spec.as_ref().unwrap();
 
         let linux = spec.linux.as_ref().unwrap();
-        let cpath = if config.use_systemd_cgroup {
-            if linux.cgroups_path.len() == 2 {
-                format!("system.slice:kata_agent:{}", id.as_str())
-            } else {
-                linux.cgroups_path.clone()
-            }
-        } else if linux.cgroups_path.is_empty() {
+
+        let cpath = if linux.cgroups_path.is_empty() {
             format!("/{}", id.as_str())
         } else {
-            // if we have a systemd cgroup path we need to convert it to a fs cgroup path
-            linux.cgroups_path.replace(':', "/")
+            linux.cgroups_path.clone()
        };
 
-        let cgroup_manager: Box<dyn Manager + Send + Sync> = if config.use_systemd_cgroup {
-            Box::new(SystemdManager::new(cpath.as_str()).map_err(|e| {
-                anyhow!(format!(
-                    "fail to create cgroup manager with path {}: {:}",
-                    cpath, e
-                ))
-            })?)
-        } else {
-            Box::new(FsManager::new(cpath.as_str()).map_err(|e| {
-                anyhow!(format!(
-                    "fail to create cgroup manager with path {}: {:}",
-                    cpath, e
-                ))
-            })?)
-        };
+        let cgroup_manager = FsManager::new(cpath.as_str()).map_err(|e| {
+            anyhow!(format!(
+                "fail to create cgroup manager with path {}: {:}",
+                cpath, e
+            ))
+        })?;
+        info!(logger, "new cgroup_manager {:?}", &cgroup_manager);
 
         Ok(LinuxContainer {
             id: id.clone(),
             root,
-            cgroup_manager,
+            cgroup_manager: Some(cgroup_manager),
             status: ContainerStatus::new(),
             uid_map_path: String::from(""),
             gid_map_path: "".to_string(),
@@ -1561,6 +1517,143 @@ fn set_sysctls(sysctls: &HashMap<String, String>) -> Result<()> {
     Ok(())
 }
 
+use std::process::Stdio;
+use std::time::Duration;
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+
+pub async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
+    let logger = logger.new(o!("action" => "execute-hook"));
+
+    let binary = PathBuf::from(h.path.as_str());
+    let path = binary.canonicalize()?;
+    if !path.exists() {
+        return Err(anyhow!("Path {:?} does not exist", path));
+    }
+
+    let mut args = h.args.clone();
+    // the hook.args[0] is the hook binary name which shouldn't be included
+    // in the Command.args
+    if args.len() > 1 {
+        args.remove(0);
+    }
+
+    // all invalid envs will be omitted, only valid envs will be passed to hook.
+    let env: HashMap<&str, &str> = h.env.iter().filter_map(|e| valid_env(e)).collect();
+
+    // Avoid the exit signal being reaped by the global reaper.
+    let _wait_locker = WAIT_PID_LOCKER.lock().await;
+    let mut child = tokio::process::Command::new(path)
+        .args(args.iter())
+        .envs(env.iter())
+        .kill_on_drop(true)
+        .stdin(Stdio::piped())
+        .stdout(Stdio::piped())
+        .stderr(Stdio::piped())
+        .spawn()?;
+
+    // default timeout 10s
+    let mut timeout: u64 = 10;
+
+    // if a timeout is set in the hook, use the specified value
+    if let Some(t) = h.timeout {
+        if t > 0 {
+            timeout = t as u64;
+        }
+    }
+
+    let state = serde_json::to_string(st)?;
+    let path = h.path.clone();
+
+    let join_handle = tokio::spawn(async move {
+        if let Some(mut stdin) = child.stdin.take() {
+            match stdin.write_all(state.as_bytes()).await {
+                Ok(_) => {}
+                Err(e) => {
+                    info!(logger, "write to child stdin failed: {:?}", e);
+                }
+            }
+        }
+
+        // read something from stdout and stderr for debug
+        if let Some(stdout) = child.stdout.as_mut() {
+            let mut out = String::new();
+            match stdout.read_to_string(&mut out).await {
+                Ok(_) => {
+                    info!(logger, "child stdout: {}", out.as_str());
+                }
+                Err(e) => {
+                    info!(logger, "read from child stdout failed: {:?}", e);
+                }
+            }
+        }
+
+        let mut err = String::new();
+        if let Some(stderr) = child.stderr.as_mut() {
+            match stderr.read_to_string(&mut err).await {
+                Ok(_) => {
+                    info!(logger, "child stderr: {}", err.as_str());
+                }
+                Err(e) => {
+                    info!(logger, "read from child stderr failed: {:?}", e);
+                }
+            }
+        }
+
+        match child.wait().await {
+            Ok(exit) => {
+                let code = exit
+                    .code()
+                    .ok_or_else(|| anyhow!("hook exit status has no status code"))?;
+
+                if code != 0 {
+                    error!(
+                        logger,
+                        "hook {} exit status is {}, error message is {}", &path, code, err
+                    );
+                    return Err(anyhow!(nix::Error::UnknownErrno));
+                }
+
+                debug!(logger, "hook {} exit status is 0", &path);
+                Ok(())
+            }
+            Err(e) => Err(anyhow!(
+                "wait child error: {} {}",
+                e,
+                e.raw_os_error().unwrap()
+            )),
+        }
+    });
+
+    match tokio::time::timeout(Duration::new(timeout, 0), join_handle).await {
+        Ok(r) => r.unwrap(),
+        Err(_) => Err(anyhow!(nix::Error::ETIMEDOUT)),
+    }
+}
+
+// valid environment variables according to https://doc.rust-lang.org/std/env/fn.set_var.html#panics
+fn valid_env(e: &str) -> Option<(&str, &str)> {
+    // check whether key or value contains a NUL character
+    if e.as_bytes().contains(&b'\0') {
+        return None;
+    }
+
+    let v: Vec<&str> = e.splitn(2, '=').collect();
+
+    // key can't hold an `equal` sign, but value can
+    if v.len() != 2 {
+        return None;
+    }
+
+    let (key, value) = (v[0].trim(), v[1].trim());
+
+    // key can't be empty
+    if key.is_empty() {
+        return None;
+    }
+
+    Some((key, value))
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -1571,6 +1664,7 @@ mod tests {
     use std::os::unix::io::AsRawFd;
     use tempfile::tempdir;
     use test_utils::skip_if_not_root;
+    use tokio::process::Command;
 
     macro_rules! sl {
         () => {
@@ -1578,6 +1672,113 @@ mod tests {
         };
     }
 
+    async fn which(cmd: &str) -> String {
+        let output: std::process::Output = Command::new("which")
+            .arg(cmd)
+            .output()
+            .await
+            .expect("which command failed to run");
+
+        match String::from_utf8(output.stdout) {
+            Ok(v) => v.trim_end_matches('\n').to_string(),
+            Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
+        }
+    }
+
+    #[tokio::test]
+    async fn test_execute_hook() {
+        let temp_file = "/tmp/test_execute_hook";
+
+        let touch = which("touch").await;
+
+        defer!(fs::remove_file(temp_file).unwrap(););
+        let invalid_str = vec![97, b'\0', 98];
+        let invalid_string = std::str::from_utf8(&invalid_str).unwrap();
+        let invalid_env = format!("{}=value", invalid_string);
+
+        execute_hook(
+            &slog_scope::logger(),
+            &Hook {
+                path: touch,
+                args: vec!["touch".to_string(), temp_file.to_string()],
+                env: vec![invalid_env],
+                timeout: Some(10),
+            },
+            &OCIState {
+                version: "1.2.3".to_string(),
+                id: "321".to_string(),
+                status: ContainerState::Running,
+                pid: 2,
+                bundle: "".to_string(),
+                annotations: Default::default(),
+            },
+        )
+        .await
+        .unwrap();
+
+        assert_eq!(Path::new(&temp_file).exists(), true);
+    }
+
+    #[tokio::test]
+    async fn test_execute_hook_with_error() {
+        let ls = which("ls").await;
+
+        let res = execute_hook(
+            &slog_scope::logger(),
+            &Hook {
+                path: ls,
+                args: vec!["ls".to_string(), "/tmp/not-exist".to_string()],
+                env: vec![],
+                timeout: None,
+            },
+            &OCIState {
+                version: "1.2.3".to_string(),
+                id: "321".to_string(),
+                status: ContainerState::Running,
+                pid: 2,
+                bundle: "".to_string(),
+                annotations: Default::default(),
+            },
+        )
+        .await;
+
+        let expected_err = nix::Error::UnknownErrno;
+        assert_eq!(
+            res.unwrap_err().downcast::<nix::Error>().unwrap(),
+            expected_err
+        );
+    }
+
+    #[tokio::test]
+    async fn test_execute_hook_with_timeout() {
+        let sleep = which("sleep").await;
+
+        let res = execute_hook(
+            &slog_scope::logger(),
+            &Hook {
+                path: sleep,
+                args: vec!["sleep".to_string(), "2".to_string()],
+                env: vec![],
+                timeout: Some(1),
+            },
+            &OCIState {
+                version: "1.2.3".to_string(),
+                id: "321".to_string(),
+                status: ContainerState::Running,
+                pid: 2,
+                bundle: "".to_string(),
+                annotations: Default::default(),
+            },
+        )
+        .await;
+
+        let expected_err = nix::Error::ETIMEDOUT;
+        assert_eq!(
+            res.unwrap_err().downcast::<nix::Error>().unwrap(),
+            expected_err
+        );
+    }
+
     #[test]
     fn test_status_transtition() {
         let mut status = ContainerStatus::new();
@@ -1730,12 +1931,20 @@ mod tests {
         assert!(format!("{:?}", ret).contains("failed to pause container"))
     }
 
+    #[test]
+    fn test_linuxcontainer_pause_cgroupmgr_is_none() {
+        let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
+            c.cgroup_manager = None;
+            c.pause().map_err(|e| anyhow!(e))
+        });
+
+        assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
+    }
+
     #[test]
     fn test_linuxcontainer_pause() {
         let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
-            c.cgroup_manager = Box::new(FsManager::new("").map_err(|e| {
-                anyhow!(format!("fail to create cgroup manager with path: {:}", e))
-            })?);
+            c.cgroup_manager = FsManager::new("").ok();
             c.pause().map_err(|e| anyhow!(e))
         });
 
@@ -1754,12 +1963,21 @@ mod tests {
         assert!(format!("{:?}", ret).contains("not paused"))
     }
 
+    #[test]
+    fn test_linuxcontainer_resume_cgroupmgr_is_none() {
+        let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
+            c.status.transition(ContainerState::Paused);
+            c.cgroup_manager = None;
+            c.resume().map_err(|e| anyhow!(e))
+        });
+
+        assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
+    }
+
     #[test]
     fn test_linuxcontainer_resume() {
         let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
-            c.cgroup_manager = Box::new(FsManager::new("").map_err(|e| {
-                anyhow!(format!("fail to create cgroup manager with path: {:}", e))
-            })?);
+            c.cgroup_manager = FsManager::new("").ok();
             // Change status to paused, this way we can resume it
             c.status.transition(ContainerState::Paused);
             c.resume().map_err(|e| anyhow!(e))
@@ -1892,4 +2110,49 @@ mod tests {
         let ret = do_init_child(std::io::stdin().as_raw_fd());
         assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
     }
+
+    #[test]
+    fn test_valid_env() {
+        let env = valid_env("a=b=c");
+        assert_eq!(Some(("a", "b=c")), env);
+
+        let env = valid_env("a=b");
+        assert_eq!(Some(("a", "b")), env);
+        let env = valid_env("a =b");
+        assert_eq!(Some(("a", "b")), env);
+
+        let env = valid_env(" a =b");
+        assert_eq!(Some(("a", "b")), env);
+
+        let env = valid_env("a= b");
+        assert_eq!(Some(("a", "b")), env);
+
+        let env = valid_env("a=b ");
+        assert_eq!(Some(("a", "b")), env);
+        let env = valid_env("a=b c ");
+        assert_eq!(Some(("a", "b c")), env);
+
+        let env = valid_env("=b");
+        assert_eq!(None, env);
+
+        let env = valid_env("a=");
+        assert_eq!(Some(("a", "")), env);
+
+        let env = valid_env("a==");
+        assert_eq!(Some(("a", "=")), env);
+
+        let env = valid_env("a");
+        assert_eq!(None, env);
+
+        let invalid_str = vec![97, b'\0', 98];
+        let invalid_string = std::str::from_utf8(&invalid_str).unwrap();
+
+        let invalid_env = format!("{}=value", invalid_string);
+        let env = valid_env(&invalid_env);
+        assert_eq!(None, env);
+
+        let invalid_env = format!("key={}", invalid_string);
+        let env = valid_env(&invalid_env);
+        assert_eq!(None, env);
+    }
 }
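The `execute_hook` function added above enforces a per-hook deadline by racing the spawned task against `tokio::time::timeout` and translating an elapsed timer into `ETIMEDOUT`. A minimal self-contained sketch of that pattern (assumes the `tokio`, `anyhow`, and `nix` crates; the sleep stands in for running the hook binary):

```rust
// Sketch of the hook-timeout pattern: run work under tokio::time::timeout
// and map an elapsed timer to ETIMEDOUT.
use std::time::Duration;

async fn run_with_timeout(timeout_secs: u64) -> anyhow::Result<()> {
    let join_handle = tokio::spawn(async {
        // stand-in for spawning the hook binary and waiting on it
        tokio::time::sleep(Duration::from_secs(2)).await;
        Ok(())
    });

    match tokio::time::timeout(Duration::new(timeout_secs, 0), join_handle).await {
        Ok(r) => r?, // the task finished in time; propagate its own Result
        Err(_) => Err(anyhow::anyhow!(nix::Error::ETIMEDOUT)),
    }
}

#[tokio::main]
async fn main() {
    assert!(run_with_timeout(1).await.is_err()); // 2s of work under a 1s budget times out
    assert!(run_with_timeout(5).await.is_ok());
}
```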

@@ -38,7 +38,6 @@ pub mod pipestream;
 pub mod process;
 #[cfg(feature = "seccomp")]
 pub mod seccomp;
-pub mod selinux;
 pub mod specconv;
 pub mod sync;
 pub mod sync_with_async;
@@ -237,6 +236,12 @@ pub fn resources_grpc_to_oci(res: &grpc::LinuxResources) -> oci::LinuxResources
     let devices = {
         let mut d = Vec::new();
         for dev in res.Devices.iter() {
+            let dev_type = if dev.Type.is_empty() {
+                None
+            } else {
+                Some(dev.Type.clone())
+            };
+
             let major = if dev.Major == -1 {
                 None
             } else {
@@ -250,7 +255,7 @@ pub fn resources_grpc_to_oci(res: &grpc::LinuxResources) -> oci::LinuxResources
             };
             d.push(oci::LinuxDeviceCgroup {
                 allow: dev.Allow,
-                r#type: dev.Type.clone(),
+                r#type: dev_type,
                 major,
                 minor,
                 access: dev.Access.clone(),
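The `resources_grpc_to_oci` change above maps gRPC sentinel values (an empty string, or -1) onto `Option` fields in the OCI struct. A tiny self-contained sketch of that mapping, with hypothetical helper names:

```rust
// Sentinel-to-Option mapping sketch (helper names are illustrative only).
fn type_opt(t: &str) -> Option<String> {
    if t.is_empty() { None } else { Some(t.to_string()) }
}

fn major_opt(m: i64) -> Option<i64> {
    if m == -1 { None } else { Some(m) }
}

fn main() {
    assert_eq!(type_opt(""), None);
    assert_eq!(type_opt("c"), Some("c".to_string()));
    assert_eq!(major_opt(-1), None);
    assert_eq!(major_opt(10), Some(10));
}
```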
@@ -25,7 +25,6 @@ use std::fs::File;
 use std::io::{BufRead, BufReader};
 
 use crate::container::DEFAULT_DEVICES;
-use crate::selinux;
 use crate::sync::write_count;
 use std::string::ToString;
 
@@ -182,8 +181,6 @@ pub fn init_rootfs(
         None => flags |= MsFlags::MS_SLAVE,
     }
 
-    let label = &linux.mount_label;
-
     let root = spec
         .root
         .as_ref()
@@ -247,7 +244,7 @@ pub fn init_rootfs(
             }
         }
 
-        mount_from(cfd_log, m, rootfs, flags, &data, label)?;
+        mount_from(cfd_log, m, rootfs, flags, &data, "")?;
         // bind mount won't change mount options, we need remount to make mount options
         // effective.
         // first check that we have non-default options required before attempting a
@@ -527,6 +524,7 @@ pub fn pivot_rootfs<P: ?Sized + NixPath + std::fmt::Debug>(path: &P) -> Result<(
 
 fn rootfs_parent_mount_private(path: &str) -> Result<()> {
     let mount_infos = parse_mount_table(MOUNTINFO_PATH)?;
+
     let mut max_len = 0;
     let mut mount_point = String::from("");
     let mut options = String::from("");
@@ -769,9 +767,9 @@ fn mount_from(
     rootfs: &str,
     flags: MsFlags,
     data: &str,
-    label: &str,
+    _label: &str,
 ) -> Result<()> {
-    let mut d = String::from(data);
+    let d = String::from(data);
     let dest = secure_join(rootfs, &m.destination);
 
     let src = if m.r#type.as_str() == "bind" {
@@ -782,7 +780,7 @@ fn mount_from(
         Path::new(&dest).parent().unwrap()
     };
 
-    fs::create_dir_all(dir).map_err(|e| {
+    fs::create_dir_all(&dir).map_err(|e| {
         log_child!(
             cfd_log,
             "create dir {}: {}",
@@ -824,37 +822,6 @@ fn mount_from(
         e
     })?;
 
-    // Set the SELinux context for the mounts
-    let mut use_xattr = false;
-    if !label.is_empty() {
-        if selinux::is_enabled()? {
-            let device = Path::new(&m.source)
-                .file_name()
-                .ok_or_else(|| anyhow!("invalid device source path: {}", &m.source))?
-                .to_str()
-                .ok_or_else(|| anyhow!("failed to convert device source path: {}", &m.source))?;
-
-            match device {
-                // SELinux does not support labeling of /proc or /sys
-                "proc" | "sysfs" => (),
-                // SELinux does not support mount labeling against /dev/mqueue,
-                // so we use setxattr instead
-                "mqueue" => {
-                    use_xattr = true;
-                }
-                _ => {
-                    log_child!(cfd_log, "add SELinux mount label to {}", dest.as_str());
-                    selinux::add_mount_label(&mut d, label);
-                }
-            }
-        } else {
-            log_child!(
-                cfd_log,
-                "SELinux label for the mount is provided but SELinux is not enabled on the running kernel"
-            );
-        }
-    }
-
     mount(
         Some(src.as_str()),
         dest.as_str(),
@@ -867,10 +834,6 @@ fn mount_from(
         e
     })?;
 
-    if !label.is_empty() && selinux::is_enabled()? && use_xattr {
-        xattr::set(dest.as_str(), "security.selinux", label.as_bytes())?;
-    }
-
     if flags.contains(MsFlags::MS_BIND)
         && flags.intersects(
            !(MsFlags::MS_REC
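The comment kept in `init_rootfs` above ("bind mount won't change mount options, we need remount to make mount options effective") describes the standard bind-then-remount dance. A sketch of that pattern with `nix` (flags and helper are assumptions for illustration, not the agent's actual code path):

```rust
// Bind-then-remount sketch: the first mount establishes the bind; options such
// as MS_RDONLY only take effect on a second MS_REMOUNT|MS_BIND pass.
use nix::mount::{mount, MsFlags};

fn bind_mount_ro(src: &str, dest: &str) -> nix::Result<()> {
    let none: Option<&str> = None;
    // first mount: establishes the bind, read-only not yet applied
    mount(Some(src), dest, none, MsFlags::MS_BIND, none)?;
    // remount: applies MS_RDONLY on the existing bind mount
    mount(
        none,
        dest,
        none,
        MsFlags::MS_REMOUNT | MsFlags::MS_BIND | MsFlags::MS_RDONLY,
        none,
    )
}
```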
@@ -63,7 +63,7 @@ pub fn get_unknown_syscalls(scmp: &LinuxSeccomp) -> Option<Vec<String>> {
 // init_seccomp creates a seccomp filter and loads it for the current process
 // including all the child processes.
 pub fn init_seccomp(scmp: &LinuxSeccomp) -> Result<()> {
-    let def_action = ScmpAction::from_str(scmp.default_action.as_str(), Some(libc::EPERM))?;
+    let def_action = ScmpAction::from_str(scmp.default_action.as_str(), Some(libc::EPERM as i32))?;
 
     // Create a new filter context
     let mut filter = ScmpFilterContext::new_filter(def_action)?;

@@ -1,80 +0,0 @@
-// Copyright 2022 Sony Group Corporation
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-use anyhow::{Context, Result};
-use nix::unistd::gettid;
-use std::fs::{self, OpenOptions};
-use std::io::prelude::*;
-use std::path::Path;
-
-pub fn is_enabled() -> Result<bool> {
-    let buf = fs::read_to_string("/proc/mounts")?;
-    let enabled = buf.contains("selinuxfs");
-
-    Ok(enabled)
-}
-
-pub fn add_mount_label(data: &mut String, label: &str) {
-    if data.is_empty() {
-        let context = format!("context=\"{}\"", label);
-        data.push_str(&context);
-    } else {
-        let context = format!(",context=\"{}\"", label);
-        data.push_str(&context);
-    }
-}
-
-pub fn set_exec_label(label: &str) -> Result<()> {
-    let mut attr_path = Path::new("/proc/thread-self/attr/exec").to_path_buf();
-    if !attr_path.exists() {
-        // Fall back to the old convention
-        attr_path = Path::new("/proc/self/task")
-            .join(gettid().to_string())
-            .join("attr/exec")
-    }
-
-    let mut file = OpenOptions::new()
-        .write(true)
-        .truncate(true)
-        .open(attr_path)?;
-    file.write_all(label.as_bytes())
-        .with_context(|| "failed to apply SELinux label")?;
-
-    Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    const TEST_LABEL: &str = "system_u:system_r:unconfined_t:s0";
-
-    #[test]
-    fn test_is_enabled() {
-        let ret = is_enabled();
-        assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
-    }
-
-    #[test]
-    fn test_add_mount_label() {
-        let mut data = String::new();
-        add_mount_label(&mut data, TEST_LABEL);
-        assert_eq!(data, format!("context=\"{}\"", TEST_LABEL));
-
-        let mut data = String::from("defaults");
-        add_mount_label(&mut data, TEST_LABEL);
-        assert_eq!(data, format!("defaults,context=\"{}\"", TEST_LABEL));
-    }
-
-    #[test]
-    fn test_set_exec_label() {
-        let ret = set_exec_label(TEST_LABEL);
-        if is_enabled().unwrap() {
-            assert!(ret.is_ok(), "Expecting Ok, Got {:?}", ret);
-        } else {
-            assert!(ret.is_err(), "Expecting error, Got {:?}", ret);
-        }
-    }
-}
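The deleted `add_mount_label` above simply appends a `context=` option to a mount-data string, comma-separating when options already exist. A standalone copy with a usage example (the label value here is an arbitrary illustration):

```rust
// Standalone copy of the deleted add_mount_label, runnable on its own.
fn add_mount_label(data: &mut String, label: &str) {
    if data.is_empty() {
        data.push_str(&format!("context=\"{}\"", label));
    } else {
        // existing options get a comma separator
        data.push_str(&format!(",context=\"{}\"", label));
    }
}

fn main() {
    let mut data = String::from("defaults");
    add_mount_label(&mut data, "system_u:object_r:container_file_t:s0");
    assert_eq!(
        data,
        "defaults,context=\"system_u:object_r:container_file_t:s0\""
    );
}
```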
@@ -6,7 +6,6 @@
 use crate::container::Config;
 use anyhow::{anyhow, Context, Result};
 use oci::{Linux, LinuxIdMapping, LinuxNamespace, Spec};
-use regex::Regex;
 use std::collections::HashMap;
 use std::path::{Component, PathBuf};
 
@@ -87,23 +86,6 @@ fn hostname(oci: &Spec) -> Result<()> {
 
 fn security(oci: &Spec) -> Result<()> {
     let linux = get_linux(oci)?;
-    let label_pattern = r".*_u:.*_r:.*_t:s[0-9]|1[0-5].*";
-    let label_regex = Regex::new(label_pattern)?;
-
-    if let Some(ref process) = oci.process {
-        if !process.selinux_label.is_empty() && !label_regex.is_match(&process.selinux_label) {
-            return Err(anyhow!(
-                "SELinux label for the process is invalid format: {}",
-                &process.selinux_label
-            ));
-        }
-    }
-    if !linux.mount_label.is_empty() && !label_regex.is_match(&linux.mount_label) {
-        return Err(anyhow!(
-            "SELinux label for the mount is invalid format: {}",
-            &linux.mount_label
-        ));
-    }
 
     if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
         return Ok(());
@@ -113,6 +95,8 @@ fn security(oci: &Spec) -> Result<()> {
         return Err(anyhow!("Linux namespace does not contain mount"));
     }
 
+    // don't care about selinux at present
+
     Ok(())
 }
 
@@ -301,7 +285,7 @@ pub fn validate(conf: &Config) -> Result<()> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use oci::{Mount, Process};
+    use oci::Mount;
 
     #[test]
     fn test_namespace() {
@@ -404,29 +388,6 @@ mod tests {
         ];
         spec.linux = Some(linux);
         security(&spec).unwrap();
-
-        // SELinux
-        let valid_label = "system_u:system_r:container_t:s0:c123,c456";
-        let mut process = Process::default();
-        process.selinux_label = valid_label.to_string();
-        spec.process = Some(process);
-        security(&spec).unwrap();
-
-        let mut linux = Linux::default();
-        linux.mount_label = valid_label.to_string();
-        spec.linux = Some(linux);
-        security(&spec).unwrap();
-
-        let invalid_label = "system_u:system_r:container_t";
-        let mut process = Process::default();
-        process.selinux_label = invalid_label.to_string();
-        spec.process = Some(process);
-        security(&spec).unwrap_err();
-
-        let mut linux = Linux::default();
-        linux.mount_label = invalid_label.to_string();
-        spec.linux = Some(linux);
-        security(&spec).unwrap_err();
     }
 
     #[test]

@@ -11,6 +11,7 @@ use std::fs;
 use std::str::FromStr;
 use std::time;
 use tracing::instrument;
+use url::Url;
 
 use kata_types::config::default::DEFAULT_AGENT_VSOCK_PORT;
 
@@ -25,6 +26,12 @@ const LOG_VPORT_OPTION: &str = "agent.log_vport";
 const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
 const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
 const CONFIG_FILE: &str = "agent.config_file";
+const CONTAINER_POLICY_FILE: &str = "agent.container_policy_file";
+const AA_KBC_PARAMS: &str = "agent.aa_kbc_params";
+const HTTPS_PROXY: &str = "agent.https_proxy";
+const NO_PROXY: &str = "agent.no_proxy";
+const ENABLE_DATA_INTEGRITY: &str = "agent.data_integrity";
+const ENABLE_SIGNATURE_VERIFICATION: &str = "agent.enable_signature_verification";
 
 const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
 const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
@@ -52,6 +59,11 @@ const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container p
 const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
 const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";
 
+const ERR_INVALID_CONTAINER_POLICY_PATH_VALUE: &str = "invalid container_policy_file value";
+const ERR_INVALID_CONTAINER_POLICY_PATH_KEY: &str = "invalid container_policy_file key";
+const ERR_INVALID_CONTAINER_POLICY_ABSOLUTE: &str =
+    "container_policy_file path must be an absolute file path";
+
 #[derive(Debug, Default, Deserialize)]
 pub struct EndpointsConfig {
     pub allowed: Vec<String>,
@@ -77,6 +89,12 @@ pub struct AgentConfig {
     pub tracing: bool,
     pub endpoints: AgentEndpoints,
     pub supports_seccomp: bool,
+    pub container_policy_path: String,
+    pub aa_kbc_params: String,
+    pub https_proxy: String,
+    pub no_proxy: String,
+    pub data_integrity: bool,
+    pub enable_signature_verification: bool,
 }
 
 #[derive(Debug, Deserialize)]
@@ -92,6 +110,12 @@ pub struct AgentConfigBuilder {
     pub unified_cgroup_hierarchy: Option<bool>,
     pub tracing: Option<bool>,
     pub endpoints: Option<EndpointsConfig>,
+    pub container_policy_path: Option<String>,
+    pub aa_kbc_params: Option<String>,
+    pub https_proxy: Option<String>,
+    pub no_proxy: Option<String>,
+    pub data_integrity: Option<bool>,
+    pub enable_signature_verification: Option<bool>,
 }
 
 macro_rules! config_override {
@@ -153,6 +177,12 @@ impl Default for AgentConfig {
             tracing: false,
             endpoints: Default::default(),
             supports_seccomp: rpc::have_seccomp(),
+            container_policy_path: String::from(""),
+            aa_kbc_params: String::from(""),
+            https_proxy: String::from(""),
+            no_proxy: String::from(""),
+            data_integrity: false,
+            enable_signature_verification: true,
         }
     }
 }
@@ -181,6 +211,16 @@ impl FromStr for AgentConfig {
         config_override!(agent_config_builder, agent_config, server_addr);
         config_override!(agent_config_builder, agent_config, unified_cgroup_hierarchy);
         config_override!(agent_config_builder, agent_config, tracing);
+        config_override!(agent_config_builder, agent_config, container_policy_path);
+        config_override!(agent_config_builder, agent_config, aa_kbc_params);
+        config_override!(agent_config_builder, agent_config, https_proxy);
+        config_override!(agent_config_builder, agent_config, no_proxy);
+        config_override!(agent_config_builder, agent_config, data_integrity);
+        config_override!(
+            agent_config_builder,
+            agent_config,
+            enable_signature_verification
+        );
 
         // Populate the allowed endpoints hash set, if we got any from the config file.
         if let Some(endpoints) = agent_config_builder.endpoints {
@@ -209,6 +249,10 @@ impl AgentConfig {
         let mut config: AgentConfig = Default::default();
         let cmdline = fs::read_to_string(file)?;
         let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
+
+        let mut using_config_file = false;
+        // Check if there is config file before parsing params that might
+        // override values from the config file.
         for param in params.iter() {
             // If we get a configuration file path from the command line, we
             // generate our config from it.
@@ -216,9 +260,13 @@ impl AgentConfig {
             // or if it can't be parsed properly.
             if param.starts_with(format!("{}=", CONFIG_FILE).as_str()) {
                 let config_file = get_string_value(param)?;
-                return AgentConfig::from_config_file(&config_file);
+                config = AgentConfig::from_config_file(&config_file)?;
+                using_config_file = true;
+                break;
             }
         }
+
+        for param in params.iter() {
             // parse cmdline flags
             parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, config.debug_console);
             parse_cmdline_param!(param, DEV_MODE_FLAG, config.dev_mode);
@@ -278,6 +326,30 @@ impl AgentConfig {
                 config.unified_cgroup_hierarchy,
                 get_bool_value
             );
+
+            parse_cmdline_param!(
+                param,
+                CONTAINER_POLICY_FILE,
+                config.container_policy_path,
+                get_container_policy_path_value
+            );
+
+            parse_cmdline_param!(param, AA_KBC_PARAMS, config.aa_kbc_params, get_string_value);
+            parse_cmdline_param!(param, HTTPS_PROXY, config.https_proxy, get_url_value);
+            parse_cmdline_param!(param, NO_PROXY, config.no_proxy, get_string_value);
+            parse_cmdline_param!(
+                param,
+                ENABLE_DATA_INTEGRITY,
+                config.data_integrity,
+                get_bool_value
+            );
+
+            parse_cmdline_param!(
+                param,
+                ENABLE_SIGNATURE_VERIFICATION,
+                config.enable_signature_verification,
+                get_bool_value
+            );
         }
 
         if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
@@ -297,7 +369,9 @@ impl AgentConfig {
         }
 
         // We did not get a configuration file: allow all endpoints.
-        config.endpoints.all_allowed = true;
+        if !using_config_file {
+            config.endpoints.all_allowed = true;
+        }
 
         Ok(config)
     }
@@ -431,6 +505,35 @@ fn get_container_pipe_size(param: &str) -> Result<i32> {
     Ok(value)
 }
 
+#[instrument]
+fn get_container_policy_path_value(param: &str) -> Result<String> {
+    let fields: Vec<&str> = param.split('=').collect();
+
+    ensure!(!fields[0].is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_KEY);
+    ensure!(fields.len() == 2, ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
+
+    let key = fields[0];
+    ensure!(
+        key == CONTAINER_POLICY_FILE,
+        ERR_INVALID_CONTAINER_POLICY_PATH_KEY
+    );
+
+    let value = String::from(fields[1]);
+    ensure!(!value.is_empty(), ERR_INVALID_CONTAINER_POLICY_PATH_VALUE);
+    ensure!(
+        value.starts_with('/'),
+        ERR_INVALID_CONTAINER_POLICY_ABSOLUTE
+    );
+    ensure!(!value.contains(".."), ERR_INVALID_CONTAINER_POLICY_ABSOLUTE);
+    Ok(value)
+}
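The added `get_container_policy_path_value` above enforces four rules: exact key match, non-empty value, absolute path, and no `..` components. A slightly simplified, self-contained sketch of the same checks (the real function splits on every `=` and uses anyhow's `ensure!` macro):

```rust
// Self-contained sketch of the container_policy_file validation rules above.
const KEY: &str = "agent.container_policy_file";

fn policy_path(param: &str) -> Result<String, &'static str> {
    let fields: Vec<&str> = param.splitn(2, '=').collect();
    if fields.len() != 2 || fields[0] != KEY {
        return Err("invalid container_policy_file key");
    }
    let value = fields[1];
    if value.is_empty() {
        return Err("invalid container_policy_file value");
    }
    if !value.starts_with('/') || value.contains("..") {
        return Err("container_policy_file path must be an absolute file path");
    }
    Ok(value.to_string())
}

fn main() {
    assert!(policy_path("agent.container_policy_file=/etc/containers/policy.json").is_ok());
    assert!(policy_path("agent.container_policy_file=./policy.json").is_err());
    assert!(policy_path("foo=bar").is_err());
}
```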
||||
|
||||
#[instrument]
|
||||
fn get_url_value(param: &str) -> Result<String> {
|
||||
let value = get_string_value(param)?;
|
||||
Ok(Url::parse(&value)?.to_string())
|
||||
}
#[cfg(test)]
mod tests {
    use test_utils::assert_result;
@@ -449,6 +552,8 @@ mod tests {
        assert!(!config.dev_mode);
        assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
        assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
        assert_eq!(config.container_policy_path, "");
        assert!(config.enable_signature_verification);
    }

    #[test]
@@ -467,6 +572,12 @@ mod tests {
        server_addr: &'a str,
        unified_cgroup_hierarchy: bool,
        tracing: bool,
        container_policy_path: &'a str,
        aa_kbc_params: &'a str,
        https_proxy: &'a str,
        no_proxy: &'a str,
        data_integrity: bool,
        enable_signature_verification: bool,
    }

    impl Default for TestData<'_> {
@@ -482,6 +593,12 @@ mod tests {
            server_addr: TEST_SERVER_ADDR,
            unified_cgroup_hierarchy: false,
            tracing: false,
            container_policy_path: "",
            aa_kbc_params: "",
            https_proxy: "",
            no_proxy: "",
            data_integrity: false,
            enable_signature_verification: true,
            }
        }
    }
@@ -851,6 +968,86 @@ mod tests {
            tracing: true,
            ..Default::default()
        },
        TestData {
            contents: "agent.container_policy_file=/etc/containers/policy.json",
            container_policy_path: "/etc/containers/policy.json",
            ..Default::default()
        },
        TestData {
            contents: "agent.aa_kbc_params=offline_fs_kbc::null",
            aa_kbc_params: "offline_fs_kbc::null",
            ..Default::default()
        },
        TestData {
            contents: "agent.aa_kbc_params=eaa_kbc::127.0.0.1:50000",
            aa_kbc_params: "eaa_kbc::127.0.0.1:50000",
            ..Default::default()
        },
        TestData {
            contents: "agent.https_proxy=http://proxy.url.com:81/",
            https_proxy: "http://proxy.url.com:81/",
            ..Default::default()
        },
        TestData {
            contents: "agent.https_proxy=http://192.168.1.100:81/",
            https_proxy: "http://192.168.1.100:81/",
            ..Default::default()
        },
        TestData {
            contents: "agent.no_proxy=*.internal.url.com",
            no_proxy: "*.internal.url.com",
            ..Default::default()
        },
        TestData {
            contents: "agent.no_proxy=192.168.1.0/24,172.16.0.0/12",
            no_proxy: "192.168.1.0/24,172.16.0.0/12",
            ..Default::default()
        },
        TestData {
            contents: "",
            data_integrity: false,
            ..Default::default()
        },
        TestData {
            contents: "agent.data_integrity=true",
            data_integrity: true,
            ..Default::default()
        },
        TestData {
            contents: "agent.data_integrity=false",
            data_integrity: false,
            ..Default::default()
        },
        TestData {
            contents: "agent.data_integrity=1",
            data_integrity: true,
            ..Default::default()
        },
        TestData {
            contents: "agent.data_integrity=0",
            data_integrity: false,
            ..Default::default()
        },
        TestData {
            contents: "agent.enable_signature_verification=false",
            enable_signature_verification: false,
            ..Default::default()
        },
        TestData {
            contents: "agent.enable_signature_verification=0",
            enable_signature_verification: false,
            ..Default::default()
        },
        TestData {
            contents: "agent.enable_signature_verification=1",
            enable_signature_verification: true,
            ..Default::default()
        },
        TestData {
            contents: "agent.enable_signature_verification=foo",
            enable_signature_verification: false,
            ..Default::default()
        },
    ];
        let dir = tempdir().expect("failed to create tmpdir");
@@ -898,6 +1095,20 @@ mod tests {
        assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
        assert_eq!(d.server_addr, config.server_addr, "{}", msg);
        assert_eq!(d.tracing, config.tracing, "{}", msg);
        assert_eq!(
            d.container_policy_path, config.container_policy_path,
            "{}",
            msg
        );
        assert_eq!(d.aa_kbc_params, config.aa_kbc_params, "{}", msg);
        assert_eq!(d.https_proxy, config.https_proxy, "{}", msg);
        assert_eq!(d.no_proxy, config.no_proxy, "{}", msg);
        assert_eq!(d.data_integrity, config.data_integrity, "{}", msg);
        assert_eq!(
            d.enable_signature_verification, config.enable_signature_verification,
            "{}",
            msg
        );

        for v in vars_to_unset {
            env::remove_var(v);
@@ -1369,6 +1580,72 @@ Caused by:
        }
    }

    #[test]
    fn test_get_container_policy_path_value() {
        #[derive(Debug)]
        struct TestData<'a> {
            param: &'a str,
            result: Result<String>,
        }

        let tests = &[
            TestData {
                param: "",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
            },
            TestData {
                param: "agent.container_policy_file",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
            },
            TestData {
                param: "agent.container_policy_file=",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_VALUE)),
            },
            TestData {
                param: "foo=bar",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
            },
            TestData {
                param: "agent.policy_path=/another/absolute/path.json",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_PATH_KEY)),
            },
            TestData {
                param: "agent.container_policy_file=/etc/container/policy.json",
                result: Ok("/etc/container/policy.json".into()),
            },
            TestData {
                param: "agent.container_policy_file=/another/absolute/path.json",
                result: Ok("/another/absolute/path.json".into()),
            },
            TestData {
                param: "agent.container_policy_file=./relative/path.json",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
            },
            TestData {
                param: "agent.container_policy_file=../../relative/path.json",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
            },
            TestData {
                param: "agent.container_policy_file=junk_string",
                result: Err(anyhow!(ERR_INVALID_CONTAINER_POLICY_ABSOLUTE)),
            },
        ];

        for (i, d) in tests.iter().enumerate() {
            let msg = format!("test[{}]: {:?}", i, d);

            let result = get_container_policy_path_value(d.param);

            let msg = format!("{}: result: {:?}", msg, result);

            assert_result!(d.result, result, msg);
        }
    }
    #[test]
    fn test_config_builder_from_string() {
        let config = AgentConfig::from_str(
@@ -1399,4 +1676,50 @@ Caused by:
        // Verify that the default values are valid
        assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
    }

    #[test]
    fn test_config_from_cmdline_and_config_file() {
        let dir = tempdir().expect("failed to create tmpdir");

        let agent_config = r#"
dev_mode = false
server_addr = 'vsock://8:2048'

[endpoints]
allowed = ["CreateContainer", "StartContainer"]
"#;

        let config_path = dir.path().join("agent-config.toml");
        let config_filename = config_path.to_str().expect("failed to get config filename");

        fs::write(config_filename, agent_config).expect("failed to write agent config");

        let cmdline = format!("agent.devmode agent.config_file={}", config_filename);

        let cmdline_path = dir.path().join("cmdline");
        let cmdline_filename = cmdline_path
            .to_str()
            .expect("failed to get cmdline filename");

        fs::write(cmdline_filename, cmdline).expect("failed to write cmdline");

        let config = AgentConfig::from_cmdline(cmdline_filename, vec![])
            .expect("failed to parse command line");

        // Should be overwritten by the cmdline
        assert!(config.dev_mode);

        // Should come from the agent config file
        assert_eq!(config.server_addr, "vsock://8:2048");

        // Should come from the agent config file
        assert_eq!(
            config.endpoints.allowed,
            vec!["CreateContainer".to_string(), "StartContainer".to_string()]
                .iter()
                .cloned()
                .collect()
        );
        assert!(!config.endpoints.all_allowed);
    }
}
@@ -414,7 +414,7 @@ fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {

    // Scan scsi host passing in the channel, SCSI id and LUN.
    // Channel is always 0 because we have only one SCSI controller.
    let scan_data = &format!("0 {} {}", tokens[0], tokens[1]);
    let scan_data = format!("0 {} {}", tokens[0], tokens[1]);

    for entry in fs::read_dir(SYSFS_SCSI_HOST_PATH)? {
        let host = entry?.file_name();
@@ -428,7 +428,7 @@ fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {

        let scan_path = PathBuf::from(&format!("{}/{}/{}", SYSFS_SCSI_HOST_PATH, host_str, "scan"));

        fs::write(scan_path, scan_data)?;
        fs::write(scan_path, &scan_data)?;
    }

    Ok(())
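To make the sysfs protocol concrete, a hedged sketch of what the loop above does (the exact sysfs base path is an assumption):

// Sketch only: for a scsi_addr of "1:2", scan_data is "0 1 2", and the
// loop is equivalent to
//   echo "0 1 2" > /sys/class/scsi_host/host0/scan
//   echo "0 1 2" > /sys/class/scsi_host/host1/scan
// which asks the kernel to probe channel 0, target 1, LUN 2 on each host.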
@@ -571,13 +571,15 @@ fn update_spec_devices(spec: &mut Spec, mut updates: HashMap<&str, DevUpdate>) -

    if let Some(resources) = linux.resources.as_mut() {
        for r in &mut resources.devices {
            if let (Some(host_major), Some(host_minor)) = (r.major, r.minor) {
                if let Some(update) = res_updates.get(&(r.r#type.as_str(), host_major, host_minor))
            if let (Some(host_type), Some(host_major), Some(host_minor)) =
                (r.r#type.as_ref(), r.major, r.minor)
            {
                if let Some(update) = res_updates.get(&(host_type.as_str(), host_major, host_minor))
                {
                    info!(
                        sl!(),
                        "update_spec_devices() updating resource";
                        "type" => &r.r#type,
                        "type" => &host_type,
                        "host_major" => host_major,
                        "host_minor" => host_minor,
                        "guest_major" => update.guest_major,
@@ -854,7 +856,7 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
        allow: false,
        major: Some(major),
        minor: Some(minor),
        r#type: String::from("b"),
        r#type: Some(String::from("b")),
        access: String::from("rw"),
    });

@@ -1017,13 +1019,13 @@ mod tests {
    resources: Some(LinuxResources {
        devices: vec![
            oci::LinuxDeviceCgroup {
                r#type: "c".to_string(),
                r#type: Some("c".to_string()),
                major: Some(host_major_a),
                minor: Some(host_minor_a),
                ..oci::LinuxDeviceCgroup::default()
            },
            oci::LinuxDeviceCgroup {
                r#type: "c".to_string(),
                r#type: Some("c".to_string()),
                major: Some(host_major_b),
                minor: Some(host_minor_b),
                ..oci::LinuxDeviceCgroup::default()
@@ -1116,13 +1118,13 @@ mod tests {
    resources: Some(LinuxResources {
        devices: vec![
            LinuxDeviceCgroup {
                r#type: "c".to_string(),
                r#type: Some("c".to_string()),
                major: Some(host_major),
                minor: Some(host_minor),
                ..LinuxDeviceCgroup::default()
            },
            LinuxDeviceCgroup {
                r#type: "b".to_string(),
                r#type: Some("b".to_string()),
                major: Some(host_major),
                minor: Some(host_minor),
                ..LinuxDeviceCgroup::default()
@@ -1531,7 +1533,7 @@ mod tests {
    pci_driver_override(syspci, dev0, "drv_b").unwrap();
    assert_eq!(fs::read_to_string(&dev0override).unwrap(), "drv_b");
    assert_eq!(fs::read_to_string(&probepath).unwrap(), dev0.to_string());
    assert_eq!(fs::read_to_string(drvaunbind).unwrap(), dev0.to_string());
    assert_eq!(fs::read_to_string(&drvaunbind).unwrap(), dev0.to_string());
}

#[test]
@@ -1543,7 +1545,7 @@ mod tests {
    let dev0 = pci::Address::new(0, 0, pci::SlotFn::new(0, 0).unwrap());
    let dev0path = syspci.join("devices").join(dev0.to_string());

    fs::create_dir_all(dev0path).unwrap();
    fs::create_dir_all(&dev0path).unwrap();

    // Test dev0
    assert!(pci_iommu_group(&syspci, dev0).unwrap().is_none());
@@ -1554,7 +1556,7 @@ mod tests {
    let dev1group = dev1path.join("iommu_group");

    fs::create_dir_all(&dev1path).unwrap();
    std::os::unix::fs::symlink("../../../kernel/iommu_groups/12", dev1group).unwrap();
    std::os::unix::fs::symlink("../../../kernel/iommu_groups/12", &dev1group).unwrap();

    // Test dev1
    assert_eq!(
@@ -1567,7 +1569,7 @@ mod tests {
    let dev2path = syspci.join("devices").join(dev2.to_string());
    let dev2group = dev2path.join("iommu_group");

    fs::create_dir_all(dev2group).unwrap();
    fs::create_dir_all(&dev2group).unwrap();

    // Test dev2
    assert!(pci_iommu_group(&syspci, dev2).is_err());
src/agent/src/image_rpc.rs (new file, 412 lines)
@@ -0,0 +1,412 @@
// Copyright (c) 2021 Alibaba Cloud
//
// SPDX-License-Identifier: Apache-2.0
//

use std::env;
use std::fmt::Write as _;
use std::fs;
use std::path::Path;
use std::process::{Command, ExitStatus};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

use anyhow::{anyhow, ensure, Result};
use async_trait::async_trait;
use protocols::image;
use tokio::sync::Mutex;
use ttrpc::{self, error::get_rpc_status as ttrpc_error};

use crate::rpc::{verify_cid, CONTAINER_BASE};
use crate::sandbox::Sandbox;
use crate::AGENT_CONFIG;

use image_rs::image::ImageClient;
use std::io::Write;

const SKOPEO_PATH: &str = "/usr/bin/skopeo";
const UMOCI_PATH: &str = "/usr/local/bin/umoci";
const IMAGE_OCI: &str = "image_oci";
const AA_PATH: &str = "/usr/local/bin/attestation-agent";
const AA_KEYPROVIDER_PORT: &str = "127.0.0.1:50000";
const AA_GETRESOURCE_PORT: &str = "127.0.0.1:50001";
const OCICRYPT_CONFIG_PATH: &str = "/tmp/ocicrypt_config.json";
// The kata rootfs is read-only; use tmpfs until CC storage is implemented.
const KATA_CC_IMAGE_WORK_DIR: &str = "/run/image/";
const KATA_CC_PAUSE_BUNDLE: &str = "/pause_bundle";
const CONFIG_JSON: &str = "config.json";

// Convenience macro to obtain the scope logger
macro_rules! sl {
    () => {
        slog_scope::logger()
    };
}

pub struct ImageService {
    sandbox: Arc<Mutex<Sandbox>>,
    attestation_agent_started: AtomicBool,
    image_client: Arc<Mutex<ImageClient>>,
}

impl ImageService {
    pub fn new(sandbox: Arc<Mutex<Sandbox>>) -> Self {
        env::set_var("CC_IMAGE_WORK_DIR", KATA_CC_IMAGE_WORK_DIR);
        Self {
            sandbox,
            attestation_agent_started: AtomicBool::new(false),
            image_client: Arc::new(Mutex::new(ImageClient::default())),
        }
    }

    fn pull_image_from_registry(
        image: &str,
        cid: &str,
        source_creds: Option<&str>,
        policy_path: Option<&str>,
        aa_kbc_params: &str,
    ) -> Result<()> {
        let source_image = format!("{}{}", "docker://", image);

        let tmp_cid_path = Path::new("/tmp/").join(cid);
        let oci_path = tmp_cid_path.join(IMAGE_OCI);
        let target_path_oci = format!("oci://{}:latest", oci_path.to_string_lossy());

        fs::create_dir_all(&oci_path)?;

        let mut pull_command = Command::new(SKOPEO_PATH);
        pull_command
            .arg("copy")
            .arg(source_image)
            .arg(&target_path_oci)
            .arg("--remove-signatures"); // umoci requires signatures to be removed

        // If source credentials were passed (so not using an anonymous registry), pass them through
        if let Some(source_creds) = source_creds {
            pull_command.arg("--src-creds").arg(source_creds);
        }

        // If a policy_path was provided, use it; otherwise fall back to allowing all image registries
        if let Some(policy_path) = policy_path {
            pull_command.arg("--policy").arg(policy_path);
        } else {
            info!(
                sl!(),
                "No policy path was supplied, so reverting to allowing all images to be pulled."
            );
            pull_command.arg("--insecure-policy");
        }

        debug!(sl!(), "skopeo command: {:?}", &pull_command);
        if !aa_kbc_params.is_empty() {
            // Skopeo will copy an unencrypted image even if the decryption key argument is provided.
            // Thus, this does not guarantee that the image was encrypted.
            pull_command
                .arg("--decryption-key")
                .arg(format!("provider:attestation-agent:{}", aa_kbc_params))
                .env("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
        }

        let status: ExitStatus = pull_command.status()?;

        if !status.success() {
            let mut error_message = format!("failed to pull image: {:?}", status);

            if let Err(e) = fs::remove_dir_all(&tmp_cid_path) {
                let _ = write!(
                    error_message,
                    " and clean up of temporary container directory {:?} failed with error {:?}",
                    tmp_cid_path, e
                );
            };
            return Err(anyhow!(error_message));
        }
        Ok(())
    }

    fn unpack_image(cid: &str) -> Result<()> {
        let tmp_cid_path = Path::new("/tmp/").join(cid);
        let source_path_oci = tmp_cid_path.join(IMAGE_OCI);

        let target_path_bundle = Path::new(CONTAINER_BASE).join(cid);

        info!(sl!(), "unpack image {:?} to {:?}", cid, target_path_bundle);

        // Unpack image
        let status: ExitStatus = Command::new(UMOCI_PATH)
            .arg("unpack")
            .arg("--image")
            .arg(&source_path_oci)
            .arg(&target_path_bundle)
            .status()?;

        ensure!(status.success(), "failed to unpack image: {:?}", status);

        // To save space, delete the OCI image after unpacking
        fs::remove_dir_all(&tmp_cid_path)?;

        Ok(())
    }

    // The pause image is packaged in the rootfs for CC
    fn unpack_pause_image(cid: &str) -> Result<()> {
        let cc_pause_bundle = Path::new(KATA_CC_PAUSE_BUNDLE);
        if !cc_pause_bundle.exists() {
            return Err(anyhow!("Pause image not present in rootfs"));
        }

        info!(sl!(), "use guest pause image cid {:?}", cid);
        let pause_bundle = Path::new(CONTAINER_BASE).join(&cid);
        let pause_rootfs = pause_bundle.join("rootfs");
        let pause_config = pause_bundle.join(CONFIG_JSON);
        let pause_binary = pause_rootfs.join("pause");
        fs::create_dir_all(&pause_rootfs)?;
        if !pause_config.exists() {
            fs::copy(
                cc_pause_bundle.join(CONFIG_JSON),
                pause_bundle.join(CONFIG_JSON),
            )?;
        }
        if !pause_binary.exists() {
            fs::copy(cc_pause_bundle.join("rootfs").join("pause"), pause_binary)?;
        }

        Ok(())
    }
    // If we fail to start the AA, Skopeo/ocicrypt won't be able to unwrap keys
    // and container decryption will fail.
    fn init_attestation_agent() -> Result<()> {
        let config_path = OCICRYPT_CONFIG_PATH;

        // The image will need to be encrypted using a keyprovider
        // that has the same name (at least according to the config).
        let ocicrypt_config = serde_json::json!({
            "key-providers": {
                "attestation-agent":{
                    "grpc":AA_KEYPROVIDER_PORT
                }
            }
        });

        let mut config_file = fs::File::create(config_path)?;
        config_file.write_all(ocicrypt_config.to_string().as_bytes())?;

        // The Attestation Agent will run for the duration of the guest.
        Command::new(AA_PATH)
            .arg("--keyprovider_sock")
            .arg(AA_KEYPROVIDER_PORT)
            .arg("--getresource_sock")
            .arg(AA_GETRESOURCE_PORT)
            .spawn()?;
        Ok(())
    }
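For reference, a sketch of what the json! literal above serializes to in /tmp/ocicrypt_config.json (shown for illustration only):

// {"key-providers":{"attestation-agent":{"grpc":"127.0.0.1:50000"}}}

ocicrypt resolves the provider named in skopeo's --decryption-key argument (provider:attestation-agent:...) against this file, which is why the two names must match.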
    /// Determines the container id (cid) to use for a given request.
    ///
    /// If the request specifies a non-empty id, use it; otherwise derive it from the image path.
    /// In either case, verify that the chosen id is valid.
    fn cid_from_request(req: &image::PullImageRequest) -> Result<String> {
        let req_cid = req.get_container_id();
        let cid = if !req_cid.is_empty() {
            req_cid.to_string()
        } else if let Some(last) = req.get_image().rsplit('/').next() {
            // ':' has special meaning for umoci during unpack
            last.replace(':', "_")
        } else {
            return Err(anyhow!("Invalid image name. {}", req.get_image()));
        };
        verify_cid(&cid)?;
        Ok(cid)
    }
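A hedged walk-through of the derivation (illustrative image name; the mapping matches the test cases later in this file):

// request.container_id = ""            -> derive from the image path:
// image "docker.io/library/busybox:1.36"
//   rsplit('/')  -> "busybox:1.36"
//   ':' -> '_'   -> cid "busybox_1.36"
// The derived cid must still pass verify_cid().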
    async fn pull_image(&self, req: &image::PullImageRequest) -> Result<String> {
        env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);

        let https_proxy = &AGENT_CONFIG.read().await.https_proxy;
        if !https_proxy.is_empty() {
            env::set_var("HTTPS_PROXY", https_proxy);
        }

        let no_proxy = &AGENT_CONFIG.read().await.no_proxy;
        if !no_proxy.is_empty() {
            env::set_var("NO_PROXY", no_proxy);
        }

        let cid = Self::cid_from_request(req)?;
        let image = req.get_image();
        // Can switch to using cid directly once umoci is removed
        let v: Vec<&str> = image.rsplit('/').collect();
        if !v[0].is_empty() && v[0].starts_with("pause:") {
            Self::unpack_pause_image(&cid)?;

            let mut sandbox = self.sandbox.lock().await;
            sandbox.images.insert(String::from(image), cid);
            return Ok(image.to_owned());
        }

        let aa_kbc_params = &AGENT_CONFIG.read().await.aa_kbc_params;
        if !aa_kbc_params.is_empty() {
            match self.attestation_agent_started.compare_exchange_weak(
                false,
                true,
                Ordering::SeqCst,
                Ordering::SeqCst,
            ) {
                Ok(_) => Self::init_attestation_agent()?,
                Err(_) => info!(sl!(), "Attestation Agent already running"),
            }
        }

        let source_creds = (!req.get_source_creds().is_empty()).then(|| req.get_source_creds());

        if Path::new(SKOPEO_PATH).exists() {
            // Read the policy path from the agent config
            let config_policy_path = &AGENT_CONFIG.read().await.container_policy_path;
            let policy_path = (!config_policy_path.is_empty()).then(|| config_policy_path.as_str());
            Self::pull_image_from_registry(image, &cid, source_creds, policy_path, aa_kbc_params)?;
            Self::unpack_image(&cid)?;
        } else {
            // Read enable_signature_verification from the agent config and set it in the image_client
            let enable_signature_verification =
                &AGENT_CONFIG.read().await.enable_signature_verification;
            info!(
                sl!(),
                "enable_signature_verification set to: {}", enable_signature_verification
            );
            self.image_client.lock().await.config.security_validate =
                *enable_signature_verification;

            let bundle_path = Path::new(CONTAINER_BASE).join(&cid);
            fs::create_dir_all(&bundle_path)?;

            let decrypt_config = format!("provider:attestation-agent:{}", aa_kbc_params);

            info!(sl!(), "pull image {:?}, bundle path {:?}", cid, bundle_path);
            // Image layers are stored under KATA_CC_IMAGE_WORK_DIR; the generated
            // bundles with rootfs and config.json are stored under CONTAINER_BASE/<cid>.
            self.image_client
                .lock()
                .await
                .pull_image(image, &bundle_path, &source_creds, &Some(&decrypt_config))
                .await?;
        }

        let mut sandbox = self.sandbox.lock().await;
        sandbox.images.insert(String::from(image), cid);
        Ok(image.to_owned())
    }
}
#[async_trait]
impl protocols::image_ttrpc_async::Image for ImageService {
    async fn pull_image(
        &self,
        _ctx: &ttrpc::r#async::TtrpcContext,
        req: image::PullImageRequest,
    ) -> ttrpc::Result<image::PullImageResponse> {
        match self.pull_image(&req).await {
            Ok(r) => {
                let mut resp = image::PullImageResponse::new();
                resp.image_ref = r;
                return Ok(resp);
            }
            Err(e) => {
                return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::ImageService;
    use protocols::image;

    #[test]
    fn test_cid_from_request() {
        struct Case {
            cid: &'static str,
            image: &'static str,
            result: Option<&'static str>,
        }

        let cases = [
            Case {
                cid: "",
                image: "",
                result: None,
            },
            Case {
                cid: "..",
                image: "",
                result: None,
            },
            Case {
                cid: "",
                image: "..",
                result: None,
            },
            Case {
                cid: "",
                image: "abc/..",
                result: None,
            },
            Case {
                cid: "",
                image: "abc/",
                result: None,
            },
            Case {
                cid: "",
                image: "../abc",
                result: Some("abc"),
            },
            Case {
                cid: "",
                image: "../9abc",
                result: Some("9abc"),
            },
            Case {
                cid: "some-string.1_2",
                image: "",
                result: Some("some-string.1_2"),
            },
            Case {
                cid: "0some-string.1_2",
                image: "",
                result: Some("0some-string.1_2"),
            },
            Case {
                cid: "a:b",
                image: "",
                result: None,
            },
            Case {
                cid: "",
                image: "prefix/a:b",
                result: Some("a_b"),
            },
            Case {
                cid: "",
                image: "/a/b/c/d:e",
                result: Some("d_e"),
            },
        ];

        for case in &cases {
            let mut req = image::PullImageRequest::new();
            req.set_image(case.image.to_string());
            req.set_container_id(case.cid.to_string());
            let ret = ImageService::cid_from_request(&req);
            match (case.result, ret) {
                (Some(expected), Ok(actual)) => assert_eq!(expected, actual),
                (None, Err(_)) => (),
                (None, Ok(r)) => panic!("Expected an error, got {}", r),
                (Some(expected), Err(e)) => {
                    panic!("Expected {} but got an error ({})", expected, e)
                }
            }
        }
    }
}
@@ -71,6 +71,7 @@ use tokio::{
    task::JoinHandle,
};

mod image_rpc;
mod rpc;
mod tracer;

@@ -339,7 +340,7 @@ async fn start_sandbox(
    sandbox.lock().await.sender = Some(tx);

    // vsock:///dev/vsock, port
    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode)?;
    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str())?;
    server.start().await?;

    rx.await?;
@@ -5,11 +5,10 @@

extern crate procfs;

use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, Opts, Registry, TextEncoder};
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};

use anyhow::{anyhow, Result};
use anyhow::Result;
use slog::warn;
use std::sync::Mutex;
use tracing::instrument;

const NAMESPACE_KATA_AGENT: &str = "kata_agent";
@@ -24,70 +23,55 @@ macro_rules! sl {

lazy_static! {
    static ref REGISTERED: Mutex<bool> = Mutex::new(false);
    static ref AGENT_SCRAPE_COUNT: IntCounter =
        prometheus::register_int_counter!(format!("{}_{}", NAMESPACE_KATA_AGENT, "scrape_count"), "Metrics scrape count").unwrap();

    // custom registry
    static ref REGISTRY: Registry = Registry::new();
    static ref AGENT_THREADS: Gauge =
        prometheus::register_gauge!(format!("{}_{}", NAMESPACE_KATA_AGENT, "threads"), "Agent process threads").unwrap();

    static ref AGENT_SCRAPE_COUNT: IntCounter =
        IntCounter::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "scrape_count"), "Metrics scrape count").unwrap();
    static ref AGENT_TOTAL_TIME: Gauge =
        prometheus::register_gauge!(format!("{}_{}", NAMESPACE_KATA_AGENT, "total_time"), "Agent process total time").unwrap();

    // agent metrics
    static ref AGENT_THREADS: Gauge =
        Gauge::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "threads"), "Agent process threads").unwrap();
    static ref AGENT_TOTAL_VM: Gauge =
        prometheus::register_gauge!(format!("{}_{}", NAMESPACE_KATA_AGENT, "total_vm"), "Agent process total VM size").unwrap();

    static ref AGENT_TOTAL_TIME: Gauge =
        Gauge::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "total_time"), "Agent process total time").unwrap();
    static ref AGENT_TOTAL_RSS: Gauge =
        prometheus::register_gauge!(format!("{}_{}", NAMESPACE_KATA_AGENT, "total_rss"), "Agent process total RSS size").unwrap();

    static ref AGENT_TOTAL_VM: Gauge =
        Gauge::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "total_vm"), "Agent process total VM size").unwrap();
    static ref AGENT_PROC_STATUS: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_AGENT, "proc_status"), "Agent process status.", &["item"]).unwrap();

    static ref AGENT_TOTAL_RSS: Gauge =
        Gauge::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "total_rss"), "Agent process total RSS size").unwrap();
    static ref AGENT_IO_STAT: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_AGENT, "io_stat"), "Agent process IO statistics.", &["item"]).unwrap();

    static ref AGENT_PROC_STATUS: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "proc_status"), "Agent process status."), &["item"]).unwrap();

    static ref AGENT_IO_STAT: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "io_stat"), "Agent process IO statistics."), &["item"]).unwrap();

    static ref AGENT_PROC_STAT: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_AGENT, "proc_stat"), "Agent process statistics."), &["item"]).unwrap();
    static ref AGENT_PROC_STAT: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_AGENT, "proc_stat"), "Agent process statistics.", &["item"]).unwrap();

    // guest os metrics
    static ref GUEST_LOAD: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "load"), "Guest system load."), &["item"]).unwrap();
    static ref GUEST_LOAD: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "load"), "Guest system load.", &["item"]).unwrap();

    static ref GUEST_TASKS: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "tasks"), "Guest system load."), &["item"]).unwrap();
    static ref GUEST_TASKS: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "tasks"), "Guest system load.", &["item"]).unwrap();

    static ref GUEST_CPU_TIME: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "cpu_time"), "Guest CPU statistics."), &["cpu","item"]).unwrap();
    static ref GUEST_CPU_TIME: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "cpu_time"), "Guest CPU statistics.", &["cpu","item"]).unwrap();

    static ref GUEST_VM_STAT: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "vm_stat"), "Guest virtual memory statistics."), &["item"]).unwrap();
    static ref GUEST_VM_STAT: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "vm_stat"), "Guest virtual memory statistics.", &["item"]).unwrap();

    static ref GUEST_NETDEV_STAT: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "netdev_stat"), "Guest net devices statistics."), &["interface","item"]).unwrap();
    static ref GUEST_NETDEV_STAT: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "netdev_stat"), "Guest net devices statistics.", &["interface","item"]).unwrap();

    static ref GUEST_DISKSTAT: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "diskstat"), "Disks statistics in system."), &["disk","item"]).unwrap();
    static ref GUEST_DISKSTAT: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "diskstat"), "Disks statistics in system.", &["disk","item"]).unwrap();

    static ref GUEST_MEMINFO: GaugeVec =
        GaugeVec::new(Opts::new(format!("{}_{}", NAMESPACE_KATA_GUEST, "meminfo"), "Statistics about memory usage in the system."), &["item"]).unwrap();
    static ref GUEST_MEMINFO: GaugeVec =
        prometheus::register_gauge_vec!(format!("{}_{}", NAMESPACE_KATA_GUEST, "meminfo"), "Statistics about memory usage in the system.", &["item"]).unwrap();
}
#[instrument]
pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
    let mut registered = REGISTERED
        .lock()
        .map_err(|e| anyhow!("failed to check agent metrics register status {:?}", e))?;

    if !(*registered) {
        register_metrics()?;
        *registered = true;
    }

    AGENT_SCRAPE_COUNT.inc();

    // update agent process metrics
@@ -97,7 +81,7 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
    update_guest_metrics();

    // gather all metrics and return as a String
    let metric_families = REGISTRY.gather();
    let metric_families = prometheus::gather();

    let mut buffer = Vec::new();
    let encoder = TextEncoder::new();
@@ -106,31 +90,6 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
    Ok(String::from_utf8(buffer)?)
}

#[instrument]
fn register_metrics() -> Result<()> {
    REGISTRY.register(Box::new(AGENT_SCRAPE_COUNT.clone()))?;

    // agent metrics
    REGISTRY.register(Box::new(AGENT_THREADS.clone()))?;
    REGISTRY.register(Box::new(AGENT_TOTAL_TIME.clone()))?;
    REGISTRY.register(Box::new(AGENT_TOTAL_VM.clone()))?;
    REGISTRY.register(Box::new(AGENT_TOTAL_RSS.clone()))?;
    REGISTRY.register(Box::new(AGENT_PROC_STATUS.clone()))?;
    REGISTRY.register(Box::new(AGENT_IO_STAT.clone()))?;
    REGISTRY.register(Box::new(AGENT_PROC_STAT.clone()))?;

    // guest metrics
    REGISTRY.register(Box::new(GUEST_LOAD.clone()))?;
    REGISTRY.register(Box::new(GUEST_TASKS.clone()))?;
    REGISTRY.register(Box::new(GUEST_CPU_TIME.clone()))?;
    REGISTRY.register(Box::new(GUEST_VM_STAT.clone()))?;
    REGISTRY.register(Box::new(GUEST_NETDEV_STAT.clone()))?;
    REGISTRY.register(Box::new(GUEST_DISKSTAT.clone()))?;
    REGISTRY.register(Box::new(GUEST_MEMINFO.clone()))?;

    Ok(())
}
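The pairs above replace a hand-rolled Registry with the prometheus crate's default registry. A minimal sketch of that pattern (the metric name is illustrative, not from the diff):

// The register_* macros construct the metric and register it with the
// default registry in one step, so a separate register_metrics() pass
// (removed above) becomes unnecessary.
let counter = prometheus::register_int_counter!("demo_total", "demo counter").unwrap();
counter.inc();
let families = prometheus::gather(); // gathers from the default registry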
#[instrument]
fn update_agent_metrics() -> Result<()> {
    let me = procfs::process::Process::myself();

@@ -648,7 +648,7 @@ pub fn recursive_ownership_change(
) -> Result<()> {
    let mut mask = if read_only { RO_MASK } else { RW_MASK };
    if path.is_dir() {
        for entry in fs::read_dir(path)? {
        for entry in fs::read_dir(&path)? {
            recursive_ownership_change(entry?.path().as_path(), uid, gid, read_only)?;
        }
        mask |= EXEC_MASK;
@@ -894,7 +894,7 @@ pub fn get_cgroup_mounts(
        }]);
    }

    let file = File::open(cg_path)?;
    let file = File::open(&cg_path)?;
    let reader = BufReader::new(file);

    let mut has_device_cgroup = false;
@@ -1777,7 +1777,7 @@ mod tests {
    let tempdir = tempdir().unwrap();

    let src = if d.mask_src {
        tempdir.path().join(d.src)
        tempdir.path().join(&d.src)
    } else {
        Path::new(d.src).to_path_buf()
    };
@@ -78,7 +78,6 @@ impl Namespace {
    // setup creates persistent namespace without switching to it.
    // Note, pid namespaces cannot be persisted.
    #[instrument]
    #[allow(clippy::question_mark)]
    pub async fn setup(mut self) -> Result<Self> {
        fs::create_dir_all(&self.persistent_ns_dir)?;

@@ -89,7 +88,7 @@ impl Namespace {
        }
        let logger = self.logger.clone();

        let new_ns_path = ns_path.join(ns_type.get());
        let new_ns_path = ns_path.join(&ns_type.get());

        File::create(new_ns_path.as_path())?;

@@ -103,7 +102,7 @@ impl Namespace {
        let source = Path::new(&origin_ns_path);
        let destination = new_ns_path.as_path();

        File::open(source)?;
        File::open(&source)?;

        // Create a new netns on the current thread.
        let cf = ns_type.get_flags();

@@ -946,13 +946,13 @@ mod tests {
    fn clean_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
        // ip link delete dummy
        Command::new("ip")
            .args(["link", "delete", dummy_name])
            .args(&["link", "delete", dummy_name])
            .output()
            .expect("prepare: failed to delete dummy");

        // ip neigh del dev dummy ip
        Command::new("ip")
            .args(["neigh", "del", dummy_name, ip])
            .args(&["neigh", "del", dummy_name, ip])
            .output()
            .expect("prepare: failed to delete neigh");
    }
@@ -967,19 +967,19 @@ mod tests {

        // ip link add dummy type dummy
        Command::new("ip")
            .args(["link", "add", dummy_name, "type", "dummy"])
            .args(&["link", "add", dummy_name, "type", "dummy"])
            .output()
            .expect("failed to add dummy interface");

        // ip addr add 192.168.0.2/16 dev dummy
        Command::new("ip")
            .args(["addr", "add", "192.168.0.2/16", "dev", dummy_name])
            .args(&["addr", "add", "192.168.0.2/16", "dev", dummy_name])
            .output()
            .expect("failed to add ip for dummy");

        // ip link set dummy up;
        Command::new("ip")
            .args(["link", "set", dummy_name, "up"])
            .args(&["link", "set", dummy_name, "up"])
            .output()
            .expect("failed to up dummy");
    }
@@ -1011,7 +1011,7 @@ mod tests {

        // ip neigh show dev dummy ip
        let stdout = Command::new("ip")
            .args(["neigh", "show", "dev", dummy_name, to_ip])
            .args(&["neigh", "show", "dev", dummy_name, to_ip])
            .output()
            .expect("failed to show neigh")
            .stdout;

@@ -64,7 +64,7 @@ fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &st
        .map(|x| x.trim())
        .collect::<Vec<&str>>()
        .join("\n");
    fs::write(src, content)?;
    fs::write(src, &content)?;

    // bind mount to /etc/resolv.conf
    mount::mount(Some(src), dst, Some("bind"), MsFlags::MS_BIND, None::<&str>)
@@ -34,9 +34,12 @@ use protocols::health::{
    HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
};
use protocols::types::Interface;
use protocols::{agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc};
use protocols::{
    agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc,
    image_ttrpc_async as image_ttrpc,
};
use rustjail::cgroups::notifier;
use rustjail::container::{BaseContainer, Container, LinuxContainer, SYSTEMD_CGROUP_PATH_FORMAT};
use rustjail::container::{BaseContainer, Container, LinuxContainer};
use rustjail::process::Process;
use rustjail::specconv::CreateOpts;

@@ -44,11 +47,13 @@ use nix::errno::Errno;
use nix::mount::MsFlags;
use nix::sys::{stat, statfs};
use nix::unistd::{self, Pid};
use rustjail::cgroups::Manager;
use rustjail::process::ProcessOperations;

use crate::device::{
    add_devices, get_virtio_blk_pci_device_name, update_device_cgroup, update_env_pci,
};
use crate::image_rpc;
use crate::linux_abi::*;
use crate::metrics::get_metrics;
use crate::mount::{add_storages, baremount, STORAGE_HANDLER_LIST};
@@ -80,18 +85,16 @@ use std::io::{BufRead, BufReader, Write};
use std::os::unix::fs::FileExt;
use std::path::PathBuf;

const CONTAINER_BASE: &str = "/run/kata-containers";
pub const CONTAINER_BASE: &str = "/run/kata-containers";
const MODPROBE_PATH: &str = "/sbin/modprobe";
const ANNO_K8S_IMAGE_NAME: &str = "io.kubernetes.cri.image-name";
const CONFIG_JSON: &str = "config.json";
const INIT_TRUSTED_STORAGE: &str = "/usr/bin/kata-init-trusted-storage";
const TRUSTED_STORAGE_DEVICE: &str = "/dev/trusted_store";

/// the iptables series binaries could appear either in /sbin
/// or /usr/sbin, so we need to check both of them
const USR_IPTABLES_SAVE: &str = "/usr/sbin/iptables-save";
const IPTABLES_SAVE: &str = "/sbin/iptables-save";
const USR_IPTABLES_RESTORE: &str = "/usr/sbin/iptables-restore";
const IPTABLES_RESTORE: &str = "/sbin/iptables-restore";
const USR_IP6TABLES_SAVE: &str = "/usr/sbin/ip6tables-save";
const IP6TABLES_SAVE: &str = "/sbin/ip6tables-save";
const USR_IP6TABLES_RESTORE: &str = "/usr/sbin/ip6tables-restore";
const IP6TABLES_RESTORE: &str = "/sbin/ip6tables-restore";

const ERR_CANNOT_GET_WRITER: &str = "Cannot get writer";
@@ -137,7 +140,43 @@ macro_rules! is_allowed {
#[derive(Clone, Debug)]
pub struct AgentService {
    sandbox: Arc<Mutex<Sandbox>>,
    init_mode: bool,
}

// A container ID must match this regex:
//
//     ^[a-zA-Z0-9][a-zA-Z0-9_.-]+$
//
pub fn verify_cid(id: &str) -> Result<()> {
    let mut chars = id.chars();

    let valid = match chars.next() {
        Some(first)
            if first.is_alphanumeric()
                && id.len() > 1
                && chars.all(|c| c.is_alphanumeric() || ['.', '-', '_'].contains(&c)) =>
        {
            true
        }
        _ => false,
    };

    match valid {
        true => Ok(()),
        false => Err(anyhow!("invalid container ID: {:?}", id)),
    }
}
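Behavior sketch, mirroring the cid_from_request() test cases earlier in this diff:

// verify_cid("0some-string.1_2") -> Ok(())  (alphanumeric start, len > 1)
// verify_cid("a:b")              -> Err(_)  (':' is outside [a-zA-Z0-9_.-])
// verify_cid("..")               -> Err(_)  (must start with an alphanumeric)
// verify_cid("a")                -> Err(_)  (the pattern requires length >= 2)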
// Partially merge an OCI process specification into another one.
fn merge_oci_process(target: &mut oci::Process, source: &oci::Process) {
    if target.args.is_empty() && !source.args.is_empty() {
        target.args.append(&mut source.args.clone());
    }

    if target.cwd.is_empty() && !source.cwd.is_empty() {
        target.cwd = String::from(&source.cwd);
    }

    target.env.append(&mut source.env.clone());
}
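A small sketch of the merge semantics (illustrative values only):

// target: args = [],         cwd = "",  env = ["A=1"]
// source: args = ["/pause"], cwd = "/", env = ["B=2"]
// after merge_oci_process(&mut target, &source):
// target: args = ["/pause"], cwd = "/", env = ["A=1", "B=2"]
// args and cwd are only taken when the target's are empty; env entries
// are always appended.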
impl AgentService {
@@ -170,6 +209,9 @@ impl AgentService {
            "receive createcontainer, storages: {:?}", &req.storages
        );

        // Merge the image bundle OCI spec into the container creation request OCI spec.
        self.merge_bundle_oci(&mut oci).await?;

        // Some devices need some extra processing (the ones invoked with
        // --device for instance), and that's what this call is doing. It
        // updates the devices listed in the OCI spec, so that they actually
@@ -177,6 +219,30 @@ impl AgentService {
        // cannot predict everything from the caller.
        add_devices(&req.devices.to_vec(), &mut oci, &self.sandbox).await?;

        let linux = oci
            .linux
            .as_mut()
            .ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;

        for specdev in &mut linux.devices {
            let dev_major_minor = format!("{}:{}", specdev.major, specdev.minor);

            if specdev.path == TRUSTED_STORAGE_DEVICE {
                let data_integrity = AGENT_CONFIG.read().await.data_integrity;
                info!(
                    sl!(),
                    "trusted_store device major:minor {}, enable data integrity {}",
                    dev_major_minor,
                    data_integrity.to_string()
                );

                Command::new(INIT_TRUSTED_STORAGE)
                    .args(&[&dev_major_minor, &data_integrity.to_string()])
                    .output()
                    .expect("Failed to initialize confidential storage");
            }
        }

        // Both rootfs and volumes (invoked with --volume for instance) will
        // be processed the same way. The idea is to always mount any provided
        // storage to the specified MountPoint, so that it will match what's
@@ -211,20 +277,9 @@ impl AgentService {
        // restore the cwd for the kata-agent process.
        defer!(unistd::chdir(&olddir).unwrap());

        // determine which cgroup driver to use and assign it to use_systemd_cgroup
        // systemd: "[slice]:[prefix]:[name]"
        // fs: "/path_a/path_b"
        // If the agent is init we can't use systemd cgroup mode, no matter what the host tells us
        let cgroups_path = oci.linux.as_ref().map_or("", |linux| &linux.cgroups_path);
        let use_systemd_cgroup = if self.init_mode {
            false
        } else {
            SYSTEMD_CGROUP_PATH_FORMAT.is_match(cgroups_path)
        };

        let opts = CreateOpts {
            cgroup_name: "".to_string(),
            use_systemd_cgroup,
            use_systemd_cgroup: false,
            no_pivot_root: s.no_pivot_root,
            no_new_keyring: false,
            spec: Some(oci.clone()),
@@ -283,13 +338,14 @@ impl AgentService {
        }

        // start oom event loop
        if let Some(ref ctr) = ctr.cgroup_manager {
            let cg_path = ctr.get_cg_path("memory");

        let cg_path = ctr.cgroup_manager.as_ref().get_cgroup_path("memory");
            if let Some(cg_path) = cg_path {
                let rx = notifier::notify_oom(cid.as_str(), cg_path.to_string()).await?;

        if let Ok(cg_path) = cg_path {
            let rx = notifier::notify_oom(cid.as_str(), cg_path.to_string()).await?;

                s.run_oom_event_monitor(rx, cid.clone()).await;
            s.run_oom_event_monitor(rx, cid.clone()).await;
            }
        }

        Ok(())
@@ -393,7 +449,6 @@ impl AgentService {
            "signal process";
            "container-id" => cid.clone(),
            "exec-id" => eid.clone(),
            "signal" => req.signal,
        );

        let mut sig: libc::c_int = req.signal as libc::c_int;
@@ -476,7 +531,11 @@ impl AgentService {
        let ctr = sandbox
            .get_container(cid)
            .ok_or_else(|| anyhow!("Invalid container id {}", cid))?;
        ctr.cgroup_manager.as_ref().freeze(state)?;
        let cm = ctr
            .cgroup_manager
            .as_ref()
            .ok_or_else(|| anyhow!("cgroup manager does not exist"))?;
        cm.freeze(state)?;
        Ok(())
    }

@@ -486,7 +545,11 @@ impl AgentService {
        let ctr = sandbox
            .get_container(cid)
            .ok_or_else(|| anyhow!("Invalid container id {}", cid))?;
        let pids = ctr.cgroup_manager.as_ref().get_pids()?;
        let cm = ctr
            .cgroup_manager
            .as_ref()
            .ok_or_else(|| anyhow!("cgroup manager does not exist"))?;
        let pids = cm.get_pids()?;
        Ok(pids)
    }

@@ -638,6 +701,54 @@ impl AgentService {
            }
        }
    }

    // When being passed an image name through a container annotation, merge its
    // corresponding bundle OCI specification into the passed container creation one.
    async fn merge_bundle_oci(&self, container_oci: &mut oci::Spec) -> Result<()> {
        if let Some(image_name) = container_oci
            .annotations
            .get(&ANNO_K8S_IMAGE_NAME.to_string())
        {
            if let Some(container_id) = self.sandbox.clone().lock().await.images.get(image_name) {
                let image_oci_config_path = Path::new(CONTAINER_BASE)
                    .join(container_id)
                    .join(CONFIG_JSON);
                debug!(
                    sl!(),
                    "Image bundle config path: {:?}", image_oci_config_path
                );

                let image_oci =
                    oci::Spec::load(image_oci_config_path.to_str().ok_or_else(|| {
                        anyhow!(
                            "Invalid container image OCI config path {:?}",
                            image_oci_config_path
                        )
                    })?)
                    .context("load image bundle")?;

                if let Some(container_root) = container_oci.root.as_mut() {
                    if let Some(image_root) = image_oci.root.as_ref() {
                        let root_path = Path::new(CONTAINER_BASE)
                            .join(container_id)
                            .join(image_root.path.clone());
                        container_root.path =
                            String::from(root_path.to_str().ok_or_else(|| {
                                anyhow!("Invalid container image root path {:?}", root_path)
                            })?);
                    }
                }

                if let Some(container_process) = container_oci.process.as_mut() {
                    if let Some(image_process) = image_oci.process.as_ref() {
                        merge_oci_process(container_process, image_process);
                    }
                }
            }
        }

        Ok(())
    }
}
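Putting merge_bundle_oci() together, a hypothetical end-to-end flow (values are illustrative, derived from the code above):

// 1. CreateContainer arrives with the annotation
//      io.kubernetes.cri.image-name = "registry.example/pause:3.6"
// 2. A prior PullImage registered that image under cid "pause_3.6",
//    so its bundle lives at /run/kata-containers/pause_3.6/config.json.
// 3. The container spec's root.path is rewritten to
//      /run/kata-containers/pause_3.6/<image root.path>
//    and the bundle's process args/cwd/env are merged in via
//    merge_oci_process().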
#[async_trait]
@@ -1007,18 +1118,8 @@ impl agent_ttrpc::AgentService for AgentService {

        info!(sl!(), "get_ip_tables: request received");

        // the binary could exist in either /usr/sbin or /sbin;
        // check both places and return the one that exists.
        // If neither exists, return the /sbin one, and the RPC
        // will return an internal error.
        let cmd = if req.is_ipv6 {
            if Path::new(USR_IP6TABLES_SAVE).exists() {
                USR_IP6TABLES_SAVE
            } else {
                IP6TABLES_SAVE
            }
        } else if Path::new(USR_IPTABLES_SAVE).exists() {
            USR_IPTABLES_SAVE
            IP6TABLES_SAVE
        } else {
            IPTABLES_SAVE
        }
@@ -1046,18 +1147,8 @@ impl agent_ttrpc::AgentService for AgentService {

        info!(sl!(), "set_ip_tables request received");

        // the binary could exist in both /usr/sbin and /sbin;
        // check both places and return the one that exists.
        // If neither exists, return the /sbin one, and the RPC
        // will return an internal error.
        let cmd = if req.is_ipv6 {
            if Path::new(USR_IP6TABLES_RESTORE).exists() {
                USR_IP6TABLES_RESTORE
            } else {
                IP6TABLES_RESTORE
            }
        } else if Path::new(USR_IPTABLES_RESTORE).exists() {
            USR_IPTABLES_RESTORE
            IP6TABLES_RESTORE
        } else {
            IPTABLES_RESTORE
        }
@@ -1685,25 +1776,28 @@ async fn read_stream(reader: Arc<Mutex<ReadHalf<PipeStream>>>, l: usize) -> Resu
    Ok(content)
}

pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str, init_mode: bool) -> Result<TtrpcServer> {
    let agent_service = Box::new(AgentService {
        sandbox: s,
        init_mode,
    }) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;

pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> Result<TtrpcServer> {
    let agent_service = Box::new(AgentService { sandbox: s.clone() })
        as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
    let agent_worker = Arc::new(agent_service);

    let health_service = Box::new(HealthService {}) as Box<dyn health_ttrpc::Health + Send + Sync>;
    let health_worker = Arc::new(health_service);

    let image_service =
        Box::new(image_rpc::ImageService::new(s)) as Box<dyn image_ttrpc::Image + Send + Sync>;

    let aservice = agent_ttrpc::create_agent_service(agent_worker);

    let hservice = health_ttrpc::create_health(health_worker);

    let iservice = image_ttrpc::create_image(Arc::new(image_service));

    let server = TtrpcServer::new()
        .bind(server_address)?
        .register_service(aservice)
        .register_service(hservice);
        .register_service(hservice)
        .register_service(iservice);

    info!(sl!(), "ttRPC server started"; "address" => server_address);
@@ -1909,6 +2003,38 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {

    std::fs::set_permissions(&dir, std::fs::Permissions::from_mode(req.dir_mode))?;

    let sflag = stat::SFlag::from_bits_truncate(req.file_mode);

    if sflag.contains(stat::SFlag::S_IFDIR) {
        fs::create_dir(path.clone()).or_else(|e| {
            if e.kind() != std::io::ErrorKind::AlreadyExists {
                return Err(e);
            }
            Ok(())
        })?;

        std::fs::set_permissions(path.clone(), std::fs::Permissions::from_mode(req.file_mode))?;

        unistd::chown(
            &path,
            Some(Uid::from_raw(req.uid as u32)),
            Some(Gid::from_raw(req.gid as u32)),
        )?;

        return Ok(());
    }

    if sflag.contains(stat::SFlag::S_IFLNK) {
        let src = PathBuf::from(String::from_utf8(req.data.clone()).unwrap());

        unistd::symlinkat(&src, None, &path)?;
        let path_str = CString::new(path.to_str().unwrap())?;
        let ret = unsafe { libc::lchown(path_str.as_ptr(), req.uid as u32, req.gid as u32) };
        Errno::result(ret).map(drop)?;

        return Ok(());
    }

    let mut tmpfile = path.clone();
    tmpfile.set_extension("tmp");

@@ -1974,18 +2100,26 @@ pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
    let spec_root_path = Path::new(&spec_root.path);

    let bundle_path = Path::new(CONTAINER_BASE).join(cid);
    let config_path = bundle_path.join("config.json");
    let config_path = bundle_path.join(CONFIG_JSON);
    let rootfs_path = bundle_path.join("rootfs");

    fs::create_dir_all(&rootfs_path)?;
    baremount(
        spec_root_path,
        &rootfs_path,
        "bind",
        MsFlags::MS_BIND,
        "",
        &sl!(),
    )?;
    let rootfs_exists = Path::new(&rootfs_path).exists();
    info!(
        sl!(),
        "The rootfs_path is {:?} and exists: {}", rootfs_path, rootfs_exists
    );

    if !rootfs_exists {
        fs::create_dir_all(&rootfs_path)?;
        baremount(
            spec_root_path,
            &rootfs_path,
            "bind",
            MsFlags::MS_BIND,
            "",
            &sl!(),
        )?;
    }

    let rootfs_path_name = rootfs_path
        .to_str()
@@ -2063,11 +2197,6 @@ mod tests {
    use tempfile::{tempdir, TempDir};
    use test_utils::{assert_result, skip_if_not_root};
    use ttrpc::{r#async::TtrpcContext, MessageHeader};
    use which::which;

    fn check_command(cmd: &str) -> bool {
        which(cmd).is_ok()
    }

    fn mk_ttrpc_context() -> TtrpcContext {
        TtrpcContext {
@@ -2165,7 +2294,6 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let req = protocols::agent::UpdateInterfaceRequest::default();
@@ -2183,7 +2311,6 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let req = protocols::agent::UpdateRoutesRequest::default();
@@ -2201,7 +2328,6 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let req = protocols::agent::AddARPNeighborsRequest::default();
@@ -2335,7 +2461,6 @@ mod tests {

        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let result = agent_service
@@ -2791,32 +2916,10 @@ OtherField:other
    async fn test_ip_tables() {
        skip_if_not_root!();

        let iptables_cmd_list = [
            USR_IPTABLES_SAVE,
            USR_IP6TABLES_SAVE,
            USR_IPTABLES_RESTORE,
            USR_IP6TABLES_RESTORE,
            IPTABLES_SAVE,
            IP6TABLES_SAVE,
            IPTABLES_RESTORE,
            IP6TABLES_RESTORE,
        ];

        for cmd in iptables_cmd_list {
            if !check_command(cmd) {
                warn!(
                    sl!(),
                    "one or more commands for ip tables test are missing, skip it"
                );
                return;
            }
        }

        let logger = slog::Logger::root(slog::Discard, o!());
        let sandbox = Sandbox::new(&logger).unwrap();
        let agent_service = Box::new(AgentService {
            sandbox: Arc::new(Mutex::new(sandbox)),
            init_mode: true,
        });

        let ctx = mk_ttrpc_context();
@@ -2922,7 +3025,7 @@ COMMIT
        .unwrap();
        assert!(!result.data.is_empty(), "we should have non-zero output:");
        assert!(
            std::str::from_utf8(&result.data).unwrap().contains(
            std::str::from_utf8(&*result.data).unwrap().contains(
                "PREROUTING -d 192.168.103.153/32 -j DNAT --to-destination 192.168.188.153"
            ),
            "We should see the resulting rule"
@@ -2960,7 +3063,7 @@ COMMIT
        .unwrap();
        assert!(!result.data.is_empty(), "we should have non-zero output:");
        assert!(
            std::str::from_utf8(&result.data)
            std::str::from_utf8(&*result.data)
                .unwrap()
                .contains("INPUT -s 2001:db8:100::1/128 -i sit+ -p tcp -m tcp --sport 512:65535"),
            "We should see the resulting rule"

@@ -60,6 +60,7 @@ pub struct Sandbox {
    pub event_tx: Option<Sender<String>>,
    pub bind_watcher: BindWatcher,
    pub pcimap: HashMap<pci::Address, pci::Address>,
    pub images: HashMap<String, String>,
}
impl Sandbox {
@@ -93,6 +94,7 @@ impl Sandbox {
        event_tx: Some(tx),
        bind_watcher: BindWatcher::new(),
        pcimap: HashMap::new(),
        images: HashMap::new(),
    })
}

@@ -296,6 +298,7 @@ impl Sandbox {
    info!(self.logger, "updating {}", ctr.id.as_str());
    ctr.cgroup_manager
        .as_ref()
        .unwrap()
        .update_cpuset_path(guest_cpuset.as_str(), container_cpust)?;
}

@@ -1072,7 +1075,7 @@ mod tests {
    fs::create_dir(&subdir_path).unwrap();
    for file in j.files {
        let subfile_path = format!("{}/{}", subdir_path, file.name);
        let mut subfile = File::create(subfile_path).unwrap();
        let mut subfile = File::create(&subfile_path).unwrap();
        subfile.write_all(file.content.as_bytes()).unwrap();
    }
}

@@ -24,7 +24,7 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
loop {
    // Avoid reaping the undesirable child's signal, e.g., execute_hook's.
    // The lock should be released immediately.
    rustjail::container::WAIT_PID_LOCKER.lock().await;
    let _ = rustjail::container::WAIT_PID_LOCKER.lock().await;
    let result = wait::waitpid(
        Some(Pid::from_raw(-1)),
        Some(WaitPidFlag::WNOHANG | WaitPidFlag::__WALL),

@@ -124,7 +124,7 @@ impl Storage {

    // if we are creating a directory: just create it, nothing more to do
    if metadata.file_type().is_dir() {
        let dest_file_path = self.make_target_path(source_file_path)?;
        let dest_file_path = self.make_target_path(&source_file_path)?;

        fs::create_dir_all(&dest_file_path)
            .await
@@ -152,7 +152,7 @@ impl Storage {
        // Assume target mount is a file path
        self.target_mount_point.clone()
    } else {
        let dest_file_path = self.make_target_path(source_file_path)?;
        let dest_file_path = self.make_target_path(&source_file_path)?;

        if let Some(path) = dest_file_path.parent() {
            debug!(logger, "Creating destination directory: {}", path.display());
@@ -778,7 +778,7 @@ mod tests {
        22
    );
    assert_eq!(
        fs::read_to_string(entries.0[0].target_mount_point.as_path().join("1.txt")).unwrap(),
        fs::read_to_string(&entries.0[0].target_mount_point.as_path().join("1.txt")).unwrap(),
        "updated"
    );

@@ -823,7 +823,7 @@ mod tests {
        2
    );
    assert_eq!(
        fs::read_to_string(entries.0[1].target_mount_point.as_path().join("foo.txt")).unwrap(),
        fs::read_to_string(&entries.0[1].target_mount_point.as_path().join("foo.txt")).unwrap(),
        "updated"
    );

@@ -1000,7 +1000,7 @@ mod tests {

    // create a path we'll remove later
    fs::create_dir_all(source_dir.path().join("tmp")).unwrap();
    fs::write(source_dir.path().join("tmp/test-file"), "foo").unwrap();
    fs::write(&source_dir.path().join("tmp/test-file"), "foo").unwrap();
    assert_eq!(entry.scan(&logger).await.unwrap(), 3); // root, ./tmp, test-file

    // Verify expected directory, file:
@@ -1291,7 +1291,6 @@ mod tests {

    #[tokio::test]
    #[serial]
    #[cfg(not(target_arch = "aarch64"))]
    async fn create_tmpfs() {
        skip_if_not_root!();
|
||||
|
||||
|
||||
src/dragonball/Cargo.lock (generated, 1850 lines): file diff suppressed because it is too large.
@@ -12,15 +12,15 @@ edition = "2018"
[dependencies]
arc-swap = "1.5.0"
bytes = "1.1.0"
dbs-address-space = "0.2.0"
dbs-address-space = "0.1.0"
dbs-allocator = "0.1.0"
dbs-arch = "0.2.0"
dbs-boot = "0.3.0"
dbs-device = "0.2.0"
dbs-interrupt = { version = "0.2.0", features = ["kvm-irq"] }
dbs-arch = "0.1.0"
dbs-boot = "0.2.0"
dbs-device = "0.1.0"
dbs-interrupt = { version = "0.1.0", features = ["kvm-irq"] }
dbs-legacy-devices = "0.1.0"
dbs-upcall = { version = "0.1.0", optional = true }
dbs-utils = "0.2.0"
dbs-utils = "0.1.0"
dbs-virtio-devices = { version = "0.1.0", optional = true, features = ["virtio-mmio"] }
kvm-bindings = "0.5.0"
kvm-ioctls = "0.11.0"
@@ -36,7 +36,7 @@ serde_json = "1.0.9"
slog = "2.5.2"
slog-scope = "4.4.0"
thiserror = "1"
vmm-sys-util = "0.11.0"
vmm-sys-util = "0.10.0"
virtio-queue = { version = "0.4.0", optional = true }
vm-memory = { version = "0.9.0", features = ["backend-mmap"] }

@@ -54,3 +54,14 @@ virtio-blk = ["dbs-virtio-devices/virtio-blk", "virtio-queue"]
virtio-net = ["dbs-virtio-devices/virtio-net", "virtio-queue"]
# virtio-fs only works on atomic-guest-memory
virtio-fs = ["dbs-virtio-devices/virtio-fs", "virtio-queue", "atomic-guest-memory"]

[patch.'crates-io']
dbs-device = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-interrupt = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-legacy-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-upcall = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-utils = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-virtio-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-boot = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-arch = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }
dbs-address-space = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "c3d7831aee7c3962b8a90f0afbfd0fb7e4d30323" }

@@ -16,9 +16,6 @@ build:
@echo "INFO: cargo build..."
cargo build --all-features --target $(TRIPLE)

static-checks-build:
@echo "INFO: static-checks-build do nothing.."

check: clippy format

clippy:

@@ -19,7 +19,6 @@ and configuration process.
Device: [Device Document](docs/device.md)
vCPU: [vCPU Document](docs/vcpu.md)
API: [API Document](docs/api.md)
`Upcall`: [`Upcall` Document](docs/upcall.md)

Currently, documents are still being actively added.
You can see the [official documentation](docs/) page for more details.

@@ -1,177 +0,0 @@
[Deleted SVG diagram, 12 KiB: the `upcall` architecture. A guest-kernel "Upcall Server" (socket → bind → listen → accept → new kthread, with a per-connection Port and Service handler) talks over virtio-vsocket and the hypervisor-side virtio-vsocket backend to the "Device Manager Service" / "Device Manager Backend" pair and further service pairs ("Service B" / "Backend B"), each with its own Conn.]
@@ -1,30 +0,0 @@
# `Upcall`

## What is `Upcall`?

`Upcall` is a direct communication tool between the VMM and the guest, built on top of `vsock`. The server side of `upcall` is a driver in the guest kernel (kernel patches are needed for this feature), and it starts serving requests once the kernel boots. The client side lives in the Dragonball VMM and runs as a thread that communicates with `vsock` through `uds`.

The `upcall` implementation is designed to keep the VM lightweight.



## What can `upcall` do?

We define specific operations in the device manager service (one of the services built on `upcall`) to perform device hotplug / hot-unplug, including vCPU hotplug, `virtio-mmio` device hotplug, and memory hotplug. Performing hotplug / hot-unplug directly through `upcall` avoids virtualizing ACPI and minimizes virtual machine overhead. Many other uses become possible once additional services are implemented; a sketch of the vCPU-resize path follows below.
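For a sense of how this surfaces in the VMM's public API, below is a hedged sketch built from types visible elsewhere in this diff (`VmmAction::ResizeVcpu`, `VcpuResizeInfo`, `VmmService::new`, `VmmData::Empty`). The module path, the channel payload types, and the `vcpu_count` field are assumptions for illustration only.

```rust
// Sketch only; the import path and the request/response wrappers around the
// channels are assumed, and simplified, for illustration.
use dragonball::api::v1::{VcpuResizeInfo, VmmAction, VmmService};
use std::sync::mpsc::channel;

fn request_vcpu_hotplug() {
    // Channel wiring mirrors the VmmService unit tests shown in this diff.
    let (to_vmm, from_api) = channel();
    let (to_api, from_vmm) = channel();
    let _vservice = VmmService::new(from_api, to_api);

    // Ask the VMM to resize the guest to 2 vCPUs; the request is served by
    // the upcall server in the guest, so no ACPI emulation is involved.
    // `vcpu_count` is a hypothetical field name.
    to_vmm
        .send(VmmAction::ResizeVcpu(VcpuResizeInfo { vcpu_count: Some(2) }))
        .unwrap();

    // Once the VMM event loop handles the request, a success response
    // (VmmData::Empty) would arrive here.
    let _response = from_vmm.recv().unwrap();
}
```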

## How to enable `upcall`?

`Upcall` needs a server in the guest kernel, delivered as several kernel patches for the `upcall` server itself and for the different services registered with it. It is currently tested on upstream Linux kernel 5.10.

To make this easy to use, we have open-sourced the `upcall` guest patches in [Dragonball experimental guest patches](../../../tools/packaging/kernel/patches/5.10.x/dragonball-experimental) and added `upcall` support to the [Kata guest kernel building script](../../../tools/packaging/kernel/build-kernel.sh).

You can use the following command to download the upstream kernel (currently Dragonball uses 5.10.25) and apply the `upcall` patches and the other Kata patches to the kernel source:

`sh build-kernel.sh -e -t dragonball -f setup`

- `-e` means experimental, mainly because the `upcall` patches are not yet in the upstream Linux kernel.
- `-t dragonball` specifies the hypervisor type.
- `-f` generates the `.config` file.

After this command, the kernel source with `upcall` and the related `.config` file are set up in the directory `kata-linux-dragonball-experimental-5.10.25-[config version]`. You can either compile the kernel manually with `make` or follow the [build-kernel.sh documentation](../../../tools/packaging/kernel/README.md) to build and use this guest kernel.

A client side is also needed in the VMM. Dragonball's `upcall` client implementation is open source, and compiling Dragonball with the `dbs-upcall` feature enables it.
@@ -250,11 +250,6 @@ impl AddressSpaceMgr {
self.address_space.as_ref()
}

/// Get the guest memory.
pub fn vm_memory(&self) -> Option<<GuestAddressSpaceImpl as GuestAddressSpace>::T> {
self.get_vm_as().map(|m| m.memory())
}

/// Create the address space for a virtual machine.
///
/// This method is designed to be called when starting up a virtual machine instead of at
@@ -406,9 +401,9 @@ impl AddressSpaceMgr {
let flags = 0u32;

let mem_region = kvm_userspace_memory_region {
slot,
slot: slot as u32,
guest_phys_addr: reg.start_addr().raw_value(),
memory_size: reg.len(),
memory_size: reg.len() as u64,
userspace_addr: host_addr as u64,
flags,
};
@@ -426,7 +421,7 @@ impl AddressSpaceMgr {
self.base_to_slot
.lock()
.unwrap()
.insert(reg.start_addr().raw_value(), slot);
.insert(reg.start_addr().raw_value(), slot as u32);

Ok(())
}

@@ -35,9 +35,6 @@ pub use crate::device_manager::virtio_net_dev_mgr::{
#[cfg(feature = "virtio-vsock")]
pub use crate::device_manager::vsock_dev_mgr::{VsockDeviceConfigInfo, VsockDeviceError};

#[cfg(feature = "hotplug")]
pub use crate::vcpu::{VcpuResizeError, VcpuResizeInfo};

use super::*;

/// Wrapper for all errors associated with VMM actions.
@@ -47,13 +44,9 @@ pub enum VmmActionError {
#[error("the virtual machine instance ID is invalid")]
InvalidVMID,

/// VM doesn't exist and can't get VM information.
#[error("VM doesn't exist and can't get VM information")]
VmNotExist,

/// Failed to hotplug, due to Upcall not ready.
#[error("Upcall not ready, can't hotplug device.")]
UpcallServerNotReady,
UpcallNotReady,

/// The action `ConfigureBootSource` failed either because of bad user input or an internal
/// error.
@@ -92,11 +85,6 @@ pub enum VmmActionError {
/// The action `InsertFsDevice` failed either because of bad user input or an internal error.
#[error("virtio-fs device error: {0}")]
FsDevice(#[source] FsDeviceError),

#[cfg(feature = "hotplug")]
/// The action `ResizeVcpu` Failed
#[error("vcpu resize error : {0}")]
ResizeVcpu(#[source] VcpuResizeError),
}

/// This enum represents the public interface of the VMM. Each action contains various
@@ -168,10 +156,6 @@ pub enum VmmAction {
#[cfg(feature = "virtio-fs")]
/// Update fs rate limiter, after microVM start.
UpdateFsDevice(FsDeviceConfigUpdateInfo),

#[cfg(feature = "hotplug")]
/// Resize Vcpu number in the guest.
ResizeVcpu(VcpuResizeInfo),
}

/// The enum represents the response sent by the VMM in case of success. The response is either
@@ -272,8 +256,6 @@ impl VmmService {
VmmAction::UpdateFsDevice(fs_update_cfg) => {
self.update_fs_rate_limiters(vmm, fs_update_cfg)
}
#[cfg(feature = "hotplug")]
VmmAction::ResizeVcpu(vcpu_resize_cfg) => self.resize_vcpu(vmm, vcpu_resize_cfg),
};

debug!("send vmm response: {:?}", response);
@@ -424,10 +406,19 @@ impl VmmService {
}
config.vpmu_feature = machine_config.vpmu_feature;

// If serial_path is:
// - None, legacy_manager will create_stdio_console.
// - Some(path), legacy_manager will create_socket_console on that path.
config.serial_path = machine_config.serial_path;
let vm_id = vm.shared_info().read().unwrap().id.clone();
let serial_path = match machine_config.serial_path {
Some(value) => value,
None => {
if config.serial_path.is_none() {
String::from("/run/dragonball/") + &vm_id + "_com1"
} else {
// Safe to unwrap() because we have checked it has a value.
config.serial_path.as_ref().unwrap().clone()
}
}
};
config.serial_path = Some(serial_path);

vm.set_vm_config(config.clone());
self.machine_config = config;
@@ -480,8 +471,8 @@ impl VmmService {
let ctx = vm
.create_device_op_context(Some(event_mgr.epoll_manager()))
.map_err(|e| {
if let StartMicroVmError::UpcallServerNotReady = e {
return VmmActionError::UpcallServerNotReady;
if let StartMicroVmError::UpcallNotReady = e {
return VmmActionError::UpcallNotReady;
}
VmmActionError::Block(BlockDeviceError::UpdateNotAllowedPostBoot)
})?;
@@ -536,8 +527,8 @@ impl VmmService {
.map_err(|e| {
if let StartMicroVmError::MicroVMAlreadyRunning = e {
VmmActionError::VirtioNet(VirtioNetDeviceError::UpdateNotAllowedPostBoot)
} else if let StartMicroVmError::UpcallServerNotReady = e {
VmmActionError::UpcallServerNotReady
} else if let StartMicroVmError::UpcallNotReady = e {
VmmActionError::UpcallNotReady
} else {
VmmActionError::StartMicroVm(e)
}
@@ -613,37 +604,6 @@ impl VmmService {
.map(|_| VmmData::Empty)
.map_err(VmmActionError::FsDevice)
}

#[cfg(feature = "hotplug")]
fn resize_vcpu(&mut self, vmm: &mut Vmm, config: VcpuResizeInfo) -> VmmRequestResult {
if !cfg!(target_arch = "x86_64") {
// TODO: Arm needs to support vcpu hotplug. issue: #6010
warn!("This arch do not support vm resize!");
return Ok(VmmData::Empty);
}

if !cfg!(feature = "dbs-upcall") {
warn!("We only support cpu resize through upcall server in the guest kernel now, please enable dbs-upcall feature.");
return Ok(VmmData::Empty);
}

let vm = vmm.get_vm_mut().ok_or(VmmActionError::VmNotExist)?;

if !vm.is_vm_initialized() {
return Err(VmmActionError::ResizeVcpu(
VcpuResizeError::UpdateNotAllowedPreBoot,
));
}

vm.resize_vcpu(config, None).map_err(|e| {
if let VcpuResizeError::UpcallServerNotReady = e {
return VmmActionError::UpcallServerNotReady;
}
VmmActionError::ResizeVcpu(e)
})?;

Ok(VmmData::Empty)
}
}

fn handle_cpu_topology(
@@ -705,10 +665,10 @@ mod tests {
let (to_vmm, from_api) = channel();
let (to_api, from_vmm) = channel();

let epoll_mgr = EpollManager::default();
let vmm = Arc::new(Mutex::new(create_vmm_instance(epoll_mgr.clone())));
let vmm = Arc::new(Mutex::new(create_vmm_instance()));
let mut vservice = VmmService::new(from_api, to_api);

let epoll_mgr = EpollManager::default();
let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
let mut v = vmm.lock().unwrap();

@@ -730,9 +690,9 @@ mod tests {

let (_to_vmm, from_api) = channel();
let (to_api, _from_vmm) = channel();
let epoll_mgr = EpollManager::default();
let vmm = Arc::new(Mutex::new(create_vmm_instance(epoll_mgr.clone())));
let vmm = Arc::new(Mutex::new(create_vmm_instance()));
let mut vservice = VmmService::new(from_api, to_api);
let epoll_mgr = EpollManager::default();
let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
let mut v = vmm.lock().unwrap();

@@ -744,9 +704,9 @@ mod tests {
fn test_vmm_action_disconnected() {
let (to_vmm, from_api) = channel();
let (to_api, _from_vmm) = channel();
let epoll_mgr = EpollManager::default();
let vmm = Arc::new(Mutex::new(create_vmm_instance(epoll_mgr.clone())));
let vmm = Arc::new(Mutex::new(create_vmm_instance()));
let mut vservice = VmmService::new(from_api, to_api);
let epoll_mgr = EpollManager::default();
let mut event_mgr = EventManager::new(&vmm, epoll_mgr).unwrap();
let mut v = vmm.lock().unwrap();

@@ -595,17 +595,23 @@ impl DeviceManager {
.map_err(|_| StartMicroVmError::EventFd)?;

info!(self.logger, "init console path: {:?}", com1_sock_path);

if let Some(legacy_manager) = self.legacy_manager.as_ref() {
let com1 = legacy_manager.get_com1_serial();
if let Some(path) = com1_sock_path {
self.con_manager
.create_socket_console(com1, path)
.map_err(StartMicroVmError::DeviceManager)?;
} else {
self.con_manager
.create_stdio_console(com1)
.map_err(StartMicroVmError::DeviceManager)?;
// Currently, the `com1_sock_path` "stdio" is only reserved for creating the stdio console
if path != "stdio" {
let com1 = legacy_manager.get_com1_serial();
self.con_manager
.create_socket_console(com1, path)
.map_err(StartMicroVmError::DeviceManager)?;
return Ok(());
}
}

let com1 = legacy_manager.get_com1_serial();
self.con_manager
.create_stdio_console(com1)
.map_err(StartMicroVmError::DeviceManager)?;
}

Ok(())
@@ -1005,170 +1011,3 @@ impl DeviceManager {
}
}
}

#[cfg(test)]
mod tests {
use std::sync::{Arc, Mutex};

use kvm_ioctls::Kvm;
use test_utils::skip_if_not_root;
use vm_memory::{GuestAddress, MmapRegion};

use super::*;
use crate::vm::CpuTopology;

impl DeviceManager {
pub fn new_test_mgr() -> Self {
let kvm = Kvm::new().unwrap();
let vm = kvm.create_vm().unwrap();
let vm_fd = Arc::new(vm);
let epoll_manager = EpollManager::default();
let res_manager = Arc::new(ResourceManager::new(None));
let logger = slog_scope::logger().new(slog::o!());

DeviceManager {
vm_fd: Arc::clone(&vm_fd),
con_manager: ConsoleManager::new(epoll_manager, &logger),
io_manager: Arc::new(ArcSwap::new(Arc::new(IoManager::new()))),
io_lock: Arc::new(Mutex::new(())),
irq_manager: Arc::new(KvmIrqManager::new(vm_fd.clone())),
res_manager,

legacy_manager: None,
#[cfg(feature = "virtio-blk")]
block_manager: BlockDeviceMgr::default(),
#[cfg(feature = "virtio-fs")]
fs_manager: Arc::new(Mutex::new(FsDeviceMgr::default())),
#[cfg(feature = "virtio-net")]
virtio_net_manager: VirtioNetDeviceMgr::default(),
#[cfg(feature = "virtio-vsock")]
vsock_manager: VsockDeviceMgr::default(),
#[cfg(target_arch = "aarch64")]
mmio_device_info: HashMap::new(),

logger,
}
}
}

#[test]
fn test_create_device_manager() {
skip_if_not_root!();
let mgr = DeviceManager::new_test_mgr();
let _ = mgr.io_manager();
}

#[cfg(target_arch = "x86_64")]
#[test]
fn test_create_devices() {
skip_if_not_root!();
use crate::vm::VmConfigInfo;

let epoll_manager = EpollManager::default();
let vmm = Arc::new(Mutex::new(crate::vmm::tests::create_vmm_instance(
epoll_manager.clone(),
)));
let event_mgr = crate::event_manager::EventManager::new(&vmm, epoll_manager).unwrap();
let mut vm = crate::vm::tests::create_vm_instance();
let vm_config = VmConfigInfo {
vcpu_count: 1,
max_vcpu_count: 1,
cpu_pm: "off".to_string(),
mem_type: "shmem".to_string(),
mem_file_path: "".to_string(),
mem_size_mib: 16,
serial_path: None,
cpu_topology: CpuTopology {
threads_per_core: 1,
cores_per_die: 1,
dies_per_socket: 1,
sockets: 1,
},
vpmu_feature: 0,
};
vm.set_vm_config(vm_config);
vm.init_guest_memory().unwrap();
vm.setup_interrupt_controller().unwrap();
let vm_as = vm.vm_as().cloned().unwrap();
let kernel_temp_file = vmm_sys_util::tempfile::TempFile::new().unwrap();
let kernel_file = kernel_temp_file.into_file();
let mut cmdline = crate::vm::KernelConfigInfo::new(
kernel_file,
None,
linux_loader::cmdline::Cmdline::new(0x1000),
);

let address_space = vm.vm_address_space().cloned();
let mgr = vm.device_manager_mut();
let guard = mgr.io_manager.load();
let mut lcr = [0u8];
// 0x3f8 is the address of the serial device
guard.pio_read(0x3f8 + 3, &mut lcr).unwrap_err();
assert_eq!(lcr[0], 0x0);

mgr.create_interrupt_manager().unwrap();
mgr.create_devices(
vm_as,
event_mgr.epoll_manager(),
&mut cmdline,
None,
None,
address_space.as_ref(),
)
.unwrap();
let guard = mgr.io_manager.load();
guard.pio_read(0x3f8 + 3, &mut lcr).unwrap();
assert_eq!(lcr[0], 0x3);
}

#[cfg(feature = "virtio-fs")]
#[test]
fn test_handler_insert_region() {
skip_if_not_root!();

use dbs_virtio_devices::VirtioRegionHandler;
use lazy_static::__Deref;
use vm_memory::{GuestAddressSpace, GuestMemory, GuestMemoryRegion};

let vm = crate::test_utils::tests::create_vm_for_test();
let ctx = DeviceOpContext::new(
Some(vm.epoll_manager().clone()),
vm.device_manager(),
Some(vm.vm_as().unwrap().clone()),
vm.vm_address_space().cloned(),
true,
);
let guest_addr = GuestAddress(0x200000000000);

let cache_len = 1024 * 1024 * 1024;
let mmap_region = MmapRegion::build(
None,
cache_len as usize,
libc::PROT_NONE,
libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE,
)
.unwrap();

let guest_mmap_region =
Arc::new(vm_memory::GuestRegionMmap::new(mmap_region, guest_addr).unwrap());

let mut handler = DeviceVirtioRegionHandler {
vm_as: ctx.get_vm_as().unwrap(),
address_space: ctx.address_space.as_ref().unwrap().clone(),
};
handler.insert_region(guest_mmap_region).unwrap();
let mut find_region = false;
let find_region_ptr = &mut find_region;

let guard = vm.vm_as().unwrap().clone().memory();

let mem = guard.deref();
for region in mem.iter() {
if region.start_addr() == guest_addr && region.len() == cache_len {
*find_region_ptr = true;
}
}

assert!(find_region);
}
}

@@ -100,7 +100,7 @@ pub enum StartMicroVmError {

/// Upcall is not ready
#[error("the upcall client is not ready")]
UpcallServerNotReady,
UpcallNotReady,

/// Configuration passed in is invalid.
#[error("invalid virtual machine configuration: {0} ")]

@@ -228,7 +228,7 @@ mod tests {
assert!(c.max_memslots >= 32);

let kvm = Kvm::new().unwrap();
let f = std::mem::ManuallyDrop::new(unsafe { File::from_raw_fd(kvm.as_raw_fd()) });
let f = unsafe { File::from_raw_fd(kvm.as_raw_fd()) };
let m1 = f.metadata().unwrap();
let m2 = File::open("/dev/kvm").unwrap().metadata().unwrap();

@@ -34,9 +34,6 @@ pub mod vm;

mod event_manager;
mod io_manager;

mod test_utils;

mod vmm;

pub use self::error::StartMicroVmError;

@@ -420,7 +420,6 @@ impl ResourceManager {
}

/// Allocate requested resources for a device.
#[allow(clippy::question_mark)]
pub fn allocate_device_resources(
&self,
requests: &[ResourceConstraint],
@@ -436,7 +435,10 @@ impl ResourceManager {
constraint.max = r.1 as u64;
}
match self.allocate_pio_address(&constraint) {
Some(base) => Resource::PioAddressRange { base, size: *size },
Some(base) => Resource::PioAddressRange {
base: base as u16,
size: *size,
},
None => {
if let Err(e) = self.free_device_resources(&resources) {
return Err(e);

@@ -41,7 +41,7 @@ extern "C" fn sigsys_handler(num: c_int, info: *mut siginfo_t, _unused: *mut c_v
let si_code = unsafe { (*info).si_code };

// Sanity check. The condition should never be true.
if num != si_signo || num != SIGSYS || si_code != SYS_SECCOMP_CODE {
if num != si_signo || num != SIGSYS || si_code != SYS_SECCOMP_CODE as i32 {
// Safe because we're terminating the process anyway.
unsafe { _exit(i32::from(super::EXIT_CODE_UNEXPECTED_ERROR)) };
}

@@ -1,47 +0,0 @@
// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

#[cfg(test)]
pub mod tests {
use crate::api::v1::InstanceInfo;
use crate::vm::{CpuTopology, KernelConfigInfo, Vm, VmConfigInfo};
use dbs_utils::epoll_manager::EpollManager;
use linux_loader::cmdline::Cmdline;
use std::sync::{Arc, RwLock};
use vmm_sys_util::tempfile::TempFile;

pub fn create_vm_for_test() -> Vm {
// Call for kvm too frequently would cause error in some host kernel.
let instance_info = Arc::new(RwLock::new(InstanceInfo::default()));
let epoll_manager = EpollManager::default();
let mut vm = Vm::new(None, instance_info, epoll_manager).unwrap();
let kernel_file = TempFile::new().unwrap();
let cmd_line = Cmdline::new(64);
vm.set_kernel_config(KernelConfigInfo::new(
kernel_file.into_file(),
None,
cmd_line,
));

let vm_config = VmConfigInfo {
vcpu_count: 1,
max_vcpu_count: 1,
cpu_pm: "off".to_string(),
mem_type: "shmem".to_string(),
mem_file_path: "".to_string(),
mem_size_mib: 1,
serial_path: None,
cpu_topology: CpuTopology {
threads_per_core: 1,
cores_per_die: 1,
dies_per_socket: 1,
sockets: 1,
},
vpmu_feature: 0,
};
vm.set_vm_config(vm_config);
vm.init_guest_memory().unwrap();
vm
}
}
@@ -10,10 +10,7 @@ mod vcpu_manager;
#[cfg(target_arch = "x86_64")]
use dbs_arch::cpuid::VpmuFeatureLevel;

pub use vcpu_manager::{VcpuManager, VcpuManagerError, VcpuResizeInfo};

#[cfg(feature = "hotplug")]
pub use vcpu_manager::VcpuResizeError;
pub use vcpu_manager::{VcpuManager, VcpuManagerError};

/// vcpu config collection
pub struct VcpuConfig {

Some files were not shown because too many files have changed in this diff.