Mirror of https://github.com/kata-containers/kata-containers.git, synced 2026-02-22 14:54:23 +00:00

Compare commits: 608 commits, 2.2.0 ... 2.4.0-alph
[Commit table omitted: of the 608 listed commits, only the SHA1 column survived extraction; the Author and Date columns were empty.]
.github/workflows/kata-deploy-push.yaml (vendored): 20 changed lines
```
@@ -1,6 +1,6 @@
name: kata-deploy-build
name: kata deploy build

on: push
on: [push, pull_request]

jobs:
build-asset:
@@ -9,6 +9,7 @@ jobs:
matrix:
asset:
- kernel
- kernel-experimental
- shim-v2
- qemu
- cloud-hypervisor
@@ -24,7 +25,7 @@ jobs:

- name: Build ${{ matrix.asset }}
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-binaries-in-docker.sh --build="${KATA_ASSET}"
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
sudo cp -r --preserve=all "${build_dir}" "kata-build"
@@ -47,12 +48,21 @@ jobs:
uses: actions/download-artifact@v2
with:
name: kata-artifacts
path: kata-artifacts
path: build
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
make merge-builds
- name: store-artifacts
uses: actions/upload-artifact@v2
with:
name: kata-static-tarball
path: kata-static.tar.xz

make-kata-tarball:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: make kata-tarball
run: |
make kata-tarball
sudo make install-tarball
```
.github/workflows/kata-deploy-test.yaml (vendored): 147 changed lines
```
@@ -5,60 +5,121 @@ on:
name: test-kata-deploy

jobs:
check_comments:
if: ${{ github.event.issue.pull_request }}
check-comment-and-membership:
runs-on: ubuntu-latest
if: |
github.event.issue.pull_request
&& github.event_name == 'issue_comment'
&& github.event.action == 'created'
&& startsWith(github.event.comment.body, '/test_kata_deploy')
steps:
- name: Check for Command
id: command
uses: kata-containers/slash-command-action@v1
- name: Check membership
uses: kata-containers/is-organization-member@1.0.1
id: is_organization_member
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
command: "test_kata_deploy"
reaction: "true"
reaction-type: "eyes"
allow-edits: "false"
permission-level: admin
- name: verify command arg is kata-deploy
organization: kata-containers
username: ${{ github.event.comment.user.login }}
token: ${{ secrets.GITHUB_TOKEN }}
- name: Fail if not member
run: |
echo "The command was '${{ steps.command.outputs.command-name }}' with arguments '${{ steps.command.outputs.command-arguments }}'"
result=${{ steps.is_organization_member.outputs.result }}
if [ $result == false ]; then
user=${{ github.event.comment.user.login }}
echo Either ${user} is not part of the kata-containers organization
echo or ${user} has its Organization Visibility set to Private at
echo https://github.com/orgs/kata-containers/people?query=${user}
echo
echo Ensure you change your Organization Visibility to Public and
echo trigger the test again.
exit 1
fi

create-and-test-container:
needs: check_comments
build-asset:
runs-on: ubuntu-latest
needs: check-comment-and-membership
strategy:
matrix:
asset:
- cloud-hypervisor
- firecracker
- kernel
- qemu
- rootfs-image
- rootfs-initrd
- shim-v2
steps:
- name: get-PR-ref
id: get-PR-ref
- uses: actions/checkout@v2
- name: Install docker
run: |
ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
echo "reference for PR: " ${ref}
echo "##[set-output name=pr-ref;]${ref}"
curl -fsSL https://test.docker.com -o test-docker.sh
sh test-docker.sh

- name: check out
uses: actions/checkout@v2
with:
ref: ${{ steps.get-PR-ref.outputs.pr-ref }}

- name: build-container-image
id: build-container-image
- name: Build ${{ matrix.asset }}
run: |
PR_SHA=$(git log --format=format:%H -n1)
VERSION="2.0.0"
ARTIFACT_URL="https://github.com/kata-containers/kata-containers/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
wget "${ARTIFACT_URL}" -O tools/packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} -t quay.io/kata-containers/kata-deploy-ci:${PR_SHA} ./tools/packaging/kata-deploy
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
docker push katadocker/kata-deploy-ci:$PR_SHA
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$PR_SHA
echo "##[set-output name=pr-sha;]${PR_SHA}"

- name: test-kata-deploy-ci-in-aks
uses: ./tools/packaging/kata-deploy/action
with:
packaging-sha: ${{ steps.build-container-image.outputs.pr-sha }}
make "${KATA_ASSET}-tarball"
build_dir=$(readlink -f build)
# store-artifact does not work with symlink
sudo cp -r "${build_dir}" "kata-build"
env:
PKG_SHA: ${{ steps.build-container-image.outputs.pr-sha }}
KATA_ASSET: ${{ matrix.asset }}
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz

- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@v2
with:
name: kata-artifacts
path: kata-build/kata-static-${{ matrix.asset }}.tar.xz
if-no-files-found: error

create-kata-tarball:
runs-on: ubuntu-latest
needs: build-asset
steps:
- uses: actions/checkout@v2
- name: get-artifacts
uses: actions/download-artifact@v2
with:
name: kata-artifacts
path: kata-artifacts
- name: merge-artifacts
run: |
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
- name: store-artifacts
uses: actions/upload-artifact@v2
with:
name: kata-static-tarball
path: kata-static.tar.xz

kata-deploy:
needs: create-kata-tarball
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: get-kata-tarball
uses: actions/download-artifact@v2
with:
name: kata-static-tarball
- name: build-and-push-kata-deploy-ci
id: build-and-push-kata-deploy-ci
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
pushd $GITHUB_WORKSPACE
git checkout $tag
pkg_sha=$(git rev-parse HEAD)
popd
mv kata-static.tar.xz $GITHUB_WORKSPACE/tools/packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha $GITHUB_WORKSPACE/tools/packaging/kata-deploy
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
mkdir -p packaging/kata-deploy
ln -s $GITHUB_WORKSPACE/tools/packaging/kata-deploy/action packaging/kata-deploy/action
echo "::set-output name=PKG_SHA::${pkg_sha}"
- name: test-kata-deploy-ci-in-aks
uses: ./packaging/kata-deploy/action
with:
packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
env:
PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
```
.github/workflows/main.yaml (vendored): 295 changed lines
```
@@ -1,295 +0,0 @@
name: Publish release tarball
on:
push:
tags:
- '1.*'

jobs:
get-artifact-list:
runs-on: ubuntu-latest
steps:
- name: get the list
run: |
pushd $GITHUB_WORKSPACE
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
git checkout $tag
popd
$GITHUB_WORKSPACE/tools/packaging/artifact-list.sh > artifact-list.txt
- name: save-artifact-list
uses: actions/upload-artifact@master
with:
name: artifact-list
path: artifact-list.txt

build-kernel:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_kernel"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- run: |
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
- name: build-kernel
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-kernel.tar.gz

build-experimental-kernel:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_experimental_kernel"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- run: |
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
- name: build-experimental-kernel
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-experimental-kernel.tar.gz

build-qemu:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_qemu"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-qemu
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-qemu.tar.gz

# Job for building the image
build-image:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_image"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-image
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-image.tar.gz

# Job for building firecracker hypervisor
build-firecracker:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_firecracker"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-firecracker
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-firecracker.tar.gz

# Job for building cloud-hypervisor
build-clh:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_clh"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-clh
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-clh.tar.gz

# Job for building kata components
build-kata-components:
runs-on: ubuntu-16.04
needs: get-artifact-list
env:
buildstr: "install_kata_components"
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
with:
name: artifact-list
- name: build-kata-components
run: |
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
echo "artifact-built=true" >> $GITHUB_ENV
else
echo "artifact-built=false" >> $GITHUB_ENV
fi
- name: store-artifacts
if: ${{ env.artifact-built }} == 'true'
uses: actions/upload-artifact@master
with:
name: kata-artifacts
path: kata-static-kata-components.tar.gz

gather-artifacts:
runs-on: ubuntu-16.04
needs: [build-experimental-kernel, build-kernel, build-qemu, build-image, build-firecracker, build-kata-components, build-clh]
steps:
- uses: actions/checkout@v1
- name: get-artifacts
uses: actions/download-artifact@master
with:
name: kata-artifacts
- name: colate-artifacts
run: |
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
- name: store-artifacts
uses: actions/upload-artifact@master
with:
name: release-candidate
path: kata-static.tar.xz

kata-deploy:
needs: gather-artifacts
runs-on: ubuntu-latest
steps:
- name: get-artifacts
uses: actions/download-artifact@master
with:
name: release-candidate
- name: build-and-push-kata-deploy-ci
id: build-and-push-kata-deploy-ci
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
git clone https://github.com/kata-containers/packaging
pushd packaging
git checkout $tag
pkg_sha=$(git rev-parse HEAD)
popd
mv release-candidate/kata-static.tar.xz ./packaging/kata-deploy/kata-static.tar.xz
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha -t quay.io/kata-containers/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
docker push katadocker/kata-deploy-ci:$pkg_sha
docker login -u ${{ secrets.QUAY_DEPLOYER_USERNAME }} -p ${{ secrets.QUAY_DEPLOYER_PASSWORD }} quay.io
docker push quay.io/kata-containers/kata-deploy-ci:$pkg_sha
echo "::set-output name=PKG_SHA::${pkg_sha}"
- name: test-kata-deploy-ci-in-aks
uses: ./packaging/kata-deploy/action
with:
packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
env:
PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
- name: push-tarball
run: |
# tag the container image we created and push to DockerHub
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
docker push katadocker/kata-deploy:${tag}

upload-static-tarball:
needs: kata-deploy
runs-on: ubuntu-latest
steps:
- name: download-artifacts
uses: actions/download-artifact@master
with:
name: release-candidate
- name: install hub
run: |
HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
wget -q -O- https://github.com/github/hub/releases/download/v$HUB_VER/hub-linux-amd64-$HUB_VER.tgz | \
tar xz --strip-components=2 --wildcards '*/bin/hub' && sudo mv hub /usr/local/bin/hub
- name: push static tarball to github
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
tarball="kata-static-$tag-x86_64.tar.xz"
repo="https://github.com/kata-containers/runtime.git"
mv release-candidate/kata-static.tar.xz "release-candidate/${tarball}"
git clone "${repo}"
cd runtime
echo "uploading asset '${tarball}' to '${repo}' tag: ${tag}"
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "../release-candidate/${tarball}" "${tag}"
```
.github/workflows/release.yaml (vendored): 58 changed lines
```
@@ -100,10 +100,14 @@ jobs:
run: |
# tag the container image we created and push to DockerHub
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
docker tag quay.io/kata-containers/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} quay.io/kata-containers/kata-deploy:${tag}
docker push katadocker/kata-deploy:${tag}
docker push quay.io/kata-containers/kata-deploy:${tag}
tags=($tag)
tags+=($([[ "$tag" =~ "alpha"|"rc" ]] && echo "latest" || echo "stable"))
for tag in ${tags[@]}; do \
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag} && \
docker tag quay.io/kata-containers/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} quay.io/kata-containers/kata-deploy:${tag} && \
docker push katadocker/kata-deploy:${tag} && \
docker push quay.io/kata-containers/kata-deploy:${tag}; \
done

upload-static-tarball:
needs: kata-deploy
@@ -127,3 +131,49 @@ jobs:
pushd $GITHUB_WORKSPACE
echo "uploading asset '${tarball}' for tag: ${tag}"
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
popd

upload-cargo-vendored-tarball:
needs: upload-static-tarball
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: generate-and-upload-tarball
run: |
pushd $GITHUB_WORKSPACE/src/agent
cargo vendor >> .cargo/config
popd
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
tarball="kata-containers-$tag-vendor.tar.gz"
pushd $GITHUB_WORKSPACE
tar -cvzf "${tarball}" src/agent/.cargo/config src/agent/vendor
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
popd

upload-libseccomp-tarball:
needs: upload-cargo-vendored-tarball
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: download-and-upload-tarball
env:
GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }}
GOPATH: ${HOME}/go
run: |
pushd $GITHUB_WORKSPACE
./ci/install_yq.sh
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
versions_yaml="versions.yaml"
version=$(${GOPATH}/bin/yq read ${versions_yaml} "externals.libseccomp.version")
repo_url=$(${GOPATH}/bin/yq read ${versions_yaml} "externals.libseccomp.url")
download_url="${repo_url}/releases/download/v${version}"
tarball="libseccomp-${version}.tar.gz"
asc="${tarball}.asc"
curl -sSLO "${download_url}/${tarball}"
curl -sSLO "${download_url}/${asc}"
# "-m" option should be empty to re-use the existing release title
# without opening a text editor.
# For the details, check https://hub.github.com/hub-release.1.html.
hub release edit -m "" -a "${tarball}" "${tag}"
hub release edit -m "" -a "${asc}" "${tag}"
popd
```
```
@@ -12,8 +12,7 @@ on:
- reopened
- labeled
- unlabeled
pull_request:
branches:
branches:
- main

jobs:
@@ -32,8 +31,6 @@ jobs:

- name: Checkout code to allow hub to communicate with the project
uses: actions/checkout@v2
with:
token: ${{ secrets.KATA_GITHUB_ACTIONS_TOKEN }}

- name: Install porting checker script
run: |
```
.github/workflows/static-checks.yaml (vendored): 16 changed lines
```
@@ -13,7 +13,7 @@ jobs:
test:
strategy:
matrix:
go-version: [1.15.x, 1.16.x]
go-version: [1.16.x, 1.17.x]
os: [ubuntu-20.04]
runs-on: ${{ matrix.os }}
env:
@@ -60,13 +60,21 @@ jobs:
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/setup.sh
env:
GOPATH: ${{ runner.workspace }}/kata-containers
- name: Building rust
- name: Installing rust
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_rust.sh
PATH=$PATH:"$HOME/.cargo/bin"
rustup target add x86_64-unknown-linux-musl
rustup component add rustfmt clippy
- name: Setup seccomp
run: |
libseccomp_install_dir=$(mktemp -d -t libseccomp.XXXXXXXXXX)
gperf_install_dir=$(mktemp -d -t gperf.XXXXXXXXXX)
cd ${GOPATH}/src/github.com/${{ github.repository }} && ./ci/install_libseccomp.sh "${libseccomp_install_dir}" "${gperf_install_dir}"
echo "Set environment variables for the libseccomp crate to link the libseccomp library statically"
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
# Check whether the vendored code is up-to-date & working as the first thing
- name: Check vendored code
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
@@ -84,3 +92,7 @@ jobs:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && make test
- name: Run Unit Tests As Root User
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
run: |
cd ${GOPATH}/src/github.com/${{ github.repository }} && sudo -E PATH="$PATH" make test
```
Makefile: 24 changed lines
```
@@ -17,9 +17,15 @@ TOOLS += agent-ctl

STANDARD_TARGETS = build check clean install test vendor

include utils.mk
default: all

all: build
all: logging-crate-tests build

logging-crate-tests:
	make -C pkg/logging

include utils.mk
include ./tools/packaging/kata-deploy/local-build/Makefile

# Create the rules
$(eval $(call create_all_rules,$(COMPONENTS),$(TOOLS),$(STANDARD_TARGETS)))
@@ -33,10 +39,10 @@ generate-protocols:
static-checks: build
	bash ci/static-checks.sh

binary-tarball:
	make -f ./tools/packaging/kata-deploy/local-build/Makefile

install-binary-tarball:
	make -f ./tools/packaging/kata-deploy/local-build/Makefile install

.PHONY: all default static-checks binary-tarball install-binary-tarball
.PHONY: \
	all \
	binary-tarball \
	default \
	install-binary-tarball \
	logging-crate-tests \
	static-checks
```
```
@@ -1,30 +0,0 @@
#!/bin/bash
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Check there are no os.Exit() calls creeping into the code
# We don't use that exit path in the Kata codebase.

# Allow the path to check to be over-ridden.
# Default to the current directory.
go_packages=${1:-.}

echo "Checking for no os.Exit() calls for package [${go_packages}]"

candidates=`go list -f '{{.Dir}}/*.go' $go_packages`
for f in $candidates; do
    filename=`basename $f`
    # skip all go test files
    [[ $filename == *_test.go ]] && continue
    # skip exit.go where, the only file we should call os.Exit() from.
    [[ $filename == "exit.go" ]] && continue
    files="$f $files"
done

[ -z "$files" ] && echo "No files to check, skipping" && exit 0

if egrep -n '\<os\.Exit\>' $files; then
    echo "Direct calls to os.Exit() are forbidden, please use exit() so atexit() works"
    exit 1
fi
```
ci/install_libseccomp.sh (new executable file): 109 lines
```
@@ -0,0 +1,109 @@
#!/bin/bash
#
# Copyright 2021 Sony Group Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit

cidir=$(dirname "$0")
source "${cidir}/lib.sh"

clone_tests_repo

source "${tests_repo_dir}/.ci/lib.sh"

# The following variables if set on the environment will change the behavior
# of gperf and libseccomp configure scripts, that may lead this script to
# fail. So let's ensure they are unset here.
unset PREFIX DESTDIR

arch=$(uname -m)
workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"

# Variables for libseccomp
# Currently, specify the libseccomp version directly without using `versions.yaml`
# because the current Snap workflow is incomplete.
# After solving the issue, replace this code by using the `versions.yaml`.
# libseccomp_version=$(get_version "externals.libseccomp.version")
# libseccomp_url=$(get_version "externals.libseccomp.url")
libseccomp_version="2.5.1"
libseccomp_url="https://github.com/seccomp/libseccomp"
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
libseccomp_tarball_url="${libseccomp_url}/releases/download/v${libseccomp_version}/${libseccomp_tarball}"
cflags="-O2"

# Variables for gperf
# Currently, specify the gperf version directly without using `versions.yaml`
# because the current Snap workflow is incomplete.
# After solving the issue, replace this code by using the `versions.yaml`.
# gperf_version=$(get_version "externals.gperf.version")
# gperf_url=$(get_version "externals.gperf.url")
gperf_version="3.1"
gperf_url="https://ftp.gnu.org/gnu/gperf"
gperf_tarball="gperf-${gperf_version}.tar.gz"
gperf_tarball_url="${gperf_url}/${gperf_tarball}"

# We need to build the libseccomp library from sources to create a static library for the musl libc.
# However, ppc64le and s390x have no musl targets in Rust. Hence, we do not set cflags for the musl libc.
if ([ "${arch}" != "ppc64le" ] && [ "${arch}" != "s390x" ]); then
    # Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2
    cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2"
fi

die() {
    msg="$*"
    echo "[Error] ${msg}" >&2
    exit 1
}

finish() {
    rm -rf "${workdir}"
}

trap finish EXIT

build_and_install_gperf() {
    echo "Build and install gperf version ${gperf_version}"
    mkdir -p "${gperf_install_dir}"
    curl -sLO "${gperf_tarball_url}"
    tar -xf "${gperf_tarball}"
    pushd "gperf-${gperf_version}"
    ./configure --prefix="${gperf_install_dir}"
    make
    make install
    export PATH=$PATH:"${gperf_install_dir}"/bin
    popd
    echo "Gperf installed successfully"
}

build_and_install_libseccomp() {
    echo "Build and install libseccomp version ${libseccomp_version}"
    mkdir -p "${libseccomp_install_dir}"
    curl -sLO "${libseccomp_tarball_url}"
    tar -xf "${libseccomp_tarball}"
    pushd "libseccomp-${libseccomp_version}"
    ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static
    make
    make install
    popd
    echo "Libseccomp installed successfully"
}

main() {
    local libseccomp_install_dir="${1:-}"
    local gperf_install_dir="${2:-}"

    if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then
        die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
    fi

    pushd "$workdir"
    # gperf is required for building the libseccomp.
    build_and_install_gperf
    build_and_install_libseccomp
    popd
}

main "$@"
```
```
@@ -12,5 +12,5 @@ source "${cidir}/lib.sh"
clone_tests_repo

pushd ${tests_repo_dir}
.ci/install_rust.sh
.ci/install_rust.sh ${1:-}
popd
```
```
@@ -4,6 +4,6 @@
#
# This is the build root image for Kata Containers on OpenShift CI.
#
FROM centos:8
FROM registry.centos.org/centos:8

RUN yum -y update && yum -y install git sudo wget
```
```
@@ -8,10 +8,14 @@
set -e
cidir=$(dirname "$0")
source "${cidir}/lib.sh"
export CI_JOB="${CI_JOB:-}"

clone_tests_repo

pushd ${tests_repo_dir}
.ci/run.sh
tracing/test-agent-shutdown.sh
# temporary fix, see https://github.com/kata-containers/tests/issues/3878
if [ "$(uname -m)" != "s390x" ] && [ "$CI_JOB" == "CRI_CONTAINERD_K8S_MINIMAL" ]; then
    tracing/test-agent-shutdown.sh
fi
popd
```
@@ -86,6 +86,16 @@ One of the `initrd` and `image` options in Kata runtime config file **MUST** be
The main difference between the options is that the size of `initrd`(10MB+) is significantly smaller than
rootfs `image`(100MB+).

## Enable seccomp

Enable seccomp as follows:

```
$ sudo sed -i '/^disable_guest_seccomp/ s/true/false/' /etc/kata-containers/configuration.toml
```

This will pass container seccomp profiles to the kata agent.

## Enable full debug

Enable full debug as follows:
@@ -216,6 +226,18 @@ $ go get -d -u github.com/kata-containers/kata-containers
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/src/agent && make
```

The agent is built with seccomp capability by default.
If you want to build the agent without the seccomp capability, you need to run `make` with `SECCOMP=no` as follows.

```
$ make -C $GOPATH/src/github.com/kata-containers/kata-containers/src/agent SECCOMP=no
```

> **Note:**
>
> - If you enable seccomp in the main configuration file but build the agent without seccomp capability,
>   the runtime exits conservatively with an error message.

## Get the osbuilder

```
@@ -234,9 +256,21 @@ the following example.
$ export ROOTFS_DIR=${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs
$ sudo rm -rf ${ROOTFS_DIR}
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
$ script -fec 'sudo -E GOPATH=$GOPATH USE_DOCKER=true ./rootfs.sh ${distro}'
```

You MUST choose a distribution (e.g., `ubuntu`) for `${distro}`.
You can get a supported distributions list in the Kata Containers by running the following.

```
$ ./rootfs.sh -l
```

If you want to build the agent without seccomp capability, you need to run the `rootfs.sh` script with `SECCOMP=no` as follows.

```
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
```
You MUST choose one of `alpine`, `centos`, `clearlinux`, `debian`, `euleros`, `fedora`, `suse`, and `ubuntu` for `${distro}`. By default `seccomp` packages are not included in the rootfs image. Set `SECCOMP` to `yes` to include them.

> **Note:**
>
@@ -272,6 +306,7 @@ $ script -fec 'sudo -E USE_DOCKER=true ./image_builder.sh ${ROOTFS_DIR}'
> - If you do *not* wish to build under Docker, remove the `USE_DOCKER`
>   variable in the previous command and ensure the `qemu-img` command is
>   available on your system.
> - If `qemu-img` is not installed, you will likely see errors such as `ERROR: File /dev/loop19p1 is not a block device` and `losetup: /tmp/tmp.bHz11oY851: Warning: file is smaller than 512 bytes; the loop device may be useless or invisible for system tools`. These can be mitigated by installing the `qemu-img` command (available in the `qemu-img` package on Fedora or the `qemu-utils` package on Debian).

### Install the rootfs image
@@ -290,12 +325,23 @@ $ (cd /usr/share/kata-containers && sudo ln -sf "$image" kata-containers.img)
$ export ROOTFS_DIR="${GOPATH}/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder/rootfs"
$ sudo rm -rf ${ROOTFS_DIR}
$ cd $GOPATH/src/github.com/kata-containers/kata-containers/tools/osbuilder/rootfs-builder
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true ./rootfs.sh ${distro}'
```
`AGENT_INIT` controls if the guest image uses the Kata agent as the guest `init` process. When you create an initrd image,
always set `AGENT_INIT` to `yes`. By default `seccomp` packages are not included in the initrd image. Set `SECCOMP` to `yes` to include them.
always set `AGENT_INIT` to `yes`.

You MUST choose one of `alpine`, `centos`, `clearlinux`, `euleros`, and `fedora` for `${distro}`.
You MUST choose a distribution (e.g., `ubuntu`) for `${distro}`.
You can get a supported distributions list in the Kata Containers by running the following.

```
$ ./rootfs.sh -l
```

If you want to build the agent without seccomp capability, you need to run the `rootfs.sh` script with `SECCOMP=no` as follows.

```
$ script -fec 'sudo -E GOPATH=$GOPATH AGENT_INIT=yes USE_DOCKER=true SECCOMP=no ./rootfs.sh ${distro}'
```

> **Note:**
>
@@ -11,6 +11,10 @@ For details of the other Kata Containers repositories, see the

* [Installation guides](./install/README.md): Install and run Kata Containers with Docker or Kubernetes

## Tracing

See the [tracing documentation](tracing.md).

## More User Guides

* [Upgrading](Upgrading.md): how to upgrade from [Clear Containers](https://github.com/clearcontainers) and [runV](https://github.com/hyperhq/runv) to [Kata Containers](https://github.com/kata-containers) and how to upgrade an existing Kata Containers system to the latest version.
@@ -40,6 +44,7 @@ Documents that help to understand and contribute to Kata Containers.
* [Kata Containers Architecture](design/architecture.md): Architectural overview of Kata Containers
* [Kata Containers E2E Flow](design/end-to-end-flow.md): The entire end-to-end flow of Kata Containers
* [Kata Containers design](./design/README.md): More Kata Containers design documents
* [Kata Containers threat model](./threat-model/threat-model.md): Kata Containers threat model

### How to Contribute

@@ -47,6 +52,18 @@ Documents that help to understand and contribute to Kata Containers.
* [How to contribute to Kata Containers](https://github.com/kata-containers/community/blob/master/CONTRIBUTING.md)
* [Code of Conduct](../CODE_OF_CONDUCT.md)

## Help Writing a Code PR

* [Code PR advice](code-pr-advice.md).

## Help Writing Unit Tests

* [Unit Test Advice](Unit-Test-Advice.md)

## Help Improving the Documents

* [Documentation Requirements](Documentation-Requirements.md)

### Code Licensing

* [Licensing](Licensing-strategy.md): About the licensing strategy of Kata Containers.
@@ -56,10 +73,6 @@ Documents that help to understand and contribute to Kata Containers.
* [Release strategy](Stable-Branch-Strategy.md)
* [Release Process](Release-Process.md)

## Help Improving the Documents

* [Documentation Requirements](Documentation-Requirements.md)

## Website Changes

If you have a suggestion for how we can improve the
@@ -64,7 +64,7 @@

### Check Git-hub Actions

We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/main.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.
We make use of [GitHub actions](https://github.com/features/actions) in this [file](https://github.com/kata-containers/kata-containers/blob/main/.github/workflows/release.yaml) in the `kata-containers/kata-containers` repository to build and upload release artifacts. This action is auto triggered with the above step when a new tag is pushed to the `kata-containers/kata-containers` repository.

Check the [actions status page](https://github.com/kata-containers/kata-containers/actions) to verify all steps in the actions workflow have completed successfully. On success, a static tarball containing Kata release artifacts will be uploaded to the [Release page](https://github.com/kata-containers/kata-containers/releases).
docs/Unit-Test-Advice.md (new file): 379 lines
@@ -0,0 +1,379 @@
# Unit Test Advice

## Overview

This document offers advice on writing a Unit Test (UT) in
[Golang](https://golang.org) and [Rust](https://www.rust-lang.org).

## General advice

### Unit test strategies

#### Positive and negative tests

Always add positive tests (where success is expected) *and* negative
tests (where failure is expected).

#### Boundary condition tests

Try to add unit tests that exercise boundary conditions such as:

- Missing values (`null` or `None`).
- Empty strings and huge strings.
- Empty (or uninitialised) complex data structures
  (such as lists, vectors and hash tables).
- Common numeric values (such as `-1`, `0`, `1` and the minimum and
  maximum values).

#### Test unusual values

Also always consider "unusual" input values such as:

- String values containing spaces, Unicode characters, special
  characters, escaped characters or null bytes.

  > **Note:** Consider these unusual values in prefix, infix and
  > suffix position.

- String values that cannot be converted into numeric values or which
  contain invalid structured data (such as invalid JSON).
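
The following sketch shows one way such boundary and unusual values could be collected for reuse in the table driven tests described later in this document. It is only an illustration; the helper name is hypothetical and not part of any Kata code:

```rust
// Hypothetical helper collecting boundary and "unusual" string inputs
// for table driven tests to iterate over.
fn awkward_strings() -> Vec<String> {
    vec![
        String::new(),            // empty string
        "a".repeat(1024 * 1024),  // huge string
        " prefix space".into(),   // unusual value in prefix position
        "infix space here".into(),
        "suffix space ".into(),
        "unicode: ☺".into(),
        "escaped: \\n\\t".into(),
        "null byte: \0".into(),
        "not a number".into(),    // cannot be converted to a numeric value
        "{ invalid json".into(),  // invalid structured data
    ]
}

fn main() {
    // A real test would feed these into the function under test;
    // here we just print the values.
    for (i, value) in awkward_strings().iter().enumerate() {
        println!("value[{}]: {:?}", i, value);
    }
}
```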

#### Other types of tests

If the code requires other forms of testing (such as stress testing,
fuzz testing and integration testing), raise a GitHub issue and
reference it on the issue you are using for the main work. This
ensures the test team are aware that a new test is required.

### Test environment

#### Create unique files and directories

Ensure your tests do not write to a fixed file or directory. This can
cause problems when running multiple tests simultaneously and also
when running tests after a previous test run failure.
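
For example, a test can create its own scratch directory rather than a fixed path. This sketch assumes the commonly used `tempfile` crate as a dev-dependency (an assumption; the document does not mandate a particular crate):

```rust
// Sketch: every test gets its own unique scratch directory instead of
// a fixed path, so parallel runs and leftover state cannot collide.
use std::fs;

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_uses_unique_directory() {
        // Unique per call; the directory is deleted when `dir` drops.
        let dir = tempfile::tempdir().expect("create temp dir");
        let path = dir.path().join("output.txt");

        fs::write(&path, b"data").expect("write test file");
        assert_eq!(fs::read(&path).expect("read test file"), b"data");
    }
}
```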

#### Assume parallel testing

Always assume your tests will be run *in parallel*. If this is
problematic for a test, force it to run in isolation using the
`serial_test` crate for Rust code for example.
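
A minimal sketch of forcing serial execution with the `serial_test` crate (exact crate version is an assumption):

```rust
// Sketch: two tests that mutate shared process state are forced to
// run one at a time with the `serial_test` attribute macro.
#[cfg(test)]
mod tests {
    use serial_test::serial;

    #[test]
    #[serial]
    fn test_sets_env_first() {
        std::env::set_var("KATA_UT_EXAMPLE", "first");
        assert_eq!(std::env::var("KATA_UT_EXAMPLE").unwrap(), "first");
    }

    #[test]
    #[serial]
    fn test_sets_env_second() {
        std::env::set_var("KATA_UT_EXAMPLE", "second");
        assert_eq!(std::env::var("KATA_UT_EXAMPLE").unwrap(), "second");
    }
}
```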

### Running

Ensure you run the unit tests and they all pass before raising a PR.
Ideally do this on different distributions on different architectures
to maximise coverage (and so minimise surprises when your code runs in
the CI).

## Assertions

### Golang assertions

Use the `testify` assertions package to create a new assertion object as this
keeps the test code free from distracting `if` tests:

```go
func TestSomething(t *testing.T) {
    assert := assert.New(t)

    err := doSomething()
    assert.NoError(err)
}
```

### Rust assertions

Use the standard set of `assert!()` macros.
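
For example, a test relying only on the standard macros:

```rust
#[test]
fn test_basic_assertions() {
    let result = 2 + 2;

    // The standard macros cover most cases.
    assert!(result > 0);
    assert_eq!(result, 4, "2 + 2 should equal 4");
    assert_ne!(result, 5);
}
```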
## Table driven tests
|
||||
|
||||
Try to write tests using a table-based approach. This allows you to distill
|
||||
the logic into a compact table (rather than spreading the tests across
|
||||
multiple test functions). It also makes it easy to cover all the
|
||||
interesting boundary conditions:
|
||||
|
||||
### Golang table driven tests
|
||||
|
||||
Assume the following function:
|
||||
|
||||
```go
|
||||
// The function under test.
|
||||
//
|
||||
// Accepts a string and an integer and returns the
|
||||
// result of sticking them together separated by a dash as a string.
|
||||
func joinParamsWithDash(str string, num int) (string, error) {
|
||||
if str == "" {
|
||||
return "", errors.New("string cannot be blank")
|
||||
}
|
||||
|
||||
if num <= 0 {
|
||||
return "", errors.New("number must be positive")
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s-%d", str, num), nil
|
||||
}
|
||||
```
|
||||
|
||||
A table driven approach to testing it:
|
||||
|
||||
```go
|
||||
import (
|
||||
"testing"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestJoinParamsWithDash(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
// Type used to hold function parameters and expected results.
|
||||
type testData struct {
|
||||
param1 string
|
||||
param2 int
|
||||
expectedResult string
|
||||
expectError bool
|
||||
}
|
||||
|
||||
// List of tests to run including the expected results
|
||||
data := []testData{
|
||||
// Failure scenarios
|
||||
{"", -1, "", true},
|
||||
{"", 0, "", true},
|
||||
{"", 1, "", true},
|
||||
{"foo", 0, "", true},
|
||||
{"foo", -1, "", true},
|
||||
|
||||
// Success scenarios
|
||||
{"foo", 1, "foo-1", false},
|
||||
{"bar", 42, "bar-42", false},
|
||||
}
|
||||
|
||||
// Run the tests
|
||||
for i, d := range data {
|
||||
// Create a test-specific string that is added to each assert
|
||||
// call. It will be displayed if any assert test fails.
|
||||
msg := fmt.Sprintf("test[%d]: %+v", i, d)
|
||||
|
||||
// Call the function under test
|
||||
result, err := joinParamsWithDash(d.param1, d.param2)
|
||||
|
||||
// update the message for more information on failure
|
||||
msg = fmt.Sprintf("%s, result: %q, err: %v", msg, result, err)
|
||||
|
||||
if d.expectError {
|
||||
assert.Error(err, msg)
|
||||
|
||||
// If an error is expected, there is no point
|
||||
// performing additional checks.
|
||||
continue
|
||||
}
|
||||
|
||||
assert.NoError(err, msg)
|
||||
assert.Equal(d.expectedResult, result, msg)
|
||||
}
|
||||
}
|
||||
```

### Rust table driven tests

Assume the following function:

```rust
// Convenience type to allow Result return types to only specify the type
// for the true case; failures are specified as static strings.
// XXX: This is an example. In real code use the "anyhow" and
// XXX: "thiserror" crates.
pub type Result<T> = std::result::Result<T, &'static str>;

// The function under test.
//
// Accepts a string and an integer and returns the
// result of sticking them together separated by a dash as a string.
fn join_params_with_dash(str: &str, num: i32) -> Result<String> {
    if str.is_empty() {
        return Err("string cannot be blank");
    }

    if num <= 0 {
        return Err("number must be positive");
    }

    let result = format!("{}-{}", str, num);

    Ok(result)
}
```

A table driven approach to testing it:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_join_params_with_dash() {
        // This is a type used to record all details of the inputs
        // and outputs of the function under test.
        #[derive(Debug)]
        struct TestData<'a> {
            str: &'a str,
            num: i32,
            result: Result<String>,
        }

        // The tests can now be specified as a set of inputs and outputs
        let tests = &[
            // Failure scenarios
            TestData {
                str: "",
                num: 0,
                result: Err("string cannot be blank"),
            },
            TestData {
                str: "foo",
                num: -1,
                result: Err("number must be positive"),
            },

            // Success scenarios
            TestData {
                str: "foo",
                num: 42,
                result: Ok("foo-42".to_string()),
            },
            TestData {
                str: "-",
                num: 1,
                result: Ok("--1".to_string()),
            },
        ];

        // Run the tests
        for (i, d) in tests.iter().enumerate() {
            // Create a string containing details of the test
            let msg = format!("test[{}]: {:?}", i, d);

            // Call the function under test
            let result = join_params_with_dash(d.str, d.num);

            // Update the test details string with the results of the call
            let msg = format!("{}, result: {:?}", msg, result);

            // Perform the checks
            if d.result.is_ok() {
                assert!(result == d.result, "{}", msg);
                continue;
            }

            let expected_error = format!("{}", d.result.as_ref().unwrap_err());
            let actual_error = format!("{}", result.unwrap_err());
            assert!(actual_error == expected_error, "{}", msg);
        }
    }
}
```

## Temporary files

Always delete temporary files on success.

### Golang temporary files

```go
func TestSomething(t *testing.T) {
    assert := assert.New(t)

    // Create a temporary directory
    tmpdir, err := ioutil.TempDir("", "")
    assert.NoError(err)

    // Delete it at the end of the test
    defer os.RemoveAll(tmpdir)

    // Add test logic that will use the tmpdir here...
}
```

### Rust temporary files

Use the `tempfile` crate which allows files and directories to be deleted
automatically:

```rust
#[cfg(test)]
mod tests {
    use tempfile::tempdir;

    #[test]
    fn test_something() {
        // Create a temporary directory (which will be deleted automatically).
        let dir = tempdir().expect("failed to create tmpdir");

        let filename = dir.path().join("file.txt");

        // create filename ...
    }
}
```

## Test user

[Unit tests are run *twice*](https://github.com/kata-containers/tests/blob/main/.ci/go-test.sh):

- as the current user
- as the `root` user (if different to the current user)

When writing a test consider which user should run it; even if the code the
test is exercising runs as `root`, it may be necessary to *only* run the test
as a non-`root` user for the test to be meaningful. Add appropriate skip
guards around code that requires `root` and non-`root` so that the test
will run if the correct type of user is detected and be skipped if not.

### Run Golang tests as a different user

The main repository has the most comprehensive set of skip abilities. See:

- https://github.com/kata-containers/kata-containers/tree/main/src/runtime/pkg/katatestutils

### Run Rust tests as a different user

One method is to use the `nix` crate along with some custom macros:

```rust
#[cfg(test)]
mod tests {
    #[allow(unused_macros)]
    macro_rules! skip_if_root {
        () => {
            if nix::unistd::Uid::effective().is_root() {
                println!("INFO: skipping {} which needs non-root", module_path!());
                return;
            }
        };
    }

    #[allow(unused_macros)]
    macro_rules! skip_if_not_root {
        () => {
            if !nix::unistd::Uid::effective().is_root() {
                println!("INFO: skipping {} which needs root", module_path!());
                return;
            }
        };
    }

    #[test]
    fn test_that_must_be_run_as_root() {
        // Not running as the superuser, so skip.
        skip_if_not_root!();

        // Run test *iff* the user running the test is root

        // ...
    }
}
```

docs/code-pr-advice.md (new file, 246 lines)
@@ -0,0 +1,246 @@

# Code PR Advice

Before raising a PR containing code changes, we suggest you consider
the following to ensure a smooth and fast process.

> **Note:**
>
> - All the advice in this document is optional. However, if the
>   advice provided is not followed, there is no guarantee your PR
>   will be merged.
>
> - All the check tools will be run automatically on your PR by the CI.
>   However, if you run them locally first, there is a much better
>   chance of a successful initial CI run.

## Assumptions

This document assumes you have already read (and, in the case of the
code of conduct, agreed to):

- The [Kata Containers code of conduct](https://github.com/kata-containers/community/blob/main/CODE_OF_CONDUCT.md).
- The [Kata Containers contributing guide](https://github.com/kata-containers/community/blob/main/CONTRIBUTING.md).

## Code

### Architectures

Do not write architecture-specific code if it is possible to write the
code generically.

### General advice

- Do not write code to impress: instead write code that is easy to read and understand.

- Always consider which user will run the code. Try to minimise
  the privileges the code requires.

### Comments

Always add comments if the intent of the code is not obvious. However,
try to avoid comments if the code could be made clearer (for example
by using more meaningful variable names).

### Constants

Don't embed magic numbers and strings in functions, particularly if
they are used repeatedly.

Create constants at the top of the file instead.
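
For example, in Rust (the names and values here are illustrative only):

```rust
// Good: the values are named once, at the top of the file.
const CONFIG_DIR: &str = "/etc/example";
const DEFAULT_TIMEOUT_SECS: u64 = 30;

fn config_file_path(name: &str) -> String {
    // Bad alternative (avoid): format!("/etc/example/{}", name)
    format!("{}/{}", CONFIG_DIR, name)
}
```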

### Copyright and license

Ensure all new files contain a copyright statement and an SPDX license
identifier in the comments at the top of the file.
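
For example, the comment header of a new Rust file might look like this
(the copyright holder is a placeholder):

```rust
// Copyright (c) 2021 Example Author
//
// SPDX-License-Identifier: Apache-2.0
//
```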

### FIXME and TODO

If the code contains areas that are not fully implemented, make this
clear with a comment that provides a link to a GitHub issue giving
further information.

Do not just rely on comments in this case though: if possible, return
a "`BUG: feature X not implemented see {bug-url}`" type error.

### Functions

- Keep functions relatively short (less than 100 lines is a good "rule of thumb").

- Document functions if the parameters, return value or general intent
  of the function is not obvious.

- Always return errors where possible.

  Do not discard error return values from the functions this function
  calls.

### Logging

- Don't use multiple log calls when a single log call could be used.

- Use structured logging where possible to allow
  [standard tooling](https://github.com/kata-containers/tests/tree/main/cmd/log-parser)
  to extract the log fields.
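
For example, with the `slog` crate used by the Kata Containers agent
(a sketch; the function and field names are illustrative):

```rust
use slog::info;

fn log_container_start(logger: &slog::Logger, container_id: &str, vcpus: u32) {
    // Good: a single structured call with discrete fields that
    // log-parsing tools can extract.
    info!(logger, "container started"; "container-id" => container_id, "vcpus" => vcpus);

    // Bad (avoid): several calls with the fields embedded in the text:
    // info!(logger, "container {} started", container_id);
    // info!(logger, "vcpus: {}", vcpus);
}
```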

### Names

Give functions, macros and variables clear and meaningful names.

### Structures

#### Golang structures

Unlike Rust, Go does not enforce that all structure members be set.
This has led to numerous bugs in the past where code like the
following is used:

```go
type Foo struct {
    Key   string
    Value string
}

// BUG: Key not set, but nobody noticed! ;(
foo1 := Foo{
    Value: "foo",
}
```

A much safer approach is to create a constructor function to enforce
integrity:

```go
type Foo struct {
    Key   string
    Value string
}

func NewFoo(key, value string) (*Foo, error) {
    if key == "" {
        return nil, errors.New("Foo needs a key")
    }

    if value == "" {
        return nil, errors.New("Foo needs a value")
    }

    return &Foo{
        Key:   key,
        Value: value,
    }, nil
}

func testFoo() error {
    // BUG: Key not set, but nobody noticed! ;(
    badFoo := Foo{Value: "value"}

    // Ok - the constructor performs the needed validation
    goodFoo, err := NewFoo("name", "value")
    if err != nil {
        return err
    }

    // ... use badFoo and goodFoo here ...
    _ = badFoo
    _ = goodFoo

    return nil
}
```

> **Note:**
>
> The above is just an example. The *safest* approach would be to move
> `NewFoo()` into a separate package and make `Foo` and its elements
> private. The compiler would then enforce the use of the constructor
> to guarantee correctly defined objects.

### Tracing

Consider if the code needs to create a new
[trace span](https://github.com/kata-containers/kata-containers/blob/main/docs/tracing.md).

Ensure any new trace spans added to the code are completed.

## Tests

### Unit tests

Where possible, code changes should be accompanied by unit tests.

Consider using the standard
[table-based approach](https://github.com/kata-containers/tests/blob/main/Unit-Test-Advice.md)
as it encourages you to make functions small and simple, and also
allows you to think about what types of value to test.

### Other categories of test

Raise a GitHub issue in the
[`tests`](https://github.com/kata-containers/tests) repository that
explains what sort of test is required along with as much detail as
possible. Ensure the original issue is referenced on the `tests` issue.

### Unsafe code

#### Rust language specifics

Minimise the use of `unsafe` blocks in Rust code. Since such code is
potentially dangerous, always write [unit tests](#unit-tests)
for it where possible.

`expect()` and `unwrap()` will cause the code to panic on error.
Prefer to return a `Result` on error rather than using these calls to
allow the caller to deal with the error condition.
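
For example (a sketch):

```rust
use std::fs;
use std::io;

// Avoid: panics if the file cannot be read.
fn read_config_or_panic(path: &str) -> String {
    fs::read_to_string(path).unwrap()
}

// Prefer: return the error and let the caller decide what to do.
fn read_config(path: &str) -> io::Result<String> {
    fs::read_to_string(path)
}
```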

The table below lists the small number of cases where use of
`expect()` and `unwrap()` are permitted:

| Area | Rationale for permitting |
|-|-|
| In test code (the `tests` module) | Panics will cause the test to fail, which is desirable. |
| `lazy_static!()` | This magic macro cannot "return" a value as it runs before `main()`. |
| `defer!()` | Similar to golang's `defer()` but doesn't allow the use of `?`. |
| `tokio::spawn(async move {})` | Cannot currently return a `Result` from an `async move` closure. |
| If an explicit test is performed before the `unwrap()` / `expect()` | *"Just about acceptable"*, but not ideal `[*]` |

`[*]` - This can lead to bad *future* code: consider what would
happen if the explicit test gets dropped in the future. This is more
likely to happen if the test and the extraction of the value are two
separate operations. In summary, this strategy can introduce an
insidious maintenance issue.

## Documentation

### General requirements

- All new features should be accompanied by documentation explaining:

  - What the new feature does

  - Why it is useful

  - How to use the feature

  - Any known issues or limitations

    Links should be provided to the GitHub issues tracking them.

- The [documentation requirements document](Documentation-Requirements.md)
  explains how the project formats documentation.

### Markdown syntax

Run the
[markdown checker](https://github.com/kata-containers/tests/tree/main/cmd/check-markdown)
on your documentation changes.

### Spell check

Run the
[spell checker](https://github.com/kata-containers/tests/tree/main/cmd/check-spelling)
on your documentation changes.

## Finally

You may wish to read the documentation that the
[Kata Review Team](https://github.com/kata-containers/community/blob/main/Rota-Process.md) use to help review PRs:

- [PR review guide](https://github.com/kata-containers/community/blob/main/PR-Review-Guide.md).
- [documentation review process](https://github.com/kata-containers/community/blob/main/Documentation-Review-Process.md).

@@ -1 +1 @@
(draw.io `mxfile` diagram source changed; XML content not shown)

Binary file not shown. (Before: 93 KiB, after: 90 KiB.)

@@ -14,7 +14,7 @@ through the [CRI-O\*](https://github.com/kubernetes-incubator/cri-o) and

Kata Containers creates a QEMU\*/KVM virtual machine for each pod that `kubelet` (Kubernetes) creates.

The [`containerd-shim-kata-v2` (shown as `shimv2` from this point onwards)](../../src/runtime/cmd/containerd-shim-kata-v2/)
is the Kata Containers entrypoint, which
implements the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2) for Kata.

@@ -259,7 +259,7 @@ With `RuntimeClass`, users can define Kata Containers as a `RuntimeClass` and th

## DAX

Kata Containers utilizes the Linux kernel DAX [(Direct Access filesystem)](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/dax.rst?h=v5.14)
feature to efficiently map some host-side files into the guest VM space.
In particular, Kata Containers uses the QEMU NVDIMM feature to provide a
memory-mapped virtual device that can be used to DAX map the virtual machine's

@@ -1825,12 +1825,8 @@ components:
desc: ""
- value: grpc.StartContainerRequest
desc: ""
- value: grpc.StatsContainerRequest
desc: ""
- value: grpc.TtyWinResizeRequest
desc: ""
- value: grpc.UpdateContainerRequest

@@ -12,187 +12,244 @@ The OCI [runtime specification][linux-config] provides guidance on where the con

> [`cgroupsPath`][cgroupspath]: (string, OPTIONAL) path to the cgroups. It can be used to either control the cgroups
> hierarchy for containers or to run a new process in an existing container

Cgroups are hierarchical, and this can be seen with the following pod example:

- Pod 1: `cgroupsPath=/kubepods/pod1`
  - Container 1: `cgroupsPath=/kubepods/pod1/container1`
  - Container 2: `cgroupsPath=/kubepods/pod1/container2`

- Pod 2: `cgroupsPath=/kubepods/pod2`
  - Container 1: `cgroupsPath=/kubepods/pod2/container2`
  - Container 2: `cgroupsPath=/kubepods/pod2/container2`

Depending on the upper-level orchestration layers, the cgroup under which the pod is placed is
managed by the orchestrator or not. In the case of Kubernetes, the pod cgroup is created by Kubelet,
while the container cgroups are to be handled by the runtime.
Kubelet will size the pod cgroup based on the container resource requirements, to which it may add
a configured set of [pod resource overheads](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/).

Kata Containers introduces a non-negligible resource overhead for running a sandbox (pod). Typically, the Kata shim,
through its underlying VMM invocation, will create many additional threads compared to process based container runtimes:
the para-virtualized I/O back-ends, the VMM instance or even the Kata shim process. All of those host processes consume
memory and CPU time not directly tied to the container workload, and introduce a sandbox resource overhead.
In order for a Kata workload to run without significant performance degradation, its sandbox overhead must be
provisioned accordingly. Two scenarios are possible:

1) The upper-layer orchestrator takes the overhead of running a sandbox into account when sizing the pod cgroup.
   For example, the Kubernetes [`PodOverhead`](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/)
   feature lets the orchestrator add a configured sandbox overhead to the sum of all its containers' resources. In
   that case, the pod sandbox is properly sized and all Kata created processes will run under the pod cgroup
   defined constraints and limits.
2) The upper-layer orchestrator does **not** take the sandbox overhead into account and the pod cgroup is not
   sized to properly run all Kata created processes. With that scenario, attaching all the Kata processes to the sandbox
   cgroup may lead to non-negligible workload performance degradations. As a consequence, Kata Containers will move
   all processes but the vCPU threads into a dedicated overhead cgroup under `/kata_overhead`. The Kata runtime will
   not apply any constraints or limits to that cgroup; it is up to the infrastructure owner to optionally set it up.

Those 2 scenarios are not dynamically detected by the Kata Containers runtime implementation, and thus the
infrastructure owner must configure the runtime according to how the upper-layer orchestrator creates and sizes the
pod cgroup. That configuration selection is done through the `sandbox_cgroup_only` flag within the Kata Containers
[configuration](../../src/runtime/README.md#configuration) file.

## `sandbox_cgroup_only = true`

Setting `sandbox_cgroup_only` to `true` from the Kata Containers configuration file means that the pod cgroup is
properly sized and takes the pod overhead into account. This is ideal, as all the applicable Kata Containers processes
can simply be placed within the given cgroup path.

In the context of Kubernetes, Kubelet can size the pod cgroup to take the overhead of running a Kata-based sandbox
into account. This has been supported since the 1.16 Kubernetes release, through the
[`PodOverhead`](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/) feature.

```
┌──────────────────────────────────────────────┐
│                                              │
│   ┌──────────────────────────────────────┐   │
│   │                                      │   │
│   │   ┌──────────────────────────────┐   │   │
│   │   │                              │   │   │
│   │   │   ┌──────────────────────┐   │   │   │
│   │   │   │  vCPU threads        │   │   │   │
│   │   │   │  I/O threads         │   │   │   │
│   │   │   │  VMM                 │   │   │   │
│   │   │   │  Kata Shim           │   │   │   │
│   │   │   │                      │   │   │   │
│   │   │   │  /kata_<sandbox_id>  │   │   │   │
│   │   │   └──────────────────────┘   │   │   │
│   │   │ Pod 1                        │   │   │
│   │   └──────────────────────────────┘   │   │
│   │                                      │   │
│   │   ┌──────────────────────────────┐   │   │
│   │   │                              │   │   │
│   │   │   ┌──────────────────────┐   │   │   │
│   │   │   │  vCPU threads        │   │   │   │
│   │   │   │  I/O threads         │   │   │   │
│   │   │   │  VMM                 │   │   │   │
│   │   │   │  Kata Shim           │   │   │   │
│   │   │   │                      │   │   │   │
│   │   │   │  /kata_<sandbox_id>  │   │   │   │
│   │   │   └──────────────────────┘   │   │   │
│   │   │ Pod 2                        │   │   │
│   │   └──────────────────────────────┘   │   │
│   │                                      │   │
│   │ /kubepods                            │   │
│   └──────────────────────────────────────┘   │
│                                              │
│ Node                                         │
└──────────────────────────────────────────────┘
```

### Implementation details

When `sandbox_cgroup_only` is enabled, the Kata shim will create a per pod
sub-cgroup under the pod's dedicated cgroup. For example, in the Kubernetes context,
it will create a `/kata_<PodSandboxID>` under the `/kubepods` cgroup hierarchy.
On a typical cgroup v1 hierarchy mounted under `/sys/fs/cgroup/`, the memory cgroup
subsystem for a pod with sandbox ID `12345678` would live under
`/sys/fs/cgroup/memory/kubepods/kata_12345678`.

In most cases, the `/kata_<PodSandboxID>` created cgroup is unrestricted and inherits and shares all
constraints and limits from the parent cgroup (`/kubepods` in the Kubernetes case). The exception is
for the `cpuset` and `devices` cgroup subsystems, which are managed by the Kata shim.

After creating the `/kata_<PodSandboxID>` cgroup, the Kata Containers shim will move itself to it, **before** starting
the virtual machine. As a consequence all processes subsequently created by the Kata Containers shim (the VMM itself, and
all vCPU and I/O related threads) will be created in the `/kata_<PodSandboxID>` cgroup.

### Why create a Kata-cgroup under the parent cgroup?

And why not add the per-sandbox shim directly to the pod cgroup (e.g.
`/kubepods` in the Kubernetes context)?

The Kata Containers shim implementation creates a per-sandbox cgroup
(`/kata_<PodSandboxID>`) to support the `Docker` use case. Although `Docker` does not
have a notion of pods, Kata Containers still creates a sandbox to support the pod-less,
single container use case that `Docker` implements. Since `Docker` does not create any
cgroup hierarchy to place a container into, it would be very complex for Kata to map
a particular container to its sandbox without placing it under a `/kata_<containerID>`
sub-cgroup first.

### Improvements

Keeping all Kata Containers processes under a properly sized pod cgroup is ideal
and makes for a simpler Kata Containers implementation. It also helps with gathering
accurate statistics and preventing Kata workloads from being noisy neighbors.

#### Pod resources statistics

If the Kata caller wants to know the resource usage on the host it can get
statistics from the pod cgroup. All cgroups stats in the hierarchy will include
the Kata overhead. This gives the possibility of gathering usage statistics at the
pod level and the container level.

#### Better host resource isolation

Because the Kata runtime will place all the Kata processes in the pod cgroup,
the resource limits that the caller applies to the pod cgroup will affect all
processes that belong to the Kata sandbox in the host. This will improve the
isolation in the host, preventing Kata from becoming a noisy neighbor.

## `sandbox_cgroup_only = false` (Default setting)

If the cgroup provided to Kata is not sized appropriately, Kata components will
consume resources that the actual container workloads expect to see and use.
This can cause instability and performance degradations.

To avoid that situation, Kata Containers creates an unconstrained overhead
cgroup and moves all non workload related processes (anything but the virtual CPU
threads) to it. The name of this overhead cgroup is `/kata_overhead` and a per
sandbox sub cgroup will be created under it for each sandbox Kata Containers creates.

Kata Containers does not add any constraints or limitations on the overhead cgroup. It is up to the infrastructure
owner to either:

- Provision nodes with a pre-sized `/kata_overhead` cgroup. Kata Containers will
  load that existing cgroup and move all non workload related processes to it.
- Let Kata Containers create the `/kata_overhead` cgroup, leave it
  unconstrained or resize it a posteriori.

```
┌────────────────────────────────────────────────────────────────────┐
│                                                                    │
│  ┌─────────────────────────────┐  ┌─────────────────────────────┐  │
│  │                             │  │                             │  │
│  │ ┌───────────────────────────┼──┼───────────────────────────┐ │  │
│  │ │                           │  │                           │ │  │
│  │ │  ┌─────────────────────┐  │  │  ┌─────────────────────┐  │ │  │
│  │ │  │ vCPU threads        │  │  │  │ VMM                 │  │ │  │
│  │ │  │                     │  │  │  │ I/O threads         │  │ │  │
│  │ │  │                     │  │  │  │ Kata Shim           │  │ │  │
│  │ │  │                     │  │  │  │                     │  │ │  │
│  │ │  │ /kata_<sandbox_id>  │  │  │  │ /<sandbox_id>       │  │ │  │
│  │ │  └─────────────────────┘  │  │  └─────────────────────┘  │ │  │
│  │ │ Pod 1                     │  │                           │ │  │
│  │ └───────────────────────────┼──┼───────────────────────────┘ │  │
│  │                             │  │                             │  │
│  │                             │  │                             │  │
│  │ ┌───────────────────────────┼──┼───────────────────────────┐ │  │
│  │ │                           │  │                           │ │  │
│  │ │  ┌─────────────────────┐  │  │  ┌─────────────────────┐  │ │  │
│  │ │  │ vCPU threads        │  │  │  │ VMM                 │  │ │  │
│  │ │  │                     │  │  │  │ I/O threads         │  │ │  │
│  │ │  │                     │  │  │  │ Kata Shim           │  │ │  │
│  │ │  │                     │  │  │  │                     │  │ │  │
│  │ │  │ /kata_<sandbox_id>  │  │  │  │ /<sandbox_id>       │  │ │  │
│  │ │  └─────────────────────┘  │  │  └─────────────────────┘  │ │  │
│  │ │ Pod 2                     │  │                           │ │  │
│  │ └───────────────────────────┼──┼───────────────────────────┘ │  │
│  │                             │  │                             │  │
│  │ /kubepods                   │  │ /kata_overhead              │  │
│  └─────────────────────────────┘  └─────────────────────────────┘  │
│                                                                    │
│                                                                    │
│ Node                                                               │
└────────────────────────────────────────────────────────────────────┘
```

### Implementation details

When `sandbox_cgroup_only` is disabled, the Kata Containers shim will create a per pod
sub-cgroup under the pod's dedicated cgroup, and another one under the overhead cgroup.
For example, in the Kubernetes context, it will create a `/kata_<PodSandboxID>` under
the `/kubepods` cgroup hierarchy, and a `/<PodSandboxID>` under the `/kata_overhead` one.

On a typical cgroup v1 hierarchy mounted under `/sys/fs/cgroup/`, for a pod whose sandbox
ID is `12345678`, created with `sandbox_cgroup_only` disabled, the 2 memory subsystems
for the sandbox cgroup and the overhead cgroup would respectively live under
`/sys/fs/cgroup/memory/kubepods/kata_12345678` and `/sys/fs/cgroup/memory/kata_overhead/12345678`.

Unlike when `sandbox_cgroup_only` is enabled, the Kata Containers shim will move itself
to the overhead cgroup first, and then move the vCPU threads to the sandbox cgroup as
they're created. All Kata processes and threads will run under the overhead cgroup except for
the vCPU threads.

With `sandbox_cgroup_only` disabled, Kata Containers assumes the pod cgroup is only sized
to accommodate the actual container workload processes. For Kata, this maps
to the VMM created virtual CPU threads and so they are the only ones running under the pod
cgroup. This mitigates the risk of the VMM, the Kata shim and the I/O threads going through
a catastrophic out of memory scenario (`OOM`).

#### Pros and Cons

Running all non vCPU threads under an unconstrained overhead cgroup could lead to workloads
potentially consuming a large amount of host resources.

On the other hand, running all non vCPU threads under a dedicated overhead cgroup can provide
accurate metrics on the actual Kata Container pod overhead, allowing for tuning the overhead
cgroup size and constraints accordingly.

[linux-config]: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md
[cgroupspath]: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#cgroups-path

# Supported cgroups

Kata Containers currently only supports cgroups `v1`.

In the following sections each cgroup is described briefly.

## Cgroups V1

@@ -244,7 +301,7 @@ diagram:

A process can join a cgroup by writing its process id (`pid`) to `cgroup.procs` file,
or join a cgroup partially by writing the task (thread) id (`tid`) to the `tasks` file.
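
For example, a process could join an existing cgroup with something like
the following sketch (the cgroup path is illustrative, and writing to it
typically requires root):

```rust
use std::fs;
use std::process;

fn join_cgroup() -> std::io::Result<()> {
    // Write our own pid to the cgroup's cgroup.procs file.
    let pid = process::id().to_string();
    fs::write("/sys/fs/cgroup/memory/example_cgroup/cgroup.procs", pid)
}
```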

Kata Containers only supports `v1`.
To know more about `cgroups v1`, see [cgroupsv1(7)][2].

## Cgroups V2

@@ -297,22 +354,13 @@ Same as `cgroups v1`, a process can join the cgroup by writing its process id (`

`cgroup.procs` file, or join a cgroup partially by writing the task (thread) id (`tid`) to
`cgroup.threads` file.

To know more about `cgroups v2`, see [cgroupsv2(7)][3].

Kata Containers does not support cgroups `v2` on the host.

[1]: http://man7.org/linux/man-pages/man5/tmpfs.5.html
[2]: http://man7.org/linux/man-pages/man7/cgroups.7.html#CGROUPS_VERSION_1

@@ -1,21 +1,21 @@

# Kata 2.0 Metrics Design

Kata implements CRI's API and supports [`ContainerStats`](https://github.com/kubernetes/kubernetes/blob/release-1.18/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto#L101) and [`ListContainerStats`](https://github.com/kubernetes/kubernetes/blob/release-1.18/staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.proto#L103) interfaces to expose containers metrics. Users can use these interfaces to get basic metrics about containers.

Unlike `runc`, Kata is a VM-based runtime and has a different architecture.

## Limitations of Kata 1.x and target of Kata 2.0

Kata 1.x has a number of limitations related to observability that may be obstacles to running Kata Containers at scale.

In Kata 2.0, the following components will be able to provide more details about the system:

- containerd shim v2 (effectively `kata-runtime`)
- Hypervisor statistics
- Agent process
- Guest OS statistics

> **Note**: In Kata 1.x, the main user-facing component was the runtime (`kata-runtime`). From 1.5, Kata introduced the Kata containerd shim v2 (`containerd-shim-kata-v2`) which is essentially a modified runtime that is loaded by containerd to simplify and improve the way VM-based containers are created and managed.
>
> For Kata 2.0, the main component is the Kata containerd shim v2, although the deprecated `kata-runtime` binary will be maintained for a period of time.
>
@@ -25,14 +25,15 @@ In Kata 2.0, the following components will be able to provide more details about

Kata 2.0 metrics strongly depend on [Prometheus](https://prometheus.io/), a graduated project from CNCF.

Kata Containers 2.0 introduces a new Kata component called `kata-monitor` which is used to monitor the Kata components on the host. It's shipped with the Kata runtime to provide an interface to:

- Get metrics
- Get events

At present, `kata-monitor` supports retrieval of metrics only: this is what will be covered in this document.

This is the architecture overview of metrics in Kata Containers 2.0:

![Kata metrics in Kata 2.0](arch-images/kata-2-metrics.png)

@@ -45,38 +46,38 @@ For a quick evaluation, you can check out [this how to](../how-to/how-to-set-pro

### Kata monitor

The `kata-monitor` management agent should be started on each node where the Kata containers runtime is installed. `kata-monitor` will:

> **Note**: a *node* running Kata containers will be either a single host system or a worker node belonging to a K8s cluster capable of running Kata pods.

- Aggregate sandbox metrics running on the node, adding the `sandbox_id` label to them.
- Expose a new Prometheus target, allowing all node metrics coming from the Kata shim to be collected by Prometheus indirectly. This simplifies the targets count in Prometheus and avoids exposing shim's metrics by `ip:port`.

Only one `kata-monitor` process runs in each node.

`kata-monitor` uses a different communication channel than the one used by the container engine (`containerd`/`CRI-O`) to communicate with the Kata shim. The Kata shim exposes a dedicated socket address reserved to `kata-monitor`.

The shim's metrics socket file is created under the virtcontainers sandboxes directory, i.e. `vc/sbs/${PODID}/shim-monitor.sock`.

> **Note**: If there is no Prometheus server configured, i.e., there are no scrape operations, `kata-monitor` will not collect any metrics.

### Kata runtime

Kata runtime is responsible for:

- Gather metrics about shim process
- Gather metrics about hypervisor process
- Gather metrics about running sandbox
- Get metrics from Kata agent (through `ttrpc`)

### Kata agent

Kata agent is responsible for:

- Gather agent process metrics
- Gather guest OS metrics

In Kata 2.0, the agent adds a new interface:

```protobuf
rpc GetMetrics(GetMetricsRequest) returns (Metrics);
```

@@ -93,33 +94,49 @@ The `metrics` field is Prometheus encoded content. This can avoid defining a fix

### Performance and overhead

Metrics should not become a bottleneck for the system or downgrade the performance: they should run with minimal overhead.

Requirements:

* Metrics **MUST** be quick to collect
* Metrics **MUST** be small
* Metrics **MUST** be generated only if there are subscribers to the Kata metrics service
* Metrics **MUST** be stateless

In Kata 2.0, metrics are collected only when needed (pull mode), mainly from the `/proc` filesystem, and consumed by Prometheus. This means that if the Prometheus collector is not running (so no one cares about the metrics) the overhead will be zero.

The metrics service also doesn't hold any metrics in memory.

#### Metrics size ####

|\*|No Sandbox | 1 Sandbox | 2 Sandboxes |
|---|---|---|---|
|Metrics count| 39 | 106 | 173 |
|Metrics size (bytes)| 9K | 144K | 283K |
|Metrics size (`gzipped`, bytes)| 2K | 10K | 17K |

*Metrics size*: response size of one Prometheus scrape request.

It's easy to estimate the size of one metrics fetch request issued by Prometheus.
The formula to calculate the expected size when no gzip compression is in place is:

9 + (144 - 9) * `number of kata sandboxes`

Prometheus supports `gzip compression`. When enabled, the response size of each request will be smaller:

2 + (10 - 2) * `number of kata sandboxes`

**Example**

We have 10 sandboxes running on a node. The expected size of one metrics fetch request issued by Prometheus against the kata-monitor agent running on that node will be:

9 + (144 - 9) * 10 = **1.35M**

If `gzip compression` is enabled:

2 + (10 - 2) * 10 = **82K**
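
The same estimates expressed as a small helper (a sketch; the constants
are the per-request sizes in KiB taken from the table above):

```rust
fn estimated_scrape_size_kib(sandboxes: u32, gzipped: bool) -> u32 {
    if gzipped {
        2 + (10 - 2) * sandboxes
    } else {
        9 + (144 - 9) * sandboxes
    }
}

fn main() {
    // 10 sandboxes: 1359 KiB (~1.35M) plain, 82 KiB gzipped.
    println!("{}", estimated_scrape_size_kib(10, false));
    println!("{}", estimated_scrape_size_kib(10, true));
}
```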
|
||||
|
||||
#### Metrics delay ####
|
||||
|
||||
And here is some test data:
|
||||
|
||||
- End-to-end (from Prometheus server to `kata-monitor` and `kata-monitor` write response back): 20ms(avg)
|
||||
- Agent(RPC all from shim to agent): 3ms(avg)
|
||||
- End-to-end (from Prometheus server to `kata-monitor` and `kata-monitor` write response back): **20ms**(avg)
|
||||
- Agent (RPC all from shim to agent): **3ms**(avg)
|
||||
|
||||
Test infrastructure:
|
||||
|
||||
@@ -128,13 +145,13 @@ Test infrastructure:

**Scrape interval**

Prometheus default `scrape_interval` is 1 minute, but it is usually set to 15 seconds. A smaller `scrape_interval` causes more overhead, so users should set it depending on their monitoring needs.
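For reference, a minimal Prometheus scrape job targeting a `kata-monitor` endpoint could look like the sketch below; the job name and target address are illustrative assumptions, not values mandated by Kata:

```sh
$ cat << EOF > prometheus.yml
scrape_configs:
  - job_name: kata-monitor
    scrape_interval: 15s
    static_configs:
      - targets: ['127.0.0.1:8090']
EOF
```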
## Metrics list

Listed here are all the metrics supported by Kata 2.0. Some metrics depend on the VM guest kernel, so the available ones may differ based on the environment.

Metrics are categorized by the component from/for which they are collected.

* [Metric types](#metric-types)
* [Kata agent metrics](#kata-agent-metrics)
@@ -145,15 +162,15 @@ Metrics is categorized by component where metrics are collected from and for.
* [Kata containerd shim v2 metrics](#kata-containerd-shim-v2-metrics)
> **Note**:
> * Labels here do not include the `instance` and `job` labels added by Prometheus.
> * Notes about metrics units:
>   * `Kibibytes`, abbreviated `KiB`. 1 `KiB` equals 1024 B.
>   * For some metrics (like network device statistics from the file `/proc/net/dev`), the unit depends on the label (for example, `recv_bytes` and `recv_packets` have different units).
>   * Most of these metrics are collected from the `/proc` filesystem, so the unit of each metric matches the unit of the relevant `/proc` entry. See the `proc(5)` manual page for further details.
### Metric types

Prometheus offers four core metric types.

- Counter: A counter is a cumulative metric that represents a single monotonically increasing counter, whose value can only increase (or be reset to zero on restart).
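As a hedged illustration of how these types are consumed, the query below computes the per-second rate of a Kata counter-style series through Prometheus' HTTP API; it assumes a local Prometheus on its default port `9090` and uses the shim histogram metric listed later in this document:

```sh
$ curl -s 'http://127.0.0.1:9090/api/v1/query' \
    --data-urlencode 'query=rate(kata_shim_agent_rpc_durations_histogram_milliseconds_sum[5m])'
```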
@@ -207,7 +224,7 @@ Metrics for Firecracker vmm.
| `kata_firecracker_uart`: <br> Metrics specific to the UART device. | `GAUGE` | | <ul><li>`item`<ul><li>`error_count`</li><li>`flush_count`</li><li>`missed_read_count`</li><li>`missed_write_count`</li><li>`read_count`</li><li>`write_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vcpu`: <br> Metrics specific to VCPUs' mode of functioning. | `GAUGE` | | <ul><li>`item`<ul><li>`exit_io_in`</li><li>`exit_io_out`</li><li>`exit_mmio_read`</li><li>`exit_mmio_write`</li><li>`failures`</li><li>`filter_cpuid`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vmm`: <br> Metrics specific to the machine manager as a whole. | `GAUGE` | | <ul><li>`item`<ul><li>`device_events`</li><li>`panic_count`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_firecracker_vsock`: <br> VSOCK-related metrics. | `GAUGE` | | <ul><li>`item`<ul><li>`activate_fails`</li><li>`cfg_fails`</li><li>`conn_event_fails`</li><li>`conns_added`</li><li>`conns_killed`</li><li>`conns_removed`</li><li>`ev_queue_event_fails`</li><li>`killq_resync`</li><li>`muxer_event_fails`</li><li>`rx_bytes_count`</li><li>`rx_packets_count`</li><li>`rx_queue_event_count`</li><li>`rx_queue_event_fails`</li><li>`rx_read_fails`</li><li>`tx_bytes_count`</li><li>`tx_flush_fails`</li><li>`tx_packets_count`</li><li>`tx_queue_event_count`</li><li>`tx_queue_event_fails`</li><li>`tx_write_fails`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |

### Kata guest OS metrics

@@ -288,7 +305,7 @@ Metrics about Kata containerd shim v2 process.

| Metric name | Type | Units | Labels | Introduced in Kata version |
|---|---|---|---|---|
| `kata_shim_agent_rpc_durations_histogram_milliseconds`: <br> RPC latency distributions. | `HISTOGRAM` | `milliseconds` | <ul><li>`action` (RPC actions of Kata agent)<ul><li>`grpc.CheckRequest`</li><li>`grpc.CloseStdinRequest`</li><li>`grpc.CopyFileRequest`</li><li>`grpc.CreateContainerRequest`</li><li>`grpc.CreateSandboxRequest`</li><li>`grpc.DestroySandboxRequest`</li><li>`grpc.ExecProcessRequest`</li><li>`grpc.GetMetricsRequest`</li><li>`grpc.GuestDetailsRequest`</li><li>`grpc.ListInterfacesRequest`</li><li>`grpc.ListProcessesRequest`</li><li>`grpc.ListRoutesRequest`</li><li>`grpc.MemHotplugByProbeRequest`</li><li>`grpc.OnlineCPUMemRequest`</li><li>`grpc.PauseContainerRequest`</li><li>`grpc.RemoveContainerRequest`</li><li>`grpc.ReseedRandomDevRequest`</li><li>`grpc.ResumeContainerRequest`</li><li>`grpc.SetGuestDateTimeRequest`</li><li>`grpc.SignalProcessRequest`</li><li>`grpc.StartContainerRequest`</li><li>`grpc.StatsContainerRequest`</li><li>`grpc.TtyWinResizeRequest`</li><li>`grpc.UpdateContainerRequest`</li><li>`grpc.UpdateInterfaceRequest`</li><li>`grpc.UpdateRoutesRequest`</li><li>`grpc.WaitProcessRequest`</li><li>`grpc.WriteStreamRequest`</li></ul></li><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_shim_fds`: <br> Kata containerd shim v2 open FDs. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_shim_go_gc_duration_seconds`: <br> A summary of the pause duration of garbage collection cycles. | `SUMMARY` | `seconds` | <ul><li>`sandbox_id`</li></ul> | 2.0.0 |
| `kata_shim_go_goroutines`: <br> Number of goroutines that currently exist. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> | 2.0.0 |
@@ -30,7 +30,7 @@ The Kata Containers runtime **MUST** implement the following command line option

The Kata Containers project **MUST** provide two interfaces for CRI shims to manage hardware
virtualization based Kubernetes pods and containers:
- An OCI and `runc` compatible command line interface, as described in the previous section.
  This interface is used by implementations such as [`CRI-O`](http://cri-o.io) and [`containerd`](https://github.com/containerd/containerd), for example.
- A hardware virtualization runtime library API for CRI shims to consume and provide a more
  CRI native implementation. The [`frakti`](https://github.com/kubernetes/frakti) CRI shim is an example of such a consumer.
@@ -5,7 +5,7 @@
- [Run Kata containers with `crictl`](run-kata-with-crictl.md)
- [Run Kata Containers with Kubernetes](run-kata-with-k8s.md)
- [How to use Kata Containers and Containerd](containerd-kata.md)
- [How to use Kata Containers and CRI (containerd) with Kubernetes](how-to-use-k8s-with-cri-containerd-and-kata.md)
- [Kata Containers and service mesh for Kubernetes](service-mesh.md)
- [How to import Kata Containers logs into Fluentd](how-to-import-kata-logs-with-fluentd.md)
@@ -17,10 +17,9 @@
- `firecracker`
- `ACRN`

While `qemu`, `cloud-hypervisor` and `firecracker` work out of the box with an installation of Kata,
some additional configuration is needed in the case of `ACRN`.
Refer to the following guides for additional configuration steps:
- [Kata Containers with Firecracker](https://github.com/kata-containers/documentation/wiki/Initial-release-of-Kata-Containers-with-Firecracker-support)
- [Kata Containers with ACRN Hypervisor](how-to-use-kata-containers-with-acrn.md)
## Advanced Topics
@@ -35,3 +34,5 @@
- [How to set sandbox Kata Containers configurations with pod annotations](how-to-set-sandbox-config-kata.md)
- [How to monitor Kata Containers in K8s](how-to-set-prometheus-in-k8s.md)
- [How to use hotplug memory on arm64 in Kata Containers](how-to-hotplug-memory-arm64.md)
- [How to setup swap devices in guest kernel](how-to-setup-swap-devices-in-guest-kernel.md)
- [How to run rootless vmm](how-to-run-rootless-vmm.md)
@@ -39,7 +39,7 @@ use `RuntimeClass` instead of the deprecated annotations.

### Containerd Runtime V2 API: Shim V2 API

The [`containerd-shim-kata-v2` (short as `shimv2` in this documentation)](../../src/runtime/cmd/containerd-shim-kata-v2/)
implements the [Containerd Runtime V2 (Shim API)](https://github.com/containerd/containerd/tree/master/runtime/v2) for Kata.
With `shimv2`, Kubernetes can launch Pod and OCI-compatible containers with one shim per Pod. Prior to `shimv2`, `2N+1`
shims (i.e. a `containerd-shim` and a `kata-shim` for each container and the Pod sandbox itself) and no standalone `kata-proxy`
docs/how-to/how-to-run-rootless-vmm.md (new file, 33 lines)
@@ -0,0 +1,33 @@
## Introduction
To improve security, Kata Containers supports running the VMM process (currently only QEMU) as a non-`root` user.
This document describes how to enable the rootless VMM mode and its limitations.

## Pre-requisites
The permissions and ownership of the `kvm` device node (`/dev/kvm`) need to be configured as:
```
crw-rw---- 1 root kvm
```
Use the following commands:
```
$ sudo groupadd kvm -r
$ sudo chown root:kvm /dev/kvm
$ sudo chmod 660 /dev/kvm
```

## Configure rootless VMM
By default, the VMM process still runs as the root user. There are two ways to enable rootless VMM (a sketch of both follows this list):
1. Set the `rootless` flag to `true` in the hypervisor section of `configuration.toml`.
2. Set the Kubernetes annotation `io.katacontainers.hypervisor.rootless` to `true`.
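
The `sed` one-liner below assumes the flag ships commented out (as `#rootless`) in your `configuration.toml`, and the pod spec assumes a `kata` runtime class and that the annotation is allowed by your runtime configuration; both are illustrative assumptions:

```sh
$ # Option 1: enable the flag in the hypervisor section of configuration.toml
$ sudo sed -i -e 's/^# *rootless.*$/rootless = true/' /etc/kata-containers/configuration.toml

$ # Option 2: request rootless VMM per pod via the annotation
$ cat << EOF > rootless-vmm-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: rootless-vmm-test
  annotations:
    io.katacontainers.hypervisor.rootless: "true"
spec:
  runtimeClassName: kata
  containers:
  - name: busybox
    image: busybox
    command: ["top"]
EOF
$ kubectl apply -f rootless-vmm-pod.yaml
```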

## Implementation details
When the `rootless` flag is enabled, upon a request to create a Pod, the Kata Containers runtime creates a random user and group (e.g. `kata-123`) and uses them to start the hypervisor process.
The `kvm` group is also given to the hypervisor process as a supplementary group, to grant it access to the `/dev/kvm` device.
Another necessary change is to move the hypervisor runtime files (e.g. `vhost-fs.sock`, `qmp.sock`) to a directory (under `/run/user/[uid]/`) that only the non-root hypervisor has access to.

## Limitations

1. Only the VMM process runs as a non-root user. Other processes, such as the Kata Containers shimv2 and `virtiofsd`, still run as the root user.
2. Currently, this feature is only supported with QEMU. It still needs to be brought to Firecracker and Cloud Hypervisor (see https://github.com/kata-containers/kata-containers/issues/2567).
3. Certain features will not work when rootless VMM is enabled, including:
   1. Passing devices to the guest (`virtio-blk`, `virtio-scsi`) will not work if the non-privileged user does not have permission to access them (leading to a permission denied error). A more permissive mode (e.g. 666) may overcome this issue; however, you need to be aware of the potential security implications of reducing the security on such devices.
   2. `vfio` devices will also not work, because of the same permission denied error.
@@ -34,8 +34,6 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.agent.enable_tracing` | `boolean` | enable tracing for the agent |
| `io.katacontainers.config.agent.container_pipe_size` | `uint32` | specify the size of the std(in/out) pipes created for containers |
| `io.katacontainers.config.agent.kernel_modules` | string | the list of kernel modules and their parameters that will be loaded in the guest kernel. Semicolon separated list of kernel modules and their parameters. These modules will be loaded in the guest kernel using `modprobe`(8). E.g., `e1000e InterruptThrottleRate=3000,3000,3000 EEE=1; i915 enable_ppgtt=0` |
| `io.katacontainers.config.agent.trace_mode` | string | the trace mode for the agent |
| `io.katacontainers.config.agent.trace_type` | string | the trace type for the agent |

## Hypervisor Options
| Key | Value Type | Comments |
@@ -91,6 +89,13 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.virtio_fs_cache` | string | the cache mode for virtio-fs, valid values are `always`, `auto` and `none` |
| `io.katacontainers.config.hypervisor.virtio_fs_daemon` | string | virtio-fs `vhost-user` daemon path |
| `io.katacontainers.config.hypervisor.virtio_fs_extra_args` | string | extra options passed to the `virtiofs` daemon |
| `io.katacontainers.config.hypervisor.enable_guest_swap` | `boolean` | enable swap in the guest |

## Container Options
| Key | Value Type | Comments |
|-------| ----- | ----- |
| `io.katacontainers.container.resource.swappiness` | `uint64` | specify the `Resources.Memory.Swappiness` |
| `io.katacontainers.container.resource.swap_in_bytes` | `uint64` | specify the `Resources.Memory.Swap` |
# CRI-O Configuration

@@ -100,11 +105,12 @@ In case of CRI-O, all annotations specified in the pod spec are passed down to K

For containerd, annotations specified in the pod spec are passed down to Kata
starting with version `1.3.0` of containerd. Additionally, extra configuration is
needed for containerd, by providing a `pod_annotations` field and a
`container_annotations` field in the containerd config
file. The `pod_annotations` field and `container_annotations` field are two lists of
annotations that can be passed down to Kata as OCI annotations. They support golang match
patterns. Since annotations supported by Kata follow the pattern `io.katacontainers.*`,
the following configuration would work for passing annotations to Kata from containerd:

```
$ cat /etc/containerd/config
@@ -113,6 +119,7 @@ $ cat /etc/containerd/config
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
  runtime_type = "io.containerd.kata.v2"
  pod_annotations = ["io.katacontainers.*"]
  container_annotations = ["io.katacontainers.*"]
....
```
docs/how-to/how-to-setup-swap-devices-in-guest-kernel.md (new file, 59 lines)
@@ -0,0 +1,59 @@
# Setup swap device in guest kernel

## Introduction

Setting up a swap device in the guest kernel can help to increase memory capacity, work around some memory issues, and sometimes increase file access speed.
Kata Containers can insert a raw file into the guest as the swap device.

## Requisites

The swap config of the containers should be set by [annotations](how-to-set-sandbox-config-kata.md#container-options), so [extra configuration is needed for containerd](how-to-set-sandbox-config-kata.md#containerd-configuration).

Kata Containers only supports setting up a swap device in the guest kernel with QEMU.
Install and set up Kata Containers as shown [here](../install/README.md).

Enable the guest kernel swap device as follows:
```
$ sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' /etc/kata-containers/configuration.toml
```

## Run a Kata Container utilizing swap device

Use the following commands to start a Kata Container with swappiness 60 and a 1GB swap device (`swap_in_bytes` - `memory_limit_in_bytes`):
```
$ pod_yaml=pod.yaml
$ container_yaml=container.yaml
$ image="quay.io/prometheus/busybox:latest"
$ cat << EOF > "${pod_yaml}"
metadata:
  name: busybox-sandbox1
EOF
$ cat << EOF > "${container_yaml}"
metadata:
  name: busybox-test-swap
annotations:
  io.katacontainers.container.resource.swappiness: "60"
  io.katacontainers.container.resource.swap_in_bytes: "2147483648"
linux:
  resources:
    memory_limit_in_bytes: 1073741824
image:
  image: "$image"
command:
- top
EOF
$ sudo crictl pull $image
$ podid=$(sudo crictl runp $pod_yaml)
$ cid=$(sudo crictl create $podid $container_yaml $pod_yaml)
$ sudo crictl start $cid
```

Kata Containers sets up the swap device for this container only when `io.katacontainers.container.resource.swappiness` is set.

The following table shows how the swap size is decided when `io.katacontainers.container.resource.swappiness` is set:
|`io.katacontainers.container.resource.swap_in_bytes`|`memory_limit_in_bytes`|swap size|
|---|---|---|
|set|set| `io.katacontainers.container.resource.swap_in_bytes` - `memory_limit_in_bytes`|
|not set|set| `memory_limit_in_bytes`|
|not set|not set| `io.katacontainers.config.hypervisor.default_memory`|
|set|not set|cgroup doesn't support this usage|
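To verify the result from inside the running container, a quick hedged check (assuming the `crictl` session from the example above is still active):

```sh
$ # The guest kernel should report one swap entry sized per the table above
$ sudo crictl exec $cid cat /proc/swaps
```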
@@ -3,7 +3,7 @@
This document describes how to set up a single-machine Kubernetes (k8s) cluster.

The Kubernetes cluster will use the
[CRI containerd](https://github.com/containerd/containerd/) and
[Kata Containers](https://katacontainers.io) to launch untrusted workloads.

## Requirements
@@ -71,12 +71,12 @@ $ for service in ${services}; do
    service_dir="/etc/systemd/system/${service}.service.d/"
    sudo mkdir -p ${service_dir}

    cat << EOF | sudo tee "${service_dir}/proxy.conf"
[Service]
Environment="HTTP_PROXY=${http_proxy}"
Environment="HTTPS_PROXY=${https_proxy}"
Environment="NO_PROXY=${no_proxy}"
EOF
done

$ sudo systemctl daemon-reload
@@ -172,7 +172,7 @@ If a pod has the `runtimeClassName` set to `kata`, the CRI plugin runs the pod w
- Create a pod configuration that uses the Kata Containers runtime

```bash
$ cat << EOF | tee nginx-kata.yaml
apiVersion: v1
kind: Pod
metadata:
@@ -183,7 +183,7 @@ If a pod has the `runtimeClassName` set to `kata`, the CRI plugin runs the pod w
  - name: nginx
    image: nginx

EOF
```

- Create the pod
@@ -22,7 +22,7 @@ This document requires the presence of the ACRN hypervisor and Kata Containers o

- ACRN supported [Hardware](https://projectacrn.github.io/latest/hardware.html#supported-hardware).
  > **Note:** Please make sure to have a minimum of 4 logical processors (HT) or cores.
- ACRN [software](https://projectacrn.github.io/latest/tutorials/run_kata_containers.html) setup.
- For networking, ACRN supports either MACVTAP or TAP. If MACVTAP is not enabled in the Service OS, please follow the below steps to update the kernel:

```sh
@@ -16,9 +16,9 @@ from the host, a potentially undesirable side-effect that decreases the security

The following sections document how to configure this behavior in different container runtimes.

#### Containerd

Containerd allows configuring the privileged host devices behavior for each runtime in the containerd config. This is
done with the `privileged_without_host_devices` option. Setting this to `true` will disable hot plugging of the host
devices into the guest, even when privileged is enabled.
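As a sketch of such a containerd runtime entry (the plugin table path mirrors the kata runtime entry shown earlier in this document, and the config file path is the common default; treat both as illustrative):

```sh
$ cat << EOF | sudo tee -a /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
  runtime_type = "io.containerd.kata.v2"
  privileged_without_host_devices = true
EOF
```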
@@ -41,7 +41,7 @@ See below example config:
```

- [Kata Containers with Containerd and CRI documentation](how-to-use-k8s-with-cri-containerd-and-kata.md)
- [Containerd CRI config documentation](https://github.com/containerd/containerd/blob/main/docs/cri/config.md)

#### CRI-O
@@ -9,7 +9,7 @@ Kubernetes CRI (Container Runtime Interface) implementations allow using any
OCI-compatible runtime with Kubernetes, such as the Kata Containers runtime.

Kata Containers supports both the [CRI-O](https://github.com/kubernetes-incubator/cri-o) and
[containerd](https://github.com/containerd/containerd) CRI implementations.

After choosing one CRI implementation, you must make the appropriate configuration
to ensure it integrates with Kata Containers.
@@ -20,9 +20,9 @@ required to spawn pods and containers, and this is the preferred way to run Kata
An equivalent shim implementation for CRI-O is planned.

### CRI-O
For CRI-O installation instructions, refer to the [CRI-O Tutorial](https://github.com/cri-o/cri-o/blob/main/tutorial.md) page.

The following sections show how to set up the CRI-O snippet configuration file (default path: `/etc/crio/crio.conf`) for Kata.

Unless otherwise stated, all the following settings are specific to the `crio.runtime` table:
```toml
@@ -30,7 +30,7 @@ Unless otherwise stated, all the following settings are specific to the `crio.ru
# runtime used and options for how to set up and manage the OCI runtime.
[crio.runtime]
```
Comprehensive documentation of the configuration file can be found [here](https://github.com/cri-o/cri-o/blob/main/docs/crio.conf.5.md).

> **Note**: After any change to this file, the CRI-O daemon has to be restarted with:
>````
@@ -40,82 +40,20 @@ A comprehensive documentation of the configuration file can be found [here](http
#### Kubernetes Runtime Class (CRI-O v1.12+)
The [Kubernetes Runtime Class](https://kubernetes.io/docs/concepts/containers/runtime-class/)
is the preferred way of specifying the container runtime configuration to run a Pod's containers.
To use this feature, Kata must be added as a runtime handler. This can be done by
dropping a `50-kata` snippet file into `/etc/crio/crio.conf.d`, with the
content shown below:

```toml
[crio.runtime.runtimes.kata-runtime]
runtime_path = "/usr/bin/kata-runtime"
runtime_type = "oci"
```

You can also add multiple entries to specify alternative hypervisors, e.g.:
```toml
[crio.runtime.runtimes.kata-qemu]
runtime_path = "/usr/bin/kata-runtime"
runtime_type = "oci"

[crio.runtime.runtimes.kata-fc]
runtime_path = "/usr/bin/kata-runtime"
runtime_type = "oci"
```
#### Untrusted annotation (until CRI-O v1.12)
The untrusted annotation is used to specify a runtime for __untrusted__ workloads, i.e.
a runtime to be used when the workload cannot be trusted and a higher level of security
is required. An additional flag can be used to let CRI-O know if a workload
should be considered _trusted_ or _untrusted_ by default.
For further details, see the documentation
[here](../design/architecture.md#mixing-vm-based-and-namespace-based-runtimes).

```toml
# runtime is the OCI compatible runtime used for trusted container workloads.
# This is a mandatory setting as this runtime will be the default one
# and will also be used for untrusted container workloads if
# runtime_untrusted_workload is not set.
runtime = "/usr/bin/runc"

# runtime_untrusted_workload is the OCI compatible runtime used for untrusted
# container workloads. This is an optional setting, except if
# default_container_trust is set to "untrusted".
runtime_untrusted_workload = "/usr/bin/kata-runtime"

# default_workload_trust is the default level of trust crio puts in container
# workloads. It can either be "trusted" or "untrusted", and the default
# is "trusted".
# Containers can be run through different container runtimes, depending on
# the trust hints we receive from kubelet:
# - If kubelet tags a container workload as untrusted, crio will try first to
#   run it through the untrusted container workload runtime. If it is not set,
#   crio will use the trusted runtime.
# - If kubelet does not provide any information about the container workload trust
#   level, the selected runtime will depend on the default_container_trust setting.
#   If it is set to "untrusted", then all containers except for the host privileged
#   ones, will be run by the runtime_untrusted_workload runtime. Host privileged
#   containers are by definition trusted and will always use the trusted container
#   runtime. If default_container_trust is set to "trusted", crio will use the trusted
#   container runtime for all containers.
default_workload_trust = "untrusted"
```
#### Network namespace management
To enable networking for the workloads run by Kata, CRI-O needs to be configured to
manage network namespaces, by setting the following key to `true`.

In CRI-O v1.16:
```toml
manage_network_ns_lifecycle = true
```
In CRI-O v1.17+:
```toml
manage_ns_lifecycle = true

[crio.runtime.runtimes.kata]
runtime_path = "/usr/bin/containerd-shim-kata-v2"
runtime_type = "vm"
runtime_root = "/run/vc"
privileged_without_host_devices = true
```
### containerd

To customize containerd to select the Kata Containers runtime, follow our
"Configure containerd to use Kata Containers" internal documentation
@@ -160,7 +98,7 @@ $ sudo systemctl restart kubelet
# If using CRI-O
$ sudo kubeadm init --ignore-preflight-errors=all --cri-socket /var/run/crio/crio.sock --pod-network-cidr=10.244.0.0/16

# If using containerd
$ sudo kubeadm init --ignore-preflight-errors=all --cri-socket /run/containerd/containerd.sock --pod-network-cidr=10.244.0.0/16

$ export KUBECONFIG=/etc/kubernetes/admin.conf
@@ -34,7 +34,7 @@ as the proxy starts.

Follow the [instructions](../install/README.md)
to get Kata Containers properly installed and configured with Kubernetes.
You can choose between CRI-O and containerd, both are supported
through this document.

For both cases, select the workloads as _trusted_ by default. This way,
@@ -159,7 +159,7 @@ containers with `privileged: true` to `privileged: false`.
There is no difference between Istio and Linkerd in this section. It is
about which CRI implementation you use.

For both CRI-O and containerd, you have to add an annotation indicating
the workload for this deployment is not _trusted_, which will trigger
`kata-runtime` to be called instead of `runc`.
@@ -193,9 +193,9 @@ spec:
...
```

__containerd:__

Add the following annotation for containerd:
```yaml
io.kubernetes.cri.untrusted-workload: "true"
```
@@ -47,7 +47,7 @@ and can be classified as potentially misunderstood behaviors rather than vulnera
VM templating can be enabled by changing your Kata Containers config file (`/usr/share/defaults/kata-containers/configuration.toml`,
overridden by `/etc/kata-containers/configuration.toml` if provided) such that:

- `qemu` version `v4.1.0` or above is specified in the `hypervisor.qemu`->`path` section
- `enable_template = true`
- `initrd =` is set
- `image =` option is commented out or removed
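A quick way to apply the `enable_template` toggle, sketched under the assumption that the option ships commented out in the default `configuration.toml` (as the other toggles in this document do):

```sh
$ sudo mkdir -p /etc/kata-containers
$ sudo cp /usr/share/defaults/kata-containers/configuration.toml /etc/kata-containers/
$ sudo sed -i -e 's/^#enable_template.*$/enable_template = true/g' /etc/kata-containers/configuration.toml
```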

@@ -12,16 +12,26 @@ Containers.

Packaged installation methods use your distribution's native package format (such as RPM or DEB).

> **Note:** We encourage installation methods that provide automatic updates, as they ensure security updates and bug fixes are
> easily applied.

| Installation method | Description | Automatic updates | Use case |
|------------------------------------------------------|----------------------------------------------------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------|
| [Using kata-deploy](#kata-deploy-installation) | The preferred way to deploy the Kata Containers distributed binaries on a Kubernetes cluster | **No!** | Best way to give kata-containers a try on an already up and running Kubernetes cluster. |
| [Using official distro packages](#official-packages) | Kata packages provided by Linux distributions' official repositories | yes | Recommended for most users. |
| [Using snap](#snap-installation) | Easy to install | yes | Good alternative to official distro packages. |
| [Automatic](#automatic-installation) | Run a single command to install a full system | **No!** | For those wanting the latest release quickly. |
| [Manual](#manual-installation) | Follow a guide step-by-step to install a working system | **No!** | For those who want the latest release with more control. |
| [Build from source](#build-from-source-installation) | Build the software components manually | **No!** | Power users and developers only. |
### Kata Deploy Installation

Kata Deploy provides a Dockerfile, which contains all of the binaries and
artifacts required to run Kata Containers, as well as reference DaemonSets,
which can be utilized to install Kata Containers on a running Kubernetes
cluster.

[Use Kata Deploy](/tools/packaging/kata-deploy/README.md) to install Kata Containers on a Kubernetes Cluster.
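For a quick start, the deployment usually boils down to applying the RBAC and DaemonSet manifests from the repository. The raw URLs below assume the current `main` branch layout, so check the linked README for the authoritative steps:

```sh
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml
$ kubectl apply -f https://raw.githubusercontent.com/kata-containers/kata-containers/main/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml
```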

### Official packages

@@ -48,9 +58,9 @@ Follow the [containerd installation guide](container-manager/containerd/containe

## Build from source installation

> **Note:** Power users who decide to build from sources should be aware of the
> implications of using an unpackaged system which will not be automatically
> updated as new [releases](../Stable-Branch-Strategy.md) are made available.

[Building from sources](../Developer-Guide.md#initial-setup) allows power users
who are comfortable building software from source to use the latest component
docs/threat-model/threat-model-boundaries.svg (new file, 1 line; file diff suppressed because one or more lines are too long; rendered size 150 KiB)
docs/threat-model/threat-model.md (new file, 137 lines)
@@ -0,0 +1,137 @@
# Kata Containers threat model

This document discusses threat models associated with the Kata Containers project.
Kata was designed to provide additional isolation of container workloads, protecting
the host infrastructure from potentially malicious container users or workloads. Since
Kata Containers adds a level of isolation on top of traditional containers, the focus
is on the additional layer provided, not on traditional container security.

This document provides a brief background on containers and layered security, describes
the interface to Kata from CRI runtimes, reviews the utilized virtual machine interfaces, and then
reviews the threats.

## Kata security objective

Kata seeks to prevent an untrusted container workload, or a user of that workload, from gaining
control of, obtaining information from, or tampering with the host infrastructure.

In our scenario, an asset is anything on the host system, or elsewhere in the cluster
infrastructure. The attacker is assumed to be either a malicious user or the workload itself
running within the container. The goal of Kata is to prevent attacks which would allow
any access to the defined assets.

## Background on containers, layered security

Traditional containers leverage several key Linux kernel features to provide isolation and
a view that the container workload is the only entity running on the host. Key features include
`Namespaces`, `cgroups`, `capabilities`, `SELinux` and `seccomp`. The canonical runtime for creating such
a container is `runc`. In the remainder of the document, the term `traditional-container` will be used
to describe a container workload created by `runc`.

Kata Containers provides a second layer of isolation on top of that provided by traditional-containers.
The hardware virtualization interface is the basis of this additional layer. Kata launches a lightweight
virtual machine, and uses the guest's Linux kernel to create a container workload, or workloads in the case
of multi-container pods. In Kubernetes and in the Kata implementation, the sandbox is carried out at the
pod level. In Kata, this sandbox is created using a virtual machine.

## Interface to Kata Containers: CRI, v2-shim, OCI

A typical Kata Containers deployment uses Kubernetes with a CRI implementation.
On every node, Kubelet will interact with a CRI implementor, which will in turn interface with
an OCI based runtime, such as Kata Containers. Typical CRI implementors are `cri-o` and `containerd`.

The CRI API, as defined at the Kubernetes [CRI-API repo](https://github.com/kubernetes/cri-api/),
results in a few constructs being supported by the CRI implementation, and ultimately in the OCI
runtime creating the workloads.

In order to run a container inside of the Kata sandbox, several virtual machine devices and interfaces
are required. Kata translates sandbox and container definitions to underlying virtualization technologies provided
by a set of virtual machine monitors (VMMs) and hypervisors. These devices and their underlying
implementations are discussed in detail in the following sections.

## Interface to the Kata sandbox/virtual machine

In the case of Kata today, the devices needed in the guest are:
- Storage: In the current design of Kata Containers, we rely on the CRI implementor to
assist in image handling and volume management on the host. As a result, we need to support a way of passing to the sandbox the container rootfs, volumes requested
by the workload, and any other volumes created to facilitate sharing of secrets and `configmaps` with the containers. Depending on how these are managed, a block based device or file-system
sharing is required. Kata Containers does this by way of `virtio-blk` and/or `virtio-fs`.
- Networking: A method for enabling network connectivity with the workload is required. Typically this is done by providing a `TAP` device
to the VMM, which is exposed to the guest as a `virtio-net` device. It is feasible to pass in a NIC device directly, in which case `VFIO` is leveraged
and the device itself is exposed to the guest.
- Control: In order to interact with the guest agent and retrieve `STDIO` from containers, a medium of communication is required.
This is available via `virtio-vsock`.
- Devices: `VFIO` is utilized when devices are passed directly to the virtual machine and exposed to the container.
- Dynamic Resource Management: `ACPI` is utilized to allow for dynamic VM resource management (for example: CPU, memory, device hotplug). This is required when containers are resized,
or more generally when containers are added to a pod.

How these devices are utilized varies depending on the VMM used. We clarify the default settings provided when integrating Kata
with the QEMU, Firecracker and Cloud Hypervisor VMMs in the following sections.

### Devices

Each virtio device is implemented by a backend, which may execute within userspace on the host (`vhost-user`), the VMM itself, or within the host kernel (`vhost`). While they may provide enhanced performance,
`vhost` devices are often seen as higher risk, since an exploit would already be running within the kernel space. While the VMM and `vhost-user` back-ends are both in userspace on the host, `vhost-user` generally allows the back-end process to require fewer system calls and capabilities compared to a full VMM.

#### `virtio-blk` and `virtio-scsi`

The backends for `virtio-blk` and `virtio-scsi` are based in the VMM itself (ring3 in the context of x86) by default for Cloud Hypervisor, Firecracker and QEMU.
While `vhost` based back-ends are available for QEMU, they are not recommended. `vhost-user` back-ends are being added for Cloud Hypervisor, but they are not utilized in Kata today.

#### `virtio-fs`

`virtio-fs` is supported in Cloud Hypervisor and QEMU. `virtio-fs`'s interaction with the host filesystem is done through a vhost-user daemon, `virtiofsd`.
The `virtio-fs` client, running in the guest, will generate requests to access files. `virtiofsd` will receive requests, open the file, and request the VMM
to `mmap` it into the guest. When DAX is utilized, the guest will access the host's page cache, avoiding the need for copy and duplication. DAX is still an experimental feature,
and is not enabled by default.

From the `virtiofsd` [documentation](https://qemu-project.gitlab.io/qemu/tools/virtiofsd.html):
```
This program must be run as the root user. Upon startup the program will switch into a new file system namespace with the shared directory tree as its root. This prevents “file system escapes” due to symlinks and other file system objects that might lead to files outside the shared directory. The program also sandboxes itself using seccomp(2) to prevent ptrace(2) and other vectors that could allow an attacker to compromise the system after gaining control of the virtiofsd process.
```

DAX-less support for `virtio-fs` is available as of the 5.4 Linux kernel. The QEMU VMM supports `virtio-fs` as of v4.2. Cloud Hypervisor
supports `virtio-fs`.

#### `virtio-net`

`virtio-net` has many options, depending on the VMM and Kata configurations.

##### QEMU networking

While QEMU has options for `vhost`, `virtio-net` and `vhost-user`, the `virtio-net` backend
for Kata defaults to `vhost-net` for performance reasons. The default configuration is being
reevaluated.

##### Firecracker networking

For Firecracker, the `virtio-net` backend is within Firecracker's VMM.

##### Cloud Hypervisor networking

For Cloud Hypervisor, the current backend default is within the VMM. `vhost-user-net` support
is being added (written in Rust, Cloud Hypervisor specific).
#### `virtio-vsock`

##### QEMU vsock

In QEMU, vsock is backed by `vhost_vsock`, which runs within the kernel itself.

##### Firecracker and Cloud Hypervisor

In Firecracker and Cloud Hypervisor, vsock is backed by a unix domain socket in the host's userspace.

#### VFIO

Utilizing VFIO, devices can be passed through to the virtual machine. We will assess this separately. Exposure to
the host is limited to gaps in device pass-through handling. This is supported in QEMU and Cloud Hypervisor, but not in
Firecracker.

#### ACPI

ACPI is necessary for hotplug of CPU, memory and devices. ACPI is available in QEMU and Cloud Hypervisor. Device, CPU and memory hotplug
are not available in Firecracker.

## Devices and threat model

![Kata Containers threat model](./threat-model-boundaries.svg "threat model boundaries")
docs/tracing.md (new file, 214 lines)
@@ -0,0 +1,214 @@
# Overview

This document explains how to trace Kata Containers components.

# Introduction

The Kata Containers runtime and agent are able to generate
[OpenTelemetry][opentelemetry] trace spans, which allow the administrator to
observe what those components are doing and how much time they are spending on
each operation.

# OpenTelemetry summary

An OpenTelemetry-enabled application creates a number of trace "spans". A span
contains the following attributes:

- A name
- A pair of timestamps (recording the start time and end time of some operation)
- A reference to the span's parent span

All spans need to be *finished*, or *completed*, to allow the OpenTelemetry
framework to generate the final trace information (by effectively closing the
transaction encompassing the initial (root) span and all its children).

For Kata, the root span represents the total amount of time taken to run a
particular component from startup to its shutdown (the "run time").

# Architecture

## Runtime tracing architecture

The runtime, which runs in the host environment, has been modified to
optionally generate trace spans which are sent to a trace collector on the
host.

## Agent tracing architecture

An OpenTelemetry system (such as [Jaeger][jaeger-tracing]) uses a collector to
gather up trace spans from the application for viewing and processing. For an
application to use the collector, it must run in the same context as
the collector.

This poses a problem for tracing the Kata Containers agent since it does not
run in the same context as the collector: it runs inside a virtual machine (VM).

To allow spans from the agent to be sent to the trace collector, Kata provides
a [trace forwarder][trace-forwarder] component. This runs in the same context
as the collector (generally on the host system) and listens on a
[`VSOCK`][vsock] channel for traces generated by the agent, forwarding them on
to the trace collector.

> **Note:**
>
> This design supports agent tracing without having to make changes to the
> image, and also means that [custom images][osbuilder] can benefit from
> agent tracing.

The following diagram summarises the architecture used to trace the Kata
Containers agent:

```
+--------------------------------------------+
| Host                                       |
|                                            |
|  +---------------+                         |
|  | OpenTelemetry |                         |
|  |     Trace     |                         |
|  |   Collector   |                         |
|  +---------------+                         |
|        ^                +---------------+  |
|        | spans          | Kata VM       |  |
|  +-----+-----+          |               |  |
|  | Kata      | spans    o  +-------+    |  |
|  | Trace     |<-----------| Kata  |    |  |
|  | Forwarder | VSOCK    o | Agent |    |  |
|  +-----------+ Channel  |  +-------+    | |
|                         +---------------+  |
+--------------------------------------------+
```

# Agent tracing prerequisites

- You must have a trace collector running.

  Although the collector normally runs on the host, it can also be run from
  inside a Docker image configured to expose the appropriate host ports to the
  collector.

  The [Jaeger "all-in-one" Docker image][jaeger-all-in-one] method
  is the quickest and simplest way to run the collector for testing (see the sketch after these notes).

- If you wish to trace the agent, you must start the
  [trace forwarder][trace-forwarder].

> **Notes:**
>
> - If agent tracing is enabled but the forwarder is not running,
>   the agent will log an error (signalling that it cannot generate trace
>   spans), but continue to work as normal.
>
> - The trace forwarder requires a trace collector (such as Jaeger) to be
>   running before it is started. If a collector is not running, the trace
>   forwarder will exit with an error.
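As a sketch, the Jaeger all-in-one collector can be started locally like this (the image name and ports are the upstream Jaeger defaults, not Kata-specific values):

```sh
$ docker run -d --name jaeger \
    -p 6831:6831/udp \
    -p 16686:16686 \
    jaegertracing/all-in-one:latest
```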

# Enable tracing

By default, tracing is disabled for all components. To enable _any_ form of
tracing, an `enable_tracing` option must be enabled for at least one component.

> **Note:**
>
> Enabling this option will only allow tracing for subsequently
> started containers.

## Enable runtime tracing

To enable runtime tracing, set the tracing option as shown:

```toml
[runtime]
enable_tracing = true
```

## Enable agent tracing

To enable agent tracing, set the tracing option as shown:

```toml
[agent.kata]
enable_tracing = true
```

> **Note:**
>
> If both agent tracing and runtime tracing are enabled, the resulting trace
> spans will be "collated": expanding individual runtime spans in the Jaeger
> web UI will show the agent trace spans resulting from the runtime
> operation.
# Appendices

## Agent tracing requirements

### Host environment

- The host kernel must support the VSOCK socket type.

  This will be available if the kernel is built with the
  `CONFIG_VHOST_VSOCK` configuration option.

- The VSOCK kernel module must be loaded:

  ```
  $ sudo modprobe vhost_vsock
  ```

### Guest environment

- The guest kernel must support the VSOCK socket type:

  This will be available if the kernel is built with the
  `CONFIG_VIRTIO_VSOCKETS` configuration option.

  > **Note:** The default Kata Containers guest kernel provides this feature.

## Agent tracing limitations

- Agent tracing is only "completed" when the workload and the Kata agent
  process have exited.

  Although trace information *can* be inspected before the workload and agent
  have exited, it is incomplete. This is shown as `<trace-without-root-span>`
  in the Jaeger web UI.

  If the workload is still running, the trace transaction -- which spans the entire
  runtime of the Kata agent -- will not have been completed. To view the complete
  trace details, wait for the workload to end, or stop the container.
## Performance impact

[OpenTelemetry][opentelemetry] is designed for high performance. It combines
the best of two previous generation projects (OpenTracing and OpenCensus) and
uses a very efficient mechanism to capture trace spans. Further, the trace
points inserted into the agent are automatically generated at compile time. This
is advantageous since new versions of the agent will automatically benefit
from improvements in the tracing infrastructure. Overall, the impact of
enabling runtime and agent tracing should be extremely low.
## Agent shutdown behaviour

In normal operation, the Kata runtime manages the VM shutdown and performs
certain optimisations to speed up this process. However, if agent tracing is
enabled, the agent itself is responsible for shutting down the VM. This is to
ensure all agent trace transactions are completed. This means there will be a
small performance impact on container shutdown when agent tracing is enabled,
as the runtime must wait for the VM to shut down fully.

## Set up a tracing development environment

If you want to debug, further develop, or test tracing,
[enabling full debug][enable-full-debug]
is highly recommended. For working with the agent, you may also wish to
[enable a debug console][setup-debug-console]
to allow you to access the VM environment.

[agent-ctl]: https://github.com/kata-containers/kata-containers/blob/main/tools/agent-ctl
[enable-full-debug]: https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md#enable-full-debug
[jaeger-all-in-one]: https://www.jaegertracing.io/docs/getting-started/
[jaeger-tracing]: https://www.jaegertracing.io
[opentelemetry]: https://opentelemetry.io
[osbuilder]: https://github.com/kata-containers/kata-containers/blob/main/tools/osbuilder
[setup-debug-console]: https://github.com/kata-containers/kata-containers/blob/main/docs/Developer-Guide.md#set-up-a-debug-console
[trace-forwarder]: https://github.com/kata-containers/kata-containers/blob/main/src/trace-forwarder
[vsock]: https://wiki.qemu.org/Features/VirtioVsock
@@ -67,7 +67,7 @@ To use large BARs devices (for example, Nvidia Tesla P100), you need Kata versio

The following configuration in the Kata `configuration.toml` file as shown below can work:

Hotplug for PCI devices by `acpi_pcihp` (Linux's ACPI PCI Hotplug driver):
```
machine_type = "q35"

@@ -91,7 +91,6 @@ The following kernel config options need to be enabled:
```
# Support PCI/PCIe device hotplug (Required for large BARs device)
CONFIG_HOTPLUG_PCI_PCIE=y

# Support for loading modules (Required for load Nvidia drivers)
CONFIG_MODULES=y
@@ -1,107 +1,113 @@
# Kata Containers with SGX

Intel® Software Guard Extensions (SGX) is a set of instructions that increases the security
Intel Software Guard Extensions (SGX) is a set of instructions that increases the security
of application code and data, giving them more protection from disclosure or modification.

> **Note:** At the time of writing this document, SGX patches have not landed on the Linux kernel
> project, so specific versions for guest and host kernels must be installed to enable SGX.
This document guides you to run containers with SGX enclaves with Kata Containers in Kubernetes.

## Check if SGX is enabled
## Preconditions

Run the following command to check if your host supports SGX.
* Intel SGX capable bare metal nodes
* Host kernel Linux 5.13 or later with SGX and SGX KVM enabled:

```sh
$ grep -o sgx /proc/cpuinfo
$ grep SGX /boot/config-`uname -r`
CONFIG_X86_SGX=y
CONFIG_X86_SGX_KVM=y
```

Continue to the following section if the output of the above command is empty,
otherwise continue to section [Install Guest kernel with SGX support](#install-guest-kernel-with-sgx-support)
* Kubernetes cluster configured with:
  * [`kata-deploy`](https://github.com/kata-containers/kata-containers/tree/main/tools/packaging/kata-deploy) based Kata Containers installation
  * [Intel SGX Kubernetes device plugin](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/main/cmd/sgx_plugin#deploying-with-pre-built-images)

## Install Host kernel with SGX support
> Note: Kata Containers supports creating VM sandboxes with Intel® SGX enabled
> using the [cloud-hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor/) VMM only. QEMU support is waiting for the
> Intel SGX enabled QEMU upstream release.

The following commands were tested on Fedora 32; they might work on other distros too.
## Installation

### Kata Containers Guest Kernel

Follow the instructions to [setup](../../tools/packaging/kernel/README.md#setup-kernel-source-code) and [build](../../tools/packaging/kernel/README.md#build-the-kernel) the experimental guest kernel. Then, install it as follows:

```sh
$ git clone --depth=1 https://github.com/intel/kvm-sgx
$ pushd kvm-sgx
$ cp /boot/config-$(uname -r) .config
$ yes "" | make oldconfig
$ # In the following step, enable: INTEL_SGX and INTEL_SGX_VIRTUALIZATION
$ make menuconfig
$ make -j$(($(nproc)-1)) bzImage
$ make -j$(($(nproc)-1)) modules
$ sudo make modules_install
$ sudo make install
$ popd
$ sudo reboot
$ sudo cp kata-linux-experimental-*/vmlinux /opt/kata/share/kata-containers/vmlinux.sgx
$ sudo sed -i 's|vmlinux.container|vmlinux.sgx|g' \
    /opt/kata/share/defaults/kata-containers/configuration-clh.toml
```

> **Notes:**
> * Run `mokutil --sb-state` to check whether secure boot is enabled; if so, you will need to sign the kernel.
> * You'll lose SGX support when a new distro kernel is installed and the system rebooted.

Once you have restarted your system with the brand new Linux kernel with SGX support, run
the following command to make sure it's enabled. If the output is empty, go to the BIOS
setup and enable SGX manually.

```sh
$ grep -o sgx /proc/cpuinfo
```

## Install Guest kernel with SGX support

Install the guest kernel in the Kata Containers directory so that it can be used to run
Kata Containers.

```sh
$ curl -LOk https://github.com/devimc/kvm-sgx/releases/download/v0.0.1/kata-virtiofs-sgx.tar.gz
$ sudo tar -xf kata-virtiofs-sgx.tar.gz -C /usr/share/kata-containers/
$ sudo sed -i 's|kernel =|kernel = "/usr/share/kata-containers/vmlinux-virtiofs-sgx.container"|g' \
    /usr/share/defaults/kata-containers/configuration.toml
```

## Run Kata Containers with SGX enabled
### Kata Containers Configuration

Before running a Kata Container, make sure that your version of `crio` or `containerd`
supports annotations.

For `containerd`, check in `/etc/containerd/config.toml` that the list of `pod_annotations` passed
to the `sandbox` is: `["io.katacontainers.*", "sgx.intel.com/epc"]`.
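As a sketch of what that fragment looks like (the exact plugin path depends
on your `containerd` version; treat this as illustrative rather than a
drop-in configuration):

```toml
# Hypothetical /etc/containerd/config.toml fragment allowing the Kata
# runtime class to receive the SGX EPC pod annotation.
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]
  runtime_type = "io.containerd.kata.v2"
  pod_annotations = ["io.katacontainers.*", "sgx.intel.com/epc"]
```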
> `sgx.yaml`
## Usage

With the following sample job deployed using `kubectl apply -f`:

```yaml
apiVersion: v1
kind: Pod
apiVersion: batch/v1
kind: Job
metadata:
  name: sgx
  annotations:
    sgx.intel.com/epc: "32Mi"
  name: oesgx-demo-job
  labels:
    jobgroup: oesgx-demo
spec:
  terminationGracePeriodSeconds: 0
  runtimeClassName: kata
  containers:
  - name: c1
    image: busybox
    command:
      - sh
    stdin: true
    tty: true
    volumeMounts:
    - mountPath: /dev/sgx/
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /dev/sgx/
      type: Directory
  template:
    metadata:
      labels:
        jobgroup: oesgx-demo
    spec:
      runtimeClassName: kata-clh
      initContainers:
      - name: init-sgx
        image: busybox
        command: ['sh', '-c', 'mkdir /dev/sgx; ln -s /dev/sgx_enclave /dev/sgx/enclave; ln -s /dev/sgx_provision /dev/sgx/provision']
        volumeMounts:
        - mountPath: /dev
          name: dev-mount
      restartPolicy: Never
      containers:
      -
        name: eosgx-demo-job-1
        image: oeciteam/oe-helloworld:latest
        imagePullPolicy: IfNotPresent
        securityContext:
          readOnlyRootFilesystem: true
          capabilities:
            add: ["IPC_LOCK"]
        resources:
          limits:
            sgx.intel.com/epc: "512Ki"
      volumes:
      - name: dev-mount
        hostPath:
          path: /dev
```

You'll see the enclave output:

```sh
$ kubectl apply -f sgx.yaml
$ kubectl exec -ti sgx ls /dev/sgx/
enclave provision
$ kubectl logs oesgx-demo-job-wh42g
Hello world from the enclave
Enclave called into host to print: Hello World!
```

The output of the last command shouldn't be empty; otherwise, check
your system environment to make sure SGX is fully supported.
### Notes

[1]: github.com/cloud-hypervisor/cloud-hypervisor/
* The Kata VM's SGX Enclave Page Cache (EPC) memory size is based on the sum of `sgx.intel.com/epc`
  resource requests within the pod.
* `init-sgx` can be removed from the YAML configuration file if the Kata rootfs is modified with the
  necessary udev rules.
  See the [note on SGX backwards compatibility](https://github.com/intel/intel-device-plugins-for-kubernetes/tree/main/cmd/sgx_plugin#backwards-compatibility-note).
* Intel SGX DCAP attestation is known to work from Kata sandboxes, but it comes with one limitation: if
  the Intel SGX `aesm` daemon runs on the bare metal node and DCAP `out-of-proc` attestation is used,
  containers within the Kata sandbox cannot get access to the host's `/var/run/aesmd/aesm.sock`
  because socket passthrough is not supported. An alternative is to deploy the `aesm` daemon as a side-car
  container.
* Projects like [Gramine Shielded Containers (GSC)](https://gramine-gsc.readthedocs.io/en/latest/) are
  also known to work. For GSC specifically, the Kata guest kernel needs to have `CONFIG_NUMA=y`
  enabled and at least one CPU online when running the GSC container.
@@ -1,4 +1,4 @@
# Setup to run SPDK vhost-user devices with Kata Containers and Docker*
# Setup to run SPDK vhost-user devices with Kata Containers

> **Note:** This guide only applies to QEMU, since the vhost-user storage
> device is only available for QEMU now. The enablement work on other
@@ -222,26 +222,43 @@ minor `0` should be created for it, in order to be recognized by Kata runtime:
$ sudo mknod /var/run/kata-containers/vhost-user/block/devices/vhostblk0 b 241 0
```

> **Note:** vhost-user block devices are supported by Kata Containers
> `1.11.0-alpha1` or newer.
> Make sure you have updated your Kata Containers before evaluation.
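As a sketch, vhost-user storage support is controlled from the QEMU section
of the Kata `configuration.toml` (option names as in recent releases; verify
them against your installed configuration):

```toml
# Hypothetical [hypervisor.qemu] fragment: enable vhost-user storage and
# point Kata at the directory scanned for vhost-user device nodes.
enable_vhost_user_store = true
vhost_user_store_path = "/var/run/kata-containers/vhost-user"
```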
## Launch a Kata container with SPDK vhost-user block device

To use a `vhost-user-blk` device, use Docker to pass a host `vhost-user-blk`
device to the container. In docker, `--device=HOST-DIR:CONTAINER-DIR` is used
To use a `vhost-user-blk` device, use `ctr` to pass a host `vhost-user-blk`
device to the container. In your `config.json`, you should use `devices`
to pass a host device to the container.

For example:
For example (only `vhost-user-blk` listed):

```json
{
  "linux": {
    "devices": [
      {
        "path": "/dev/vda",
        "type": "b",
        "major": 241,
        "minor": 0,
        "fileMode": 420,
        "uid": 0,
        "gid": 0
      }
    ]
  }
}
```

With `rootfs` provisioned under the `bundle` directory, you can run your SPDK container:

```bash
$ sudo docker run --runtime kata-runtime --device=/var/run/kata-containers/vhost-user/block/devices/vhostblk0:/dev/vda -it busybox sh
$ sudo ctr run -d --runtime io.containerd.run.kata.v2 --config bundle/config.json spdk_container
```

Example of performing I/O operations on the `vhost-user-blk` device inside
the container:

```
$ sudo ctr t exec --exec-id 1 -t spdk_container sh
/ # ls -l /dev/vda
brw-r--r-- 1 root root 254, 0 Jan 20 03:54 /dev/vda
/ # dd if=/dev/vda of=/tmp/ddtest bs=4k count=20
@@ -12,7 +12,7 @@ serde_json = "1.0.39"
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"
pkg/logging/Makefile (new file):
@@ -0,0 +1,18 @@
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# It is not necessary to have a build target as this crate is built
# automatically by the consumers of it.
#
# However, it is essential that the crate be tested.
default: test

# It is essential to run these tests using *both* build profiles.
# See the `test_logger_levels()` test for further information.
test:
	@echo "INFO: testing log levels for development build"
	@cargo test
	@echo "INFO: testing log levels for release build"
	@cargo test --release
@@ -20,6 +20,8 @@ const LOG_LEVELS: &[(&str, slog::Level)] = &[
    ("critical", slog::Level::Critical),
];

const DEFAULT_SUBSYSTEM: &str = "root";

// XXX: 'writer' param used to make testing possible.
pub fn create_logger<W>(
    name: &str,
@@ -50,7 +52,7 @@ where
    let logger = slog::Logger::root(
        async_drain.fuse(),
        o!("version" => env!("CARGO_PKG_VERSION"),
        "subsystem" => "root",
        "subsystem" => DEFAULT_SUBSYSTEM,
        "pid" => process::id().to_string(),
        "name" => name.to_string(),
        "source" => source.to_string()),
@@ -216,8 +218,8 @@ where
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::Value;
    use slog::info;
    use serde_json::{json, Value};
    use slog::{crit, debug, error, info, warn, Logger};
    use std::io::prelude::*;
    use tempfile::NamedTempFile;

@@ -295,15 +297,15 @@ mod tests {
                let result_level = result.unwrap();
                let expected_level = d.result.unwrap();

                assert!(result_level == expected_level, msg);
                assert!(result_level == expected_level, "{}", msg);
                continue;
            } else {
                assert!(result.is_err(), msg);
                assert!(result.is_err(), "{}", msg);
            }

            let expected_error = format!("{}", d.result.as_ref().unwrap_err());
            let actual_error = format!("{}", result.unwrap_err());
            assert!(actual_error == expected_error, msg);
            let expected_error = d.result.as_ref().unwrap_err();
            let actual_error = result.unwrap_err();
            assert!(&actual_error == expected_error, "{}", msg);
        }
    }

@@ -350,13 +352,13 @@ mod tests {
            let msg = format!("{}, result: {:?}", msg, result);

            if d.result.is_ok() {
                assert!(result == d.result, msg);
                assert!(result == d.result, "{}", msg);
                continue;
            }

            let expected_error = format!("{}", d.result.as_ref().unwrap_err());
            let actual_error = format!("{}", result.unwrap_err());
            assert!(actual_error == expected_error, msg);
            let expected_error = d.result.as_ref().unwrap_err();
            let actual_error = result.unwrap_err();
            assert!(&actual_error == expected_error, "{}", msg);
        }
    }

@@ -376,14 +378,17 @@ mod tests {
        let record_key = "record-key-1";
        let record_value = "record-key-2";

        let logger = create_logger(name, source, level, writer);
        let (logger, guard) = create_logger(name, source, level, writer);

        let msg = "foo, bar, baz";

        // Call the logger (which calls the drain)
        info!(logger, "{}", msg; "subsystem" => record_subsystem, record_key => record_value);
        // Note: This "mid level" log level should be available in debug or
        // release builds.
        info!(&logger, "{}", msg; "subsystem" => record_subsystem, record_key => record_value);

        // Force temp file to be flushed
        drop(guard);
        drop(logger);

        let mut contents = String::new();
@@ -430,4 +435,168 @@ mod tests {
            .expect("failed to find record key field");
        assert_eq!(field_record_value, record_value);
    }

    #[test]
    fn test_logger_levels() {
        let name = "name";
        let source = "source";

        let debug_msg = "a debug log level message";
        let info_msg = "an info log level message";
        let warn_msg = "a warn log level message";
        let error_msg = "an error log level message";
        let critical_msg = "a critical log level message";

        // The slog crate will *remove* macro calls for log levels "above" the
        // configured log level.
        //
        // At the time of writing, the default slog log
        // level is "info", but this crate overrides that using the magic
        // "*max_level*" features in the "Cargo.toml" manifest.

        // However, there are two log levels:
        //
        // - max_level_${level}
        //
        //   This is the log level for normal "cargo build" (development/debug)
        //   builds.
        //
        // - release_max_level_${level}
        //
        //   This is the log level for "cargo install" and
        //   "cargo build --release" (release) builds.
        //
        // This crate sets them to different values, which is sensible and
        // standard practice. However, that causes a problem: there is
        // currently no clean way for this test code to detect _which_
        // profile the test is being built for (development or release),
        // meaning we cannot know which macros are expected to produce output
        // and which aren't ;(
        //
        // The best we can do is test the following log levels which
        // are expected to work in all build profiles.

        let debug_closure = |logger: &Logger, msg: String| debug!(logger, "{}", msg);
        let info_closure = |logger: &Logger, msg: String| info!(logger, "{}", msg);
        let warn_closure = |logger: &Logger, msg: String| warn!(logger, "{}", msg);
        let error_closure = |logger: &Logger, msg: String| error!(logger, "{}", msg);
        let critical_closure = |logger: &Logger, msg: String| crit!(logger, "{}", msg);

        struct TestData<'a> {
            slog_level: slog::Level,
            slog_level_tag: &'a str,
            msg: String,
            closure: Box<dyn Fn(&Logger, String)>,
        }

        let tests = &[
            TestData {
                slog_level: slog::Level::Debug,
                // Looks like a typo but tragically it isn't! ;(
                slog_level_tag: "DEBG",
                msg: debug_msg.into(),
                closure: Box::new(debug_closure),
            },
            TestData {
                slog_level: slog::Level::Info,
                slog_level_tag: "INFO",
                msg: info_msg.into(),
                closure: Box::new(info_closure),
            },
            TestData {
                slog_level: slog::Level::Warning,
                slog_level_tag: "WARN",
                msg: warn_msg.into(),
                closure: Box::new(warn_closure),
            },
            TestData {
                slog_level: slog::Level::Error,
                // Another language tragedy
                slog_level_tag: "ERRO",
                msg: error_msg.into(),
                closure: Box::new(error_closure),
            },
            TestData {
                slog_level: slog::Level::Critical,
                slog_level_tag: "CRIT",
                msg: critical_msg.into(),
                closure: Box::new(critical_closure),
            },
        ];

        for (i, d) in tests.iter().enumerate() {
            let msg = format!("test[{}]", i);

            // Create a writer for the logger drain to use
            let writer =
                NamedTempFile::new().expect(&format!("{:}: failed to create tempfile", msg));

            // Used to check file contents before the temp file is unlinked
            let mut writer_ref = writer
                .reopen()
                .expect(&format!("{:?}: failed to clone tempfile", msg));

            let (logger, logger_guard) = create_logger(name, source, d.slog_level, writer);

            // Call the logger (which calls the drain)
            (d.closure)(&logger, d.msg.to_owned());

            // Force temp file to be flushed
            drop(logger_guard);
            drop(logger);

            let mut contents = String::new();
            writer_ref
                .read_to_string(&mut contents)
                .expect(&format!("{:?}: failed to read tempfile contents", msg));

            // Convert file to JSON
            let fields: Value = serde_json::from_str(&contents)
                .expect(&format!("{:?}: failed to convert logfile to json", msg));

            // Check the expected JSON fields

            let field_ts = fields
                .get("ts")
                .expect(&format!("{:?}: failed to find timestamp field", msg));
            assert_ne!(field_ts, "", "{}", msg);

            let field_version = fields
                .get("version")
                .expect(&format!("{:?}: failed to find version field", msg));
            assert_eq!(field_version, env!("CARGO_PKG_VERSION"), "{}", msg);

            let field_pid = fields
                .get("pid")
                .expect(&format!("{:?}: failed to find pid field", msg));
            assert_ne!(field_pid, "", "{}", msg);

            let field_level = fields
                .get("level")
                .expect(&format!("{:?}: failed to find level field", msg));
            assert_eq!(field_level, d.slog_level_tag, "{}", msg);

            let field_msg = fields
                .get("msg")
                .expect(&format!("{:?}: failed to find msg field", msg));
            assert_eq!(field_msg, &json!(d.msg), "{}", msg);

            let field_name = fields
                .get("name")
                .expect(&format!("{:?}: failed to find name field", msg));
            assert_eq!(field_name, name, "{}", msg);

            let field_source = fields
                .get("source")
                .expect(&format!("{:?}: failed to find source field", msg));
            assert_eq!(field_source, source, "{}", msg);

            let field_subsystem = fields
                .get("subsystem")
                .expect(&format!("{:?}: failed to find subsystem field", msg));

            // No explicit subsystem, so should be the default
            assert_eq!(field_subsystem, &json!(DEFAULT_SUBSYSTEM), "{}", msg);
        }
    }
}
@@ -59,7 +59,7 @@ parts:

      yq_version=3.4.1
      yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
      curl -o "${yq_path}" -LSsf "${yq_url}"
      curl -o "${yq_path}" -L "${yq_url}"
      chmod +x "${yq_path}"

      kata_dir=gopath/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
@@ -139,7 +139,7 @@ parts:
      cp kata-containers*.img ${kata_image_dir}

  runtime:
    after: [godeps, image]
    after: [godeps, image, cloud-hypervisor]
    plugin: nil
    build-attributes: [no-patchelf]
    override-build: |
@@ -185,6 +185,7 @@ parts:
      - flex
    override-build: |
      yq=${SNAPCRAFT_STAGE}/yq
      export PATH="${PATH}:${SNAPCRAFT_STAGE}"
      export GOPATH=${SNAPCRAFT_STAGE}/gopath
      kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
      versions_file="${kata_dir}/versions.yaml"
@@ -199,10 +200,17 @@ parts:
      kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}

      cd ${kata_dir}/tools/packaging/kernel
      kernel_dir_prefix="kata-linux-"

      # Setup and build kernel
      ./build-kernel.sh -v ${kernel_version} -d setup
      kernel_dir_prefix="kata-linux-"
      if [ "$(uname -m)" = "x86_64" ]; then
        kernel_version="$(${yq} r $versions_file assets.kernel-experimental.tag)"
        kernel_version=${kernel_version#v}
        kernel_dir_prefix="kata-linux-experimental-"
        ./build-kernel.sh -e -v ${kernel_version} -d setup
      else
        ./build-kernel.sh -v ${kernel_version} -d setup
      fi
      cd ${kernel_dir_prefix}*
      make -j $(($(nproc)-1)) EXTRAVERSION=".container"

@@ -299,13 +307,13 @@ parts:
        | xargs ./configure

      # Copy QEMU configurations (Kconfigs)
      case "$(branch)" in
      case "${branch}" in
        "v5.1.0")
          cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs
          ;;

        *)
          cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* default-configs/devices/
          cp -a ${kata_dir}/tools/packaging/qemu/default-configs/* configs/devices/
          ;;
      esac

@@ -327,6 +335,22 @@ parts:
      # Hack: move qemu to /
      "snap/kata-containers/current/": "./"

  cloud-hypervisor:
    plugin: nil
    after: [godeps]
    override-build: |
      export GOPATH=${SNAPCRAFT_STAGE}/gopath
      yq=${SNAPCRAFT_STAGE}/yq
      kata_dir=${GOPATH}/src/github.com/${SNAPCRAFT_PROJECT_NAME}/${SNAPCRAFT_PROJECT_NAME}
      versions_file="${kata_dir}/versions.yaml"
      version="$(${yq} r ${versions_file} assets.hypervisor.cloud_hypervisor.version)"
      url="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/${version}"
      curl -L ${url}/cloud-hypervisor-static -o cloud-hypervisor
      curl -LO ${url}/clh-remote

      install -D cloud-hypervisor ${SNAPCRAFT_PART_INSTALL}/usr/bin/cloud-hypervisor
      install -D clh-remote ${SNAPCRAFT_PART_INSTALL}/usr/bin/clh-remote

apps:
  runtime:
    command: usr/bin/kata-runtime
src/agent/Cargo.lock (generated):
@@ -4,9 +4,9 @@ version = 3

[[package]]
name = "addr2line"
version = "0.15.1"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03345e98af8f3d786b6d9f656ccfa6ac316d954e92bc4841f0bba20789d5fb5a"
checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
dependencies = [
 "gimli",
]
@@ -83,9 +83,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"

[[package]]
name = "backtrace"
version = "0.3.59"
version = "0.3.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4717cfcbfaa661a0fd48f8453951837ae7e8f81e481fbb136e3202d72805a744"
checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01"
dependencies = [
 "addr2line",
 "cc",
@@ -414,15 +414,15 @@ dependencies = [

[[package]]
name = "gimli"
version = "0.24.0"
version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"

[[package]]
name = "heck"
version = "0.3.2"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac"
checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
dependencies = [
 "unicode-segmentation",
]
@@ -544,7 +544,9 @@ dependencies = [
 "rustjail",
 "scan_fmt",
 "scopeguard",
 "serde",
 "serde_json",
 "serial_test",
 "slog",
 "slog-scope",
 "slog-stdlog",
@@ -552,6 +554,7 @@ dependencies = [
 "thiserror",
 "tokio",
 "tokio-vsock",
 "toml",
 "tracing",
 "tracing-opentelemetry",
 "tracing-subscriber",
@@ -591,6 +594,24 @@ dependencies = [
 "rle-decode-fast",
]

[[package]]
name = "libseccomp"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36ad71a5b66ceef3acfe6a3178b29b4da063f8bcb2c36dab666d52a7a9cfdb86"
dependencies = [
 "libc",
 "libseccomp-sys",
 "nix 0.17.0",
 "pkg-config",
]

[[package]]
name = "libseccomp-sys"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "539912de229a4fc16e507e8df12a394038a524a5b5b6c92045ad344472aac475"

[[package]]
name = "lock_api"
version = "0.4.4"
@@ -760,6 +781,19 @@ dependencies = [
 "void",
]

[[package]]
name = "nix"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363"
dependencies = [
 "bitflags",
 "cc",
 "cfg-if 0.1.10",
 "libc",
 "void",
]

[[package]]
name = "nix"
version = "0.19.1"
@@ -837,9 +871,12 @@ dependencies = [

[[package]]
name = "object"
version = "0.24.0"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170"
checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386"
dependencies = [
 "memchr",
]

[[package]]
name = "oci"
@@ -974,6 +1011,12 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"

[[package]]
name = "ppv-lite86"
version = "0.2.10"
@@ -1098,6 +1141,10 @@ name = "protobuf"
version = "2.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485"
dependencies = [
 "serde",
 "serde_derive",
]

[[package]]
name = "protobuf-codegen"
@@ -1124,6 +1171,8 @@ version = "0.1.0"
dependencies = [
 "async-trait",
 "protobuf",
 "serde",
 "serde_json",
 "ttrpc",
 "ttrpc-codegen",
]
@@ -1264,9 +1313,9 @@ dependencies = [

[[package]]
name = "rustc-demangle"
version = "0.1.19"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce"
checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"

[[package]]
name = "rustjail"
@@ -1281,6 +1330,7 @@ dependencies = [
 "inotify",
 "lazy_static",
 "libc",
 "libseccomp",
 "nix 0.21.0",
 "oci",
 "path-absolutize",
@@ -1323,18 +1373,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
version = "1.0.126"
version = "1.0.130"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03"
checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.126"
version = "1.0.130"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43"
checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b"
dependencies = [
 "proc-macro2 1.0.26",
 "quote 1.0.9",
@@ -1343,9 +1393,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.64"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
dependencies = [
 "itoa",
 "ryu",
@@ -1618,6 +1668,15 @@ dependencies = [
 "vsock",
]

[[package]]
name = "toml"
version = "0.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
dependencies = [
 "serde",
]

[[package]]
name = "tracing"
version = "0.1.26"
@@ -1754,9 +1813,9 @@ dependencies = [

[[package]]
name = "unicode-segmentation"
version = "1.7.1"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796"
checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b"

[[package]]
name = "unicode-xid"
@@ -6,7 +6,6 @@ edition = "2018"

[dependencies]
oci = { path = "oci" }
logging = { path = "../../pkg/logging" }
rustjail = { path = "rustjail" }
protocols = { path = "protocols" }
lazy_static = "1.3.0"
@@ -20,6 +19,7 @@ scan_fmt = "0.2.3"
scopeguard = "1.0.0"
thiserror = "1.0.26"
regex = "1"
serial_test = "0.5.1"

# Async helpers
async-trait = "0.1.42"
@@ -35,11 +35,10 @@ rtnetlink = "0.8.0"
netlink-packet-utils = "0.4.1"
ipnetwork = "0.17.0"

# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
# Note: this crate sets the slog 'max_*' features which allows the log level
# to be modified at runtime.
logging = { path = "../../pkg/logging" }
slog = "2.5.2"
slog-scope = "4.1.2"

# Redirect ttrpc log calls
@@ -58,6 +57,10 @@ tracing-opentelemetry = "0.13.0"
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"]}
vsock-exporter = { path = "vsock-exporter" }

# Configuration
serde = { version = "1.0.129", features = ["derive"] }
toml = "0.5.8"

[dev-dependencies]
tempfile = "3.1.0"
@@ -70,3 +73,6 @@ members = [

[profile.release]
lto = true

[features]
seccomp = ["rustjail/seccomp"]
@@ -1,202 +0,0 @@
(Deleted file: the vendored copy of the standard Apache License, Version 2.0
text, http://www.apache.org/licenses/LICENSE-2.0. The full 202-line license
body is omitted here.)
@@ -27,6 +27,20 @@ COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)
# Exported to allow cargo to see it
export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))

EXTRA_RUSTFEATURES :=

##VAR SECCOMP=yes|no define if agent enables seccomp feature
SECCOMP := yes

# Enable seccomp feature of rust build
ifeq ($(SECCOMP),yes)
    override EXTRA_RUSTFEATURES += seccomp
endif

ifneq ($(EXTRA_RUSTFEATURES),)
    override EXTRA_RUSTFEATURES := --features $(EXTRA_RUSTFEATURES)
endif

include ../../utils.mk

TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
@@ -87,18 +101,20 @@ endef
##TARGET default: build code
default: $(TARGET) show-header

$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
$(TARGET): $(GENERATED_CODE) logging-crate-tests $(TARGET_PATH)

logging-crate-tests:
	make -C $(CWD)/../../pkg/logging

$(TARGET_PATH): $(SOURCES) | show-summary
	@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)
	@RUSTFLAGS="$(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE) $(EXTRA_RUSTFEATURES)

$(GENERATED_FILES): %: %.in
	@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"

##TARGET optimize: optimized build
optimize: $(SOURCES) | show-summary show-header
	@RUSTFLAGS="-C link-arg=-s $(EXTRA_RUSTFLAGS) --deny-warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE)

	@RUSTFLAGS="-C link-arg=-s $(EXTRA_RUSTFLAGS) --deny warnings" cargo build --target $(TRIPLE) --$(BUILD_TYPE) $(EXTRA_RUSTFEATURES)

##TARGET clippy: run clippy linter
clippy: $(GENERATED_CODE)
@@ -127,7 +143,7 @@ vendor:

#TARGET test: run cargo tests
test:
	@cargo test --all --target $(TRIPLE)
	@cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture

##TARGET check: run test
check: clippy format
@@ -192,9 +208,10 @@ codecov-html: check_tarpaulin

.PHONY: \
	help \
	logging-crate-tests \
	optimize \
	show-header \
	show-summary \
	optimize \
	vendor

##TARGET generate-protocols: generate/update grpc agent protocols
@@ -1,47 +1,38 @@
# Kata Agent in Rust
# Kata Agent

This is a rust version of the [`kata-agent`](https://github.com/kata-containers/agent).
## Overview

In Denver PTG, [we discussed about re-writing agent in rust](https://etherpad.openstack.org/p/katacontainers-2019-ptg-denver-agenda):
The Kata agent is a long running process that runs inside the Virtual Machine
(VM) (also known as the "pod" or "sandbox").

> In general, we all think about re-write agent in rust to reduce the footprint of agent. Moreover, Eric mentioned the possibility to stop using gRPC, which may have some impact on footprint. We may begin to do some POC to show how much we could save by re-writing agent in rust.
The agent is packaged inside the Kata Containers
[guest image](../../docs/design/architecture.md#guest-image)
which is used to boot the VM. Once the runtime has launched the configured
[hypervisor](../../docs/hypervisors.md) to create a new VM, the agent is
started. From this point on, the agent is responsible for creating and
managing the life cycle of the containers inside the VM.

After that, we drafted the initial code here, and any contributions are welcome.
For further details, see the
[architecture document](../../docs/design/architecture.md).

## Features
## Audience

| Feature | Status |
| :--|:--:|
| **OCI Behaviors** |
| create/start containers | :white_check_mark: |
| signal/wait process | :white_check_mark: |
| exec/list process | :white_check_mark: |
| I/O stream | :white_check_mark: |
| Cgroups | :white_check_mark: |
| Capabilities, `rlimit`, readonly path, masked path, users | :white_check_mark: |
| container stats (`stats_container`) | :white_check_mark: |
| Hooks | :white_check_mark: |
| **Agent Features & APIs** |
| run agent as `init` (mount fs, udev, setup `lo`) | :white_check_mark: |
| block device as root device | :white_check_mark: |
| Health API | :white_check_mark: |
| network, interface/routes (`update_container`) | :white_check_mark: |
| File transfer API (`copy_file`) | :white_check_mark: |
| Device APIs (`reseed_random_device`, `online_cpu_memory`, `mem_hotplug_probe`, `set_guest_date_time`) | :white_check_mark: |
| VSOCK support | :white_check_mark: |
| virtio-serial support | :heavy_multiplication_x: |
| OCI Spec validator | :white_check_mark: |
| **Infrastructures**|
| Debug Console | :white_check_mark: |
| Command line | :white_check_mark: |
| Tracing | :heavy_multiplication_x: |
If you simply wish to use Kata Containers, it is not necessary to understand
the details of how the agent operates. Please see the
[installation documentation](../../docs/install) for details of how to deploy
Kata Containers (which will include the Kata agent).

## Getting Started
The remainder of this document is only useful for developers and testers.

### Build from Source
The rust-agent needs to be built statically and linked with `musl`
## Build from Source

> **Note:** skip this step for ppc64le; the build scripts explicitly use gnu for ppc64le.
Since the agent is written in the Rust language, this section assumes the tool
chain has been installed using the standard Rust `rustup` tool.

### Build with musl

If you wish to build the agent with the `musl` C library, you need to run the
following commands:

```bash
$ arch=$(uname -m)
@@ -49,12 +40,15 @@ $ rustup target add "${arch}-unknown-linux-musl"
$ sudo ln -s /usr/bin/g++ /bin/musl-g++
```

ppc64le-only: Manually install `protoc`, e.g.
```bash
$ sudo dnf install protobuf-compiler
```
> **Note:**
>
> It is not currently possible to build using `musl` on ppc64le and s390x
> since both platforms lack the `musl` target.

### Build the agent binary

The following steps download the Kata Containers source files and build the agent:

Download the source files in the Kata containers repository and build the agent:
```bash
$ GOPATH="${GOPATH:-$HOME/go}"
$ dir="$GOPATH/src/github.com/kata-containers"
@@ -62,17 +56,56 @@ $ git -C ${dir} clone --depth 1 https://github.com/kata-containers/kata-containe
$ make -C ${dir}/kata-containers/src/agent
```

## Run Kata CI with rust-agent
* Firstly, install Kata as noted by ["how to install Kata"](../../docs/install/README.md)
* Secondly, build your own Kata initrd/image following the steps in ["how to build your own initrd/image"](../../docs/Developer-Guide.md#create-and-install-rootfs-and-initrd-image).
  Note: Please use your rust agent instead of the go agent when building your initrd/image.
* Clone the Kata CI test cases from: https://github.com/kata-containers/tests.git, and then run the CRI test with:
## Change the agent API

The Kata runtime communicates with the Kata agent using a ttRPC based API protocol.

This ttRPC API is defined by a set of [protocol buffers files](protocols/protos).
The protocol files are used to generate the bindings for the following components:

| Component | Language | Generation method | Tooling required |
|-|-|-|-|
| runtime | Golang | Run, `make generate-protocols` | `protoc` |
| agent | Rust | Run, `make` | |

If you wish to change the API, these files must be regenerated. Although the
rust code will be automatically generated by the
[build script](protocols/build.rs),
the Golang code generation requires the external `protoc` command to be
available in `$PATH`.

To install the `protoc` command on a Fedora/CentOS/RHEL system:

```bash
$sudo -E PATH=$PATH -E GOPATH=$GOPATH integration/containerd/shimv2/shimv2-tests.sh
$ sudo dnf install -y protobuf-compiler
```

## Mini Benchmark
The `RssAnon` memory consumed by the go-agent and rust-agent is as below:
go-agent: about 11M
rust-agent: about 1.1M
## Custom guest image and kernel assets

If you wish to develop or test changes to the agent, you will need to create a
custom guest image using the [osbuilder tool](../../tools/osbuilder). You
may also wish to create a custom [guest kernel](../../tools/packaging/kernel).

Once created, [configure](../runtime/README.md#configuration) Kata Containers to use
these custom assets to allow you to test your changes.
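As a minimal sketch (hypothetical paths; the section name depends on the
configured hypervisor), pointing Kata at custom assets amounts to editing
`configuration.toml`:

```toml
# Hypothetical configuration.toml fragment: boot the VM with a custom
# guest kernel and image built for agent development.
[hypervisor.qemu]
kernel = "/usr/share/kata-containers/vmlinux-custom"
image = "/usr/share/kata-containers/kata-containers-custom.img"
```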
> **Note:**
>
> To simplify development and testing, you may wish to run the agent
> [stand alone](#run-the-agent-stand-alone) initially.

## Tracing

For details of tracing the operation of the agent, see the
[tracing documentation](../../docs/tracing.md).

## Run the agent stand alone

Although the agent is designed to run in a VM environment, for development and
testing purposes it is possible to run it as a normal application.

When run in this way, the agent can be controlled using the low-level Kata
agent control tool, rather than the Kata runtime.

For further details, see the
[agent control tool documentation](../../tools/agent-ctl/README.md#run-the-tool-and-the-agent-in-the-same-environment).
@@ -4,10 +4,16 @@ version = "0.1.0"
authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
edition = "2018"

[features]
default = []
with-serde = [ "serde", "serde_json" ]

[dependencies]
ttrpc = { version = "0.5.0", features = ["async"] }
async-trait = "0.1.42"
protobuf = "=2.14.0"
protobuf = { version = "=2.14.0", features = ["with-serde"] }
serde = { version = "1.0.130", features = ["derive"], optional = true }
serde_json = { version = "1.0.68", optional = true }

[build-dependencies]
ttrpc-codegen = "0.2.0"
@@ -3,29 +3,148 @@
// SPDX-License-Identifier: Apache-2.0
//

use std::fs;
use ttrpc_codegen::{Codegen, Customize};
use std::fs::File;
use std::io::{BufRead, BufReader, Read, Write};
use std::path::Path;
use std::process::exit;
use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize};

fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> {
    let mut src = File::open(file_name)?;
    let mut contents = String::new();
    src.read_to_string(&mut contents).unwrap();
    drop(src);

    let new_contents = contents.replace(from, to);

    let mut dst = File::create(&file_name)?;
    dst.write_all(new_contents.as_bytes())?;

    Ok(())
}

fn use_serde(protos: &[&str], out_dir: &Path) -> Result<(), std::io::Error> {
    protos
        .iter()
        .try_for_each(|f: &&str| -> Result<(), std::io::Error> {
            let out_file = Path::new(f)
                .file_name()
                .and_then(|s| s.to_str())
                .ok_or(format!("failed to get proto file name for {:?}", f))
                .map(|s| {
                    let t = s.replace(".proto", ".rs");
                    out_dir.join(t)
                })
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
                .to_str()
                .ok_or(format!("cannot convert {:?} path to string", f))
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
                .to_string();

            replace_text_in_file(
                &out_file,
                "derive(Serialize, Deserialize)",
                "derive(serde::Serialize, serde::Deserialize)",
            )
        })
}

fn handle_file(autogen_comment: &str, rust_filename: &str) -> Result<(), std::io::Error> {
    let mut new_contents = Vec::new();

    let file = File::open(rust_filename)?;

    let reader = BufReader::new(file);

    // Guard the code since it is only needed for the agent-ctl tool,
    // not the agent itself.
    let serde_default_code = r#"#[cfg_attr(feature = "with-serde", serde(default))]"#;

    for line in reader.lines() {
        let line = line?;

        new_contents.push(line.clone());

        let pattern = "//! Generated file from";

        if line.starts_with(&pattern) {
            new_contents.push(autogen_comment.into());
        }

        let struct_pattern = "pub struct ";

        // Although we've requested serde support via `Customize`, to
        // allow the `kata-agent-ctl` tool to partially deserialise structures
        // specified in JSON, we need this bit of additional magic.
        if line.starts_with(&struct_pattern) {
            new_contents.insert(new_contents.len() - 1, serde_default_code.trim().into());
        }
    }

    let data = new_contents.join("\n");

    let mut dst = File::create(&rust_filename)?;

    dst.write_all(data.as_bytes())?;

    Ok(())
}

fn real_main() -> Result<(), std::io::Error> {
    let autogen_comment = format!("\n//! Generated by {:?} ({:?})", file!(), module_path!());

fn main() {
    let protos = vec![
        "protos/types.proto",
        "protos/agent.proto",
        "protos/health.proto",
        "protos/google/protobuf/empty.proto",
        "protos/health.proto",
        "protos/oci.proto",
        "protos/types.proto",
    ];

    // Tell Cargo that if the .proto files changed, to rerun this build script.
    protos
        .iter()
        .for_each(|p| println!("cargo:rerun-if-changed={}", &p));

    let ttrpc_options = Customize {
        async_server: true,
        ..Default::default()
    };

    let protobuf_options = ProtobufCustomize {
        serde_derive: Some(true),
        ..Default::default()
    };

    let out_dir = Path::new("src");

    Codegen::new()
        .out_dir("src")
        .out_dir(out_dir)
        .inputs(&protos)
        .include("protos")
        .customize(ttrpc_options)
        .rust_protobuf()
        .customize(Customize {
            async_server: true,
            ..Default::default()
        })
        .run()
        .expect("Gen codes failed.");
        .rust_protobuf_customize(protobuf_options)
        .run()?;

    for file in protos.iter() {
        let proto_filename = Path::new(file).file_name().unwrap();

        let generated_file = proto_filename
            .to_str()
            .ok_or("failed")
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?
            .replace(".proto", ".rs");

        let out_file = out_dir.join(generated_file);

        let out_file_str = out_file
            .to_str()
            .ok_or("failed")
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;

        handle_file(&autogen_comment, out_file_str)?;
    }

    // There is a message named 'Box' in oci.proto
    // so there is a struct named 'Box', we should replace Box<Self> to ::std::boxed::Box<Self>
@@ -34,11 +153,16 @@ fn main() {
        "src/oci.rs",
        "self: Box<Self>",
        "self: ::std::boxed::Box<Self>",
    )
    .unwrap();
    )?;

    use_serde(&protos, out_dir)?;

    Ok(())
}

fn replace_text_in_file(file_name: &str, from: &str, to: &str) -> Result<(), std::io::Error> {
    let new_contents = fs::read_to_string(file_name)?.replace(from, to);
    fs::write(&file_name, new_contents.as_bytes())
fn main() {
    if let Err(e) = real_main() {
        eprintln!("ERROR: {}", e);
        exit(1);
    }
}
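The `serde(default)` attribute the build script injects is what allows partial JSON input to deserialize: omitted fields fall back to their `Default` values. A standalone sketch of that behaviour (the struct below is illustrative, not one of the generated types):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)] // missing JSON fields take their Default values
struct CreateContainerRequest {
    container_id: String,
    exec_id: String,
}

fn main() {
    // Only one field supplied; `exec_id` falls back to String::default().
    let req: CreateContainerRequest =
        serde_json::from_str(r#"{"container_id": "test"}"#).unwrap();
    println!("{:?}", req);
}
```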
@@ -52,8 +52,6 @@ service AgentService {
rpc AddARPNeighbors(AddARPNeighborsRequest) returns (google.protobuf.Empty);

// observability
rpc StartTracing(StartTracingRequest) returns (google.protobuf.Empty);
rpc StopTracing(StopTracingRequest) returns (google.protobuf.Empty);
rpc GetMetrics(GetMetricsRequest) returns (Metrics);

// misc (TODO: some rpcs can be replaced by hyperstart-exec)
@@ -492,12 +490,6 @@ message CopyFileRequest {
bytes data = 8;
}

message StartTracingRequest {
}

message StopTracingRequest {
}

message GetOOMEventRequest {}

message OOMEvent {
@@ -46,6 +46,7 @@ message Route {
string device = 3;
string source = 4;
uint32 scope = 5;
IPFamily family = 6;
}

message ARPNeighbor {
@@ -30,7 +30,11 @@ tokio = { version = "1.2.0", features = ["sync", "io-util", "process", "time", "
futures = "0.3"
async-trait = "0.1.31"
inotify = "0.9.2"
libseccomp = { version = "0.1.3", optional = true }

[dev-dependencies]
serial_test = "0.5.0"
tempfile = "3.1.0"

[features]
seccomp = ["libseccomp"]
@@ -25,6 +25,8 @@ use crate::cgroups::mock::Manager as FsManager;
use crate::cgroups::Manager;
use crate::log_child;
use crate::process::Process;
#[cfg(feature = "seccomp")]
use crate::seccomp;
use crate::specconv::CreateOpts;
use crate::{mount, validator};

@@ -151,7 +153,7 @@ lazy_static! {
    },
    LinuxDevice {
        path: "/dev/full".to_string(),
        r#type: String::from("c"),
        r#type: "c".to_string(),
        major: 1,
        minor: 7,
        file_mode: Some(0o666),
@@ -593,11 +595,22 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
        })?;
    }

    // NoNewPeiviledges, Drop capabilities
    // NoNewPrivileges
    if oci_process.no_new_privileges {
        capctl::prctl::set_no_new_privs().map_err(|_| anyhow!("cannot set no new privileges"))?;
    }

    // Without NoNewPrivileges, we need to set seccomp
    // before dropping capabilities because the calling thread
    // must have the CAP_SYS_ADMIN.
    #[cfg(feature = "seccomp")]
    if !oci_process.no_new_privileges {
        if let Some(ref scmp) = linux.seccomp {
            seccomp::init_seccomp(scmp)?;
        }
    }

    // Drop capabilities
    if oci_process.capabilities.is_some() {
        let c = oci_process.capabilities.as_ref().unwrap();
        capabilities::drop_privileges(cfd_log, c)?;
@@ -623,11 +636,10 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {

    // setup the envs
    for e in env.iter() {
        let v: Vec<&str> = e.splitn(2, '=').collect();
        if v.len() != 2 {
            continue;
        match valid_env(e) {
            Some((key, value)) => env::set_var(key, value),
            None => log_child!(cfd_log, "invalid env key-value: {:?}", e),
        }
        env::set_var(v[0], v[1]);
    }

    // set the "HOME" env getting from "/etc/passwd", if
@@ -641,7 +653,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
    let exec_file = Path::new(&args[0]);
    log_child!(cfd_log, "process command: {:?}", &args);
    if !exec_file.exists() {
        find_file(exec_file).ok_or_else(|| anyhow!("the file {} is not exist", &args[0]))?;
        find_file(exec_file).ok_or_else(|| anyhow!("the file {} was not found", &args[0]))?;
    }

    // notify parent that the child's ready to start
@@ -651,8 +663,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
    let _ = unistd::close(crfd);
    let _ = unistd::close(cwfd);

    unistd::setsid().context("create a new session")?;
    if oci_process.terminal {
        unistd::setsid()?;
        unsafe {
            libc::ioctl(0, libc::TIOCSCTTY);
        }
@@ -669,6 +681,16 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
        unistd::read(fd, &mut buf)?;
    }

    // With NoNewPrivileges, we should set seccomp as close to
    // do_exec as possible in order to reduce the amount of
    // system calls in the seccomp profiles.
    #[cfg(feature = "seccomp")]
    if oci_process.no_new_privileges {
        if let Some(ref scmp) = linux.seccomp {
            seccomp::init_seccomp(scmp)?;
        }
    }

    do_exec(&args);
}

@@ -833,6 +855,20 @@ impl BaseContainer for LinuxContainer {
        }
        let linux = spec.linux.as_ref().unwrap();

        if p.oci.capabilities.is_none() {
            // No capabilities, inherit from container process
            let process = spec
                .process
                .as_ref()
                .ok_or_else(|| anyhow!("no process config"))?;
            p.oci.capabilities = Some(
                process
                    .capabilities
                    .clone()
                    .ok_or_else(|| anyhow!("missing process capabilities"))?,
            );
        }

        let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;

        let _ = fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
@@ -958,8 +994,6 @@ impl BaseContainer for LinuxContainer {

        info!(logger, "entered namespaces!");

        self.created = SystemTime::now();

        if p.init {
            let spec = self.config.spec.as_mut().unwrap();
            update_namespaces(&self.logger, spec, p.pid)?;
@@ -1528,6 +1562,30 @@ async fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
    }
}

// valid environment variables according to https://doc.rust-lang.org/std/env/fn.set_var.html#panics
fn valid_env(e: &str) -> Option<(&str, &str)> {
    // whether key or value contains a NUL character
    if e.as_bytes().contains(&b'\0') {
        return None;
    }

    let v: Vec<&str> = e.splitn(2, '=').collect();

    // key can't hold an `equal` sign, but value can
    if v.len() != 2 {
        return None;
    }

    let (key, value) = (v[0].trim(), v[1].trim());

    // key can't be empty
    if key.is_empty() {
        return None;
    }

    Some((key, value))
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -1951,4 +2009,49 @@ mod tests {
        let ret = do_init_child(std::io::stdin().as_raw_fd());
        assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
    }

    #[test]
    fn test_valid_env() {
        let env = valid_env("a=b=c");
        assert_eq!(Some(("a", "b=c")), env);

        let env = valid_env("a=b");
        assert_eq!(Some(("a", "b")), env);
        let env = valid_env("a =b");
        assert_eq!(Some(("a", "b")), env);

        let env = valid_env(" a =b");
        assert_eq!(Some(("a", "b")), env);

        let env = valid_env("a= b");
        assert_eq!(Some(("a", "b")), env);

        let env = valid_env("a=b ");
        assert_eq!(Some(("a", "b")), env);
        let env = valid_env("a=b c ");
        assert_eq!(Some(("a", "b c")), env);

        let env = valid_env("=b");
        assert_eq!(None, env);

        let env = valid_env("a=");
        assert_eq!(Some(("a", "")), env);

        let env = valid_env("a==");
        assert_eq!(Some(("a", "=")), env);

        let env = valid_env("a");
        assert_eq!(None, env);

        let invalid_str = vec![97, b'\0', 98];
        let invalid_string = std::str::from_utf8(&invalid_str).unwrap();

        let invalid_env = format!("{}=value", invalid_string);
        let env = valid_env(&invalid_env);
        assert_eq!(None, env);

        let invalid_env = format!("key={}", invalid_string);
        let env = valid_env(&invalid_env);
        assert_eq!(None, env);
    }
}
@@ -34,6 +34,8 @@ pub mod container;
pub mod mount;
pub mod pipestream;
pub mod process;
#[cfg(feature = "seccomp")]
pub mod seccomp;
pub mod specconv;
pub mod sync;
pub mod sync_with_async;
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::{anyhow, bail, Context, Result};
use anyhow::{anyhow, Context, Result};
use libc::uid_t;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};
@@ -19,7 +19,7 @@ use std::fs::{self, OpenOptions};
use std::mem::MaybeUninit;
use std::os::unix;
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use std::path::{Component, Path, PathBuf};

use path_absolutize::*;
use std::fs::File;
@@ -112,6 +112,7 @@ lazy_static! {
}

#[inline(always)]
#[cfg(not(test))]
pub fn mount<
    P1: ?Sized + NixPath,
    P2: ?Sized + NixPath,
@@ -124,21 +125,42 @@ pub fn mount<
    flags: MsFlags,
    data: Option<&P4>,
) -> std::result::Result<(), nix::Error> {
    #[cfg(not(test))]
    return mount::mount(source, target, fstype, flags, data);
    #[cfg(test)]
    return Ok(());
    mount::mount(source, target, fstype, flags, data)
}

#[inline(always)]
#[cfg(test)]
pub fn mount<
    P1: ?Sized + NixPath,
    P2: ?Sized + NixPath,
    P3: ?Sized + NixPath,
    P4: ?Sized + NixPath,
>(
    _source: Option<&P1>,
    _target: &P2,
    _fstype: Option<&P3>,
    _flags: MsFlags,
    _data: Option<&P4>,
) -> std::result::Result<(), nix::Error> {
    Ok(())
}

#[inline(always)]
#[cfg(not(test))]
pub fn umount2<P: ?Sized + NixPath>(
    target: &P,
    flags: MntFlags,
) -> std::result::Result<(), nix::Error> {
    #[cfg(not(test))]
    return mount::umount2(target, flags);
    #[cfg(test)]
    return Ok(());
    mount::umount2(target, flags)
}

#[inline(always)]
#[cfg(test)]
pub fn umount2<P: ?Sized + NixPath>(
    _target: &P,
    _flags: MntFlags,
) -> std::result::Result<(), nix::Error> {
    Ok(())
}

pub fn init_rootfs(
@@ -450,14 +472,20 @@ fn mount_cgroups(
    Ok(())
}

#[cfg(not(test))]
fn pivot_root<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
    new_root: &P1,
    put_old: &P2,
) -> anyhow::Result<(), nix::Error> {
    #[cfg(not(test))]
    return unistd::pivot_root(new_root, put_old);
    #[cfg(test)]
    return Ok(());
    unistd::pivot_root(new_root, put_old)
}

#[cfg(test)]
fn pivot_root<P1: ?Sized + NixPath, P2: ?Sized + NixPath>(
    _new_root: &P1,
    _put_old: &P2,
) -> anyhow::Result<(), nix::Error> {
    Ok(())
}

pub fn pivot_rootfs<P: ?Sized + NixPath + std::fmt::Debug>(path: &P) -> Result<()> {
@@ -582,11 +610,15 @@ fn parse_mount_table() -> Result<Vec<Info>> {
}

#[inline(always)]
#[cfg(not(test))]
fn chroot<P: ?Sized + NixPath>(path: &P) -> Result<(), nix::Error> {
    #[cfg(not(test))]
    return unistd::chroot(path);
    #[cfg(test)]
    return Ok(());
    unistd::chroot(path)
}

#[inline(always)]
#[cfg(test)]
fn chroot<P: ?Sized + NixPath>(_path: &P) -> Result<(), nix::Error> {
    Ok(())
}

pub fn ms_move_root(rootfs: &str) -> Result<bool> {
@@ -828,18 +860,35 @@ fn default_symlinks() -> Result<()> {
    }
    Ok(())
}

fn dev_rel_path(path: &str) -> Option<&Path> {
    let path = Path::new(path);

    if !path.starts_with("/dev")
        || path == Path::new("/dev")
        || path.components().any(|c| c == Component::ParentDir)
    {
        return None;
    }
    path.strip_prefix("/").ok()
}

fn create_devices(devices: &[LinuxDevice], bind: bool) -> Result<()> {
    let op: fn(&LinuxDevice) -> Result<()> = if bind { bind_dev } else { mknod_dev };
    let op: fn(&LinuxDevice, &Path) -> Result<()> = if bind { bind_dev } else { mknod_dev };
    let old = stat::umask(Mode::from_bits_truncate(0o000));
    for dev in DEFAULT_DEVICES.iter() {
        op(dev)?;
        let path = Path::new(&dev.path[1..]);
        op(dev, path).context(format!("Creating container device {:?}", dev))?;
    }
    for dev in devices {
        if !dev.path.starts_with("/dev") || dev.path.contains("..") {
        let path = dev_rel_path(&dev.path).ok_or_else(|| {
            let msg = format!("{} is not a valid device path", dev.path);
            bail!(anyhow!(msg));
            anyhow!(msg)
        })?;
        if let Some(dir) = path.parent() {
            fs::create_dir_all(dir).context(format!("Creating container device {:?}", dev))?;
        }
        op(dev)?;
        op(dev, path).context(format!("Creating container device {:?}", dev))?;
    }
    stat::umask(old);
    Ok(())
@@ -861,21 +910,21 @@ lazy_static! {
    };
}

fn mknod_dev(dev: &LinuxDevice) -> Result<()> {
fn mknod_dev(dev: &LinuxDevice, relpath: &Path) -> Result<()> {
    let f = match LINUXDEVICETYPE.get(dev.r#type.as_str()) {
        Some(v) => v,
        None => return Err(anyhow!("invalid spec".to_string())),
    };

    stat::mknod(
        &dev.path[1..],
        relpath,
        *f,
        Mode::from_bits_truncate(dev.file_mode.unwrap_or(0)),
        nix::sys::stat::makedev(dev.major as u64, dev.minor as u64),
    )?;

    unistd::chown(
        &dev.path[1..],
        relpath,
        Some(Uid::from_raw(dev.uid.unwrap_or(0) as uid_t)),
        Some(Gid::from_raw(dev.gid.unwrap_or(0) as uid_t)),
    )?;
@@ -883,9 +932,9 @@ fn mknod_dev(dev: &LinuxDevice) -> Result<()> {
    Ok(())
}

fn bind_dev(dev: &LinuxDevice) -> Result<()> {
fn bind_dev(dev: &LinuxDevice, relpath: &Path) -> Result<()> {
    let fd = fcntl::open(
        &dev.path[1..],
        relpath,
        OFlag::O_RDWR | OFlag::O_CREAT,
        Mode::from_bits_truncate(0o644),
    )?;
@@ -894,7 +943,7 @@ fn bind_dev(dev: &LinuxDevice) -> Result<()> {

    mount(
        Some(&*dev.path),
        &dev.path[1..],
        relpath,
        None::<&str>,
        MsFlags::MS_BIND,
        None::<&str>,
@@ -1258,11 +1307,12 @@ mod tests {
        uid: Some(unistd::getuid().as_raw()),
        gid: Some(unistd::getgid().as_raw()),
    };
    let path = Path::new("fifo");

    let ret = mknod_dev(&dev);
    let ret = mknod_dev(&dev, path);
    assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);

    let ret = stat::stat("fifo");
    let ret = stat::stat(path);
    assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
}
#[test]
@@ -1379,4 +1429,26 @@ mod tests {
        assert!(result == t.result, "{}", msg);
    }
}

#[test]
fn test_dev_rel_path() {
    // Valid device paths
    assert_eq!(dev_rel_path("/dev/sda").unwrap(), Path::new("dev/sda"));
    assert_eq!(dev_rel_path("//dev/sda").unwrap(), Path::new("dev/sda"));
    assert_eq!(
        dev_rel_path("/dev/vfio/99").unwrap(),
        Path::new("dev/vfio/99")
    );
    assert_eq!(dev_rel_path("/dev/...").unwrap(), Path::new("dev/..."));
    assert_eq!(dev_rel_path("/dev/a..b").unwrap(), Path::new("dev/a..b"));
    assert_eq!(dev_rel_path("/dev//foo").unwrap(), Path::new("dev/foo"));

    // Bad device paths
    assert!(dev_rel_path("/devfoo").is_none());
    assert!(dev_rel_path("/etc/passwd").is_none());
    assert!(dev_rel_path("/dev/../etc/passwd").is_none());
    assert!(dev_rel_path("dev/foo").is_none());
    assert!(dev_rel_path("").is_none());
    assert!(dev_rel_path("/dev").is_none());
}
}
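The hunks above split each privileged wrapper into a real `#[cfg(not(test))]` implementation and a `#[cfg(test)]` no-op stub, so unit tests can exercise callers without root privileges. A minimal sketch of the same pattern (hypothetical function, not part of this codebase):

```rust
// Real build: perform the privileged operation.
#[cfg(not(test))]
fn apply_chroot(path: &std::path::Path) -> std::io::Result<()> {
    std::os::unix::fs::chroot(path)
}

// Test build: identical signature, but a no-op, so callers compile
// and run unchanged under `cargo test`.
#[cfg(test)]
fn apply_chroot(_path: &std::path::Path) -> std::io::Result<()> {
    Ok(())
}
```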
@@ -24,6 +24,16 @@ use tokio::io::{split, ReadHalf, WriteHalf};
use tokio::sync::Mutex;
use tokio::sync::Notify;

macro_rules! close_process_stream {
    ($self: ident, $stream:ident, $stream_type: ident) => {
        if $self.$stream.is_some() {
            $self.close_stream(StreamType::$stream_type);
            let _ = unistd::close($self.$stream.unwrap());
            $self.$stream = None;
        }
    };
}

#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub enum StreamType {
    Stdin,
@@ -147,6 +157,22 @@ impl Process {
        notify.notify_one();
    }

    pub fn close_stdin(&mut self) {
        close_process_stream!(self, term_master, TermMaster);
        close_process_stream!(self, parent_stdin, ParentStdin);

        self.notify_term_close();
    }

    pub fn cleanup_process_stream(&mut self) {
        close_process_stream!(self, parent_stdin, ParentStdin);
        close_process_stream!(self, parent_stdout, ParentStdout);
        close_process_stream!(self, parent_stderr, ParentStderr);
        close_process_stream!(self, term_master, TermMaster);

        self.notify_term_close();
    }

    fn get_fd(&self, stream_type: &StreamType) -> Option<RawFd> {
        match stream_type {
            StreamType::Stdin => self.stdin,
src/agent/rustjail/src/seccomp.rs (new file, 237 lines)
@@ -0,0 +1,237 @@
// Copyright 2021 Sony Group Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::{anyhow, Result};
use libseccomp::*;
use oci::{LinuxSeccomp, LinuxSeccompArg};
use std::str::FromStr;

fn get_filter_attr_from_flag(flag: &str) -> Result<ScmpFilterAttr> {
    match flag {
        "SECCOMP_FILTER_FLAG_TSYNC" => Ok(ScmpFilterAttr::CtlTsync),
        "SECCOMP_FILTER_FLAG_LOG" => Ok(ScmpFilterAttr::CtlLog),
        "SECCOMP_FILTER_FLAG_SPEC_ALLOW" => Ok(ScmpFilterAttr::CtlSsb),
        _ => Err(anyhow!("Invalid seccomp flag")),
    }
}

// get_rule_conditions gets rule conditions for a system call from the args.
fn get_rule_conditions(args: &[LinuxSeccompArg]) -> Result<Vec<ScmpArgCompare>> {
    let mut conditions: Vec<ScmpArgCompare> = Vec::new();

    for arg in args {
        if arg.op.is_empty() {
            return Err(anyhow!("seccomp operator is required"));
        }

        let cond = ScmpArgCompare::new(
            arg.index,
            ScmpCompareOp::from_str(&arg.op)?,
            arg.value,
            Some(arg.value_two),
        );

        conditions.push(cond);
    }

    Ok(conditions)
}

// init_seccomp creates a seccomp filter and loads it for the current process
// including all the child processes.
pub fn init_seccomp(scmp: &LinuxSeccomp) -> Result<()> {
    let def_action = ScmpAction::from_str(scmp.default_action.as_str(), Some(libc::EPERM as u32))?;

    // Create a new filter context
    let mut filter = ScmpFilterContext::new_filter(def_action)?;

    // Add extra architectures
    for arch in &scmp.architectures {
        let scmp_arch = ScmpArch::from_str(arch)?;
        filter.add_arch(scmp_arch)?;
    }

    // Unset no new privileges bit
    filter.set_no_new_privs_bit(false)?;

    // Add a rule for each system call
    for syscall in &scmp.syscalls {
        if syscall.names.is_empty() {
            return Err(anyhow!("syscall name is required"));
        }

        let action = ScmpAction::from_str(&syscall.action, Some(syscall.errno_ret))?;
        if action == def_action {
            continue;
        }

        for name in &syscall.names {
            let syscall_num = get_syscall_from_name(name, None)?;

            if syscall.args.is_empty() {
                filter.add_rule(action, syscall_num, None)?;
            } else {
                let conditions = get_rule_conditions(&syscall.args)?;
                filter.add_rule(action, syscall_num, Some(&conditions))?;
            }
        }
    }

    // Set filter attributes for each seccomp flag
    for flag in &scmp.flags {
        let scmp_attr = get_filter_attr_from_flag(flag)?;
        filter.set_filter_attr(scmp_attr, 1)?;
    }

    // Load the filter
    filter.load()?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::skip_if_not_root;
    use libc::{dup3, process_vm_readv, EPERM, O_CLOEXEC};
    use std::io::Error;
    use std::ptr::null;

    macro_rules! syscall_assert {
        ($e1: expr, $e2: expr) => {
            let mut errno: i32 = 0;
            if $e1 < 0 {
                errno = -Error::last_os_error().raw_os_error().unwrap();
            }
            assert_eq!(errno, $e2);
        };
    }

    #[test]
    fn test_get_filter_attr_from_flag() {
        skip_if_not_root!();

        assert_eq!(
            get_filter_attr_from_flag("SECCOMP_FILTER_FLAG_TSYNC").unwrap(),
            ScmpFilterAttr::CtlTsync
        );

        assert_eq!(get_filter_attr_from_flag("ERROR").is_err(), true);
    }

    #[test]
    fn test_init_seccomp() {
        skip_if_not_root!();

        let data = r#"{
            "defaultAction": "SCMP_ACT_ALLOW",
            "architectures": [
            ],
            "flags": [
                "SECCOMP_FILTER_FLAG_LOG"
            ],
            "syscalls": [
                {
                    "names": [
                        "dup3"
                    ],
                    "action": "SCMP_ACT_ERRNO"
                },
                {
                    "names": [
                        "process_vm_readv"
                    ],
                    "action": "SCMP_ACT_ERRNO",
                    "errnoRet": 111,
                    "args": [
                        {
                            "index": 0,
                            "value": 10,
                            "op": "SCMP_CMP_EQ"
                        }
                    ]
                },
                {
                    "names": [
                        "process_vm_readv"
                    ],
                    "action": "SCMP_ACT_ERRNO",
                    "errnoRet": 111,
                    "args": [
                        {
                            "index": 0,
                            "value": 20,
                            "op": "SCMP_CMP_EQ"
                        }
                    ]
                },
                {
                    "names": [
                        "process_vm_readv"
                    ],
                    "action": "SCMP_ACT_ERRNO",
                    "errnoRet": 222,
                    "args": [
                        {
                            "index": 0,
                            "value": 30,
                            "op": "SCMP_CMP_EQ"
                        },
                        {
                            "index": 2,
                            "value": 40,
                            "op": "SCMP_CMP_EQ"
                        }
                    ]
                }
            ]
        }"#;

        let mut scmp: oci::LinuxSeccomp = serde_json::from_str(data).unwrap();
        let mut arch: Vec<oci::Arch>;

        if cfg!(target_endian = "little") {
            // For little-endian architectures
            arch = vec![
                "SCMP_ARCH_X86".to_string(),
                "SCMP_ARCH_X32".to_string(),
                "SCMP_ARCH_X86_64".to_string(),
                "SCMP_ARCH_AARCH64".to_string(),
                "SCMP_ARCH_ARM".to_string(),
                "SCMP_ARCH_PPC64LE".to_string(),
            ];
        } else {
            // For big-endian architectures
            arch = vec!["SCMP_ARCH_S390X".to_string()];
        }

        scmp.architectures.append(&mut arch);

        init_seccomp(&scmp).unwrap();

        // Basic syscall with simple rule
        syscall_assert!(unsafe { dup3(0, 1, O_CLOEXEC) }, -EPERM);

        // Syscall with permitted arguments
        syscall_assert!(unsafe { process_vm_readv(1, null(), 0, null(), 0, 0) }, 0);

        // Multiple arguments with OR rules with ERRNO
        syscall_assert!(
            unsafe { process_vm_readv(10, null(), 0, null(), 0, 0) },
            -111
        );
        syscall_assert!(
            unsafe { process_vm_readv(20, null(), 0, null(), 0, 0) },
            -111
        );

        // Multiple arguments with AND rules with ERRNO
        syscall_assert!(unsafe { process_vm_readv(30, null(), 0, null(), 0, 0) }, 0);
        syscall_assert!(
            unsafe { process_vm_readv(30, null(), 40, null(), 0, 0) },
            -222
        );
    }
}
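As the test above shows, `init_seccomp` takes a deserialized OCI `LinuxSeccomp` value; a minimal caller sketch under the same assumptions (same crate, `serde_json` available):

```rust
// Hedged sketch: parse an OCI seccomp section from JSON and load it.
// The filter applies to this process and all children it spawns.
fn load_profile(json: &str) -> anyhow::Result<()> {
    let scmp: oci::LinuxSeccomp = serde_json::from_str(json)?;
    init_seccomp(&scmp)
}
```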
src/agent/samples/configuration-all-endpoints.toml (new file, 39 lines)
@@ -0,0 +1,39 @@
# This is an agent configuration file example.
dev_mode = true
server_addr = 'vsock://8:2048'

[endpoints]
# All endpoints are allowed
allowed = [
  "AddARPNeighborsRequest",
  "AddSwapRequest",
  "CloseStdinRequest",
  "CopyFileRequest",
  "CreateContainerRequest",
  "CreateSandboxRequest",
  "DestroySandboxRequest",
  "ExecProcessRequest",
  "GetMetricsRequest",
  "GetOOMEventRequest",
  "GuestDetailsRequest",
  "ListInterfacesRequest",
  "ListRoutesRequest",
  "MemHotplugByProbeRequest",
  "OnlineCPUMemRequest",
  "PauseContainerRequest",
  "PullImageRequest",
  "ReadStreamRequest",
  "RemoveContainerRequest",
  "ReseedRandomDevRequest",
  "ResumeContainerRequest",
  "SetGuestDateTimeRequest",
  "SignalProcessRequest",
  "StartContainerRequest",
  "StatsContainerRequest",
  "TtyWinResizeRequest",
  "UpdateContainerRequest",
  "UpdateInterfaceRequest",
  "UpdateRoutesRequest",
  "WaitProcessRequest",
  "WriteStreamRequest"
]
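The sample above allows every endpoint; a locked-down guest would list only what the workload needs, since anything not listed is rejected once a configuration file is supplied. An illustrative (not exhaustive) variant:

```toml
[endpoints]
# Requests outside this list are refused by the agent.
allowed = [
  "CreateSandboxRequest",
  "CreateContainerRequest",
  "StartContainerRequest",
  "DestroySandboxRequest"
]
```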
@@ -2,10 +2,13 @@
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::tracer;
use crate::rpc;
use anyhow::{bail, ensure, Context, Result};
use serde::Deserialize;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::str::FromStr;
use std::time;
use tracing::instrument;

@@ -19,6 +22,7 @@ const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
const LOG_VPORT_OPTION: &str = "agent.log_vport";
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
const CONFIG_FILE: &str = "agent.config_file";

const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
@@ -29,7 +33,7 @@ const VSOCK_PORT: u16 = 1024;
// Environment variables used for development and testing
const SERVER_ADDR_ENV_VAR: &str = "KATA_AGENT_SERVER_ADDR";
const LOG_LEVEL_ENV_VAR: &str = "KATA_AGENT_LOG_LEVEL";
const TRACE_TYPE_ENV_VAR: &str = "KATA_AGENT_TRACE_TYPE";
const TRACING_ENV_VAR: &str = "KATA_AGENT_TRACING";

const ERR_INVALID_LOG_LEVEL: &str = "invalid log level";
const ERR_INVALID_LOG_LEVEL_PARAM: &str = "invalid log level parameter";
@@ -47,6 +51,17 @@ const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container p
const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";

#[derive(Debug, Default, Deserialize)]
pub struct EndpointsConfig {
    pub allowed: Vec<String>,
}

#[derive(Debug, Default)]
pub struct AgentEndpoints {
    pub allowed: HashSet<String>,
    pub all_allowed: bool,
}

#[derive(Debug)]
pub struct AgentConfig {
    pub debug_console: bool,
@@ -58,7 +73,38 @@ pub struct AgentConfig {
    pub container_pipe_size: i32,
    pub server_addr: String,
    pub unified_cgroup_hierarchy: bool,
    pub tracing: tracer::TraceType,
    pub tracing: bool,
    pub endpoints: AgentEndpoints,
    pub supports_seccomp: bool,
}

#[derive(Debug, Deserialize)]
pub struct AgentConfigBuilder {
    pub debug_console: Option<bool>,
    pub dev_mode: Option<bool>,
    pub log_level: Option<String>,
    pub hotplug_timeout: Option<time::Duration>,
    pub debug_console_vport: Option<i32>,
    pub log_vport: Option<i32>,
    pub container_pipe_size: Option<i32>,
    pub server_addr: Option<String>,
    pub unified_cgroup_hierarchy: Option<bool>,
    pub tracing: Option<bool>,
    pub endpoints: Option<EndpointsConfig>,
}

macro_rules! config_override {
    ($builder:ident, $config:ident, $field:ident) => {
        if let Some(v) = $builder.$field {
            $config.$field = v;
        }
    };

    ($builder:ident, $config:ident, $field:ident, $func: ident) => {
        if let Some(v) = $builder.$field {
            $config.$field = $func(&v)?;
        }
    };
}

// parse_cmdline_param parse commandline parameters.
@@ -91,8 +137,8 @@ macro_rules! parse_cmdline_param {
    };
}

impl AgentConfig {
    pub fn new() -> AgentConfig {
impl Default for AgentConfig {
    fn default() -> Self {
        AgentConfig {
            debug_console: false,
            dev_mode: false,
@@ -103,34 +149,84 @@ impl AgentConfig {
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
            server_addr: format!("{}:{}", VSOCK_ADDR, VSOCK_PORT),
            unified_cgroup_hierarchy: false,
            tracing: tracer::TraceType::Disabled,
            tracing: false,
            endpoints: Default::default(),
            supports_seccomp: rpc::have_seccomp(),
        }
    }
}

impl FromStr for AgentConfig {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let agent_config_builder: AgentConfigBuilder =
            toml::from_str(s).map_err(anyhow::Error::new)?;
        let mut agent_config: AgentConfig = Default::default();

        // Overwrite default values with the configuration files ones.
        config_override!(agent_config_builder, agent_config, debug_console);
        config_override!(agent_config_builder, agent_config, dev_mode);
        config_override!(
            agent_config_builder,
            agent_config,
            log_level,
            logrus_to_slog_level
        );
        config_override!(agent_config_builder, agent_config, hotplug_timeout);
        config_override!(agent_config_builder, agent_config, debug_console_vport);
        config_override!(agent_config_builder, agent_config, log_vport);
        config_override!(agent_config_builder, agent_config, container_pipe_size);
        config_override!(agent_config_builder, agent_config, server_addr);
        config_override!(agent_config_builder, agent_config, unified_cgroup_hierarchy);
        config_override!(agent_config_builder, agent_config, tracing);

        // Populate the allowed endpoints hash set, if we got any from the config file.
        if let Some(endpoints) = agent_config_builder.endpoints {
            for ep in endpoints.allowed {
                agent_config.endpoints.allowed.insert(ep);
            }
        }

        Ok(agent_config)
    }
}

impl AgentConfig {
    #[instrument]
    pub fn parse_cmdline(&mut self, file: &str) -> Result<()> {
    pub fn from_cmdline(file: &str) -> Result<AgentConfig> {
        let mut config: AgentConfig = Default::default();
        let cmdline = fs::read_to_string(file)?;
        let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
        for param in params.iter() {
            // If we get a configuration file path from the command line, we
            // generate our config from it.
            // The agent will fail to start if the configuration file is not present,
            // or if it can't be parsed properly.
            if param.starts_with(format!("{}=", CONFIG_FILE).as_str()) {
                let config_file = get_string_value(param)?;
                return AgentConfig::from_config_file(&config_file);
            }

            // parse cmdline flags
            parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, self.debug_console);
            parse_cmdline_param!(param, DEV_MODE_FLAG, self.dev_mode);
            parse_cmdline_param!(param, DEBUG_CONSOLE_FLAG, config.debug_console);
            parse_cmdline_param!(param, DEV_MODE_FLAG, config.dev_mode);

            // Support "bare" tracing option for backwards compatibility with
            // Kata 1.x.
            if param == &TRACE_MODE_OPTION {
                self.tracing = tracer::TraceType::Isolated;
                config.tracing = true;
                continue;
            }

            parse_cmdline_param!(param, TRACE_MODE_OPTION, self.tracing, get_trace_type);
            parse_cmdline_param!(param, TRACE_MODE_OPTION, config.tracing, get_bool_value);

            // parse cmdline options
            parse_cmdline_param!(param, LOG_LEVEL_OPTION, self.log_level, get_log_level);
            parse_cmdline_param!(param, LOG_LEVEL_OPTION, config.log_level, get_log_level);
            parse_cmdline_param!(
                param,
                SERVER_ADDR_OPTION,
                self.server_addr,
                config.server_addr,
                get_string_value
            );

@@ -138,7 +234,7 @@ impl AgentConfig {
            parse_cmdline_param!(
                param,
                HOTPLUG_TIMOUT_OPTION,
                self.hotplug_timeout,
                config.hotplug_timeout,
                get_hotplug_timeout,
                |hotplug_timeout: time::Duration| hotplug_timeout.as_secs() > 0
            );
@@ -147,14 +243,14 @@ impl AgentConfig {
            parse_cmdline_param!(
                param,
                DEBUG_CONSOLE_VPORT_OPTION,
                self.debug_console_vport,
                config.debug_console_vport,
                get_vsock_port,
                |port| port > 0
            );
            parse_cmdline_param!(
                param,
                LOG_VPORT_OPTION,
                self.log_vport,
                config.log_vport,
                get_vsock_port,
                |port| port > 0
            );
@@ -162,34 +258,47 @@ impl AgentConfig {
            parse_cmdline_param!(
                param,
                CONTAINER_PIPE_SIZE_OPTION,
                self.container_pipe_size,
                config.container_pipe_size,
                get_container_pipe_size
            );
            parse_cmdline_param!(
                param,
                UNIFIED_CGROUP_HIERARCHY_OPTION,
                self.unified_cgroup_hierarchy,
                config.unified_cgroup_hierarchy,
                get_bool_value
            );
        }

        if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
            self.server_addr = addr;
            config.server_addr = addr;
        }

        if let Ok(addr) = env::var(LOG_LEVEL_ENV_VAR) {
            if let Ok(level) = logrus_to_slog_level(&addr) {
                self.log_level = level;
                config.log_level = level;
            }
        }

        if let Ok(value) = env::var(TRACE_TYPE_ENV_VAR) {
            if let Ok(result) = value.parse::<tracer::TraceType>() {
                self.tracing = result;
            }
        if let Ok(value) = env::var(TRACING_ENV_VAR) {
            let name_value = format!("{}={}", TRACING_ENV_VAR, value);

            config.tracing = get_bool_value(&name_value)?;
        }

        Ok(())
        // We did not get a configuration file: allow all endpoints.
        config.endpoints.all_allowed = true;

        Ok(config)
    }

    #[instrument]
    pub fn from_config_file(file: &str) -> Result<AgentConfig> {
        let config = fs::read_to_string(file)?;
        AgentConfig::from_str(&config)
    }

    pub fn is_allowed_endpoint(&self, ep: &str) -> bool {
        self.endpoints.all_allowed || self.endpoints.allowed.contains(ep)
    }
}

@@ -236,25 +345,6 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
    logrus_to_slog_level(fields[1])
}

#[instrument]
fn get_trace_type(param: &str) -> Result<tracer::TraceType> {
    ensure!(!param.is_empty(), "invalid trace type parameter");

    let fields: Vec<&str> = param.split('=').collect();
    ensure!(
        fields[0] == TRACE_MODE_OPTION,
        "invalid trace type key name"
    );

    if fields.len() == 1 {
        return Ok(tracer::TraceType::Isolated);
    }

    let result = fields[1].parse::<tracer::TraceType>()?;

    Ok(result)
}

#[instrument]
fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
    let fields: Vec<&str> = param.split('=').collect();
@@ -339,10 +429,6 @@ mod tests {
    use std::time;
    use tempfile::tempdir;

    const ERR_INVALID_TRACE_TYPE_PARAM: &str = "invalid trace type parameter";
    const ERR_INVALID_TRACE_TYPE: &str = "invalid trace type";
    const ERR_INVALID_TRACE_TYPE_KEY: &str = "invalid trace type key name";

    // Parameters:
    //
    // 1: expected Result
@@ -371,7 +457,7 @@ mod tests {

    #[test]
    fn test_new() {
        let config = AgentConfig::new();
        let config: AgentConfig = Default::default();
        assert!(!config.debug_console);
        assert!(!config.dev_mode);
        assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
@@ -379,7 +465,7 @@ mod tests {
    }

    #[test]
    fn test_parse_cmdline() {
    fn test_from_cmdline() {
        const TEST_SERVER_ADDR: &str = "vsock://-1:1024";

        #[derive(Debug)]
@@ -393,7 +479,7 @@ mod tests {
            container_pipe_size: i32,
            server_addr: &'a str,
            unified_cgroup_hierarchy: bool,
            tracing: tracer::TraceType,
            tracing: bool,
        }

        impl Default for TestData<'_> {
@@ -408,7 +494,7 @@ mod tests {
                container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
                server_addr: TEST_SERVER_ADDR,
                unified_cgroup_hierarchy: false,
                tracing: tracer::TraceType::Disabled,
                tracing: false,
            }
        }
    }
@@ -667,64 +753,121 @@ mod tests {
            },
            TestData {
                contents: "trace",
                tracing: tracer::TraceType::Disabled,
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: ".trace",
                tracing: tracer::TraceType::Disabled,
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.tracer",
                tracing: tracer::TraceType::Disabled,
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trac",
                tracing: tracer::TraceType::Disabled,
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace",
                tracing: tracer::TraceType::Isolated,
                tracing: true,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=isolated",
                tracing: tracer::TraceType::Isolated,
                contents: "agent.trace=true",
                tracing: true,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=disabled",
                tracing: tracer::TraceType::Disabled,
                contents: "agent.trace=false",
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=0",
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=1",
                tracing: true,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=a",
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=foo",
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=.",
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "agent.trace=,",
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACE_TYPE=isolated"],
                tracing: tracer::TraceType::Isolated,
                env_vars: vec!["KATA_AGENT_TRACING="],
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACE_TYPE=disabled"],
                tracing: tracer::TraceType::Disabled,
                env_vars: vec!["KATA_AGENT_TRACING=''"],
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACING=0"],
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACING=."],
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACING=,"],
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACING=foo"],
                tracing: false,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACING=1"],
                tracing: true,
                ..Default::default()
            },
            TestData {
                contents: "",
                env_vars: vec!["KATA_AGENT_TRACING=true"],
                tracing: true,
                ..Default::default()
            },
        ];

        let dir = tempdir().expect("failed to create tmpdir");

        // First, check a missing file is handled
        let file_path = dir.path().join("enoent");

        let filename = file_path.to_str().expect("failed to create filename");

        let mut config = AgentConfig::new();
        let result = config.parse_cmdline(&filename.to_owned());
        assert!(result.is_err());

        // Now, test various combinations of file contents and environment
        // variables.
        for (i, d) in tests.iter().enumerate() {
@@ -753,22 +896,7 @@ mod tests {
            vars_to_unset.push(name);
        }

        let mut config = AgentConfig::new();
        assert!(!config.debug_console, "{}", msg);
        assert!(!config.dev_mode, "{}", msg);
        assert!(!config.unified_cgroup_hierarchy, "{}", msg);
        assert_eq!(
            config.hotplug_timeout,
            time::Duration::from_secs(3),
            "{}",
            msg
        );
        assert_eq!(config.container_pipe_size, 0, "{}", msg);
        assert_eq!(config.server_addr, TEST_SERVER_ADDR, "{}", msg);
        assert_eq!(config.tracing, tracer::TraceType::Disabled, "{}", msg);

        let result = config.parse_cmdline(filename);
        assert!(result.is_ok(), "{}", msg);
        let config = AgentConfig::from_cmdline(filename).expect("Failed to parse command line");

        assert_eq!(d.debug_console, config.debug_console, "{}", msg);
        assert_eq!(d.dev_mode, config.dev_mode, "{}", msg);
@@ -1220,60 +1348,33 @@ Caused by:
    }

    #[test]
    fn test_get_trace_type() {
        #[derive(Debug)]
        struct TestData<'a> {
            param: &'a str,
            result: Result<tracer::TraceType>,
        }
    fn test_config_builder_from_string() {
        let config = AgentConfig::from_str(
            r#"
            dev_mode = true
            server_addr = 'vsock://8:2048'

        let tests = &[
            TestData {
                param: "",
                result: Err(anyhow!(ERR_INVALID_TRACE_TYPE_PARAM)),
            },
            TestData {
                param: "agent.tracer",
                result: Err(anyhow!(ERR_INVALID_TRACE_TYPE_KEY)),
            },
            TestData {
                param: "agent.trac",
                result: Err(anyhow!(ERR_INVALID_TRACE_TYPE_KEY)),
            },
            TestData {
                param: "agent.trace=",
                result: Err(anyhow!(ERR_INVALID_TRACE_TYPE)),
            },
            TestData {
                param: "agent.trace==",
                result: Err(anyhow!(ERR_INVALID_TRACE_TYPE)),
            },
            TestData {
                param: "agent.trace=foo",
                result: Err(anyhow!(ERR_INVALID_TRACE_TYPE)),
            },
            TestData {
                param: "agent.trace",
                result: Ok(tracer::TraceType::Isolated),
            },
            TestData {
                param: "agent.trace=isolated",
                result: Ok(tracer::TraceType::Isolated),
            },
            TestData {
                param: "agent.trace=disabled",
                result: Ok(tracer::TraceType::Disabled),
            },
        ];
            [endpoints]
            allowed = ["CreateContainer", "StartContainer"]
            "#,
        )
        .unwrap();

        for (i, d) in tests.iter().enumerate() {
            let msg = format!("test[{}]: {:?}", i, d);
        // Verify that the all_allowed flag is false
        assert!(!config.endpoints.all_allowed);

            let result = get_trace_type(d.param);
        // Verify that the override worked
        assert!(config.dev_mode);
        assert_eq!(config.server_addr, "vsock://8:2048");
        assert_eq!(
            config.endpoints.allowed,
            vec!["CreateContainer".to_string(), "StartContainer".to_string()]
                .iter()
                .cloned()
                .collect()
        );

            let msg = format!("{}: result: {:?}", msg, result);

            assert_result!(d.result, result, msg);
        }
        // Verify that the default values are valid
        assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
    }
}
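Tying the configuration changes together: the agent reads these settings from the kernel command line at boot, unless `agent.config_file` is given, in which case the TOML file wins and everything else on the command line is ignored. An illustrative command-line fragment, using only options defined above (the file path is hypothetical):

```
agent.config_file=/etc/kata/agent-config.toml agent.trace=true agent.unified_cgroup_hierarchy=1
```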
[File diff suppressed because it is too large]
@@ -9,7 +9,6 @@
use std::fs;

pub const SYSFS_DIR: &str = "/sys";
pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
#[cfg(any(
    target_arch = "powerpc64",
    target_arch = "s390x",
@@ -84,6 +83,8 @@ pub const SYSFS_MEMORY_ONLINE_PATH: &str = "/sys/devices/system/memory";

pub const SYSFS_SCSI_HOST_PATH: &str = "/sys/class/scsi_host";

pub const SYSFS_BUS_PCI_PATH: &str = "/sys/bus/pci";

pub const SYSFS_CGROUPPATH: &str = "/sys/fs/cgroup";
pub const SYSFS_ONLINE_FILE: &str = "online";

@@ -95,6 +96,7 @@ pub const SYSTEM_DEV_PATH: &str = "/dev";
// Linux UEvent related consts.
pub const U_EVENT_ACTION: &str = "ACTION";
pub const U_EVENT_ACTION_ADD: &str = "add";
pub const U_EVENT_ACTION_REMOVE: &str = "remove";
pub const U_EVENT_DEV_PATH: &str = "DEVPATH";
pub const U_EVENT_SUB_SYSTEM: &str = "SUBSYSTEM";
pub const U_EVENT_SEQ_NUM: &str = "SEQNUM";
@@ -77,11 +77,11 @@ mod rpc;
mod tracer;

const NAME: &str = "kata-agent";
const KERNEL_CMDLINE_FILE: &str = "/proc/cmdline";

lazy_static! {
    static ref AGENT_CONFIG: Arc<RwLock<AgentConfig>> =
        Arc::new(RwLock::new(config::AgentConfig::new()));
    static ref AGENT_CONFIG: Arc<RwLock<AgentConfig>> = Arc::new(RwLock::new(
        AgentConfig::from_cmdline("/proc/cmdline").unwrap()
    ));
}

#[instrument]
@@ -113,10 +113,10 @@ async fn create_logger_task(rfd: RawFd, vsock_port: u32, shutdown: Receiver<bool
    )?;

    let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, vsock_port);
    socket::bind(listenfd, &addr).unwrap();
    socket::listen(listenfd, 1).unwrap();
    socket::bind(listenfd, &addr)?;
    socket::listen(listenfd, 1)?;

    writer = Box::new(util::get_vsock_stream(listenfd).await.unwrap());
    writer = Box::new(util::get_vsock_stream(listenfd).await?);
} else {
    writer = Box::new(tokio::io::stdout());
}
@@ -134,15 +134,11 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {

    console::initialize();

    lazy_static::initialize(&AGENT_CONFIG);

    // support vsock log
    let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;

    let (shutdown_tx, shutdown_rx) = channel(true);

    let agent_config = AGENT_CONFIG.clone();

    let init_mode = unistd::getpid() == Pid::from_raw(1);
    if init_mode {
        // dup a new file descriptor for this temporary logger writer,
@@ -163,20 +159,15 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
            e
        })?;

        let mut config = agent_config.write().await;
        config.parse_cmdline(KERNEL_CMDLINE_FILE)?;
        lazy_static::initialize(&AGENT_CONFIG);

        init_agent_as_init(&logger, config.unified_cgroup_hierarchy)?;
        init_agent_as_init(&logger, AGENT_CONFIG.read().await.unified_cgroup_hierarchy)?;
        drop(logger_async_guard);
    } else {
        // once parsed cmdline and set the config, release the write lock
        // as soon as possible in case other thread would get read lock on
        // it.
        let mut config = agent_config.write().await;
        config.parse_cmdline(KERNEL_CMDLINE_FILE)?;
        lazy_static::initialize(&AGENT_CONFIG);
    }
    let config = agent_config.read().await;

    let config = AGENT_CONFIG.read().await;
    let log_vport = config.log_vport as u32;

    let log_handle = tokio::spawn(create_logger_task(rfd, log_vport, shutdown_rx.clone()));
@@ -205,16 +196,16 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
        ttrpc_log_guard = Ok(slog_stdlog::init().map_err(|e| e)?);
    }

    if config.tracing != tracer::TraceType::Disabled {
        let _ = tracer::setup_tracing(NAME, &logger, &config)?;
    if config.tracing {
        tracer::setup_tracing(NAME, &logger)?;
    }

    let root = span!(tracing::Level::TRACE, "root-span", work_units = 2);
    let root_span = span!(tracing::Level::TRACE, "root-span");

    // XXX: Start the root trace transaction.
    //
    // XXX: Note that *ALL* spans needs to start after this point!!
    let _enter = root.enter();
    let span_guard = root_span.enter();

    // Start the sandbox and wait for its ttRPC server to end
    start_sandbox(&logger, &config, init_mode, &mut tasks, shutdown_rx.clone()).await?;
@@ -238,19 +229,29 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
    // Wait for all threads to finish
    let results = join_all(tasks).await;

    for result in results {
        if let Err(e) = result {
            return Err(anyhow!(e).into());
        }
    }
    // force flushing spans
    drop(span_guard);
    drop(root_span);

    if config.tracing != tracer::TraceType::Disabled {
    if config.tracing {
        tracer::end_tracing();
    }

    eprintln!("{} shutdown complete", NAME);

    Ok(())
    let mut wait_errors: Vec<tokio::task::JoinError> = vec![];
    for result in results {
        if let Err(e) = result {
            eprintln!("wait task error: {:#?}", e);
            wait_errors.push(e);
        }
    }

    if wait_errors.is_empty() {
        Ok(())
    } else {
        Err(anyhow!("wait all tasks failed: {:#?}", wait_errors).into())
    }
}

fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
@@ -325,7 +326,7 @@ async fn start_sandbox(
    sandbox.lock().await.sender = Some(tx);

    // vsock:///dev/vsock, port
    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str());
    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str())?;
    server.start().await?;

    rx.await?;
@@ -8,6 +8,7 @@ extern crate procfs;
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};

use anyhow::Result;
use slog::warn;
use tracing::instrument;

const NAMESPACE_KATA_AGENT: &str = "kata_agent";
@@ -74,7 +75,7 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
    AGENT_SCRAPE_COUNT.inc();

    // update agent process metrics
    update_agent_metrics();
    update_agent_metrics()?;

    // update guest os metrics
    update_guest_metrics();
@@ -84,23 +85,26 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {

    let mut buffer = Vec::new();
    let encoder = TextEncoder::new();
    encoder.encode(&metric_families, &mut buffer).unwrap();
    encoder.encode(&metric_families, &mut buffer)?;

    Ok(String::from_utf8(buffer).unwrap())
    Ok(String::from_utf8(buffer)?)
}

#[instrument]
fn update_agent_metrics() {
fn update_agent_metrics() -> Result<()> {
    let me = procfs::process::Process::myself();

    if let Err(err) = me {
        error!(sl!(), "failed to create process instance: {:?}", err);
        return;
    }
    let me = match me {
        Ok(p) => p,
        Err(e) => {
            // FIXME: return Ok for all errors?
            warn!(sl!(), "failed to create process instance: {:?}", e);

    let me = me.unwrap();
            return Ok(());
        }
    };

    let tps = procfs::ticks_per_second().unwrap();
    let tps = procfs::ticks_per_second()?;

    // process total time
    AGENT_TOTAL_TIME.set((me.stat.utime + me.stat.stime) as f64 / (tps as f64));
@@ -109,7 +113,7 @@ fn update_agent_metrics() {
    AGENT_TOTAL_VM.set(me.stat.vsize as f64);

    // Total resident set
    let page_size = procfs::page_size().unwrap() as f64;
    let page_size = procfs::page_size()? as f64;
    AGENT_TOTAL_RSS.set(me.stat.rss as f64 * page_size);

    // io
@@ -132,11 +136,11 @@ fn update_agent_metrics() {
    }

    match me.status() {
        Err(err) => {
            info!(sl!(), "failed to get process status: {:?}", err);
        }
        Err(err) => error!(sl!(), "failed to get process status: {:?}", err),
        Ok(status) => set_gauge_vec_proc_status(&AGENT_PROC_STATUS, &status),
    }

    Ok(())
}

#[instrument]

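For reference, a sketch of the `procfs` calls whose errors `update_agent_metrics()` now propagates instead of unwrapping, assuming the procfs 0.x API the agent uses here (where `stat` is a public field) plus the `anyhow` crate:

```rust
use procfs::process::Process;

fn main() -> anyhow::Result<()> {
    let me = Process::myself()?;
    let tps = procfs::ticks_per_second()? as f64;
    let page_size = procfs::page_size()? as f64;

    // The same quantities the agent gauges export.
    let cpu_secs = (me.stat.utime + me.stat.stime) as f64 / tps;
    let rss_bytes = me.stat.rss as f64 * page_size;
    println!("cpu={}s rss={}B", cpu_secs, rss_bytes);
    Ok(())
}
```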
@@ -4,28 +4,27 @@
//

use std::collections::HashMap;
use std::ffi::CString;
use std::fs;
use std::fs::File;
use std::io;
use std::io::{BufRead, BufReader};
use std::iter;
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::Path;
use std::ptr::null;
use std::str::FromStr;
use std::sync::Arc;

use tokio::sync::Mutex;

use libc::{c_void, mount};
use nix::mount::{self, MsFlags};
use nix::mount::MsFlags;
use nix::unistd::Gid;

use regex::Regex;

use crate::device::{
    get_scsi_device_name, get_virtio_blk_pci_device_name, online_device, wait_for_pmem_device,
    DRIVER_9P_TYPE, DRIVER_BLK_CCW_TYPE, DRIVER_BLK_TYPE, DRIVER_EPHEMERAL_TYPE, DRIVER_LOCAL_TYPE,
    DRIVER_MMIO_BLK_TYPE, DRIVER_NVDIMM_TYPE, DRIVER_SCSI_TYPE, DRIVER_VIRTIOFS_TYPE,
    DRIVER_WATCHABLE_BIND_TYPE,
};
use crate::linux_abi::*;
use crate::pci;
@@ -37,17 +36,6 @@ use anyhow::{anyhow, Context, Result};
use slog::Logger;
use tracing::instrument;

pub const DRIVER_9P_TYPE: &str = "9p";
pub const DRIVER_VIRTIOFS_TYPE: &str = "virtio-fs";
pub const DRIVER_BLK_TYPE: &str = "blk";
pub const DRIVER_BLK_CCW_TYPE: &str = "blk-ccw";
pub const DRIVER_MMIO_BLK_TYPE: &str = "mmioblk";
pub const DRIVER_SCSI_TYPE: &str = "scsi";
pub const DRIVER_NVDIMM_TYPE: &str = "nvdimm";
pub const DRIVER_EPHEMERAL_TYPE: &str = "ephemeral";
pub const DRIVER_LOCAL_TYPE: &str = "local";
pub const DRIVER_WATCHABLE_BIND_TYPE: &str = "watchable-bind";

pub const TYPE_ROOTFS: &str = "rootfs";

pub const MOUNT_GUEST_TAG: &str = "kataShared";
@@ -149,96 +137,53 @@ pub const STORAGE_HANDLER_LIST: &[&str] = &[
    DRIVER_WATCHABLE_BIND_TYPE,
];

#[derive(Debug, Clone)]
pub struct BareMount<'a> {
    source: &'a str,
    destination: &'a str,
    fs_type: &'a str,
#[instrument]
pub fn baremount(
    source: &Path,
    destination: &Path,
    fs_type: &str,
    flags: MsFlags,
    options: &'a str,
    logger: Logger,
}
    options: &str,
    logger: &Logger,
) -> Result<()> {
    let logger = logger.new(o!("subsystem" => "baremount"));

// mount mounts a source in to a destination. This will do some bookkeeping:
// * evaluate all symlinks
// * ensure the source exists
impl<'a> BareMount<'a> {
    #[instrument]
    pub fn new(
        s: &'a str,
        d: &'a str,
        fs_type: &'a str,
        flags: MsFlags,
        options: &'a str,
        logger: &Logger,
    ) -> Self {
        BareMount {
            source: s,
            destination: d,
            fs_type,
            flags,
            options,
            logger: logger.new(o!("subsystem" => "baremount")),
        }
    if source.as_os_str().is_empty() {
        return Err(anyhow!("need mount source"));
    }

    #[instrument]
    pub fn mount(&self) -> Result<()> {
        let source;
        let dest;
        let fs_type;
        let mut options = null();
        let cstr_options: CString;
        let cstr_source: CString;
        let cstr_dest: CString;
        let cstr_fs_type: CString;

        if self.source.is_empty() {
            return Err(anyhow!("need mount source"));
        }

        if self.destination.is_empty() {
            return Err(anyhow!("need mount destination"));
        }

        cstr_source = CString::new(self.source)?;
        source = cstr_source.as_ptr();

        cstr_dest = CString::new(self.destination)?;
        dest = cstr_dest.as_ptr();

        if self.fs_type.is_empty() {
            return Err(anyhow!("need mount FS type"));
        }

        cstr_fs_type = CString::new(self.fs_type)?;
        fs_type = cstr_fs_type.as_ptr();

        if !self.options.is_empty() {
            cstr_options = CString::new(self.options)?;
            options = cstr_options.as_ptr() as *const c_void;
        }

        info!(
            self.logger,
            "mount source={:?}, dest={:?}, fs_type={:?}, options={:?}",
            self.source,
            self.destination,
            self.fs_type,
            self.options
        );
        let rc = unsafe { mount(source, dest, fs_type, self.flags.bits(), options) };

        if rc < 0 {
            return Err(anyhow!(
                "failed to mount {:?} to {:?}, with error: {}",
                self.source,
                self.destination,
                io::Error::last_os_error()
            ));
        }
        Ok(())
    if destination.as_os_str().is_empty() {
        return Err(anyhow!("need mount destination"));
    }

    if fs_type.is_empty() {
        return Err(anyhow!("need mount FS type"));
    }

    info!(
        logger,
        "mount source={:?}, dest={:?}, fs_type={:?}, options={:?}",
        source,
        destination,
        fs_type,
        options
    );

    nix::mount::mount(
        Some(source),
        destination,
        Some(fs_type),
        flags,
        Some(options),
    )
    .map_err(|e| {
        anyhow!(
            "failed to mount {:?} to {:?}, with error: {}",
            source,
            destination,
            e
        )
    })
}

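The new `baremount()` replaces the unsafe `libc::mount` wrapper with the safe `nix::mount::mount()` call shown above. A hypothetical call site for that underlying call (the paths are placeholders, and the program must run as root for the mount to succeed):

```rust
use std::path::Path;

use anyhow::Result;
use nix::mount::MsFlags;

fn main() -> Result<()> {
    let source = Path::new("/tmp/baremount-src");
    let destination = Path::new("/tmp/baremount-dst");
    std::fs::create_dir_all(source)?;
    std::fs::create_dir_all(destination)?;

    // Equivalent of `mount --bind /tmp/baremount-src /tmp/baremount-dst`;
    // the fs type string is ignored by the kernel when MS_BIND is set.
    nix::mount::mount(Some(source), destination, Some("bind"), MsFlags::MS_BIND, Some(""))?;
    Ok(())
}
```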
#[instrument]
@@ -486,39 +431,37 @@ fn mount_storage(logger: &Logger, storage: &Storage) -> Result<()> {
        return Ok(());
    }

    match storage.fstype.as_str() {
        DRIVER_9P_TYPE | DRIVER_VIRTIOFS_TYPE => {
            let dest_path = Path::new(storage.mount_point.as_str());
            if !dest_path.exists() {
                fs::create_dir_all(dest_path).context("Create mount destination failed")?;
            }
        }
        _ => {
            ensure_destination_exists(storage.mount_point.as_str(), storage.fstype.as_str())?;
        }
    let mount_path = Path::new(&storage.mount_point);
    let src_path = Path::new(&storage.source);
    if storage.fstype == "bind" && !src_path.is_dir() {
        ensure_destination_file_exists(mount_path)
    } else {
        fs::create_dir_all(mount_path).map_err(anyhow::Error::from)
    }
    .context("Could not create mountpoint")?;

    let options_vec = storage.options.to_vec();
    let options_vec = options_vec.iter().map(String::as_str).collect();
    let (flags, options) = parse_mount_flags_and_options(options_vec);

    let source = Path::new(&storage.source);
    let mount_point = Path::new(&storage.mount_point);

    info!(logger, "mounting storage";
        "mount-source:" => storage.source.as_str(),
        "mount-destination" => storage.mount_point.as_str(),
        "mount-source" => source.display(),
        "mount-destination" => mount_point.display(),
        "mount-fstype" => storage.fstype.as_str(),
        "mount-options" => options.as_str(),
    );

    let bare_mount = BareMount::new(
        storage.source.as_str(),
        storage.mount_point.as_str(),
    baremount(
        source,
        mount_point,
        storage.fstype.as_str(),
        flags,
        options.as_str(),
        &logger,
    );

    bare_mount.mount()
    )
}

/// Looks for `mount_point` entry in the /proc/mounts.
@@ -637,11 +580,12 @@ fn mount_to_rootfs(logger: &Logger, m: &InitMount) -> Result<()> {

    let (flags, options) = parse_mount_flags_and_options(options_vec);

    let bare_mount = BareMount::new(m.src, m.dest, m.fstype, flags, options.as_str(), logger);

    fs::create_dir_all(Path::new(m.dest)).context("could not create directory")?;

    bare_mount.mount().or_else(|e| {
    let source = Path::new(m.src);
    let dest = Path::new(m.dest);

    baremount(source, dest, m.fstype, flags, &options, logger).or_else(|e| {
        if m.src != "dev" {
            return Err(e);
        }
@@ -684,8 +628,7 @@ pub fn get_mount_fs_type_from_file(mount_file: &str, mount_point: &str) -> Resul
    let file = File::open(mount_file)?;
    let reader = BufReader::new(file);

    let re = Regex::new(format!("device .+ mounted on {} with fstype (.+)", mount_point).as_str())
        .unwrap();
    let re = Regex::new(format!("device .+ mounted on {} with fstype (.+)", mount_point).as_str())?;

    // Read the file line by line using the lines() iterator from std::io::BufRead.
    for (_index, line) in reader.lines().enumerate() {
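A sketch of the fstype extraction this hunk performs, applying the same pattern to a sample line of the format the code expects (`regex` crate, as in the agent):

```rust
use regex::Regex;

fn main() {
    let mount_point = "/run/kata-containers";
    let re = Regex::new(
        format!("device .+ mounted on {} with fstype (.+)", mount_point).as_str(),
    )
    .unwrap();

    let line = "device /dev/vda1 mounted on /run/kata-containers with fstype ext4";
    let fstype = re.captures(line).and_then(|c| c.get(1)).map(|m| m.as_str());
    assert_eq!(fstype, Some("ext4"));
}
```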
@@ -763,20 +706,21 @@ pub fn get_cgroup_mounts(
        }
    }

    if fields[0].is_empty() {
    let subsystem_name = fields[0];

    if subsystem_name.is_empty() {
        continue;
    }

    if fields[0] == "devices" {
    if subsystem_name == "devices" {
        has_device_cgroup = true;
    }

    if let Some(value) = CGROUPS.get(&fields[0]) {
        let key = CGROUPS.keys().find(|&&f| f == fields[0]).unwrap();
    if let Some((key, value)) = CGROUPS.get_key_value(subsystem_name) {
        cg_mounts.push(InitMount {
            fstype: "cgroup",
            src: "cgroup",
            dest: *value,
            dest: value,
            options: vec!["nosuid", "nodev", "noexec", "relatime", key],
        });
    }
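The switch to `get_key_value()` is what lets the map's own `'static` key land directly in the `InitMount` options, replacing the old find-the-key scan. The standard-library call in isolation:

```rust
use std::collections::HashMap;

fn main() {
    let mut cgroups: HashMap<&'static str, &'static str> = HashMap::new();
    cgroups.insert("devices", "/sys/fs/cgroup/devices");

    // One lookup returns both the map's own key and the value.
    if let Some((key, value)) = cgroups.get_key_value("devices") {
        println!("subsystem={} mountpoint={}", key, value);
    }
}
```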
@@ -816,32 +760,26 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
#[instrument]
pub fn remove_mounts(mounts: &[String]) -> Result<()> {
    for m in mounts.iter() {
        mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
        nix::mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
    }
    Ok(())
}

// ensure_destination_exists will recursively create a given mountpoint. If directories
// are created, their permissions are initialized to mountPerm(0755)
#[instrument]
fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
    let d = Path::new(destination);
    if d.exists() {
fn ensure_destination_file_exists(path: &Path) -> Result<()> {
    if path.is_file() {
        return Ok(());
    } else if path.exists() {
        return Err(anyhow!("{:?} exists but is not a regular file", path));
    }
    let dir = d

    let dir = path
        .parent()
        .ok_or_else(|| anyhow!("mount destination {} doesn't exist", destination))?;
        .ok_or_else(|| anyhow!("failed to find parent path for {:?}", path))?;

    if !dir.exists() {
        fs::create_dir_all(dir).context(format!("create dir all {:?}", dir))?;
    }
    fs::create_dir_all(dir).context(format!("create_dir_all {:?}", dir))?;

    if fs_type != "bind" || d.is_dir() {
        fs::create_dir_all(d).context(format!("create dir all {:?}", d))?;
    } else {
        fs::File::create(d).context(format!("create file {:?}", d))?;
    }
    fs::File::create(path).context(format!("create empty file {:?}", path))?;

    Ok(())
}
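A hypothetical use of the `ensure_destination_file_exists()` contract above, inlined for illustration: when the bind-mount source is a file, the target must end up as an empty regular file with its parent directories created (the path below is a placeholder):

```rust
use std::fs;
use std::path::Path;

fn main() -> std::io::Result<()> {
    let target = Path::new("/tmp/kata-demo/binds/resolv.conf");

    if !target.is_file() {
        fs::create_dir_all(target.parent().unwrap())?; // create parent dirs
        fs::File::create(target)?; // empty file as the bind-mount target
    }
    assert!(target.is_file());
    Ok(())
}
```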
@@ -865,8 +803,6 @@ fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
mod tests {
    use super::*;
    use crate::{skip_if_not_root, skip_loop_if_not_root, skip_loop_if_root};
    use libc::umount;
    use std::fs::metadata;
    use std::fs::File;
    use std::fs::OpenOptions;
    use std::io::Write;
@@ -1006,16 +942,10 @@ mod tests {
            std::fs::create_dir_all(d).expect("failed to created directory");
        }

        let bare_mount = BareMount::new(
            &src_filename,
            &dest_filename,
            d.fs_type,
            d.flags,
            d.options,
            &logger,
        );
        let src = Path::new(&src_filename);
        let dest = Path::new(&dest_filename);

        let result = bare_mount.mount();
        let result = baremount(src, dest, d.fs_type, d.flags, d.options, &logger);

        let msg = format!("{}: result: {:?}", msg, result);

@@ -1023,17 +953,7 @@ mod tests {
        assert!(result.is_ok(), "{}", msg);

        // Cleanup
        unsafe {
            let cstr_dest =
                CString::new(dest_filename).expect("failed to convert dest to cstring");
            let umount_dest = cstr_dest.as_ptr();

            let ret = umount(umount_dest);

            let msg = format!("{}: umount result: {:?}", msg, result);

            assert!(ret == 0, "{}", msg);
        };
        nix::mount::umount(dest_filename.as_str()).unwrap();

        continue;
    }
@@ -1102,17 +1022,11 @@ mod tests {
        .unwrap_or_else(|_| panic!("failed to create directory {}", d));
    }

    // Create an actual mount
    let bare_mount = BareMount::new(
        mnt_src_filename,
        mnt_dest_filename,
        "bind",
        MsFlags::MS_BIND,
        "",
        &logger,
    );
    let src = Path::new(mnt_src_filename);
    let dest = Path::new(mnt_dest_filename);

    let result = bare_mount.mount();
    // Create an actual mount
    let result = baremount(src, dest, "bind", MsFlags::MS_BIND, "", &logger);
    assert!(result.is_ok(), "mount for test setup failed");

    let tests = &[
@@ -1444,37 +1358,20 @@ mod tests {
    }

    #[test]
    fn test_ensure_destination_exists() {
    fn test_ensure_destination_file_exists() {
        let dir = tempdir().expect("failed to create tmpdir");

        let mut testfile = dir.into_path();
        testfile.push("testfile");

        let result = ensure_destination_exists(testfile.to_str().unwrap(), "bind");
        let result = ensure_destination_file_exists(&testfile);

        assert!(result.is_ok());
        assert!(testfile.exists());

        let result = ensure_destination_exists(testfile.to_str().unwrap(), "bind");
        let result = ensure_destination_file_exists(&testfile);
        assert!(result.is_ok());

        let meta = metadata(testfile).unwrap();

        assert!(meta.is_file());

        let dir = tempdir().expect("failed to create tmpdir");
        let mut testdir = dir.into_path();
        testdir.push("testdir");

        let result = ensure_destination_exists(testdir.to_str().unwrap(), "ext4");
        assert!(result.is_ok());
        assert!(testdir.exists());

        let result = ensure_destination_exists(testdir.to_str().unwrap(), "ext4");
        assert!(result.is_ok());

        //let meta = metadata(testdir.to_str().unwrap()).unwrap();
        let meta = metadata(testdir).unwrap();
        assert!(meta.is_dir());
        assert!(testfile.is_file());
    }
}

@@ -13,7 +13,7 @@ use std::fs::File;
use std::path::{Path, PathBuf};
use tracing::instrument;

use crate::mount::{BareMount, FLAGS};
use crate::mount::{baremount, FLAGS};
use slog::Logger;

const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns";
@@ -104,7 +104,10 @@ impl Namespace {
        if let Err(err) = || -> Result<()> {
            let origin_ns_path = get_current_thread_ns_path(ns_type.get());

            File::open(Path::new(&origin_ns_path))?;
            let source = Path::new(&origin_ns_path);
            let destination = new_ns_path.as_path();

            File::open(&source)?;

            // Create a new netns on the current thread.
            let cf = ns_type.get_flags();
@@ -115,8 +118,6 @@ impl Namespace {
                nix::unistd::sethostname(hostname.unwrap())?;
            }
            // Bind mount the new namespace from the current thread onto the mount point to persist it.
            let source: &str = origin_ns_path.as_str();
            let destination: &str = new_ns_path.as_path().to_str().unwrap_or("none");

            let mut flags = MsFlags::empty();

@@ -129,10 +130,9 @@ impl Namespace {
                }
            };

            let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
            bare_mount.mount().map_err(|e| {
            baremount(source, destination, "none", flags, "", &logger).map_err(|e| {
                anyhow!(
                    "Failed to mount {} to {} with err:{:?}",
                    "Failed to mount {:?} to {:?} with err:{:?}",
                    source,
                    destination,
                    e
@@ -251,4 +251,126 @@ mod tests {
        assert_eq!("pid", pid.get());
        assert_eq!(CloneFlags::CLONE_NEWPID, pid.get_flags());
    }

    #[test]
    fn test_new() {
        // Create dummy logger and temp folder.
        let logger = slog::Logger::root(slog::Discard, o!());

        let ns_ipc = Namespace::new(&logger);
        assert_eq!(NamespaceType::Ipc, ns_ipc.ns_type);
    }

    #[test]
    fn test_get_ipc() {
        // Create dummy logger and temp folder.
        let logger = slog::Logger::root(slog::Discard, o!());

        let ns_ipc = Namespace::new(&logger).get_ipc();
        assert_eq!(NamespaceType::Ipc, ns_ipc.ns_type);
    }

    #[test]
    fn test_get_uts_with_hostname() {
        let hostname = String::from("a.test.com");
        // Create dummy logger and temp folder.
        let logger = slog::Logger::root(slog::Discard, o!());

        let ns_uts = Namespace::new(&logger).get_uts(hostname.as_str());
        assert_eq!(NamespaceType::Uts, ns_uts.ns_type);
        assert!(ns_uts.hostname.is_some());
    }

    #[test]
    fn test_get_uts() {
        let hostname = String::from("");
        // Create dummy logger and temp folder.
        let logger = slog::Logger::root(slog::Discard, o!());

        let ns_uts = Namespace::new(&logger).get_uts(hostname.as_str());
        assert_eq!(NamespaceType::Uts, ns_uts.ns_type);
        assert!(ns_uts.hostname.is_none());
    }

    #[test]
    fn test_get_pid() {
        // Create dummy logger and temp folder.
        let logger = slog::Logger::root(slog::Discard, o!());

        let ns_pid = Namespace::new(&logger).get_pid();
        assert_eq!(NamespaceType::Pid, ns_pid.ns_type);
    }

    #[test]
    fn test_set_root_dir() {
        // Create dummy logger and temp folder.
        let logger = slog::Logger::root(slog::Discard, o!());
        let tmpdir = Builder::new().prefix("pid").tempdir().unwrap();

        let ns_root = Namespace::new(&logger).set_root_dir(tmpdir.path().to_str().unwrap());
        assert_eq!(NamespaceType::Ipc, ns_root.ns_type);
        assert_eq!(ns_root.persistent_ns_dir, tmpdir.path().to_str().unwrap());
    }

    #[test]
    fn test_namespace_type_get() {
        #[derive(Debug)]
        struct TestData<'a> {
            ns_type: NamespaceType,
            str: &'a str,
        }

        let tests = &[
            TestData {
                ns_type: NamespaceType::Ipc,
                str: "ipc",
            },
            TestData {
                ns_type: NamespaceType::Uts,
                str: "uts",
            },
            TestData {
                ns_type: NamespaceType::Pid,
                str: "pid",
            },
        ];

        // Run the tests
        for (i, d) in tests.iter().enumerate() {
            // Create a string containing details of the test
            let msg = format!("test[{}]: {:?}", i, d);
            assert_eq!(d.str, d.ns_type.get(), "{}", msg)
        }
    }

    #[test]
    fn test_namespace_type_get_flags() {
        #[derive(Debug)]
        struct TestData {
            ns_type: NamespaceType,
            ns_flag: CloneFlags,
        }

        let tests = &[
            TestData {
                ns_type: NamespaceType::Ipc,
                ns_flag: CloneFlags::CLONE_NEWIPC,
            },
            TestData {
                ns_type: NamespaceType::Uts,
                ns_flag: CloneFlags::CLONE_NEWUTS,
            },
            TestData {
                ns_type: NamespaceType::Pid,
                ns_flag: CloneFlags::CLONE_NEWPID,
            },
        ];

        // Run the tests
        for (i, d) in tests.iter().enumerate() {
            // Create a string containing details of the test
            let msg = format!("test[{}]: {:?}", i, d);
            assert_eq!(d.ns_flag, d.ns_type.get_flags(), "{}", msg)
        }
    }
}

@@ -6,6 +6,7 @@
use anyhow::{anyhow, Context, Result};
use futures::{future, StreamExt, TryStreamExt};
use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network};
use nix::errno::Errno;
use protobuf::RepeatedField;
use protocols::types::{ARPNeighbor, IPAddress, IPFamily, Interface, Route};
use rtnetlink::{new_connection, packet, IpVersion};
@@ -312,7 +313,6 @@ impl Handle {

    for route in list {
        let link = self.find_link(LinkFilter::Name(&route.device)).await?;
        let is_v6 = is_ipv6(route.get_gateway()) || is_ipv6(route.get_dest());

        const MAIN_TABLE: u8 = packet::constants::RT_TABLE_MAIN;
        const UNICAST: u8 = packet::constants::RTN_UNICAST;
@@ -334,7 +334,7 @@ impl Handle {

        // `rtnetlink` offers a separate request builders for different IP versions (IP v4 and v6).
        // This if branch is a bit clumsy because it does almost the same.
        if is_v6 {
        if route.get_family() == IPFamily::v6 {
            let dest_addr = if !route.dest.is_empty() {
                Ipv6Network::from_str(&route.dest)?
            } else {
@@ -364,14 +364,17 @@ impl Handle {
                request = request.gateway(ip);
            }

            request.execute().await.with_context(|| {
                format!(
                    "Failed to add IP v6 route (src: {}, dst: {}, gtw: {})",
                    route.get_source(),
                    route.get_dest(),
                    route.get_gateway()
                )
            })?;
            if let Err(rtnetlink::Error::NetlinkError(message)) = request.execute().await {
                if Errno::from_i32(message.code.abs()) != Errno::EEXIST {
                    return Err(anyhow!(
                        "Failed to add IP v6 route (src: {}, dst: {}, gtw: {},Err: {})",
                        route.get_source(),
                        route.get_dest(),
                        route.get_gateway(),
                        message
                    ));
                }
            }
        } else {
            let dest_addr = if !route.dest.is_empty() {
                Ipv4Network::from_str(&route.dest)?
@@ -402,7 +405,17 @@ impl Handle {
                request = request.gateway(ip);
            }

            request.execute().await?;
            if let Err(rtnetlink::Error::NetlinkError(message)) = request.execute().await {
                if Errno::from_i32(message.code.abs()) != Errno::EEXIST {
                    return Err(anyhow!(
                        "Failed to add IP v4 route (src: {}, dst: {}, gtw: {},Err: {})",
                        route.get_source(),
                        route.get_dest(),
                        route.get_gateway(),
                        message
                    ));
                }
            }
        }
    }

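Both route families now treat EEXIST from netlink as success, making route addition idempotent. The pattern in isolation, assuming only that the netlink error code arrives as a (possibly negative) i32, as it does via `rtnetlink::Error::NetlinkError`:

```rust
use nix::errno::Errno;

fn add_route_idempotent(netlink_code: i32) -> Result<(), String> {
    // An already-present route is not an error.
    if Errno::from_i32(netlink_code.abs()) == Errno::EEXIST {
        return Ok(());
    }
    Err(format!("netlink error: {}", netlink_code))
}

fn main() {
    assert!(add_route_idempotent(-(Errno::EEXIST as i32)).is_ok());
    assert!(add_route_idempotent(-(Errno::EINVAL as i32)).is_err());
}
```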
@@ -594,10 +607,6 @@ fn format_address(data: &[u8]) -> Result<String> {
    }
}

fn is_ipv6(str: &str) -> bool {
    Ipv6Addr::from_str(str).is_ok()
}

fn parse_mac_address(addr: &str) -> Result<[u8; 6]> {
    let mut split = addr.splitn(6, ':');

@@ -932,16 +941,6 @@ mod tests {
        assert_eq!(bytes, [0xAB, 0x0C, 0xDE, 0x12, 0x34, 0x56]);
    }

    #[test]
    fn check_ipv6() {
        assert!(is_ipv6("::1"));
        assert!(is_ipv6("2001:0:3238:DFE1:63::FEFB"));

        assert!(!is_ipv6(""));
        assert!(!is_ipv6("127.0.0.1"));
        assert!(!is_ipv6("10.10.10.10"));
    }

    fn clean_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
        // ip link delete dummy
        Command::new("ip")

@@ -9,51 +9,143 @@ use std::str::FromStr;

use anyhow::anyhow;

// The PCI spec reserves 5 bits for slot number (a.k.a. device
// number), giving slots 0..31
// The PCI spec reserves 5 bits (0..31) for slot number (a.k.a. device
// number)
const SLOT_BITS: u8 = 5;
const SLOT_MAX: u8 = (1 << SLOT_BITS) - 1;

// Represents a PCI function's slot number (a.k.a. device number),
// giving its location on a single bus
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Slot(u8);
// The PCI spec reserves 3 bits (0..7) for function number
const FUNCTION_BITS: u8 = 3;
const FUNCTION_MAX: u8 = (1 << FUNCTION_BITS) - 1;

impl Slot {
    pub fn new<T: TryInto<u8> + fmt::Display + Copy>(v: T) -> anyhow::Result<Self> {
        if let Ok(v8) = v.try_into() {
            if v8 <= SLOT_MAX {
                return Ok(Slot(v8));
// Represents a PCI function's slot (a.k.a. device) and function
// numbers, giving its location on a single logical bus
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct SlotFn(u8);

impl SlotFn {
    pub fn new<T, U>(ss: T, f: U) -> anyhow::Result<Self>
    where
        T: TryInto<u8> + fmt::Display + Copy,
        U: TryInto<u8> + fmt::Display + Copy,
    {
        let ss8 = match ss.try_into() {
            Ok(ss8) if ss8 <= SLOT_MAX => ss8,
            _ => {
                return Err(anyhow!(
                    "PCI slot {} should be in range [0..{:#x}]",
                    ss,
                    SLOT_MAX
                ));
            }
        }
        Err(anyhow!(
            "PCI slot {} should be in range [0..{:#x}]",
            v,
            SLOT_MAX
        ))
        };

        let f8 = match f.try_into() {
            Ok(f8) if f8 <= FUNCTION_MAX => f8,
            _ => {
                return Err(anyhow!(
                    "PCI function {} should be in range [0..{:#x}]",
                    f,
                    FUNCTION_MAX
                ));
            }
        };

        Ok(SlotFn(ss8 << FUNCTION_BITS | f8))
    }

    pub fn slot(self) -> u8 {
        self.0 >> FUNCTION_BITS
    }

    pub fn function(self) -> u8 {
        self.0 & FUNCTION_MAX
    }
}

impl FromStr for Slot {
impl FromStr for SlotFn {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> anyhow::Result<Self> {
        let v = isize::from_str_radix(s, 16)?;
        Slot::new(v)
        let mut tokens = s.split('.').fuse();
        let slot = tokens.next();
        let func = tokens.next();

        if slot.is_none() || tokens.next().is_some() {
            return Err(anyhow!(
                "PCI slot/function {} should have the format SS.F",
                s
            ));
        }

        let slot = isize::from_str_radix(slot.unwrap(), 16)?;
        let func = match func {
            Some(func) => isize::from_str_radix(func, 16)?,
            None => 0,
        };

        SlotFn::new(slot, func)
    }
}

impl fmt::Display for Slot {
impl fmt::Display for SlotFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:02x}", self.0)
        write!(f, "{:02x}.{:01x}", self.slot(), self.function())
    }
}
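The `SlotFn` encoding above packs five slot bits above three function bits in one byte, the same devfn layout Linux uses. A standalone sketch of the packing arithmetic:

```rust
const FUNCTION_BITS: u8 = 3;
const FUNCTION_MAX: u8 = (1 << FUNCTION_BITS) - 1;

fn main() {
    let (slot, func): (u8, u8) = (0x1f, 0x7);
    let packed = (slot << FUNCTION_BITS) | func;

    assert_eq!(packed, 0xff);
    assert_eq!(packed >> FUNCTION_BITS, slot); // what SlotFn::slot() returns
    assert_eq!(packed & FUNCTION_MAX, func); // what SlotFn::function() returns
    println!("{:02x}.{:01x}", packed >> FUNCTION_BITS, packed & FUNCTION_MAX);
}
```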

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct Address {
    domain: u16,
    bus: u8,
    slotfn: SlotFn,
}

impl Address {
    pub fn new(domain: u16, bus: u8, slotfn: SlotFn) -> Self {
        Address {
            domain,
            bus,
            slotfn,
        }
    }
}

impl FromStr for Address {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> anyhow::Result<Self> {
        let mut tokens = s.split(':').fuse();
        let domain = tokens.next();
        let bus = tokens.next();
        let slotfn = tokens.next();

        if domain.is_none() || bus.is_none() || slotfn.is_none() || tokens.next().is_some() {
            return Err(anyhow!(
                "PCI address {} should have the format DDDD:BB:SS.F",
                s
            ));
        }

        let domain = u16::from_str_radix(domain.unwrap(), 16)?;
        let bus = u8::from_str_radix(bus.unwrap(), 16)?;
        let slotfn = SlotFn::from_str(slotfn.unwrap())?;

        Ok(Address::new(domain, bus, slotfn))
    }
}

impl fmt::Display for Address {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(f, "{:04x}:{:02x}:{}", self.domain, self.bus, self.slotfn)
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Path(Vec<Slot>);
pub struct Path(Vec<SlotFn>);

impl Path {
    pub fn new(slots: Vec<Slot>) -> anyhow::Result<Self> {
    pub fn new(slots: Vec<SlotFn>) -> anyhow::Result<Self> {
        if slots.is_empty() {
            return Err(anyhow!("PCI path must have at least one element"));
        }
@@ -63,7 +155,7 @@ impl Path {

// Let Path be treated as a slice of Slots
impl Deref for Path {
    type Target = [Slot];
    type Target = [SlotFn];

    fn deref(&self) -> &Self::Target {
        &self.0
@@ -85,83 +177,170 @@ impl FromStr for Path {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> anyhow::Result<Self> {
        let rslots: anyhow::Result<Vec<Slot>> = s.split('/').map(Slot::from_str).collect();
        let rslots: anyhow::Result<Vec<SlotFn>> = s.split('/').map(SlotFn::from_str).collect();
        Path::new(rslots?)
    }
}

#[cfg(test)]
mod tests {
    use crate::pci::{Path, Slot};
    use super::*;
    use std::str::FromStr;

    #[test]
    fn test_slot() {
    fn test_slotfn() {
        // Valid slots
        let slot = Slot::new(0x00).unwrap();
        assert_eq!(format!("{}", slot), "00");
        let sf = SlotFn::new(0x00, 0x0).unwrap();
        assert_eq!(format!("{}", sf), "00.0");

        let slot = Slot::from_str("00").unwrap();
        assert_eq!(format!("{}", slot), "00");
        let sf = SlotFn::from_str("00.0").unwrap();
        assert_eq!(format!("{}", sf), "00.0");

        let slot = Slot::new(31).unwrap();
        let slot2 = Slot::from_str("1f").unwrap();
        assert_eq!(slot, slot2);
        let sf = SlotFn::from_str("00").unwrap();
        assert_eq!(format!("{}", sf), "00.0");

        let sf = SlotFn::new(31, 7).unwrap();
        let sf2 = SlotFn::from_str("1f.7").unwrap();
        assert_eq!(sf, sf2);

        // Bad slots
        let slot = Slot::new(-1);
        assert!(slot.is_err());
        let sf = SlotFn::new(-1, 0);
        assert!(sf.is_err());

        let slot = Slot::new(32);
        assert!(slot.is_err());
        let sf = SlotFn::new(32, 0);
        assert!(sf.is_err());

        let slot = Slot::from_str("20");
        assert!(slot.is_err());
        let sf = SlotFn::from_str("20.0");
        assert!(sf.is_err());

        let slot = Slot::from_str("xy");
        assert!(slot.is_err());
        let sf = SlotFn::from_str("20");
        assert!(sf.is_err());

        let slot = Slot::from_str("00/");
        assert!(slot.is_err());
        let sf = SlotFn::from_str("xy.0");
        assert!(sf.is_err());

        let slot = Slot::from_str("");
        assert!(slot.is_err());
        let sf = SlotFn::from_str("xy");
        assert!(sf.is_err());

        // Bad functions
        let sf = SlotFn::new(0, -1);
        assert!(sf.is_err());

        let sf = SlotFn::new(0, 8);
        assert!(sf.is_err());

        let sf = SlotFn::from_str("00.8");
        assert!(sf.is_err());

        let sf = SlotFn::from_str("00.x");
        assert!(sf.is_err());

        // Bad formats
        let sf = SlotFn::from_str("");
        assert!(sf.is_err());

        let sf = SlotFn::from_str("00.0.0");
        assert!(sf.is_err());

        let sf = SlotFn::from_str("00.0/");
        assert!(sf.is_err());

        let sf = SlotFn::from_str("00/");
        assert!(sf.is_err());
    }

    #[test]
    fn test_address() {
        // Valid addresses
        let sf0_0 = SlotFn::new(0, 0).unwrap();
        let sf1f_7 = SlotFn::new(0x1f, 7).unwrap();

        let addr = Address::new(0, 0, sf0_0);
        assert_eq!(format!("{}", addr), "0000:00:00.0");
        let addr2 = Address::from_str("0000:00:00.0").unwrap();
        assert_eq!(addr, addr2);

        let addr = Address::new(0xffff, 0xff, sf1f_7);
        assert_eq!(format!("{}", addr), "ffff:ff:1f.7");
        let addr2 = Address::from_str("ffff:ff:1f.7").unwrap();
        assert_eq!(addr, addr2);

        // Bad addresses
        let addr = Address::from_str("10000:00:00.0");
        assert!(addr.is_err());

        let addr = Address::from_str("0000:100:00.0");
        assert!(addr.is_err());

        let addr = Address::from_str("0000:00:20.0");
        assert!(addr.is_err());

        let addr = Address::from_str("0000:00:00.8");
        assert!(addr.is_err());

        let addr = Address::from_str("xyz");
        assert!(addr.is_err());

        let addr = Address::from_str("xyxy:xy:xy.z");
        assert!(addr.is_err());

        let addr = Address::from_str("0000:00:00.0:00");
        assert!(addr.is_err());
    }

    #[test]
    fn test_path() {
        let slot3 = Slot::new(0x03).unwrap();
        let slot4 = Slot::new(0x04).unwrap();
        let slot5 = Slot::new(0x05).unwrap();
        let sf3_0 = SlotFn::new(0x03, 0).unwrap();
        let sf4_0 = SlotFn::new(0x04, 0).unwrap();
        let sf5_0 = SlotFn::new(0x05, 0).unwrap();
        let sfa_5 = SlotFn::new(0x0a, 5).unwrap();
        let sfb_6 = SlotFn::new(0x0b, 6).unwrap();
        let sfc_7 = SlotFn::new(0x0c, 7).unwrap();

        // Valid paths
        let pcipath = Path::new(vec![slot3]).unwrap();
        assert_eq!(format!("{}", pcipath), "03");
        let pcipath = Path::new(vec![sf3_0]).unwrap();
        assert_eq!(format!("{}", pcipath), "03.0");
        let pcipath2 = Path::from_str("03.0").unwrap();
        assert_eq!(pcipath, pcipath2);
        let pcipath2 = Path::from_str("03").unwrap();
        assert_eq!(pcipath, pcipath2);
        assert_eq!(pcipath.len(), 1);
        assert_eq!(pcipath[0], slot3);
        assert_eq!(pcipath[0], sf3_0);

        let pcipath = Path::new(vec![slot3, slot4]).unwrap();
        assert_eq!(format!("{}", pcipath), "03/04");
        let pcipath = Path::new(vec![sf3_0, sf4_0]).unwrap();
        assert_eq!(format!("{}", pcipath), "03.0/04.0");
        let pcipath2 = Path::from_str("03.0/04.0").unwrap();
        assert_eq!(pcipath, pcipath2);
        let pcipath2 = Path::from_str("03/04").unwrap();
        assert_eq!(pcipath, pcipath2);
        assert_eq!(pcipath.len(), 2);
        assert_eq!(pcipath[0], slot3);
        assert_eq!(pcipath[1], slot4);
        assert_eq!(pcipath[0], sf3_0);
        assert_eq!(pcipath[1], sf4_0);

        let pcipath = Path::new(vec![slot3, slot4, slot5]).unwrap();
        assert_eq!(format!("{}", pcipath), "03/04/05");
        let pcipath = Path::new(vec![sf3_0, sf4_0, sf5_0]).unwrap();
        assert_eq!(format!("{}", pcipath), "03.0/04.0/05.0");
        let pcipath2 = Path::from_str("03.0/04.0/05.0").unwrap();
        assert_eq!(pcipath, pcipath2);
        let pcipath2 = Path::from_str("03/04/05").unwrap();
        assert_eq!(pcipath, pcipath2);
        assert_eq!(pcipath.len(), 3);
        assert_eq!(pcipath[0], slot3);
        assert_eq!(pcipath[1], slot4);
        assert_eq!(pcipath[2], slot5);
        assert_eq!(pcipath[0], sf3_0);
        assert_eq!(pcipath[1], sf4_0);
        assert_eq!(pcipath[2], sf5_0);

        let pcipath = Path::new(vec![sfa_5, sfb_6, sfc_7]).unwrap();
        assert_eq!(format!("{}", pcipath), "0a.5/0b.6/0c.7");
        let pcipath2 = Path::from_str("0a.5/0b.6/0c.7").unwrap();
        assert_eq!(pcipath, pcipath2);
        assert_eq!(pcipath.len(), 3);
        assert_eq!(pcipath[0], sfa_5);
        assert_eq!(pcipath[1], sfb_6);
        assert_eq!(pcipath[2], sfc_7);

        // Bad paths
        assert!(Path::new(vec!()).is_err());
        assert!(Path::from_str("20").is_err());
        assert!(Path::from_str("00.8").is_err());
        assert!(Path::from_str("//").is_err());
        assert!(Path::from_str("xyz").is_err());
    }

@@ -3,7 +3,6 @@
// SPDX-License-Identifier: Apache-2.0
//

use crate::pci;
use async_trait::async_trait;
use rustjail::{pipestream::PipeStream, process::StreamType};
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf};
@@ -21,7 +20,7 @@ use ttrpc::{

use anyhow::{anyhow, Context, Result};
use oci::{LinuxNamespace, Root, Spec};
use protobuf::{RepeatedField, SingularPtrField};
use protobuf::{Message, RepeatedField, SingularPtrField};
use protocols::agent::{
    AddSwapRequest, AgentDetails, CopyFileRequest, GuestDetailsResponse, Interfaces, Metrics,
    OOMEvent, ReadStreamResponse, Routes, StatsContainerResponse, WaitProcessResponse,
@@ -44,12 +43,13 @@ use nix::sys::stat;
use nix::unistd::{self, Pid};
use rustjail::process::ProcessOperations;

use crate::device::{add_devices, pcipath_to_sysfs, rescan_pci_bus, update_device_cgroup};
use crate::device::{add_devices, get_virtio_blk_pci_device_name, update_device_cgroup};
use crate::linux_abi::*;
use crate::metrics::get_metrics;
use crate::mount::{add_storages, remove_mounts, BareMount, STORAGE_HANDLER_LIST};
use crate::mount::{add_storages, baremount, remove_mounts, STORAGE_HANDLER_LIST};
use crate::namespace::{NSTYPEIPC, NSTYPEPID, NSTYPEUTS};
use crate::network::setup_guest_dns;
use crate::pci;
use crate::random;
use crate::sandbox::Sandbox;
use crate::version::{AGENT_VERSION, API_VERSION};
@@ -86,6 +86,21 @@ macro_rules! sl {
    };
}

macro_rules! is_allowed {
    ($req:ident) => {
        if !AGENT_CONFIG
            .read()
            .await
            .is_allowed_endpoint($req.descriptor().name())
        {
            return Err(ttrpc_error(
                ttrpc::Code::UNIMPLEMENTED,
                format!("{} is blocked", $req.descriptor().name()),
            ));
        }
    };
}

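A simplified standalone sketch of the gate `is_allowed!` implements; `AgentConfig` here is a stand-in (the real one sits behind an async `RwLock` and is populated from the agent's configuration), and treating an empty list as allow-all is an assumption made only for this demo:

```rust
use std::collections::HashSet;

struct AgentConfig {
    allowed_endpoints: HashSet<String>,
}

impl AgentConfig {
    fn is_allowed_endpoint(&self, name: &str) -> bool {
        // Demo semantics: an empty list means every endpoint is allowed.
        self.allowed_endpoints.is_empty() || self.allowed_endpoints.contains(name)
    }
}

fn main() {
    let mut allowed_endpoints = HashSet::new();
    allowed_endpoints.insert("CreateContainer".to_string());
    let cfg = AgentConfig { allowed_endpoints };

    assert!(cfg.is_allowed_endpoint("CreateContainer"));
    // A blocked endpoint is answered with ttrpc::Code::UNIMPLEMENTED.
    assert!(!cfg.is_allowed_endpoint("StartTracing"));
}
```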
#[derive(Clone, Debug)]
pub struct AgentService {
    sandbox: Arc<Mutex<Sandbox>>,
@@ -96,11 +111,18 @@ pub struct AgentService {
// ^[a-zA-Z0-9][a-zA-Z0-9_.-]+$
//
fn verify_cid(id: &str) -> Result<()> {
    let valid = id.len() > 1
        && id.chars().next().unwrap().is_alphanumeric()
        && id
            .chars()
            .all(|c| (c.is_alphanumeric() || ['.', '-', '_'].contains(&c)));
    let mut chars = id.chars();

    let valid = match chars.next() {
        Some(first)
            if first.is_alphanumeric()
                && id.len() > 1
                && chars.all(|c| c.is_alphanumeric() || ['.', '-', '_'].contains(&c)) =>
        {
            true
        }
        _ => false,
    };

    match valid {
        true => Ok(()),
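The manual character walk above implements the rule quoted in the comment. The same rule as a regex, shown only as a cross-check (the `regex` crate is an extra dependency the handler deliberately avoids):

```rust
use regex::Regex;

fn main() {
    let re = Regex::new(r"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$").unwrap();

    assert!(re.is_match("container-1"));
    assert!(re.is_match("0.pod_sandbox"));
    assert!(!re.is_match("-leading-dash")); // must start alphanumeric
    assert!(!re.is_match("x")); // at least two characters
    assert!(!re.is_match("bad/id")); // '/' is not in the allowed set
}
```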
@@ -134,10 +156,6 @@ impl AgentService {

        info!(sl!(), "receive createcontainer, spec: {:?}", &oci);

        // re-scan PCI bus
        // looking for hidden devices
        rescan_pci_bus().context("Could not rescan PCI bus")?;

        // Some devices need some extra processing (the ones invoked with
        // --device for instance), and that's what this call is doing. It
        // updates the devices listed in the OCI spec, so that they actually
@@ -165,7 +183,7 @@ impl AgentService {
        update_device_cgroup(&mut oci)?;

        // Append guest hooks
        append_guest_hooks(&s, &mut oci);
        append_guest_hooks(&s, &mut oci)?;

        // write spec to bundle path, hooks might
        // read ocispec
@@ -187,21 +205,14 @@ impl AgentService {
            LinuxContainer::new(cid.as_str(), CONTAINER_BASE, opts, &sl!())?;

        let pipe_size = AGENT_CONFIG.read().await.container_pipe_size;
        let p = if oci.process.is_some() {
            Process::new(
                &sl!(),
                oci.process.as_ref().unwrap(),
                cid.as_str(),
                true,
                pipe_size,
            )?

        let p = if let Some(p) = oci.process {
            Process::new(&sl!(), &p, cid.as_str(), true, pipe_size)?
        } else {
            info!(sl!(), "no process configurations!");
            return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
        };

        ctr.start(p).await?;

        s.update_shared_pidns(&ctr)?;
        s.add_container(ctr);
        info!(sl!(), "created container!");
@@ -223,11 +234,17 @@ impl AgentService {

        ctr.exec()?;

        if sid == cid {
            return Ok(());
        }

        // start oom event loop
        if sid != cid && ctr.cgroup_manager.is_some() {
            let cg_path = ctr.cgroup_manager.as_ref().unwrap().get_cg_path("memory");
            if cg_path.is_some() {
                let rx = notifier::notify_oom(cid.as_str(), cg_path.unwrap()).await?;
        if let Some(ref ctr) = ctr.cgroup_manager {
            let cg_path = ctr.get_cg_path("memory");

            if let Some(cg_path) = cg_path {
                let rx = notifier::notify_oom(cid.as_str(), cg_path.to_string()).await?;

                s.run_oom_event_monitor(rx, cid.clone()).await;
            }
        }
@@ -327,14 +344,13 @@ impl AgentService {
        let s = self.sandbox.clone();
        let mut sandbox = s.lock().await;

        let process = if req.process.is_some() {
            req.process.as_ref().unwrap()
        } else {
            return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
        };
        let process = req
            .process
            .into_option()
            .ok_or_else(|| anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)))?;

        let pipe_size = AGENT_CONFIG.read().await.container_pipe_size;
        let ocip = rustjail::process_grpc_to_oci(process);
        let ocip = rustjail::process_grpc_to_oci(&process);
        let p = Process::new(&sl!(), &ocip, exec_id.as_str(), false, pipe_size)?;

        let ctr = sandbox
@@ -352,7 +368,6 @@ impl AgentService {
        let eid = req.exec_id.clone();
        let s = self.sandbox.clone();
        let mut sandbox = s.lock().await;
        let mut init = false;

        info!(
            sl!(),
@@ -361,13 +376,14 @@ impl AgentService {
            "exec-id" => eid.clone(),
        );

        if eid.is_empty() {
            init = true;
        }
        let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;

        let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), init)?;

        let mut signal = Signal::try_from(req.signal as i32).unwrap();
        let mut signal = Signal::try_from(req.signal as i32).map_err(|e| {
            anyhow!(e).context(format!(
                "failed to convert {:?} to signal (container-id: {}, exec-id: {})",
                req.signal, cid, eid
            ))
        })?;

        // For container initProcess, if it hasn't installed handler for "SIGTERM" signal,
        // it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal
@@ -403,7 +419,7 @@ impl AgentService {

        let exit_rx = {
            let mut sandbox = s.lock().await;
            let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false)?;
            let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;

            p.exit_watchers.push(exit_send);
            pid = p.pid;
@@ -422,18 +438,22 @@ impl AgentService {
            .get_container(&cid)
            .ok_or_else(|| anyhow!("Invalid container id"))?;

        let mut p = match ctr.processes.get_mut(&pid) {
        let p = match ctr.processes.get_mut(&pid) {
            Some(p) => p,
            None => {
                // Lost race, pick up exit code from channel
                resp.status = exit_recv.recv().await.unwrap();
                resp.status = exit_recv
                    .recv()
                    .await
                    .ok_or_else(|| anyhow!("Failed to receive exit code"))?;

                return Ok(resp);
            }
        };

        // need to close all fd
        // ignore errors for some fd might be closed by stream
        let _ = cleanup_process(&mut p);
        p.cleanup_process_stream();

        resp.status = p.exit_code;
        // broadcast exit code to all parallel watchers
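A sketch of the exit_watchers pattern referenced above: each waiter registers a sender, the reaper fans the exit code out to every registered channel, and a waiter that lost the race still receives the status from its own receiver (tokio mpsc, simplified to a single watcher):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let mut exit_watchers: Vec<mpsc::Sender<i32>> = vec![];

    let (tx, mut rx) = mpsc::channel::<i32>(1);
    exit_watchers.push(tx);

    // Reaper side: broadcast the exit code to all parallel watchers.
    for watcher in exit_watchers.iter() {
        let _ = watcher.send(137).await;
    }

    // Waiter side: pick up the exit code from the channel.
    let status = rx.recv().await.expect("Failed to receive exit code");
    assert_eq!(status, 137);
}
```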
@@ -457,7 +477,7 @@ impl AgentService {
        let writer = {
            let s = self.sandbox.clone();
            let mut sandbox = s.lock().await;
            let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false)?;
            let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;

            // use ptmx io
            if p.term_master.is_some() {
@@ -468,7 +488,7 @@ impl AgentService {
            }
        };

        let writer = writer.unwrap();
        let writer = writer.ok_or_else(|| anyhow!("cannot get writer"))?;
        writer.lock().await.write_all(req.data.as_slice()).await?;

        let mut resp = WriteStreamResponse::new();
@@ -490,7 +510,7 @@ impl AgentService {
        let s = self.sandbox.clone();
        let mut sandbox = s.lock().await;

        let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false)?;
        let p = sandbox.find_container_process(cid.as_str(), eid.as_str())?;

        if p.term_master.is_some() {
            term_exit_notifier = p.term_exit_notifier.clone();
@@ -510,7 +530,7 @@ impl AgentService {
            return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
        }

        let reader = reader.unwrap();
        let reader = reader.ok_or_else(|| anyhow!("cannot get stream reader"))?;

        tokio::select! {
            _ = term_exit_notifier.notified() => {
@@ -535,6 +555,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::CreateContainerRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "create_container", req);
        is_allowed!(req);
        match self.do_create_container(req).await {
            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
            Ok(_) => Ok(Empty::new()),
@@ -547,6 +568,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::StartContainerRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "start_container", req);
        is_allowed!(req);
        match self.do_start_container(req).await {
            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
            Ok(_) => Ok(Empty::new()),
@@ -559,6 +581,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::RemoveContainerRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "remove_container", req);
        is_allowed!(req);
        match self.do_remove_container(req).await {
            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
            Ok(_) => Ok(Empty::new()),
@@ -571,6 +594,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::ExecProcessRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "exec_process", req);
        is_allowed!(req);
        match self.do_exec_process(req).await {
            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
            Ok(_) => Ok(Empty::new()),
@@ -583,6 +607,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::SignalProcessRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "signal_process", req);
        is_allowed!(req);
        match self.do_signal_process(req).await {
            Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
            Ok(_) => Ok(Empty::new()),
@@ -595,6 +620,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::WaitProcessRequest,
    ) -> ttrpc::Result<WaitProcessResponse> {
        trace_rpc_call!(ctx, "wait_process", req);
        is_allowed!(req);
        self.do_wait_process(req)
            .await
            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
@@ -606,6 +632,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::UpdateContainerRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "update_container", req);
        is_allowed!(req);
        let cid = req.container_id.clone();
        let res = req.resources;

@@ -621,8 +648,8 @@ impl protocols::agent_ttrpc::AgentService for AgentService {

        let resp = Empty::new();

        if res.is_some() {
            let oci_res = rustjail::resources_grpc_to_oci(&res.unwrap());
        if let Some(res) = res.as_ref() {
            let oci_res = rustjail::resources_grpc_to_oci(res);
            match ctr.set(oci_res) {
                Err(e) => {
                    return Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()));
@@ -641,6 +668,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::StatsContainerRequest,
    ) -> ttrpc::Result<StatsContainerResponse> {
        trace_rpc_call!(ctx, "stats_container", req);
        is_allowed!(req);
        let cid = req.container_id;
        let s = Arc::clone(&self.sandbox);
        let mut sandbox = s.lock().await;
@@ -662,6 +690,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::PauseContainerRequest,
    ) -> ttrpc::Result<protocols::empty::Empty> {
        trace_rpc_call!(ctx, "pause_container", req);
        is_allowed!(req);
        let cid = req.get_container_id();
        let s = Arc::clone(&self.sandbox);
        let mut sandbox = s.lock().await;
@@ -685,6 +714,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::ResumeContainerRequest,
    ) -> ttrpc::Result<protocols::empty::Empty> {
        trace_rpc_call!(ctx, "resume_container", req);
        is_allowed!(req);
        let cid = req.get_container_id();
        let s = Arc::clone(&self.sandbox);
        let mut sandbox = s.lock().await;
@@ -707,6 +737,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        _ctx: &TtrpcContext,
        req: protocols::agent::WriteStreamRequest,
    ) -> ttrpc::Result<WriteStreamResponse> {
        is_allowed!(req);
        self.do_write_stream(req)
            .await
            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
@@ -717,6 +748,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        _ctx: &TtrpcContext,
        req: protocols::agent::ReadStreamRequest,
    ) -> ttrpc::Result<ReadStreamResponse> {
        is_allowed!(req);
        self.do_read_stream(req, true)
            .await
            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
@@ -727,6 +759,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        _ctx: &TtrpcContext,
        req: protocols::agent::ReadStreamRequest,
    ) -> ttrpc::Result<ReadStreamResponse> {
        is_allowed!(req);
        self.do_read_stream(req, false)
            .await
            .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
@@ -738,32 +771,23 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::CloseStdinRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "close_stdin", req);
        is_allowed!(req);

        let cid = req.container_id.clone();
        let eid = req.exec_id;
        let s = Arc::clone(&self.sandbox);
        let mut sandbox = s.lock().await;

        let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
            ttrpc_error(
                ttrpc::Code::INVALID_ARGUMENT,
                format!("invalid argument: {:?}", e),
            )
        })?;
        let p = sandbox
            .find_container_process(cid.as_str(), eid.as_str())
            .map_err(|e| {
                ttrpc_error(
                    ttrpc::Code::INVALID_ARGUMENT,
                    format!("invalid argument: {:?}", e),
                )
            })?;

        if p.term_master.is_some() {
            p.close_stream(StreamType::TermMaster);
            let _ = unistd::close(p.term_master.unwrap());
            p.term_master = None;
        }

        if p.parent_stdin.is_some() {
            p.close_stream(StreamType::ParentStdin);
            let _ = unistd::close(p.parent_stdin.unwrap());
            p.parent_stdin = None;
        }

        p.notify_term_close();
        p.close_stdin();

        Ok(Empty::new())
    }
@@ -774,37 +798,39 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
        req: protocols::agent::TtyWinResizeRequest,
    ) -> ttrpc::Result<Empty> {
        trace_rpc_call!(ctx, "tty_win_resize", req);
        is_allowed!(req);

        let cid = req.container_id.clone();
        let eid = req.exec_id.clone();
        let s = Arc::clone(&self.sandbox);
        let mut sandbox = s.lock().await;
        let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
            ttrpc_error(
                ttrpc::Code::UNAVAILABLE,
                format!("invalid argument: {:?}", e),
            )
        })?;
        let p = sandbox
            .find_container_process(cid.as_str(), eid.as_str())
            .map_err(|e| {
                ttrpc_error(
                    ttrpc::Code::UNAVAILABLE,
                    format!("invalid argument: {:?}", e),
                )
            })?;

        if p.term_master.is_none() {
        if let Some(fd) = p.term_master {
            unsafe {
                let win = winsize {
                    ws_row: req.row as c_ushort,
                    ws_col: req.column as c_ushort,
                    ws_xpixel: 0,
                    ws_ypixel: 0,
                };

                let err = libc::ioctl(fd, TIOCSWINSZ, &win);
                Errno::result(err).map(drop).map_err(|e| {
                    ttrpc_error(ttrpc::Code::INTERNAL, format!("ioctl error: {:?}", e))
                })?;
            }
        } else {
            return Err(ttrpc_error(ttrpc::Code::UNAVAILABLE, "no tty".to_string()));
        }

        let fd = p.term_master.unwrap();
        unsafe {
            let win = winsize {
                ws_row: req.row as c_ushort,
                ws_col: req.column as c_ushort,
                ws_xpixel: 0,
                ws_ypixel: 0,
            };

            let err = libc::ioctl(fd, TIOCSWINSZ, &win);
            Errno::result(err)
                .map(drop)
                .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, format!("ioctl error: {:?}", e)))?;
        }

        Ok(Empty::new())
    }

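The resize body above is the classic TIOCSWINSZ ioctl. The same call against stdin instead of a container pty (harmless if stdin is not a tty, where the ioctl simply returns -1):

```rust
use libc::{c_ushort, ioctl, winsize, TIOCSWINSZ};

fn main() {
    let win = winsize {
        ws_row: 40 as c_ushort,
        ws_col: 120 as c_ushort,
        ws_xpixel: 0,
        ws_ypixel: 0,
    };

    // fd 0 = stdin; a real handler would use the pty master fd instead.
    let rc = unsafe { ioctl(0, TIOCSWINSZ, &win) };
    println!("ioctl rc = {}", rc);
}
```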
@@ -814,6 +840,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
req: protocols::agent::UpdateInterfaceRequest,
|
||||
) -> ttrpc::Result<Interface> {
|
||||
trace_rpc_call!(ctx, "update_interface", req);
|
||||
is_allowed!(req);
|
||||
|
||||
let interface = req.interface.into_option().ok_or_else(|| {
|
||||
ttrpc_error(
|
||||
@@ -841,6 +868,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
req: protocols::agent::UpdateRoutesRequest,
|
||||
) -> ttrpc::Result<Routes> {
|
||||
trace_rpc_call!(ctx, "update_routes", req);
|
||||
is_allowed!(req);
|
||||
|
||||
let new_routes = req
|
||||
.routes
|
||||
@@ -881,6 +909,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
req: protocols::agent::ListInterfacesRequest,
|
||||
) -> ttrpc::Result<Interfaces> {
|
||||
trace_rpc_call!(ctx, "list_interfaces", req);
|
||||
is_allowed!(req);
|
||||
|
||||
let list = self
|
||||
.sandbox
|
||||
@@ -908,6 +937,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
req: protocols::agent::ListRoutesRequest,
|
||||
) -> ttrpc::Result<Routes> {
|
||||
trace_rpc_call!(ctx, "list_routes", req);
|
||||
is_allowed!(req);
|
||||
|
||||
let list = self
|
||||
.sandbox
|
||||
@@ -924,29 +954,13 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
|
||||
})
|
||||
}
|
||||
|
||||
async fn start_tracing(
|
||||
&self,
|
||||
_ctx: &TtrpcContext,
|
||||
req: protocols::agent::StartTracingRequest,
|
||||
) -> ttrpc::Result<Empty> {
|
||||
info!(sl!(), "start_tracing {:?}", req);
|
||||
Ok(Empty::new())
|
||||
}
|
||||
|
||||
async fn stop_tracing(
|
||||
&self,
|
||||
_ctx: &TtrpcContext,
|
||||
_req: protocols::agent::StopTracingRequest,
|
||||
) -> ttrpc::Result<Empty> {
|
||||
Ok(Empty::new())
|
||||
}
|
||||
|
||||
async fn create_sandbox(
|
||||
&self,
|
||||
ctx: &TtrpcContext,
|
||||
req: protocols::agent::CreateSandboxRequest,
|
||||
) -> ttrpc::Result<Empty> {
|
||||
trace_rpc_call!(ctx, "create_sandbox", req);
|
||||
is_allowed!(req);
|
||||
|
||||
{
|
||||
let sandbox = self.sandbox.clone();
|
||||
@@ -1012,17 +1026,31 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::DestroySandboxRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "destroy_sandbox", req);
is_allowed!(req);

let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().await;
// destroy all containers, clean up, notify agent to exit
// etc.
sandbox.destroy().await.unwrap();
sandbox
.destroy()
.await
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
// Close get_oom_event connection,
// otherwise it will block the shutdown of ttrpc.
sandbox.event_tx.take();

sandbox.sender.take().unwrap().send(1).unwrap();
sandbox
.sender
.take()
.ok_or_else(|| {
ttrpc_error(
ttrpc::Code::INTERNAL,
"failed to get sandbox sender channel".to_string(),
)
})?
.send(1)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

Ok(Empty::new())
}
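This destroy_sandbox rewrite is the clearest instance of the pattern running through the whole file: each unwrap() on the RPC path becomes ok_or_else/map_err into a ttrpc error, so a failure is reported to the client instead of aborting the agent. A hypothetical helper capturing the recurring map_err closure (the name internal_error is ours; ttrpc_error is the helper already used above, which must return ttrpc::Error given how the handlers use it):

// Lift any displayable error into a ttrpc INTERNAL error, so handlers can
// write `.map_err(internal_error)?` instead of repeating the closure.
fn internal_error(e: impl ToString) -> ttrpc::Error {
    ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())
}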
@@ -1033,6 +1061,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::AddARPNeighborsRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "add_arp_neighbors", req);
is_allowed!(req);

let neighs = req
.neighbors
@@ -1066,6 +1095,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
ctx: &TtrpcContext,
req: protocols::agent::OnlineCPUMemRequest,
) -> ttrpc::Result<Empty> {
is_allowed!(req);
let s = Arc::clone(&self.sandbox);
let sandbox = s.lock().await;
trace_rpc_call!(ctx, "online_cpu_mem", req);
@@ -1083,6 +1113,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::ReseedRandomDevRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "reseed_random_dev", req);
is_allowed!(req);

random::reseed_rng(req.data.as_slice())
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
@@ -1096,6 +1127,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::GuestDetailsRequest,
) -> ttrpc::Result<GuestDetailsResponse> {
trace_rpc_call!(ctx, "get_guest_details", req);
is_allowed!(req);

info!(sl!(), "get guest details!");
let mut resp = GuestDetailsResponse::new();
@@ -1124,6 +1156,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::MemHotplugByProbeRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "mem_hotplug_by_probe", req);
is_allowed!(req);

do_mem_hotplug_by_probe(&req.memHotplugProbeAddr)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
@@ -1137,6 +1170,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::SetGuestDateTimeRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "set_guest_date_time", req);
is_allowed!(req);

do_set_guest_date_time(req.Sec, req.Usec)
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
@@ -1150,6 +1184,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::CopyFileRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "copy_file", req);
is_allowed!(req);

do_copy_file(&req).map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

@@ -1162,6 +1197,7 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::GetMetricsRequest,
) -> ttrpc::Result<Metrics> {
trace_rpc_call!(ctx, "get_metrics", req);
is_allowed!(req);

match get_metrics(&req) {
Err(e) => Err(ttrpc_error(ttrpc::Code::INTERNAL, e.to_string())),
@@ -1176,8 +1212,9 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
async fn get_oom_event(
&self,
_ctx: &TtrpcContext,
_req: protocols::agent::GetOOMEventRequest,
req: protocols::agent::GetOOMEventRequest,
) -> ttrpc::Result<OOMEvent> {
is_allowed!(req);
let sandbox = self.sandbox.clone();
let s = sandbox.lock().await;
let event_rx = &s.event_rx.clone();
@@ -1203,8 +1240,11 @@ impl protocols::agent_ttrpc::AgentService for AgentService {
req: protocols::agent::AddSwapRequest,
) -> ttrpc::Result<Empty> {
trace_rpc_call!(ctx, "add_swap", req);
is_allowed!(req);

do_add_swap(&req).map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
do_add_swap(&self.sandbox, &req)
.await
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

Ok(Empty::new())
}
@@ -1269,11 +1309,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
match stat::stat(SYSFS_MEMORY_HOTPLUG_PROBE_PATH) {
Ok(_) => plug = true,
Err(e) => {
info!(
sl!(),
"hotplug memory error: {}",
e.as_errno().unwrap().desc()
);
info!(sl!(), "hotplug memory error: {:?}", e);
match e {
nix::Error::Sys(errno) => match errno {
Errno::ENOENT => plug = false,
@@ -1288,11 +1324,19 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
Ok((size, plug))
}

pub fn have_seccomp() -> bool {
if cfg!(feature = "seccomp") {
return true;
}

false
}
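Since cfg!(feature = "seccomp") expands to a compile-time boolean, the new have_seccomp above could equivalently be written as a single expression; behaviour is identical:

pub fn have_seccomp() -> bool {
    cfg!(feature = "seccomp")
}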

fn get_agent_details() -> AgentDetails {
let mut detail = AgentDetails::new();

detail.set_version(AGENT_VERSION.to_string());
detail.set_supports_seccomp(false);
detail.set_supports_seccomp(have_seccomp());
detail.init_daemon = unistd::getpid() == Pid::from_raw(1);

detail.device_handlers = RepeatedField::new();
@@ -1321,27 +1365,7 @@ async fn read_stream(reader: Arc<Mutex<ReadHalf<PipeStream>>>, l: usize) -> Resu
Ok(content)
}

fn find_process<'a>(
sandbox: &'a mut Sandbox,
cid: &'a str,
eid: &'a str,
init: bool,
) -> Result<&'a mut Process> {
let ctr = sandbox
.get_container(cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;

if init || eid.is_empty() {
return ctr
.processes
.get_mut(&ctr.init_process_pid)
.ok_or_else(|| anyhow!("cannot find init process!"));
}

ctr.get_process(eid).map_err(|_| anyhow!("Invalid exec id"))
}

pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> TtrpcServer {
pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> Result<TtrpcServer> {
let agent_service = Box::new(AgentService { sandbox: s })
as Box<dyn protocols::agent_ttrpc::AgentService + Send + Sync>;

@@ -1356,14 +1380,13 @@ pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> TtrpcServer {
let hservice = protocols::health_ttrpc::create_health(health_worker);

let server = TtrpcServer::new()
.bind(server_address)
.unwrap()
.bind(server_address)?
.register_service(aservice)
.register_service(hservice);

info!(sl!(), "ttRPC server started"; "address" => server_address);

server
Ok(server)
}

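With start now returning Result<TtrpcServer>, a bind failure propagates to the caller instead of panicking inside the agent. A hedged usage sketch (the caller shape is assumed, not shown in this diff):

// Propagate a failed bind as a regular error instead of a panic.
let server = start(sandbox.clone(), &server_address)
    .map_err(|e| anyhow!("failed to start ttRPC server: {:?}", e))?;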
// This function updates the container namespaces configuration based on the
@@ -1408,24 +1431,28 @@ fn update_container_namespaces(
// the create_sandbox request or create_container request.
// Else set this to empty string so that a new pid namespace is
// created for the container.
if sandbox_pidns && sandbox.sandbox_pidns.is_some() {
pid_ns.path = String::from(sandbox.sandbox_pidns.as_ref().unwrap().path.as_str());
if sandbox_pidns {
if let Some(ref pidns) = &sandbox.sandbox_pidns {
pid_ns.path = String::from(pidns.path.as_str());
} else {
return Err(anyhow!("failed to get sandbox pidns"));
}
}

linux.namespaces.push(pid_ns);
Ok(())
}

fn append_guest_hooks(s: &Sandbox, oci: &mut Spec) {
if s.hooks.is_none() {
return;
fn append_guest_hooks(s: &Sandbox, oci: &mut Spec) -> Result<()> {
if let Some(ref guest_hooks) = s.hooks {
let mut hooks = oci.hooks.take().unwrap_or_default();
hooks.prestart.append(&mut guest_hooks.prestart.clone());
hooks.poststart.append(&mut guest_hooks.poststart.clone());
hooks.poststop.append(&mut guest_hooks.poststop.clone());
oci.hooks = Some(hooks);
}
let guest_hooks = s.hooks.as_ref().unwrap();
let mut hooks = oci.hooks.take().unwrap_or_default();
hooks.prestart.append(&mut guest_hooks.prestart.clone());
hooks.poststart.append(&mut guest_hooks.poststart.clone());
hooks.poststop.append(&mut guest_hooks.poststop.clone());
oci.hooks = Some(hooks);

Ok(())
}

// Check if the container process installed the
@@ -1515,7 +1542,7 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
PathBuf::from("/")
};

fs::create_dir_all(dir.to_str().unwrap()).or_else(|e| {
fs::create_dir_all(&dir).or_else(|e| {
if e.kind() != std::io::ErrorKind::AlreadyExists {
return Err(e);
}
@@ -1523,10 +1550,7 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
Ok(())
})?;

std::fs::set_permissions(
dir.to_str().unwrap(),
std::fs::Permissions::from_mode(req.dir_mode),
)?;
std::fs::set_permissions(&dir, std::fs::Permissions::from_mode(req.dir_mode))?;

let mut tmpfile = path.clone();
tmpfile.set_extension("tmp");
@@ -1535,10 +1559,10 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
.write(true)
.create(true)
.truncate(false)
.open(tmpfile.to_str().unwrap())?;
.open(&tmpfile)?;

file.write_all_at(req.data.as_slice(), req.offset as u64)?;
let st = stat::stat(tmpfile.to_str().unwrap())?;
let st = stat::stat(&tmpfile)?;

if st.st_size != req.file_size {
return Ok(());
@@ -1547,7 +1571,7 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
file.set_permissions(std::fs::Permissions::from_mode(req.file_mode))?;

unistd::chown(
tmpfile.to_str().unwrap(),
&tmpfile,
Some(Uid::from_raw(req.uid as u32)),
Some(Gid::from_raw(req.gid as u32)),
)?;
@@ -1557,43 +1581,13 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
Ok(())
}

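A recurring cleanup in the do_copy_file hunks above: std::fs and nix path APIs accept AsRef<Path>-style arguments, so a &PathBuf can be passed directly and the fallible to_str().unwrap() round-trip disappears. A simplified illustration of the difference:

// Before: panics if the path is not valid UTF-8
fs::create_dir_all(dir.to_str().unwrap())?;
// After: works for any valid path, no unwrap needed
fs::create_dir_all(&dir)?;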
pub fn path_name_lookup<P: std::clone::Clone + AsRef<Path> + std::fmt::Debug>(
path: P,
lookup: &str,
) -> Result<(PathBuf, String)> {
for entry in fs::read_dir(path.clone())? {
let entry = entry?;
if let Some(name) = entry.path().file_name() {
if let Some(name) = name.to_str() {
if Some(0) == name.find(lookup) {
return Ok((entry.path(), name.to_string()));
}
}
}
}
Err(anyhow!("cannot get {} dir in {:?}", lookup, path))
}

fn do_add_swap(req: &AddSwapRequest) -> Result<()> {
// re-scan PCI bus
// looking for hidden devices
rescan_pci_bus().context("Could not rescan PCI bus")?;

async fn do_add_swap(sandbox: &Arc<Mutex<Sandbox>>, req: &AddSwapRequest) -> Result<()> {
let mut slots = Vec::new();
for slot in &req.PCIPath {
slots.push(pci::Slot::new(*slot as u8)?);
slots.push(pci::SlotFn::new(*slot, 0)?);
}
let pcipath = pci::Path::new(slots)?;
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
let sysfs_rel_path = format!(
"{}{}",
root_bus_sysfs,
pcipath_to_sysfs(&root_bus_sysfs, &pcipath)?
);
let (mut virtio_path, _) = path_name_lookup(sysfs_rel_path, "virtio")?;
virtio_path.push("block");
let (_, dev_name) = path_name_lookup(virtio_path, "vd")?;
let dev_name = format!("/dev/{}", dev_name);
let dev_name = get_virtio_blk_pci_device_name(sandbox, &pcipath).await?;

let c_str = CString::new(dev_name)?;
let ret = unsafe { libc::swapon(c_str.as_ptr() as *const c_char, 0) };
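libc::swapon, used above, is the raw syscall wrapper for enabling a swap device. A minimal standalone sketch of the same call (the function name and flag choice are illustrative):

use std::ffi::CString;

// Enable `dev` (e.g. "/dev/vdb") as swap; flags 0 requests no priority
// or discard options.
fn enable_swap(dev: &str) -> std::io::Result<()> {
    let c_dev = CString::new(dev)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;

    if unsafe { libc::swapon(c_dev.as_ptr(), 0) } == -1 {
        return Err(std::io::Error::last_os_error());
    }

    Ok(())
}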
@@ -1614,74 +1608,54 @@ fn do_add_swap(req: &AddSwapRequest) -> Result<()> {
// - container rootfs bind mounted at /<CONTAINER_BASE>/<cid>/rootfs
// - modify container spec root to point to /<CONTAINER_BASE>/<cid>/rootfs
fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
if spec.root.is_none() {
let spec_root = if let Some(sr) = &spec.root {
sr
} else {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}
let spec_root = spec.root.as_ref().unwrap();
};

let spec_root_path = Path::new(&spec_root.path);

let bundle_path = Path::new(CONTAINER_BASE).join(cid);
let config_path = bundle_path.join("config.json");
let rootfs_path = bundle_path.join("rootfs");

fs::create_dir_all(&rootfs_path)?;
BareMount::new(
&spec_root.path,
rootfs_path.to_str().unwrap(),
baremount(
spec_root_path,
&rootfs_path,
"bind",
MsFlags::MS_BIND,
"",
&sl!(),
)
.mount()?;
)?;

let rootfs_path_name = rootfs_path
.to_str()
.ok_or_else(|| anyhow!("failed to convert rootfs to unicode"))?
.to_string();

spec.root = Some(Root {
path: rootfs_path.to_str().unwrap().to_owned(),
path: rootfs_path_name,
readonly: spec_root.readonly,
});

info!(
sl!(),
"{:?}",
spec.process.as_ref().unwrap().console_size.as_ref()
let _ = spec.save(
config_path
.to_str()
.ok_or_else(|| anyhow!("cannot convert path to unicode"))?,
);
let _ = spec.save(config_path.to_str().unwrap());

let olddir = unistd::getcwd().context("cannot getcwd")?;
unistd::chdir(bundle_path.to_str().unwrap())?;
unistd::chdir(
bundle_path
.to_str()
.ok_or_else(|| anyhow!("cannot convert bundle path to unicode"))?,
)?;

Ok(olddir)
}

fn cleanup_process(p: &mut Process) -> Result<()> {
if p.parent_stdin.is_some() {
p.close_stream(StreamType::ParentStdin);
unistd::close(p.parent_stdin.unwrap())?;
}

if p.parent_stdout.is_some() {
p.close_stream(StreamType::ParentStdout);
unistd::close(p.parent_stdout.unwrap())?;
}

if p.parent_stderr.is_some() {
p.close_stream(StreamType::ParentStderr);
unistd::close(p.parent_stderr.unwrap())?;
}

if p.term_master.is_some() {
p.close_stream(StreamType::TermMaster);
unistd::close(p.term_master.unwrap())?;
}

p.notify_term_close();

p.parent_stdin = None;
p.parent_stdout = None;
p.parent_stderr = None;
p.term_master = None;

Ok(())
}

fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
if module.name.is_empty() {
return Err(anyhow!("Kernel module name is empty"));
@@ -1710,8 +1684,8 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {

match status.code() {
Some(code) => {
let std_out: String = String::from_utf8(output.stdout).unwrap();
let std_err: String = String::from_utf8(output.stderr).unwrap();
let std_out = String::from_utf8_lossy(&output.stdout);
let std_err = String::from_utf8_lossy(&output.stderr);
let msg = format!(
"load_kernel_module return code: {} stdout:{} stderr:{}",
code, std_out, std_err
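The from_utf8_lossy change above makes this error path panic-free: invalid UTF-8 in the module tool's output is replaced with U+FFFD instead of aborting the agent through unwrap(). For example:

// Invalid bytes become the replacement character rather than a panic.
assert_eq!(String::from_utf8_lossy(&[0x68, 0x69, 0xff]), "hi\u{FFFD}");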
@@ -1774,7 +1748,7 @@ mod tests {
let mut oci = Spec {
..Default::default()
};
append_guest_hooks(&s, &mut oci);
append_guest_hooks(&s, &mut oci).unwrap();
assert_eq!(s.hooks, oci.hooks);
}


@@ -226,6 +226,21 @@ impl Sandbox {
None
}

pub fn find_container_process(&mut self, cid: &str, eid: &str) -> Result<&mut Process> {
let ctr = self
.get_container(cid)
.ok_or_else(|| anyhow!("Invalid container id"))?;

if eid.is_empty() {
return ctr
.processes
.get_mut(&ctr.init_process_pid)
.ok_or_else(|| anyhow!("cannot find init process!"));
}

ctr.get_process(eid).map_err(|_| anyhow!("Invalid exec id"))
}

#[instrument]
pub async fn destroy(&mut self) -> Result<()> {
for ctr in self.containers.values_mut() {
@@ -449,23 +464,30 @@ fn online_memory(logger: &Logger) -> Result<()> {
#[cfg(test)]
mod tests {
use super::Sandbox;
use crate::{mount::BareMount, skip_if_not_root};
use anyhow::Error;
use crate::{mount::baremount, skip_if_not_root};
use anyhow::{anyhow, Error};
use nix::mount::MsFlags;
use oci::{Linux, Root, Spec};
use rustjail::container::LinuxContainer;
use rustjail::process::Process;
use rustjail::specconv::CreateOpts;
use slog::Logger;
use std::fs::{self, File};
use std::os::unix::fs::PermissionsExt;
use tempfile::Builder;
use std::path::Path;
use tempfile::{tempdir, Builder, TempDir};

fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), Error> {
let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", logger);
baremount.mount()
let src_path = Path::new(src);
let dst_path = Path::new(dst);

baremount(src_path, dst_path, "bind", MsFlags::MS_BIND, "", logger)
}

use serial_test::serial;

#[tokio::test]
#[serial]
async fn set_sandbox_storage() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
@@ -500,6 +522,7 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn remove_sandbox_storage() {
skip_if_not_root!();

@@ -556,6 +579,7 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn unset_and_remove_sandbox_storage() {
skip_if_not_root!();

@@ -607,6 +631,7 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn unset_sandbox_storage() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
@@ -679,22 +704,31 @@ mod tests {
}
}

fn create_linuxcontainer() -> LinuxContainer {
LinuxContainer::new(
"some_id",
"/run/agent",
create_dummy_opts(),
&slog_scope::logger(),
fn create_linuxcontainer() -> (LinuxContainer, TempDir) {
// Create a temporary directory
let dir = tempdir()
.map_err(|e| anyhow!(e).context("tempdir failed"))
.unwrap();

// Create a new container
(
LinuxContainer::new(
"some_id",
dir.path().join("rootfs").to_str().unwrap(),
create_dummy_opts(),
&slog_scope::logger(),
)
.unwrap(),
dir,
)
.unwrap()
}

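Returning the TempDir alongside the container is what keeps these tests sound: TempDir removes its directory when dropped, so callers must keep it bound (even as the unused _root seen below) for the lifetime of the test. A usage sketch:

// Keep `_root` alive until the end of the test; binding with a bare `_`
// would drop the TempDir immediately and delete the fresh rootfs.
let (linux_container, _root) = create_linuxcontainer();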
#[tokio::test]
#[serial]
async fn get_container_entry_exist() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let linux_container = create_linuxcontainer();
let (linux_container, _root) = create_linuxcontainer();

s.containers
.insert("testContainerID".to_string(), linux_container);
@@ -703,6 +737,7 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn get_container_no_entry() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
@@ -712,24 +747,24 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn add_and_get_container() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let linux_container = create_linuxcontainer();
let (linux_container, _root) = create_linuxcontainer();

s.add_container(linux_container);
assert!(s.get_container("some_id").is_some());
}

#[tokio::test]
#[serial]
async fn update_shared_pidns() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let test_pid = 9999;

let mut linux_container = create_linuxcontainer();
let (mut linux_container, _root) = create_linuxcontainer();
linux_container.init_process_pid = test_pid;

s.update_shared_pidns(&linux_container).unwrap();
@@ -741,6 +776,7 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn add_guest_hooks() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
@@ -764,10 +800,56 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn test_sandbox_set_destroy() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let ret = s.destroy().await;
assert!(ret.is_ok());
}

#[tokio::test]
async fn test_find_container_process() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let cid = "container-123";

let (mut linux_container, _root) = create_linuxcontainer();
linux_container.init_process_pid = 1;
linux_container.id = cid.to_string();
// add init process
linux_container.processes.insert(
1,
Process::new(&logger, &oci::Process::default(), "1", true, 1).unwrap(),
);
// add exec process
linux_container.processes.insert(
123,
Process::new(&logger, &oci::Process::default(), "exec-123", false, 1).unwrap(),
);

s.add_container(linux_container);

// empty exec-id will return init process
let p = s.find_container_process(cid, "");
assert!(p.is_ok(), "Expecting Ok, Got {:?}", p);
let p = p.unwrap();
assert_eq!("1", p.exec_id, "exec_id should be 1");
assert!(p.init, "init flag should be true");

// an existing exec-id will return the exec process
let p = s.find_container_process(cid, "exec-123");
assert!(p.is_ok(), "Expecting Ok, Got {:?}", p);
let p = p.unwrap();
assert_eq!("exec-123", p.exec_id, "exec_id should be exec-123");
assert!(!p.init, "init flag should be false");

// a non-existent exec-id will return an error
let p = s.find_container_process(cid, "exec-456");
assert!(p.is_err(), "Expecting Error, Got {:?}", p);

// container does not exist
let p = s.find_container_process("not-exist-cid", "");
assert!(p.is_err(), "Expecting Error, Got {:?}", p);
}
}

@@ -3,60 +3,17 @@
// SPDX-License-Identifier: Apache-2.0
//

use crate::config::AgentConfig;
use anyhow::Result;
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::{global, sdk::trace::Config, trace::TracerProvider};
use slog::{info, o, Logger};
use std::collections::HashMap;
use std::error::Error;
use std::fmt;
use std::str::FromStr;
use tracing_opentelemetry::OpenTelemetryLayer;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::Registry;
use ttrpc::r#async::TtrpcContext;

#[derive(Debug, PartialEq)]
pub enum TraceType {
Disabled,
Isolated,
}

#[derive(Debug)]
pub struct TraceTypeError {
details: String,
}

impl TraceTypeError {
fn new(msg: &str) -> TraceTypeError {
TraceTypeError {
details: msg.into(),
}
}
}

impl Error for TraceTypeError {}

impl fmt::Display for TraceTypeError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.details)
}
}

impl FromStr for TraceType {
type Err = TraceTypeError;

fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"isolated" => Ok(TraceType::Isolated),
"disabled" => Ok(TraceType::Disabled),
_ => Err(TraceTypeError::new("invalid trace type")),
}
}
}

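The FromStr implementation above lets configuration strings parse directly into TraceType; a usage sketch:

let trace_type: TraceType = "isolated".parse().expect("known trace type");
assert_eq!(trace_type, TraceType::Isolated);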
pub fn setup_tracing(name: &'static str, logger: &Logger, _agent_cfg: &AgentConfig) -> Result<()> {
pub fn setup_tracing(name: &'static str, logger: &Logger) -> Result<()> {
let logger = logger.new(o!("subsystem" => "vsock-tracer"));

let exporter = vsock_exporter::Exporter::builder()

@@ -97,10 +97,18 @@ impl Uevent {
})
}

#[instrument]
async fn process_remove(&self, logger: &Logger, sandbox: &Arc<Mutex<Sandbox>>) {
let mut sb = sandbox.lock().await;
sb.uevent_map.remove(&self.devpath);
}

#[instrument]
async fn process(&self, logger: &Logger, sandbox: &Arc<Mutex<Sandbox>>) {
if self.action == U_EVENT_ACTION_ADD {
return self.process_add(logger, sandbox).await;
} else if self.action == U_EVENT_ACTION_REMOVE {
return self.process_remove(logger, sandbox).await;
}
debug!(*logger, "ignoring event"; "uevent" => format!("{:?}", self));
}
@@ -111,10 +119,13 @@ pub async fn wait_for_uevent(
sandbox: &Arc<Mutex<Sandbox>>,
matcher: impl UeventMatcher,
) -> Result<Uevent> {
let logprefix = format!("Waiting for {:?}", &matcher);

info!(sl!(), "{}", logprefix);
let mut sb = sandbox.lock().await;
for uev in sb.uevent_map.values() {
if matcher.is_match(uev) {
info!(sl!(), "Device {:?} found in device map", uev);
info!(sl!(), "{}: found {:?} in uevent map", logprefix, &uev);
return Ok(uev.clone());
}
}
@@ -129,7 +140,8 @@ pub async fn wait_for_uevent(
sb.uevent_watchers.push(Some((Box::new(matcher), tx)));
drop(sb); // unlock

info!(sl!(), "Waiting on channel for uevent notification\n");
info!(sl!(), "{}: waiting on channel", logprefix);

let hotplug_timeout = AGENT_CONFIG.read().await.hotplug_timeout;

let uev = match tokio::time::timeout(hotplug_timeout, rx).await {
@@ -146,6 +158,7 @@ pub async fn wait_for_uevent(
}
};

info!(sl!(), "{}: found {:?} on channel", logprefix, &uev);
Ok(uev)
}


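The hotplug wait above is the standard tokio::time::timeout wrapped around a oneshot receiver, where both an elapsed deadline and a dropped sender surface as errors. A minimal hedged sketch of that pattern, outside this codebase:

use anyhow::anyhow;
use tokio::sync::oneshot;
use tokio::time::{timeout, Duration};

async fn wait_with_timeout(rx: oneshot::Receiver<u32>) -> anyhow::Result<u32> {
    match timeout(Duration::from_secs(3), rx).await {
        Ok(Ok(v)) => Ok(v),                           // value arrived in time
        Ok(Err(_)) => Err(anyhow!("sender dropped")), // channel closed early
        Err(_) => Err(anyhow!("timed out")),          // deadline elapsed
    }
}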
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::Result;
use anyhow::{anyhow, Result};
use futures::StreamExt;
use std::io;
use std::io::ErrorKind;
@@ -64,8 +64,12 @@ pub fn get_vsock_incoming(fd: RawFd) -> Incoming {

#[instrument]
pub async fn get_vsock_stream(fd: RawFd) -> Result<VsockStream> {
let stream = get_vsock_incoming(fd).next().await.unwrap()?;
Ok(stream)
let stream = get_vsock_incoming(fd)
.next()
.await
.ok_or_else(|| anyhow!("cannot handle incoming vsock connection"))?;

Ok(stream?)
}

#[cfg(test)]
@@ -124,7 +128,9 @@ mod tests {

let mut vec_locked = vec_ref.lock();

let v = vec_locked.as_deref_mut().unwrap();
let v = vec_locked
.as_deref_mut()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;

std::io::Write::flush(v)
}

@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//

#![allow(clippy::unknown_clippy_lints)]
#![allow(unknown_lints)]

use std::collections::HashMap;
use std::path::{Path, PathBuf};
@@ -20,7 +20,7 @@ use tokio::sync::Mutex;
use tokio::task;
use tokio::time::{self, Duration};

use crate::mount::BareMount;
use crate::mount::baremount;
use crate::protocols::agent as protos;

/// The maximum number of file system entries agent will watch for each mount.
@@ -49,7 +49,7 @@ struct Storage {
/// the source becomes too large, either in number of files (>16) or total size (>1MB).
watch: bool,

/// The list of files to watch from the source mount point and updated in the target one.
/// The list of files, directories, symlinks to watch from the source mount point and updated in the target one.
watched_files: HashMap<PathBuf, SystemTime>,
}

@@ -79,6 +79,20 @@ impl Drop for Storage {
}
}

async fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
if fs::symlink_metadata(&from).await?.file_type().is_symlink() {
// if source is a symlink, create new symlink with same link source. If
// the symlink exists, remove and create new one:
if fs::symlink_metadata(&to).await.is_ok() {
fs::remove_file(&to).await?;
}
fs::symlink(fs::read_link(&from).await?, &to).await?;
} else {
fs::copy(from, to).await?;
}
Ok(())
}

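The new copy helper above is what lets the watcher mirror Kubernetes-style configmap layouts: tokio::fs::symlink recreates the link with its original (possibly relative) target instead of copying the pointed-to bytes, so a later retarget of the ..data link is observed as a single-entry change. A hedged usage sketch (paths are illustrative):

// Mirroring a relative symlink preserves the indirection at the destination:
// src/..data -> ..2021_10_29 becomes dst/..data -> ..2021_10_29
copy("src/..data", "dst/..data").await?;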
impl Storage {
async fn new(storage: protos::Storage) -> Result<Storage> {
let entry = Storage {
@@ -93,6 +107,16 @@ impl Storage {
async fn update_target(&self, logger: &Logger, source_path: impl AsRef<Path>) -> Result<()> {
let source_file_path = source_path.as_ref();

// if we are creating a directory: just create it, nothing more to do
if source_file_path.symlink_metadata()?.file_type().is_dir() {
fs::create_dir_all(source_file_path)
.await
.with_context(|| {
format!("Unable to mkdir all for {}", source_file_path.display())
})?
}

// Assume we are dealing with either a file or a symlink now:
let dest_file_path = if self.source_mount_point.is_file() {
// Simple file to file copy
// Assume target mount is a file path
@@ -110,19 +134,13 @@ impl Storage {
dest_file_path
};

debug!(
logger,
"Copy from {} to {}",
source_file_path.display(),
dest_file_path.display()
);
fs::copy(&source_file_path, &dest_file_path)
copy(&source_file_path, &dest_file_path)
.await
.with_context(|| {
format!(
"Copy from {} to {} failed",
source_file_path.display(),
dest_file_path.display()
dest_file_path.display(),
)
})?;

@@ -135,7 +153,7 @@ impl Storage {
let mut remove_list = Vec::new();
let mut updated_files: Vec<PathBuf> = Vec::new();

// Remove deleted files for tracking list
// Remove deleted files for tracking list.
self.watched_files.retain(|st, _| {
if st.exists() {
true
@@ -147,10 +165,19 @@ impl Storage {

// Delete from target
for path in remove_list {
// File has been deleted, remove it from target mount
let target = self.make_target_path(path)?;
debug!(logger, "Removing file from mount: {}", target.display());
let _ = fs::remove_file(target).await;
// The target may be a directory or a file. If it is a directory that is removed,
// we'll remove all files under that directory as well. Because of this, there's a
// chance the target (a subdirectory or file under a prior removed target) was already
// removed. Make sure we check if the target exists before checking the metadata, and
// don't return an error if the remove fails
if target.exists() && target.symlink_metadata()?.file_type().is_dir() {
debug!(logger, "Removing a directory: {}", target.display());
let _ = fs::remove_dir_all(target).await;
} else {
debug!(logger, "Removing a file: {}", target.display());
let _ = fs::remove_file(target).await;
}
}

// Scan new & changed files
@@ -182,25 +209,18 @@ impl Storage {
let mut size: u64 = 0;
debug!(logger, "Scanning path: {}", path.display());

if path.is_file() {
let metadata = path
.metadata()
.with_context(|| format!("Failed to query metadata for: {}", path.display()))?;
let metadata = path
.symlink_metadata()
.with_context(|| format!("Failed to query metadata for: {}", path.display()))?;

let modified = metadata
.modified()
.with_context(|| format!("Failed to get modified date for: {}", path.display()))?;
let modified = metadata
.modified()
.with_context(|| format!("Failed to get modified date for: {}", path.display()))?;

// Treat files and symlinks the same:
if path.is_file() || metadata.file_type().is_symlink() {
size += metadata.len();

ensure!(
self.watched_files.len() <= MAX_ENTRIES_PER_STORAGE,
WatcherError::MountTooManyFiles {
count: self.watched_files.len(),
mnt: self.source_mount_point.display().to_string()
}
);

// Insert will return old entry if any
if let Some(old_st) = self.watched_files.insert(path.to_path_buf(), modified) {
if modified > old_st {
@@ -211,7 +231,25 @@ impl Storage {
debug!(logger, "New entry: {}", path.display());
update_list.push(PathBuf::from(&path))
}

ensure!(
self.watched_files.len() <= MAX_ENTRIES_PER_STORAGE,
WatcherError::MountTooManyFiles {
count: self.watched_files.len(),
mnt: self.source_mount_point.display().to_string()
}
);
} else {
// Handling regular directories - check to see if this directory is already being tracked, and
// track if not:
if self
.watched_files
.insert(path.to_path_buf(), modified)
.is_none()
{
update_list.push(path.to_path_buf());
}

// Scan dir recursively
let mut entries = fs::read_dir(path)
.await
@@ -269,6 +307,19 @@ impl SandboxStorages {
let entry = Storage::new(storage)
.await
.with_context(|| "Failed to add storage")?;

// If the storage source is a directory, let's create the target mount point:
if entry.source_mount_point.as_path().is_dir() {
fs::create_dir_all(&entry.target_mount_point)
.await
.with_context(|| {
format!(
"Unable to mkdir all for {}",
entry.target_mount_point.display()
)
})?;
}

self.0.push(entry);
}

@@ -314,16 +365,14 @@ impl SandboxStorages {
}
}

match BareMount::new(
entry.source_mount_point.to_str().unwrap(),
entry.target_mount_point.to_str().unwrap(),
match baremount(
entry.source_mount_point.as_path(),
entry.target_mount_point.as_path(),
"bind",
MsFlags::MS_BIND,
"bind",
logger,
)
.mount()
{
) {
Ok(_) => {
entry.watch = false;
info!(logger, "watchable mount replaced with bind mount")
@@ -427,15 +476,14 @@ impl BindWatcher {
async fn mount(&self, logger: &Logger) -> Result<()> {
fs::create_dir_all(WATCH_MOUNT_POINT_PATH).await?;

BareMount::new(
"tmpfs",
WATCH_MOUNT_POINT_PATH,
baremount(
Path::new("tmpfs"),
Path::new(WATCH_MOUNT_POINT_PATH),
"tmpfs",
MsFlags::empty(),
"",
logger,
)
.mount()?;
)?;

Ok(())
}
@@ -475,6 +523,85 @@ mod tests {
Ok((storage, src_path))
}

#[tokio::test]
async fn test_empty_sourcedir_check() {
//skip_if_not_root!();
let dir = tempfile::tempdir().expect("failed to create tempdir");

let logger = slog::Logger::root(slog::Discard, o!());

let src_path = dir.path().join("src");
let dest_path = dir.path().join("dest");
let src_filename = src_path.to_str().expect("failed to create src filename");
let dest_filename = dest_path.to_str().expect("failed to create dest filename");

std::fs::create_dir_all(src_filename).expect("failed to create path");

let storage = protos::Storage {
source: src_filename.to_string(),
mount_point: dest_filename.to_string(),
..Default::default()
};

let mut entries = SandboxStorages {
..Default::default()
};

entries
.add(std::iter::once(storage), &logger)
.await
.unwrap();

assert!(entries.check(&logger).await.is_ok());
assert_eq!(entries.0.len(), 1);

assert_eq!(std::fs::read_dir(src_path).unwrap().count(), 0);
assert_eq!(std::fs::read_dir(dest_path).unwrap().count(), 0);
assert_eq!(std::fs::read_dir(dir.path()).unwrap().count(), 2);
}

#[tokio::test]
async fn test_single_file_check() {
//skip_if_not_root!();
let dir = tempfile::tempdir().expect("failed to create tempdir");

let logger = slog::Logger::root(slog::Discard, o!());

let src_file_path = dir.path().join("src.txt");
let dest_file_path = dir.path().join("dest.txt");

let src_filename = src_file_path
.to_str()
.expect("failed to create src filename");
let dest_filename = dest_file_path
.to_str()
.expect("failed to create dest filename");

let storage = protos::Storage {
source: src_filename.to_string(),
mount_point: dest_filename.to_string(),
..Default::default()
};

//create file
fs::write(src_file_path, "original").unwrap();

let mut entries = SandboxStorages::default();

entries
.add(std::iter::once(storage), &logger)
.await
.unwrap();

assert!(entries.check(&logger).await.is_ok());
assert_eq!(entries.0.len(), 1);

// there should only be 2 files
assert_eq!(std::fs::read_dir(dir.path()).unwrap().count(), 2);

assert_eq!(fs::read_to_string(dest_file_path).unwrap(), "original");
}

#[tokio::test]
async fn test_watch_entries() {
skip_if_not_root!();
@@ -523,7 +650,7 @@ mod tests {
.unwrap();

// setup storage3: many files, but still watchable
for i in 1..MAX_ENTRIES_PER_STORAGE + 1 {
for i in 1..MAX_ENTRIES_PER_STORAGE {
fs::write(src3_path.join(format!("{}.txt", i)), "original").unwrap();
}

@@ -533,6 +660,9 @@ mod tests {
..Default::default()
};

// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

entries
.add(std::iter::once(storage0), &logger)
.await
@@ -585,7 +715,7 @@ mod tests {
std::fs::read_dir(entries.0[3].target_mount_point.as_path())
.unwrap()
.count(),
MAX_ENTRIES_PER_STORAGE
MAX_ENTRIES_PER_STORAGE - 1
);

// Add two files to storage 0, verify it is updated without needing to run check:
@@ -603,6 +733,9 @@ mod tests {
"updated"
);

// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

//
// Prepare for second check: update mount sources
//
@@ -655,7 +788,7 @@ mod tests {
std::fs::read_dir(entries.0[3].target_mount_point.as_path())
.unwrap()
.count(),
MAX_ENTRIES_PER_STORAGE + 1
MAX_ENTRIES_PER_STORAGE
);

// verify that we can remove files as well, but that it isn't observed until check is run
@@ -733,15 +866,20 @@ mod tests {
fs::remove_file(source_dir.path().join("big.txt")).unwrap();
fs::remove_file(source_dir.path().join("too-big.txt")).unwrap();

// Up to 16 files should be okay:
for i in 1..MAX_ENTRIES_PER_STORAGE + 1 {
assert_eq!(entry.scan(&logger).await.unwrap(), 0);

// Up to 15 files should be okay (can watch 15 files + 1 directory)
for i in 1..MAX_ENTRIES_PER_STORAGE {
fs::write(source_dir.path().join(format!("{}.txt", i)), "original").unwrap();
}

assert_eq!(entry.scan(&logger).await.unwrap(), MAX_ENTRIES_PER_STORAGE);
assert_eq!(
entry.scan(&logger).await.unwrap(),
MAX_ENTRIES_PER_STORAGE - 1
);

// 17 files is too many:
fs::write(source_dir.path().join("17.txt"), "updated").unwrap();
// 16 files will be too many:
fs::write(source_dir.path().join("16.txt"), "updated").unwrap();
thread::sleep(Duration::from_secs(1));

// Expect to receive a MountTooManyFiles error
@@ -754,6 +892,180 @@ mod tests {
}
}

#[tokio::test]
async fn test_copy() {
// prepare tmp src/destination
let source_dir = tempfile::tempdir().unwrap();
let dest_dir = tempfile::tempdir().unwrap();

// verify copy of a regular file
let src_file = source_dir.path().join("file.txt");
let dst_file = dest_dir.path().join("file.txt");
fs::write(&src_file, "foo").unwrap();
copy(&src_file, &dst_file).await.unwrap();
// verify destination:
assert!(!fs::symlink_metadata(dst_file)
.unwrap()
.file_type()
.is_symlink());

// verify copy of a symlink
let src_symlink_file = source_dir.path().join("symlink_file.txt");
let dst_symlink_file = dest_dir.path().join("symlink_file.txt");
tokio::fs::symlink(&src_file, &src_symlink_file)
.await
.unwrap();
copy(src_symlink_file, &dst_symlink_file).await.unwrap();
// verify destination:
assert!(fs::symlink_metadata(&dst_symlink_file)
.unwrap()
.file_type()
.is_symlink());
assert_eq!(fs::read_link(&dst_symlink_file).unwrap(), src_file);
assert_eq!(fs::read_to_string(&dst_symlink_file).unwrap(), "foo")
}

#[tokio::test]
async fn watch_directory_verify_dir_removal() {
let source_dir = tempfile::tempdir().unwrap();
let dest_dir = tempfile::tempdir().unwrap();

let mut entry = Storage::new(protos::Storage {
source: source_dir.path().display().to_string(),
mount_point: dest_dir.path().display().to_string(),
..Default::default()
})
.await
.unwrap();
let logger = slog::Logger::root(slog::Discard, o!());

// create a path we'll remove later
fs::create_dir_all(source_dir.path().join("tmp")).unwrap();
fs::write(&source_dir.path().join("tmp/test-file"), "foo").unwrap();
assert_eq!(entry.scan(&logger).await.unwrap(), 3); // root, ./tmp, test-file

// Verify expected directory, file:
assert_eq!(
std::fs::read_dir(dest_dir.path().join("tmp"))
.unwrap()
.count(),
1
);
assert_eq!(std::fs::read_dir(&dest_dir).unwrap().count(), 1);

// Now, remove directory, and verify that the directory (and its file) are removed:
fs::remove_dir_all(source_dir.path().join("tmp")).unwrap();
thread::sleep(Duration::from_secs(1));
assert_eq!(entry.scan(&logger).await.unwrap(), 0);

assert_eq!(std::fs::read_dir(&dest_dir).unwrap().count(), 0);

assert_eq!(entry.scan(&logger).await.unwrap(), 0);
}

#[tokio::test]
async fn watch_directory_with_symlinks() {
// Prepare source directory:
// ..2021_10_29_03_10_48.161654083/file.txt
// ..data -> ..2021_10_29_03_10_48.161654083
// file.txt -> ..data/file.txt

let source_dir = tempfile::tempdir().unwrap();
let actual_dir = source_dir.path().join("..2021_10_29_03_10_48.161654083");
let actual_file = actual_dir.join("file.txt");
let sym_dir = source_dir.path().join("..data");
let sym_file = source_dir.path().join("file.txt");

let relative_to_dir = PathBuf::from("..2021_10_29_03_10_48.161654083");

// create backing file/path
fs::create_dir_all(&actual_dir).unwrap();
fs::write(&actual_file, "two").unwrap();

// create indirection symlink directory that points to the directory that holds the actual file:
tokio::fs::symlink(&relative_to_dir, &sym_dir)
.await
.unwrap();

// create presented data file symlink:
tokio::fs::symlink(PathBuf::from("..data/file.txt"), sym_file)
.await
.unwrap();

let dest_dir = tempfile::tempdir().unwrap();

// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

let mut entry = Storage::new(protos::Storage {
source: source_dir.path().display().to_string(),
mount_point: dest_dir.path().display().to_string(),
..Default::default()
})
.await
.unwrap();

let logger = slog::Logger::root(slog::Discard, o!());

assert_eq!(entry.scan(&logger).await.unwrap(), 5);

// Should copy no files since nothing is changed since last check
assert_eq!(entry.scan(&logger).await.unwrap(), 0);

// now update the backing file and verify what is copied:
fs::write(actual_file, "updated").unwrap();

// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

assert_eq!(entry.scan(&logger).await.unwrap(), 1);

assert_eq!(
fs::read_to_string(dest_dir.path().join("file.txt")).unwrap(),
"updated"
);

// Verify that resulting file.txt is a symlink:
assert!(
tokio::fs::symlink_metadata(dest_dir.path().join("file.txt"))
.await
.unwrap()
.file_type()
.is_symlink()
);

// Verify that .data directory is a symlink:
assert!(tokio::fs::symlink_metadata(&dest_dir.path().join("..data"))
.await
.unwrap()
.file_type()
.is_symlink());

// Should copy no new files after copy happened
assert_eq!(entry.scan(&logger).await.unwrap(), 0);

// Now, simulate configmap update.
// - create a new actual dir/file,
// - update the symlink directory to point to this one
// - remove old dir/file
let new_actual_dir = source_dir.path().join("..2021_10_31");
let new_actual_file = new_actual_dir.join("file.txt");
fs::create_dir_all(&new_actual_dir).unwrap();
fs::write(&new_actual_file, "new configmap").unwrap();

tokio::fs::remove_file(&sym_dir).await.unwrap();
tokio::fs::symlink(PathBuf::from("..2021_10_31"), &sym_dir)
.await
.unwrap();
tokio::fs::remove_dir_all(&actual_dir).await.unwrap();

assert_eq!(entry.scan(&logger).await.unwrap(), 3); // file, file-dir, symlink
assert_eq!(
fs::read_to_string(dest_dir.path().join("file.txt")).unwrap(),
"new configmap"
);
}

#[tokio::test]
async fn watch_directory() {
// Prepare source directory:
@@ -764,6 +1076,9 @@ mod tests {
fs::create_dir_all(source_dir.path().join("A/B")).unwrap();
fs::write(source_dir.path().join("A/B/1.txt"), "two").unwrap();

// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

let dest_dir = tempfile::tempdir().unwrap();

let mut entry = Storage::new(protos::Storage {
@@ -776,13 +1091,11 @@ mod tests {

let logger = slog::Logger::root(slog::Discard, o!());

assert_eq!(entry.scan(&logger).await.unwrap(), 2);
assert_eq!(entry.scan(&logger).await.unwrap(), 5);

// Should copy no files since nothing is changed since last check
assert_eq!(entry.scan(&logger).await.unwrap(), 0);

// Should copy 1 file
thread::sleep(Duration::from_secs(1));
fs::write(source_dir.path().join("A/B/1.txt"), "updated").unwrap();
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
assert_eq!(
@@ -790,6 +1103,9 @@ mod tests {
"updated"
);

// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

// Should copy no new files after copy happened
assert_eq!(entry.scan(&logger).await.unwrap(), 0);

@@ -820,7 +1136,9 @@ mod tests {

assert_eq!(entry.scan(&logger).await.unwrap(), 1);

thread::sleep(Duration::from_secs(1));
// delay 20 ms between writes to files in order to ensure filesystem timestamps are unique
thread::sleep(Duration::from_millis(20));

fs::write(&source_file, "two").unwrap();
assert_eq!(entry.scan(&logger).await.unwrap(), 1);
assert_eq!(fs::read_to_string(&dest_file).unwrap(), "two");
@@ -846,8 +1164,9 @@ mod tests {

let logger = slog::Logger::root(slog::Discard, o!());

assert_eq!(entry.scan(&logger).await.unwrap(), 1);
assert_eq!(entry.watched_files.len(), 1);
// expect the root directory and the file:
assert_eq!(entry.scan(&logger).await.unwrap(), 2);
assert_eq!(entry.watched_files.len(), 2);

assert!(target_file.exists());
assert!(entry.watched_files.contains_key(&source_file));
@@ -857,7 +1176,7 @@ mod tests {

assert_eq!(entry.scan(&logger).await.unwrap(), 0);

assert_eq!(entry.watched_files.len(), 0);
assert_eq!(entry.watched_files.len(), 1);
assert!(!target_file.exists());
}

@@ -890,7 +1209,10 @@ mod tests {
);
}

use serial_test::serial;

#[tokio::test]
#[serial]
async fn create_tmpfs() {
skip_if_not_root!();

@@ -900,11 +1222,14 @@ mod tests {
watcher.mount(&logger).await.unwrap();
assert!(is_mounted(WATCH_MOUNT_POINT_PATH).unwrap());

thread::sleep(Duration::from_millis(20));

watcher.cleanup();
assert!(!is_mounted(WATCH_MOUNT_POINT_PATH).unwrap());
}

#[tokio::test]
#[serial]
async fn spawn_thread() {
skip_if_not_root!();

@@ -934,6 +1259,7 @@ mod tests {
}

#[tokio::test]
#[serial]
async fn verify_container_cleanup_watching() {
skip_if_not_root!();


@@ -15,6 +15,6 @@ serde = { version = "1.0.126", features = ["derive"] }
tokio-vsock = "0.3.1"
bincode = "1.3.3"
byteorder = "1.4.3"
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug"] }
async-trait = "0.1.50"
tokio = "1.2.0"

@@ -12,7 +12,7 @@
// payload, which allows the forwarder to know how many bytes it must read to
// consume the trace span. The payload is a serialised version of the trace span.

#![allow(clippy::unknown_clippy_lints)]
#![allow(unknown_lints)]

use async_trait::async_trait;
use byteorder::{ByteOrder, NetworkEndian};

src/runtime/.gitignore (vendored): 16 changes
@@ -5,25 +5,15 @@ coverage.txt
|
||||
coverage.html
|
||||
.git-commit
|
||||
.git-commit.tmp
|
||||
/cli/config/configuration-acrn.toml
|
||||
/cli/config/configuration-clh.toml
|
||||
/cli/config/configuration-fc.toml
|
||||
/cli/config/configuration-qemu.toml
|
||||
/cli/config/configuration-clh.toml
|
||||
/cli/config-generated.go
|
||||
/cli/containerd-shim-kata-v2/config-generated.go
|
||||
/cli/coverage.html
|
||||
/config/*.toml
|
||||
config-generated.go
|
||||
/containerd-shim-kata-v2
|
||||
/containerd-shim-v2/monitor_address
|
||||
/pkg/containerd-shim-v2/monitor_address
|
||||
/data/kata-collect-data.sh
|
||||
/kata-monitor
|
||||
/kata-netmon
|
||||
/kata-runtime
|
||||
/pkg/katautils/config-settings.go
|
||||
/virtcontainers/hack/virtc/virtc
|
||||
/virtcontainers/hook/mock/hook
|
||||
/virtcontainers/profile.cov
|
||||
/virtcontainers/shim/mock/cc-shim/cc-shim
|
||||
/virtcontainers/shim/mock/kata-shim/kata-shim
|
||||
/virtcontainers/shim/mock/shim
|
||||
/virtcontainers/utils/supportfiles
|
||||
|
||||
@@ -51,14 +51,10 @@ PROJECT_DIR = $(PROJECT_TAG)
|
||||
IMAGENAME = $(PROJECT_TAG).img
|
||||
|
||||
TARGET = $(BIN_PREFIX)-runtime
|
||||
TARGET_OUTPUT = $(CURDIR)/$(TARGET)
|
||||
RUNTIME_OUTPUT = $(CURDIR)/$(TARGET)
|
||||
RUNTIME_DIR = $(CLI_DIR)/$(TARGET)
|
||||
BINLIST += $(TARGET)
|
||||
|
||||
NETMON_DIR = netmon
|
||||
NETMON_TARGET = $(PROJECT_TYPE)-netmon
|
||||
NETMON_TARGET_OUTPUT = $(CURDIR)/$(NETMON_TARGET)
|
||||
BINLIBEXECLIST += $(NETMON_TARGET)
|
||||
|
||||
DESTDIR ?= /
|
||||
|
||||
ifeq ($(PREFIX),)
|
||||
@@ -141,9 +137,6 @@ ACRNVALIDHYPERVISORPATHS := [\"$(ACRNPATH)\"]
|
||||
ACRNCTLPATH := $(ACRNBINDIR)/$(ACRNCTLCMD)
|
||||
ACRNVALIDCTLPATHS := [\"$(ACRNCTLPATH)\"]
|
||||
|
||||
NETMONCMD := $(BIN_PREFIX)-netmon
|
||||
NETMONPATH := $(PKGLIBEXECDIR)/$(NETMONCMD)
|
||||
|
||||
# Default number of vCPUs
|
||||
DEFVCPUS := 1
|
||||
# Default maximum number of vCPUs
|
||||
@@ -189,6 +182,7 @@ DEFVALIDVHOSTUSERSTOREPATHS := [\"$(DEFVHOSTUSERSTOREPATH)\"]
|
||||
DEFFILEMEMBACKEND := ""
|
||||
DEFVALIDFILEMEMBACKENDS := [\"$(DEFFILEMEMBACKEND)\"]
|
||||
DEFMSIZE9P := 8192
|
||||
DEFVFIOMODE := guest-kernel
|
||||
|
||||
# Default cgroup model
|
||||
DEFSANDBOXCGROUPONLY ?= false
|
||||
@@ -200,7 +194,7 @@ FEATURE_SELINUX ?= check
|
||||
|
||||
SED = sed
|
||||
|
||||
CLI_DIR = cli
|
||||
CLI_DIR = cmd
|
||||
SHIMV2 = containerd-shim-kata-v2
|
||||
SHIMV2_OUTPUT = $(CURDIR)/$(SHIMV2)
|
||||
SHIMV2_DIR = $(CLI_DIR)/$(SHIMV2)
|
||||
@@ -225,7 +219,7 @@ ifneq (,$(QEMUCMD))
 KNOWN_HYPERVISORS += $(HYPERVISOR_QEMU)
 
 CONFIG_FILE_QEMU = configuration-qemu.toml
-CONFIG_QEMU = $(CLI_DIR)/config/$(CONFIG_FILE_QEMU)
+CONFIG_QEMU = config/$(CONFIG_FILE_QEMU)
 CONFIG_QEMU_IN = $(CONFIG_QEMU).in
 
 CONFIG_PATH_QEMU = $(abspath $(CONFDIR)/$(CONFIG_FILE_QEMU))
@@ -248,7 +242,7 @@ ifneq (,$(CLHCMD))
 KNOWN_HYPERVISORS += $(HYPERVISOR_CLH)
 
 CONFIG_FILE_CLH = configuration-clh.toml
-CONFIG_CLH = $(CLI_DIR)/config/$(CONFIG_FILE_CLH)
+CONFIG_CLH = config/$(CONFIG_FILE_CLH)
 CONFIG_CLH_IN = $(CONFIG_CLH).in
 
 CONFIG_PATH_CLH = $(abspath $(CONFDIR)/$(CONFIG_FILE_CLH))
@@ -271,7 +265,7 @@ ifneq (,$(FCCMD))
 KNOWN_HYPERVISORS += $(HYPERVISOR_FC)
 
 CONFIG_FILE_FC = configuration-fc.toml
-CONFIG_FC = $(CLI_DIR)/config/$(CONFIG_FILE_FC)
+CONFIG_FC = config/$(CONFIG_FILE_FC)
 CONFIG_FC_IN = $(CONFIG_FC).in
 
 CONFIG_PATH_FC = $(abspath $(CONFDIR)/$(CONFIG_FILE_FC))
@@ -294,7 +288,7 @@ ifneq (,$(ACRNCMD))
 KNOWN_HYPERVISORS += $(HYPERVISOR_ACRN)
 
 CONFIG_FILE_ACRN = configuration-acrn.toml
-CONFIG_ACRN = $(CLI_DIR)/config/$(CONFIG_FILE_ACRN)
+CONFIG_ACRN = config/$(CONFIG_FILE_ACRN)
 CONFIG_ACRN_IN = $(CONFIG_ACRN).in
 
 CONFIG_PATH_ACRN = $(abspath $(CONFDIR)/$(CONFIG_FILE_ACRN))
@@ -414,7 +408,6 @@ USER_VARS += PROJECT_PREFIX
 USER_VARS += PROJECT_TAG
 USER_VARS += PROJECT_TYPE
 USER_VARS += PROJECT_URL
-USER_VARS += NETMONPATH
 USER_VARS += QEMUBINDIR
 USER_VARS += QEMUCMD
 USER_VARS += QEMUPATH
@@ -458,6 +451,7 @@ USER_VARS += DEFENTROPYSOURCE
 USER_VARS += DEFVALIDENTROPYSOURCES
 USER_VARS += DEFSANDBOXCGROUPONLY
 USER_VARS += DEFBINDMOUNTS
+USER_VARS += DEFVFIOMODE
 USER_VARS += FEATURE_SELINUX
 USER_VARS += BUILDFLAGS
 
@@ -506,7 +500,7 @@ define SHOW_ARCH
 $(shell printf "\\t%s%s\\\n" "$(1)" $(if $(filter $(ARCH),$(1))," (default)",""))
 endef
 
-all: runtime containerd-shim-v2 netmon monitor
+all: runtime containerd-shim-v2 monitor
 
 # Targets that depend on .git-commit can use $(shell cat .git-commit) to get a
 # git revision string. They will only be rebuilt if the revision string
@@ -522,15 +516,10 @@ containerd-shim-v2: $(SHIMV2_OUTPUT)
 
 monitor: $(MONITOR_OUTPUT)
 
-netmon: $(NETMON_TARGET_OUTPUT)
-
-$(NETMON_TARGET_OUTPUT): $(SOURCES) VERSION
-	$(QUIET_BUILD)(cd $(NETMON_DIR) && go build $(BUILDFLAGS) -o $@ -ldflags "-X main.version=$(VERSION)" $(KATA_LDFLAGS))
-
-runtime: $(TARGET_OUTPUT) $(CONFIGS)
+runtime: $(RUNTIME_OUTPUT) $(CONFIGS)
 .DEFAULT: default
 
-build: default
+build: all
 
 #Install an executable file
 # params:
@@ -558,16 +547,12 @@ define MAKE_KERNEL_VIRTIOFS_NAME
 $(if $(findstring uncompressed,$1),vmlinux-virtiofs.container,vmlinuz-virtiofs.container)
 endef
 
-GENERATED_CONFIG = $(abspath $(CLI_DIR)/config-generated.go)
-
-GENERATED_FILES += $(GENERATED_CONFIG)
 GENERATED_FILES += pkg/katautils/config-settings.go
 
-$(TARGET_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) | show-summary
-	$(QUIET_BUILD)(cd $(CLI_DIR) && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
+$(RUNTIME_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) | show-summary
+	$(QUIET_BUILD)(cd $(RUNTIME_DIR) && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
 
 $(SHIMV2_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST)
-	$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && ln -fs $(GENERATED_CONFIG))
 	$(QUIET_BUILD)(cd $(SHIMV2_DIR)/ && go build $(KATA_LDFLAGS) $(BUILDFLAGS) -o $@ .)
 
 $(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
@@ -576,10 +561,11 @@ $(MONITOR_OUTPUT): $(SOURCES) $(GENERATED_FILES) $(MAKEFILE_LIST) .git-commit
 
 .PHONY: \
 	check \
-	check-go-static \
 	coverage \
 	default \
 	install \
+	lint \
+	pre-commit \
 	show-header \
 	show-summary \
 	show-variables \
@@ -598,8 +584,6 @@ $(GENERATED_FILES): %: %.in $(MAKEFILE_LIST) VERSION .git-commit
 
 generate-config: $(CONFIGS)
 
-check: check-go-static
-
 test: install-hook go-test
 
 install-hook:
@@ -610,25 +594,43 @@ ifeq ($(shell id -u), 0)
 endif
 
 go-test: $(GENERATED_FILES)
 	go clean -testcache
 	go test -v -mod=vendor ./...
 
-check-go-static:
-	$(QUIET_CHECK)../../ci/go-no-os-exit.sh ./cli
-	$(QUIET_CHECK)../../ci/go-no-os-exit.sh ./virtcontainers
+fast-test: $(GENERATED_FILES)
+	go clean -testcache
+	for s in $$(go list ./...); do if ! go test -failfast -v -mod=vendor -p 1 $$s; then break; fi; done
+
+GOLANGCI_LINT_FILE := ../../../tests/.ci/.golangci.yml
+GOLANGCI_LINT_NAME = golangci-lint
+GOLANGCI_LINT_CMD := $(shell command -v $(GOLANGCI_LINT_NAME) 2>/dev/null)
+lint: all
+	if [ -z $(GOLANGCI_LINT_CMD) ] ; \
+	then \
+		echo "ERROR: command $(GOLANGCI_LINT_NAME) not found. Please install it first." >&2; exit 1; \
+	fi
+
+	if [ -f $(GOLANGCI_LINT_FILE) ] ; \
+	then \
+		echo "running $(GOLANGCI_LINT_NAME)..."; \
+		$(GOLANGCI_LINT_NAME) run -c $(GOLANGCI_LINT_FILE) ; \
+	else \
+		echo "ERROR: file $(GOLANGCI_LINT_FILE) not found. You should clone https://github.com/kata-containers/tests to run $(GOLANGCI_LINT_NAME) locally." >&2; exit 1; \
+	fi;
+
+pre-commit: lint fast-test
 
 coverage:
 	go test -v -mod=vendor -covermode=atomic -coverprofile=coverage.txt ./...
 	go tool cover -html=coverage.txt -o coverage.html
 
-install: default install-runtime install-containerd-shim-v2 install-monitor install-netmon
+install: all install-runtime install-containerd-shim-v2 install-monitor
 
 install-bin: $(BINLIST)
 	$(QUIET_INST)$(foreach f,$(BINLIST),$(call INSTALL_EXEC,$f,$(BINDIR)))
 
 install-runtime: runtime install-scripts install-completions install-configs install-bin
 
-install-netmon: install-bin-libexec
-
 install-containerd-shim-v2: $(SHIMV2)
 	$(QUIET_INST)$(call INSTALL_EXEC,$<,$(BINDIR))
@@ -660,10 +662,8 @@ clean:
 	$(QUIET_CLEAN)rm -f \
 		$(CONFIGS) \
 		$(GENERATED_FILES) \
-		$(NETMON_TARGET) \
 		$(MONITOR) \
 		$(SHIMV2) \
-		$(SHIMV2_DIR)/$(notdir $(GENERATED_CONFIG)) \
 		$(TARGET) \
 		.git-commit .git-commit.tmp
 
@@ -678,6 +678,9 @@ show-usage: show-header
 	@printf "\n"
 	@printf "\tbuild : standard build (build everything).\n"
 	@printf "\ttest : run tests.\n"
+	@printf "\tpre-commit : run $(GOLANGCI_LINT_NAME) and tests locally.\n"
+	@printf "\tlint : run $(GOLANGCI_LINT_NAME).\n"
+	@printf "\tfast-test : run tests with failfast option.\n"
 	@printf "\tcheck : run code checks.\n"
 	@printf "\tclean : remove built files.\n"
 	@printf "\tcontainerd-shim-v2 : only build containerd shim v2.\n"
@@ -686,9 +689,7 @@ show-usage: show-header
 	@printf "\tgenerate-config : create configuration file.\n"
 	@printf "\tinstall : install everything.\n"
 	@printf "\tinstall-containerd-shim-v2 : only install containerd shim v2 files.\n"
-	@printf "\tinstall-netmon : only install netmon files.\n"
 	@printf "\tinstall-runtime : only install runtime files.\n"
-	@printf "\tnetmon : only build netmon.\n"
 	@printf "\truntime : only build runtime.\n"
 	@printf "\tshow-arches : show supported architectures (ARCH variable values).\n"
 	@printf "\tshow-summary : show install locations.\n"
@@ -26,8 +26,7 @@ to work seamlessly with both Docker and Kubernetes respectively.
 ## License
 
 The code is licensed under an Apache 2.0 license.
-
-See [the license file](LICENSE) for further details.
+See [the license file](https://github.com/kata-containers/kata-containers/blob/main/LICENSE) for further details.
 
 ## Platform support
@@ -1,40 +0,0 @@
-//
-// Copyright (c) 2018-2019 Intel Corporation
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// WARNING: This file is auto-generated - DO NOT EDIT!
-//
-// Note that some variables are "var" to allow them to be modified
-// by the tests.
-package main
-
-// name is the name of the runtime
-const name = "@RUNTIME_NAME@"
-
-// name of the project
-const project = "@PROJECT_NAME@"
-
-// prefix used to denote non-standard CLI commands and options.
-const projectPrefix = "@PROJECT_TYPE@"
-
-// original URL for this project
-const projectURL = "@PROJECT_URL@"
-
-// Project URL's organisation name
-const projectORG = "@PROJECT_ORG@"
-
-const defaultRootDirectory = "@PKGRUNDIR@"
-
-// commit is the git commit the runtime is compiled from.
-var commit = "@COMMIT@"
-
-// version is the runtime version.
-var version = "@VERSION@"
-
-// Default config file used by stateless systems.
-var defaultRuntimeConfiguration = "@CONFIG_PATH@"
-
-// Alternate config file that takes precedence over
-// defaultRuntimeConfiguration.
-var defaultSysConfRuntimeConfiguration = "@SYSCONFIG@"
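For context, this deleted template was expanded into cli/config-generated.go at build time; with it gone, the binaries read these values from pkg/katautils instead, which is also why the ln -fs step disappeared from the Makefile above. A minimal sketch of what the old expansion did (the substituted value below is illustrative, not taken from this diff):

// Template line from the removed config-generated.go.in:
const name = "@RUNTIME_NAME@"

// What the generated cli/config-generated.go would have contained after
// the Makefile substituted the placeholder (illustrative value):
const name = "kata-runtime"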
@@ -1,30 +0,0 @@
-// Copyright (c) 2018 HyperHQ Inc.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-package main
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/containerd/containerd/runtime/v2/shim"
-	containerdshim "github.com/kata-containers/kata-containers/src/runtime/containerd-shim-v2"
-	"github.com/kata-containers/kata-containers/src/runtime/pkg/types"
-)
-
-func shimConfig(config *shim.Config) {
-	config.NoReaper = true
-	config.NoSubreaper = true
-}
-
-func main() {
-
-	if len(os.Args) == 2 && os.Args[1] == "--version" {
-		fmt.Printf("%s containerd shim: id: %q, version: %s, commit: %v\n", project, types.DefaultKataRuntimeName, version, commit)
-		os.Exit(0)
-	}
-
-	shim.Run(types.DefaultKataRuntimeName, containerdshim.New, shimConfig)
-}
@@ -1,28 +0,0 @@
-// Copyright (c) 2017 Intel Corporation
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-package main
-
-import "os"
-
-var atexitFuncs []func()
-
-var exitFunc = os.Exit
-
-// atexit registers a function f that will be run when exit is called. The
-// handlers so registered will be called in the reverse order of their
-// registration.
-func atexit(f func()) {
-	atexitFuncs = append(atexitFuncs, f)
-}
-
-// exit calls all atexit handlers before exiting the process with status.
-func exit(status int) {
-	for i := len(atexitFuncs) - 1; i >= 0; i-- {
-		f := atexitFuncs[i]
-		f()
-	}
-	exitFunc(status)
-}
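For reference, a minimal usage sketch of the atexit/exit helpers this deleted file provided, assuming it sits in the same package (the cleanup messages are illustrative):

package main

import "fmt"

func main() {
	atexit(func() { fmt.Println("closing log file") })
	atexit(func() { fmt.Println("removing temp dir") })
	// Handlers run in reverse registration order: the temp-dir message
	// prints first, then the log-file message, then the process exits
	// with status 0 via exitFunc.
	exit(0)
}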
@@ -1,42 +0,0 @@
-// Copyright (c) 2017 Intel Corporation
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-package main
-
-import (
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-var testFoo string
-
-func testFunc() {
-	testFoo = "bar"
-}
-
-func TestExit(t *testing.T) {
-	assert := assert.New(t)
-
-	var testExitStatus int
-	exitFunc = func(status int) {
-		testExitStatus = status
-	}
-
-	defer func() {
-		exitFunc = os.Exit
-	}()
-
-	// test with no atexit functions added.
-	exit(1)
-	assert.Equal(testExitStatus, 1)
-
-	// test with a function added to the atexit list.
-	atexit(testFunc)
-	exit(0)
-	assert.Equal(testFoo, "bar")
-	assert.Equal(testExitStatus, 0)
-}
32 src/runtime/cmd/containerd-shim-kata-v2/main.go Normal file
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 HyperHQ Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package main
+
+import (
+	"fmt"
+	"os"
+
+	shimapi "github.com/containerd/containerd/runtime/v2/shim"
+
+	shim "github.com/kata-containers/kata-containers/src/runtime/pkg/containerd-shim-v2"
+	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
+	"github.com/kata-containers/kata-containers/src/runtime/pkg/types"
+)
+
+func shimConfig(config *shimapi.Config) {
+	config.NoReaper = true
+	config.NoSubreaper = true
+}
+
+func main() {
+
+	if len(os.Args) == 2 && os.Args[1] == "--version" {
+		fmt.Printf("%s containerd shim: id: %q, version: %s, commit: %v\n", katautils.PROJECT, types.DefaultKataRuntimeName, katautils.VERSION, katautils.COMMIT)
+		os.Exit(0)
+	}
+
+	shimapi.Run(types.DefaultKataRuntimeName, shim.New, shimConfig)
+}
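containerd normally execs this binary itself, so the only direct command-line surface is the --version flag handled above. Assuming katautils.PROJECT carries the project name and types.DefaultKataRuntimeName is the shim id (commonly "io.containerd.kata.v2"), the output would look roughly like this (all values illustrative):

$ containerd-shim-kata-v2 --version
Kata Containers containerd shim: id: "io.containerd.kata.v2", version: 2.4.0-alpha1, commit: <commit-hash>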
@@ -7,6 +7,7 @@ package main
 
 import (
 	"flag"
+	"fmt"
 	"net/http"
 	"os"
 	goruntime "runtime"
@@ -25,7 +26,7 @@ var logLevel = flag.String("log-level", "info", "Log level of logrus(trace/debug
 var (
 	appName = "kata-monitor"
 	// version is the kata monitor version.
-	version = "0.1.0"
+	version = "0.2.0"
 
 	GitCommit = "unknown-commit"
 )
@@ -54,6 +55,15 @@ func printVersion(ver versionInfo) {
 	}
 }
 
+type endpoint struct {
+	handler http.HandlerFunc
+	path    string
+	desc    string
+}
+
+// global variable endpoints contains all available endpoints
+var endpoints []endpoint
+
 func main() {
 	ver := versionInfo{
 		AppName: appName,
@@ -97,19 +107,62 @@ func main() {
 		panic(err)
 	}
 
-	// setup handlers, now only metrics is supported
+	// setup handlers, currently only metrics are supported
 	m := http.NewServeMux()
-	m.Handle("/metrics", http.HandlerFunc(km.ProcessMetricsRequest))
-	m.Handle("/sandboxes", http.HandlerFunc(km.ListSandboxes))
-	m.Handle("/agent-url", http.HandlerFunc(km.GetAgentURL))
+	endpoints = []endpoint{
+		{
+			path:    "/metrics",
+			desc:    "Get metrics from sandboxes.",
+			handler: km.ProcessMetricsRequest,
+		},
+		{
+			path:    "/sandboxes",
+			desc:    "List all Kata Containers sandboxes.",
+			handler: km.ListSandboxes,
+		},
+		{
+			path:    "/agent-url",
+			desc:    "Get sandbox agent URL.",
+			handler: km.GetAgentURL,
+		},
+		{
+			path:    "/debug/vars",
+			desc:    "Golang pprof `/debug/vars` endpoint for kata runtime shim process.",
+			handler: km.ExpvarHandler,
+		},
+		{
+			path:    "/debug/pprof/",
+			desc:    "Golang pprof `/debug/pprof/` endpoint for kata runtime shim process.",
+			handler: km.PprofIndex,
+		},
+		{
+			path:    "/debug/pprof/cmdline",
+			desc:    "Golang pprof `/debug/pprof/cmdline` endpoint for kata runtime shim process.",
+			handler: km.PprofCmdline,
+		},
+		{
+			path:    "/debug/pprof/profile",
+			desc:    "Golang pprof `/debug/pprof/profile` endpoint for kata runtime shim process.",
+			handler: km.PprofProfile,
+		},
+		{
+			path:    "/debug/pprof/symbol",
+			desc:    "Golang pprof `/debug/pprof/symbol` endpoint for kata runtime shim process.",
+			handler: km.PprofSymbol,
+		},
+		{
+			path:    "/debug/pprof/trace",
+			desc:    "Golang pprof `/debug/pprof/trace` endpoint for kata runtime shim process.",
+			handler: km.PprofTrace,
+		},
+	}
 
-	// for debug shim process
-	m.Handle("/debug/vars", http.HandlerFunc(km.ExpvarHandler))
-	m.Handle("/debug/pprof/", http.HandlerFunc(km.PprofIndex))
-	m.Handle("/debug/pprof/cmdline", http.HandlerFunc(km.PprofCmdline))
-	m.Handle("/debug/pprof/profile", http.HandlerFunc(km.PprofProfile))
-	m.Handle("/debug/pprof/symbol", http.HandlerFunc(km.PprofSymbol))
-	m.Handle("/debug/pprof/trace", http.HandlerFunc(km.PprofTrace))
+	for _, endpoint := range endpoints {
+		m.Handle(endpoint.path, endpoint.handler)
+	}
+
+	// root index page to show all endpoints in kata-monitor
+	m.Handle("/", http.HandlerFunc(indexPage))
 
 	// listening on the server
 	svr := &http.Server{
@@ -119,6 +172,23 @@ func main() {
 	logrus.Fatal(svr.ListenAndServe())
 }
 
+func indexPage(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Available HTTP endpoints:\n"))
+
+	spacing := 0
+	for _, endpoint := range endpoints {
+		if len(endpoint.path) > spacing {
+			spacing = len(endpoint.path)
+		}
+	}
+	spacing = spacing + 3
+
+	formattedString := fmt.Sprintf("%%-%ds: %%s\n", spacing)
+	for _, endpoint := range endpoints {
+		w.Write([]byte(fmt.Sprintf(formattedString, endpoint.path, endpoint.desc)))
+	}
+}
+
 // initLog setup logger
 func initLog() {
 	kataMonitorLog := logrus.WithFields(logrus.Fields{
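A quick way to exercise the endpoint table and the new index page is a trivial HTTP client. This sketch assumes kata-monitor is reachable on 127.0.0.1:8090 (an assumption; match whatever listen address you started it with):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GET / hits indexPage, which prints every registered endpoint
	// with its description, aligned by the longest path.
	resp, err := http.Get("http://127.0.0.1:8090/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(body))
}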
@@ -16,10 +16,10 @@ import (
 	"time"
 
 	"github.com/gogo/protobuf/types"
+	"github.com/kata-containers/kata-containers/src/runtime/pkg/oci"
 	pb "github.com/kata-containers/kata-containers/src/runtime/protocols/cache"
 	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
 	vf "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"golang.org/x/sys/unix"
@@ -25,10 +25,9 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/containerd/cgroups"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
+	"github.com/kata-containers/kata-containers/src/runtime/pkg/oci"
 	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
@@ -62,9 +61,9 @@ type vmContainerCapableDetails struct {
 
 const (
 	moduleParamDir        = "parameters"
-	successMessageCapable = "System is capable of running " + project
-	successMessageCreate  = "System can currently create " + project
-	failMessage           = "System is not capable of running " + project
+	successMessageCapable = "System is capable of running " + katautils.PROJECT
+	successMessageCreate  = "System can currently create " + katautils.PROJECT
+	failMessage           = "System is not capable of running " + katautils.PROJECT
 	kernelPropertyCorrect = "Kernel property value correct"
 
 	// these refer to fields in the procCPUINFO file
@@ -229,7 +228,7 @@ func checkKernelModules(modules map[string]kernelModule, handler kernelParamHandler
 		}
 
 		if !haveKernelModule(module) {
-			kataLog.WithFields(fields).Error("kernel property not found")
+			kataLog.WithFields(fields).Errorf("kernel property %s not found", module)
 			if details.required {
 				count++
 			}
@@ -292,11 +291,9 @@ func genericHostIsVMContainerCapable(details vmContainerCapableDetails) error {
 	errorCount := uint32(0)
 
 	count := checkCPUAttribs(cpuinfo, details.requiredCPUAttribs)
-
 	errorCount += count
 
 	count = checkCPUFlags(cpuFlags, details.requiredCPUFlags)
-
 	errorCount += count
 
 	count, err = checkKernelModules(details.requiredKernelModules, archKernelParamHandler)
@@ -316,7 +313,7 @@
 var kataCheckCLICommand = cli.Command{
 	Name:    "check",
 	Aliases: []string{"kata-check"},
-	Usage:   "tests if system can run " + project,
+	Usage:   "tests if system can run " + katautils.PROJECT,
 	Flags: []cli.Flag{
 		cli.BoolFlag{
 			Name: "check-version-only",
@@ -375,14 +372,14 @@ EXAMPLES:
 
 	$ %s check --only-list-releases --include-all-releases
 `,
-		project,
+		katautils.PROJECT,
 		noNetworkEnvVar,
-		name,
-		name,
-		name,
-		name,
-		name,
-		name,
+		katautils.NAME,
+		katautils.NAME,
+		katautils.NAME,
+		katautils.NAME,
+		katautils.NAME,
+		katautils.NAME,
 	),
 
 	Action: func(context *cli.Context) error {
@@ -401,7 +398,7 @@ EXAMPLES:
 		if os.Geteuid() == 0 {
 			kataLog.Warn("Not running network checks as super user")
 		} else {
-			err := HandleReleaseVersions(cmd, version, context.Bool("include-all-releases"))
+			err := HandleReleaseVersions(cmd, katautils.VERSION, context.Bool("include-all-releases"))
 			if err != nil {
 				return err
 			}
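The katautils identifiers used throughout these checks replace the per-binary generated constants deleted earlier in this diff. A rough sketch of their shape (the declarations and values below are illustrative placeholders; the real ones are produced at build time via the pkg/katautils/config-settings.go rule kept in the Makefile above):

package katautils

// Build-time project metadata; the literal values are placeholders,
// not the generated contents.
var (
	PROJECT = "Kata Containers"
	NAME    = "kata-runtime"
	VERSION = "2.4.0-alpha1"
	COMMIT  = "unknown"
)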
@@ -417,11 +414,6 @@ EXAMPLES:
 			return errors.New("check: cannot determine runtime config")
 		}
 
-		// check if cgroup can work use the same logic for creating containers
-		if _, err := vc.V1Constraints(); err != nil && err == cgroups.ErrMountPointNotExist && !runtimeConfig.SandboxCgroupOnly {
-			return fmt.Errorf("Cgroup v2 requires the following configuration: `sandbox_cgroup_only=true`.")
-		}
-
 		err := setCPUtype(runtimeConfig.HypervisorType)
 		if err != nil {
 			return err
@@ -161,6 +161,16 @@ func setCPUtype(hypervisorType vc.HypervisorType) error {
 				required: false,
 			},
 		}
+	case "mock":
+		archRequiredCPUFlags = map[string]string{
+			cpuFlagVMX:    "Virtualization support",
+			cpuFlagLM:     "64Bit CPU",
+			cpuFlagSSE4_1: "SSE4.1",
+		}
+		archRequiredCPUAttribs = map[string]string{
+			archGenuineIntel: "Intel Architecture CPU",
+		}
+
 	default:
 		return fmt.Errorf("setCPUtype: Unknown hypervisor type %s", hypervisorType)
 	}
@@ -292,6 +302,8 @@ func archHostCanCreateVMContainer(hypervisorType vc.HypervisorType) error {
 		return kvmIsUsable()
 	case "acrn":
 		return acrnIsUsable()
+	case "mock":
+		return nil
 	default:
 		return fmt.Errorf("archHostCanCreateVMContainer: Unknown hypervisor type %s", hypervisorType)
 	}
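The mock tables above reuse the x86 expectations (VMX, 64-bit, SSE4.1) so the capability checks can run against a mock hypervisor. An illustrative, self-contained version of this kind of /proc/cpuinfo flag scan (a simplified stand-in, not the checkCPUFlags implementation from this diff):

package main

import (
	"fmt"
	"os"
	"strings"
)

// hasCPUFlag reports whether /proc/cpuinfo advertises the given flag.
func hasCPUFlag(flag string) (bool, error) {
	data, err := os.ReadFile("/proc/cpuinfo")
	if err != nil {
		return false, err
	}
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "flags") {
			for _, f := range strings.Fields(line) {
				if f == flag {
					return true, nil
				}
			}
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCPUFlag("vmx")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("vmx present:", ok)
}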
@@ -317,11 +317,12 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) {
 		}
 	}
 
-	setupCheckHostIsVMContainerCapable(assert, cpuInfoFile, cpuData, moduleData)
-
-	// remove the modules to force a failure
-	err = os.RemoveAll(sysModuleDir)
+	// to check if host is capable for Kata Containers, must setup CPU info first.
+	_, config, err := makeRuntimeConfig(dir)
+	assert.NoError(err)
+	setCPUtype(config.HypervisorType)
+
+	setupCheckHostIsVMContainerCapable(assert, cpuInfoFile, cpuData, moduleData)
 
 	details := vmContainerCapableDetails{
 		cpuInfoFile: cpuInfoFile,
@@ -332,6 +333,12 @@ func TestCheckHostIsVMContainerCapable(t *testing.T) {
 
 	err = hostIsVMContainerCapable(details)
 	assert.Nil(err)
+
+	// remove the modules to force a failure
+	err = os.RemoveAll(sysModuleDir)
+	assert.NoError(err)
+	err = hostIsVMContainerCapable(details)
+	assert.Error(err)
 }
 
 func TestArchKernelParamHandler(t *testing.T) {
@@ -28,9 +28,9 @@ func setupCheckHostIsVMContainerCapable(assert *assert.Assertions, cpuInfoFile s
 func TestCCCheckCLIFunction(t *testing.T) {
 	var cpuData []testCPUData
 	moduleData := []testModuleData{
-		{filepath.Join(sysModuleDir, "kvm"), true, ""},
-		{filepath.Join(sysModuleDir, "vhost"), true, ""},
-		{filepath.Join(sysModuleDir, "vhost_net"), true, ""},
+		{filepath.Join(sysModuleDir, "kvm"), "", true},
+		{filepath.Join(sysModuleDir, "vhost"), "", true},
+		{filepath.Join(sysModuleDir, "vhost_net"), "", true},
	}
 
 	genericCheckCLIFunction(t, cpuData, moduleData)
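The reordered literals above imply the testModuleData field order changed, with the file contents now preceding the directory flag. A hypothetical shape consistent with the new literals (field names assumed, not shown in this diff):

type testModuleData struct {
	path     string
	contents string
	isDir    bool
}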
@@ -10,7 +10,7 @@ vendor_id : IBM/S390
 # processors : 4
 bogomips per cpu: 20325.00
 max thread id : 0
-features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx sie
+features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx sie
 cache0 : level=1 type=Data scope=Private size=128K line_size=256 associativity=8
 cache1 : level=1 type=Instruction scope=Private size=96K line_size=256 associativity=6
 cache2 : level=2 type=Data scope=Private size=2048K line_size=256 associativity=8
Some files were not shown because too many files have changed in this diff.