mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-02-27 09:12:24 +00:00
Compare commits
638 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3de6d09a86 | ||
|
|
3037303e09 | ||
|
|
4c34cfb0ab | ||
|
|
8cdd968092 | ||
|
|
91b874f18c | ||
|
|
b25538f670 | ||
|
|
3dabe0f5f0 | ||
|
|
98886a7571 | ||
|
|
e27d70d47e | ||
|
|
9a33a3413b | ||
|
|
68d539f5c5 | ||
|
|
b93f5390ce | ||
|
|
23f5786cca | ||
|
|
4ae9317675 | ||
|
|
b00203ba9b | ||
|
|
cca77f0911 | ||
|
|
e3efad8ed2 | ||
|
|
4adb454ed0 | ||
|
|
f0e0c74fd4 | ||
|
|
69509eff33 | ||
|
|
ece0f9690e | ||
|
|
ccfb7faa1b | ||
|
|
f13d13c8fa | ||
|
|
c371b4e1ce | ||
|
|
c06bf2e3bb | ||
|
|
f9b7a8a23c | ||
|
|
bc195d758a | ||
|
|
614e21ccfb | ||
|
|
aae654be80 | ||
|
|
3622b5e8b4 | ||
|
|
02f5fd94bd | ||
|
|
cf5d3ed0d4 | ||
|
|
0c4a7c8771 | ||
|
|
3f7ce1d620 | ||
|
|
036b04094e | ||
|
|
65ecac5777 | ||
|
|
a992feb7f3 | ||
|
|
0cda92c6d8 | ||
|
|
616eb8b19b | ||
|
|
652ba30d4a | ||
|
|
59e3ab07e4 | ||
|
|
b2fb19f8f8 | ||
|
|
01a957f7e1 | ||
|
|
091ad2a1b2 | ||
|
|
3bbf3c81c2 | ||
|
|
9c0c159b25 | ||
|
|
2035d638df | ||
|
|
b5142c94b9 | ||
|
|
8763880e93 | ||
|
|
e08749ce58 | ||
|
|
80196c06ad | ||
|
|
083b2f24d8 | ||
|
|
63c1f81c23 | ||
|
|
7a38cce73c | ||
|
|
e56af7a370 | ||
|
|
a94024aedc | ||
|
|
fe307303c8 | ||
|
|
31e09058af | ||
|
|
974d6b0736 | ||
|
|
1f33fd4cd4 | ||
|
|
da281b4444 | ||
|
|
71d0c46e0a | ||
|
|
e989e7ee4e | ||
|
|
6d5fc898b8 | ||
|
|
5aaef8e6eb | ||
|
|
4cd737d9fd | ||
|
|
77c5db6267 | ||
|
|
2d089d9695 | ||
|
|
b9025462fb | ||
|
|
9138f55757 | ||
|
|
d7c2b7d13c | ||
|
|
96336d141b | ||
|
|
23927d8a94 | ||
|
|
ac393f6316 | ||
|
|
4eb7e2966c | ||
|
|
3f46dfcf2f | ||
|
|
cda04fa539 | ||
|
|
efc8e93bfe | ||
|
|
720265c2d8 | ||
|
|
63b6e8a215 | ||
|
|
2ae090b44b | ||
|
|
2440a39c50 | ||
|
|
dd2878a9c8 | ||
|
|
fdcfac0641 | ||
|
|
4abfc11b4f | ||
|
|
5c1cea1601 | ||
|
|
1a4928e710 | ||
|
|
973b8a1d8f | ||
|
|
8412c09143 | ||
|
|
9a8341f431 | ||
|
|
a1d380305c | ||
|
|
b3ed7830e4 | ||
|
|
b179598fed | ||
|
|
820e000f1c | ||
|
|
4ccf1f29f9 | ||
|
|
3b24219310 | ||
|
|
94bc54f4d2 | ||
|
|
b49800633d | ||
|
|
7fe44d3a3d | ||
|
|
52ef092489 | ||
|
|
c037ac0e82 | ||
|
|
dfd0ca9bfe | ||
|
|
6a9e3ccddf | ||
|
|
66bcfe7369 | ||
|
|
bafa527be0 | ||
|
|
36750b56f1 | ||
|
|
86b8c53d27 | ||
|
|
d91979d7fa | ||
|
|
ad0f2b2a55 | ||
|
|
11b1a72442 | ||
|
|
3911bd3108 | ||
|
|
f7bc627a86 | ||
|
|
b1275bed1b | ||
|
|
01d460ac63 | ||
|
|
e8d1feb25f | ||
|
|
3a7f9595b6 | ||
|
|
cb5a2b30e9 | ||
|
|
e4733748aa | ||
|
|
08eb5fc7ff | ||
|
|
71afeccdf1 | ||
|
|
857222af02 | ||
|
|
caf3b19505 | ||
|
|
57e8cbff6f | ||
|
|
edf4ca4738 | ||
|
|
09ed9c5c50 | ||
|
|
e1825c2ef3 | ||
|
|
39b0e9aa8f | ||
|
|
c70588fafe | ||
|
|
8355eee9f5 | ||
|
|
2c2941122c | ||
|
|
6a8b137965 | ||
|
|
e738054ddb | ||
|
|
6b94cc47a8 | ||
|
|
b8ba346e98 | ||
|
|
0e0cb24387 | ||
|
|
6f0b3eb2f9 | ||
|
|
8a893cd4ee | ||
|
|
f1f5bef9ef | ||
|
|
52397ca2c1 | ||
|
|
20b4be0225 | ||
|
|
ba94eed891 | ||
|
|
fb27de3561 | ||
|
|
79a3b4e2e5 | ||
|
|
4f745f77cb | ||
|
|
78c63c7951 | ||
|
|
456e13db98 | ||
|
|
b85a886694 | ||
|
|
2d6ac3d85d | ||
|
|
c6b86e88e4 | ||
|
|
9cff9271bc | ||
|
|
374b8d2534 | ||
|
|
aedf14b244 | ||
|
|
63b25e8cb0 | ||
|
|
03735d78ec | ||
|
|
020e3da9b9 | ||
|
|
77c844da12 | ||
|
|
6eef58dc3e | ||
|
|
b9d88f74ed | ||
|
|
51dade3382 | ||
|
|
49b3a0faa3 | ||
|
|
31438dba79 | ||
|
|
fefcf7cfa4 | ||
|
|
cdaaf708a1 | ||
|
|
a6ee15c5c7 | ||
|
|
e9593b53a4 | ||
|
|
4d11fecc2d | ||
|
|
3d5f48e02e | ||
|
|
5e5eb9759f | ||
|
|
8c92f3bfec | ||
|
|
204ee21bc8 | ||
|
|
eb1227f47d | ||
|
|
8789551fe6 | ||
|
|
35c7f8d1ba | ||
|
|
eccdffebf7 | ||
|
|
a19f2eacec | ||
|
|
27f8f69195 | ||
|
|
1597f8ba00 | ||
|
|
593cbb8710 | ||
|
|
5402f2c637 | ||
|
|
b63d49b34a | ||
|
|
18c887f055 | ||
|
|
28d430ec42 | ||
|
|
da2377346d | ||
|
|
096f32cc52 | ||
|
|
9d29ce460d | ||
|
|
c0d35a66aa | ||
|
|
1abeffdac6 | ||
|
|
5448f7fbbf | ||
|
|
72471d1a18 | ||
|
|
72df3004e8 | ||
|
|
03cd02a006 | ||
|
|
cefba08903 | ||
|
|
8d609e47fb | ||
|
|
fc5a631791 | ||
|
|
aa9f21bd19 | ||
|
|
3087ce17a6 | ||
|
|
c80c8d84c3 | ||
|
|
9606e7ac8b | ||
|
|
653bc3973f | ||
|
|
dfcb41b5cc | ||
|
|
705e469696 | ||
|
|
5777869cf4 | ||
|
|
6773f14667 | ||
|
|
124f01beb3 | ||
|
|
16f5ebf5f9 | ||
|
|
5badc30a69 | ||
|
|
6f363bba18 | ||
|
|
d3127af9c5 | ||
|
|
5a7d0ed3ad | ||
|
|
b09eba8c46 | ||
|
|
00e657cdb7 | ||
|
|
5e03890562 | ||
|
|
0aae847ae5 | ||
|
|
4c933a5611 | ||
|
|
e937cb1ded | ||
|
|
0c5ac042e7 | ||
|
|
0346b32a90 | ||
|
|
94d95fc055 | ||
|
|
db1ca4b665 | ||
|
|
d2d8d2e519 | ||
|
|
871476c3cb | ||
|
|
f9249b4476 | ||
|
|
eb7f747df1 | ||
|
|
71ede4ea3f | ||
|
|
614328f342 | ||
|
|
095c5ed961 | ||
|
|
97ecdabde9 | ||
|
|
fdaf12d16c | ||
|
|
04d1122a46 | ||
|
|
c48c6f974e | ||
|
|
7e400f7bb2 | ||
|
|
1178fe20e9 | ||
|
|
66dda37877 | ||
|
|
f6cfc33314 | ||
|
|
e2200f0690 | ||
|
|
d3e3ee7755 | ||
|
|
f94d80783d | ||
|
|
3946aa7283 | ||
|
|
b4bbbf65c6 | ||
|
|
8045a7a2ba | ||
|
|
822f898433 | ||
|
|
2c774fb207 | ||
|
|
2af1113426 | ||
|
|
d0968032f7 | ||
|
|
3f541aff4a | ||
|
|
dfea12bc53 | ||
|
|
6f8897249b | ||
|
|
5a52fe1a75 | ||
|
|
1684c1962c | ||
|
|
f31839af63 | ||
|
|
026a4d92a9 | ||
|
|
51ee4c381a | ||
|
|
3a37652d01 | ||
|
|
75816d17f1 | ||
|
|
df55f37dfe | ||
|
|
41c2d81fd3 | ||
|
|
f45129cb44 | ||
|
|
52203db760 | ||
|
|
e1afb92a28 | ||
|
|
25bd04c02a | ||
|
|
5fc645c869 | ||
|
|
07f104085a | ||
|
|
0cb87767ae | ||
|
|
0738d75a92 | ||
|
|
bbf934161b | ||
|
|
4e33665be8 | ||
|
|
0b3ad2f830 | ||
|
|
281f0d7f29 | ||
|
|
b05811587e | ||
|
|
37ddb837c4 | ||
|
|
65a4562050 | ||
|
|
7818484120 | ||
|
|
8285957678 | ||
|
|
29ce2205a1 | ||
|
|
1885478e2e | ||
|
|
f2625b0014 | ||
|
|
e1ac2f4416 | ||
|
|
4b257bcbb6 | ||
|
|
cc9aeee81a | ||
|
|
7ab95b56f1 | ||
|
|
deb6d12ff6 | ||
|
|
fcc35dd3a7 | ||
|
|
bb5d8bbcb5 | ||
|
|
70491ff29f | ||
|
|
ad66f4dfc9 | ||
|
|
24c2d13fd3 | ||
|
|
36a4104753 | ||
|
|
7d048f5963 | ||
|
|
d44d66ddf6 | ||
|
|
f66e8c41a1 | ||
|
|
4025468e27 | ||
|
|
b10256a7ca | ||
|
|
447a7feccf | ||
|
|
9c1b5238b3 | ||
|
|
ad9968ce2d | ||
|
|
13517cf9c1 | ||
|
|
3be719c805 | ||
|
|
f0066568eb | ||
|
|
9e1388728e | ||
|
|
c2774b09dd | ||
|
|
bb9bcd886a | ||
|
|
a773797594 | ||
|
|
64ccb1645d | ||
|
|
3b23d62635 | ||
|
|
40f8aae6db | ||
|
|
cfe6e4ae71 | ||
|
|
424347bf0e | ||
|
|
77521cc8d2 | ||
|
|
07e0e843e8 | ||
|
|
bc8156c3ae | ||
|
|
0ad35dc91b | ||
|
|
5b0ab7f17c | ||
|
|
1cefa48047 | ||
|
|
b0a912b8b4 | ||
|
|
057612f18f | ||
|
|
0d519162b5 | ||
|
|
e47eb0d7d4 | ||
|
|
e8657c502d | ||
|
|
4698235e59 | ||
|
|
e1d3fb8c00 | ||
|
|
0cb93ed1bb | ||
|
|
c2aa288498 | ||
|
|
825cb2d22e | ||
|
|
1309c49c09 | ||
|
|
7be77ebee5 | ||
|
|
6ce5e62c48 | ||
|
|
ef5a5ea26e | ||
|
|
19d8f11345 | ||
|
|
886b3047ac | ||
|
|
4fd4b02f2e | ||
|
|
aa8635727d | ||
|
|
8241423ba5 | ||
|
|
dd9f41547c | ||
|
|
394480e7ff | ||
|
|
83b031ca7a | ||
|
|
c747852bce | ||
|
|
ae2cdedba8 | ||
|
|
aa8bdbde5a | ||
|
|
de98e467b4 | ||
|
|
ceab66b1ce | ||
|
|
b4ce84b9d2 | ||
|
|
645aaa6f7f | ||
|
|
3affde5b28 | ||
|
|
9f6f5dac4b | ||
|
|
f24983b3cf | ||
|
|
3a749cfb44 | ||
|
|
a3dba3e82b | ||
|
|
3a14b04621 | ||
|
|
95f6246858 | ||
|
|
11ba8f05ca | ||
|
|
70168a467d | ||
|
|
6b0272d6bf | ||
|
|
83177efb9b | ||
|
|
a0bd78b358 | ||
|
|
169b4490d2 | ||
|
|
7f0289de60 | ||
|
|
45f69373a6 | ||
|
|
4cd83d2b98 | ||
|
|
11bb9231c2 | ||
|
|
44bf7ccb46 | ||
|
|
b0be03a93f | ||
|
|
254f8bca74 | ||
|
|
e69535326d | ||
|
|
2fa8e85439 | ||
|
|
274de8c6af | ||
|
|
5d7397cc69 | ||
|
|
890fa26767 | ||
|
|
2f6edc4b9b | ||
|
|
baa8af3f8e | ||
|
|
7d0aba1a24 | ||
|
|
8615516823 | ||
|
|
a9a1345a31 | ||
|
|
ab493b6028 | ||
|
|
6596012956 | ||
|
|
2512ddeab2 | ||
|
|
6139e253a0 | ||
|
|
3fd108b09a | ||
|
|
8ccc8a8d0b | ||
|
|
59e31baaee | ||
|
|
09a13da8ec | ||
|
|
55bdb380fb | ||
|
|
27d5539954 | ||
|
|
3fd021a9b3 | ||
|
|
f071c8cada | ||
|
|
6654491cc3 | ||
|
|
c04a805215 | ||
|
|
b18c3dfce3 | ||
|
|
36f4038a89 | ||
|
|
21f9f01e1d | ||
|
|
e0bff7ed14 | ||
|
|
ca3d778479 | ||
|
|
3ebaa5d215 | ||
|
|
aeb6f54979 | ||
|
|
40d385d401 | ||
|
|
c0d7222194 | ||
|
|
e014eee4e8 | ||
|
|
6a982930e2 | ||
|
|
42d48efcc2 | ||
|
|
e0ae398a2e | ||
|
|
d03b72f19b | ||
|
|
c2393dc467 | ||
|
|
58623723b1 | ||
|
|
e75c149dec | ||
|
|
dd2d9e5524 | ||
|
|
7113490cb1 | ||
|
|
0831081399 | ||
|
|
a78d82f4f1 | ||
|
|
79c1d0a806 | ||
|
|
28aa4314ba | ||
|
|
720edbe3fc | ||
|
|
7b5da45059 | ||
|
|
6ea34f13e1 | ||
|
|
45f43e2a6a | ||
|
|
c22ac4f72c | ||
|
|
b203f715e5 | ||
|
|
8d63723910 | ||
|
|
6c58ae5b95 | ||
|
|
1eda6b7237 | ||
|
|
ca05aca548 | ||
|
|
b3a4cd1a06 | ||
|
|
b843b236e4 | ||
|
|
aa31a9d3c4 | ||
|
|
ba3c484d12 | ||
|
|
0f3eb2451e | ||
|
|
e1775e4719 | ||
|
|
1d21ff9864 | ||
|
|
5d815ffde1 | ||
|
|
0dd16e6b25 | ||
|
|
3733266a60 | ||
|
|
ba3e5f6b4a | ||
|
|
758e650a28 | ||
|
|
74662a0721 | ||
|
|
905c76bd47 | ||
|
|
0223eedda5 | ||
|
|
1f6a8baaf1 | ||
|
|
5f4209e008 | ||
|
|
6ecb2b8870 | ||
|
|
51b9d20d55 | ||
|
|
b4d10e7655 | ||
|
|
365df81d5e | ||
|
|
a9b436f788 | ||
|
|
1528d543b2 | ||
|
|
37bd2406e0 | ||
|
|
c5a973e68c | ||
|
|
6c506cde86 | ||
|
|
ecfbc9515a | ||
|
|
5ad47b8372 | ||
|
|
d48ad94825 | ||
|
|
cadcf5f92d | ||
|
|
506977b102 | ||
|
|
b0b6a1baea | ||
|
|
e580e29246 | ||
|
|
af598a232b | ||
|
|
88451d26d0 | ||
|
|
bdca5ca145 | ||
|
|
99730256a2 | ||
|
|
bce5cb2ce5 | ||
|
|
aee23409da | ||
|
|
722b576eb3 | ||
|
|
91084058ae | ||
|
|
5fe65e9fc2 | ||
|
|
c3a0ab4b93 | ||
|
|
47be9c7c01 | ||
|
|
df993b0f88 | ||
|
|
94347e2537 | ||
|
|
775f6bdc5c | ||
|
|
5e5fc145cd | ||
|
|
e4c023a9fa | ||
|
|
44b08b84b0 | ||
|
|
b6a3a3f8fe | ||
|
|
584d7a265e | ||
|
|
1012449141 | ||
|
|
a6a736eeaf | ||
|
|
374405aed1 | ||
|
|
d11ce129ac | ||
|
|
ed0732c75d | ||
|
|
3d053a70ab | ||
|
|
dfb92e403e | ||
|
|
7270a7ba48 | ||
|
|
2da77c6979 | ||
|
|
fb166956ab | ||
|
|
d0ca43162d | ||
|
|
63802ecdd9 | ||
|
|
ba884aac13 | ||
|
|
f33f2d09f7 | ||
|
|
8d9bec2e01 | ||
|
|
1221ab73f9 | ||
|
|
51690bc157 | ||
|
|
94b3348d3c | ||
|
|
d455883b46 | ||
|
|
69535e5458 | ||
|
|
4d1416529d | ||
|
|
43dca8deb4 | ||
|
|
3b2173c87a | ||
|
|
89f1581e54 | ||
|
|
3b896cf3ef | ||
|
|
62a086937e | ||
|
|
76af5a444b | ||
|
|
aadde2c25b | ||
|
|
b93a0642e0 | ||
|
|
2628b34435 | ||
|
|
8da5f7a72f | ||
|
|
551e0a6287 | ||
|
|
ed57ef0297 | ||
|
|
388b5b0e58 | ||
|
|
08be9c3601 | ||
|
|
322c80e7c8 | ||
|
|
b7999ac765 | ||
|
|
4183680bc3 | ||
|
|
302e02aed8 | ||
|
|
194cc7ca81 | ||
|
|
dcd0c0b269 | ||
|
|
9e99329bef | ||
|
|
2eac8fa452 | ||
|
|
1e640ec3a6 | ||
|
|
c2a55552b2 | ||
|
|
66b0305eed | ||
|
|
20a88b6470 | ||
|
|
aef7da7bc9 | ||
|
|
c5dad991ce | ||
|
|
5ec11afc21 | ||
|
|
7454908690 | ||
|
|
d72cb8ccfc | ||
|
|
bfd014871a | ||
|
|
e9710332e7 | ||
|
|
c784fb6508 | ||
|
|
d35320472c | ||
|
|
230aefc0da | ||
|
|
8e9f140ee0 | ||
|
|
11e10647f9 | ||
|
|
fc0f635098 | ||
|
|
2cfb32ac4d | ||
|
|
41b7577f08 | ||
|
|
e135d536c5 | ||
|
|
fdf7036d5e | ||
|
|
c8a160d14a | ||
|
|
8d529b960a | ||
|
|
528745fc88 | ||
|
|
de22b3c4bf | ||
|
|
e3f0d2a751 | ||
|
|
4fbf9d67a5 | ||
|
|
949ffd146a | ||
|
|
3e348e9768 | ||
|
|
7b691455c2 | ||
|
|
83056457d6 | ||
|
|
5546ce4031 | ||
|
|
b0b04bd2f3 | ||
|
|
d11657a581 | ||
|
|
3f58ea9258 | ||
|
|
5f146e10a1 | ||
|
|
4a8fb475be | ||
|
|
2a9ed19512 | ||
|
|
c51ba73199 | ||
|
|
e156516bde | ||
|
|
a113fc93c8 | ||
|
|
8d61029676 | ||
|
|
b80057dfb5 | ||
|
|
d7637f93f9 | ||
|
|
a37f10fc87 | ||
|
|
0f11384ede | ||
|
|
95b78ecaa9 | ||
|
|
abb0a2659a | ||
|
|
bb2b60dcfc | ||
|
|
b56313472b | ||
|
|
882385858d | ||
|
|
bf813f85f2 | ||
|
|
b3eab5ffea | ||
|
|
c500fd5761 | ||
|
|
faffee8909 | ||
|
|
8b5499204d | ||
|
|
cda00ed176 | ||
|
|
d8cac9f60b | ||
|
|
4e003a2125 | ||
|
|
36385a114d | ||
|
|
c3adeda3cc | ||
|
|
f08b594733 | ||
|
|
79edf2ca7d | ||
|
|
64d6293bb0 | ||
|
|
49fbae4fb1 | ||
|
|
fecb70b85e | ||
|
|
162a6b44f6 | ||
|
|
dd1e09bd9d | ||
|
|
3095b65ac3 | ||
|
|
4a1c828bf8 | ||
|
|
0e0b146b87 | ||
|
|
efb7390357 | ||
|
|
012029063c | ||
|
|
a388d2b8d4 | ||
|
|
2b44e9427c | ||
|
|
b408cc1694 | ||
|
|
0e5489797d | ||
|
|
3d17a7038a | ||
|
|
941577ab3b | ||
|
|
d69950e5c6 | ||
|
|
f26d595e5d | ||
|
|
66f6ec2919 | ||
|
|
5765b6e062 | ||
|
|
73bcb09232 | ||
|
|
3029e6e849 | ||
|
|
b7a184a0d8 | ||
|
|
67466aa27f | ||
|
|
c774cd6bb0 | ||
|
|
6ea6e85f77 | ||
|
|
3476fb481e | ||
|
|
7dd560f07f | ||
|
|
3127dbb3df | ||
|
|
2681fc7eb0 | ||
|
|
be2d4719c2 | ||
|
|
8eaa2f0dc8 | ||
|
|
44e443678d | ||
|
|
dc97f3f540 | ||
|
|
d0dc67bb96 | ||
|
|
20f6979d8f | ||
|
|
8a2f7b7a8c | ||
|
|
ddff762782 | ||
|
|
60f52a4b93 | ||
|
|
fc4357f642 | ||
|
|
ab6f37aa52 | ||
|
|
256ab50f1a | ||
|
|
1fdc5c1183 | ||
|
|
a7e4d3b738 | ||
|
|
035a42baa4 | ||
|
|
d2ac01c862 | ||
|
|
6e7ee4bdab | ||
|
|
1636c201f4 | ||
|
|
c6390f2a2a | ||
|
|
1e304e6307 | ||
|
|
2fef4bc844 | ||
|
|
57d2ded3e2 | ||
|
|
30e5e88ff1 | ||
|
|
b3cc8b200f | ||
|
|
9f4d1ffd43 | ||
|
|
b23ea508d5 | ||
|
|
4d66ee1935 | ||
|
|
eda5d2c623 | ||
|
|
f20924db24 | ||
|
|
82a1892d34 | ||
|
|
2127288437 |
2
.github/workflows/PR-wip-checks.yaml
vendored
2
.github/workflows/PR-wip-checks.yaml
vendored
@@ -15,7 +15,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
pr_wip_check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
name: WIP Check
|
||||
steps:
|
||||
- name: WIP Check
|
||||
|
||||
2
.github/workflows/add-issues-to-project.yaml
vendored
2
.github/workflows/add-issues-to-project.yaml
vendored
@@ -17,7 +17,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
add-new-issues-to-backlog:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Install hub
|
||||
run: |
|
||||
|
||||
2
.github/workflows/add-pr-sizing-label.yaml
vendored
2
.github/workflows/add-pr-sizing-label.yaml
vendored
@@ -18,7 +18,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
add-pr-size-label:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
67
.github/workflows/basic-ci-amd64.yaml
vendored
67
.github/workflows/basic-ci-amd64.yaml
vendored
@@ -168,37 +168,6 @@ jobs:
|
||||
- name: Run runk tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/runk/gha-run.sh run
|
||||
run-stdio:
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/stdio/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/stdio/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run stdio tests
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/stdio/gha-run.sh
|
||||
|
||||
run-tracing:
|
||||
strategy:
|
||||
@@ -291,6 +260,8 @@ jobs:
|
||||
vmm:
|
||||
- clh
|
||||
- qemu
|
||||
- dragonball
|
||||
- cloud-hypervisor
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
@@ -334,9 +305,6 @@ jobs:
|
||||
- dragonball
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
# TODO: enable with clh when https://github.com/kata-containers/kata-containers/issues/9852 is fixed
|
||||
exclude:
|
||||
- vmm: clh
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
@@ -379,3 +347,34 @@ jobs:
|
||||
name: nerdctl-tests-garm-${{ matrix.vmm }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
run-kata-agent-apis:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/functional/kata-agent-apis/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/functional/kata-agent-apis/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run kata agent api tests with agent-ctl
|
||||
run: bash tests/functional/kata-agent-apis/gha-run.sh run
|
||||
|
||||
5
.github/workflows/build-checks.yaml
vendored
5
.github/workflows/build-checks.yaml
vendored
@@ -57,7 +57,6 @@ jobs:
|
||||
sudo chown -R $USER:$USER $GITHUB_WORKSPACE $HOME
|
||||
sudo rm -rf $GITHUB_WORKSPACE/* && echo "GITHUB_WORKSPACE removed" || { sleep 10 && sudo rm -rf $GITHUB_WORKSPACE/*; }
|
||||
sudo rm -f /tmp/kata_hybrid* # Sometime we got leftover from test_setup_hvsock_failed()
|
||||
if: ${{ inputs.instance != 'ubuntu-20.04' }}
|
||||
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@v4
|
||||
@@ -95,10 +94,10 @@ jobs:
|
||||
echo "LIBSECCOMP_LINK_TYPE=static" >> $GITHUB_ENV
|
||||
echo "LIBSECCOMP_LIB_PATH=${libseccomp_install_dir}/lib" >> $GITHUB_ENV
|
||||
- name: Install protobuf-compiler
|
||||
if: ${{ matrix.command != 'make vendor' && (matrix.component == 'agent' || matrix.component == 'runk' || matrix.component == 'genpolicy') }}
|
||||
if: ${{ matrix.command != 'make vendor' && (matrix.component == 'agent' || matrix.component == 'runk' || matrix.component == 'genpolicy' || matrix.component == 'agent-ctl') }}
|
||||
run: sudo apt-get -y install protobuf-compiler
|
||||
- name: Install clang
|
||||
if: ${{ matrix.command == 'make check' && matrix.component == 'agent' }}
|
||||
if: ${{ matrix.command == 'make check' && (matrix.component == 'agent' || matrix.component == 'agent-ctl') }}
|
||||
run: sudo apt-get -y install clang
|
||||
- name: Setup XDG_RUNTIME_DIR for the `runtime` tests
|
||||
if: ${{ matrix.command != 'make vendor' && matrix.command != 'make check' && matrix.component == 'runtime' }}
|
||||
|
||||
@@ -23,7 +23,12 @@ on:
|
||||
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
@@ -50,9 +55,9 @@ jobs:
|
||||
- stratovirt
|
||||
- rootfs-image
|
||||
- rootfs-image-confidential
|
||||
- rootfs-image-mariner
|
||||
- rootfs-initrd
|
||||
- rootfs-initrd-confidential
|
||||
- rootfs-initrd-mariner
|
||||
- runk
|
||||
- shim-v2
|
||||
- trace-forwarder
|
||||
@@ -62,6 +67,8 @@ jobs:
|
||||
exclude:
|
||||
- asset: cloud-hypervisor-glibc
|
||||
stage: release
|
||||
env:
|
||||
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
@@ -83,6 +90,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
id: build
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
@@ -98,6 +106,34 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
|
||||
- name: Parse OCI image name and digest
|
||||
id: parse-oci-segments
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
run: |
|
||||
oci_image="$(<"build/${{ matrix.asset }}-oci-image")"
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- uses: oras-project/setup-oras@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
version: "1.2.0"
|
||||
|
||||
# for pushing attestations to the registry
|
||||
- uses: docker/login-action@v3
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions/attest-build-provenance@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
subject-name: ${{ steps.parse-oci-segments.outputs.oci-name }}
|
||||
subject-digest: ${{ steps.parse-oci-segments.outputs.oci-digest }}
|
||||
push-to-registry: true
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
if: ${{ matrix.stage != 'release' || (matrix.asset != 'agent' && matrix.asset != 'coco-guest-components' && matrix.asset != 'pause-image') }}
|
||||
uses: actions/upload-artifact@v4
|
||||
@@ -108,7 +144,7 @@ jobs:
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build-asset
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
@@ -24,6 +24,11 @@ on:
|
||||
jobs:
|
||||
build-asset:
|
||||
runs-on: s390x
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
attestations: write
|
||||
strategy:
|
||||
matrix:
|
||||
asset:
|
||||
@@ -39,10 +44,9 @@ jobs:
|
||||
- rootfs-initrd-confidential
|
||||
- shim-v2
|
||||
- virtiofsd
|
||||
env:
|
||||
PERFORM_ATTESTATION: ${{ matrix.asset == 'agent' && inputs.push-to-registry == 'yes' && 'yes' || 'no' }}
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
|
||||
- name: Login to Kata Containers quay.io
|
||||
if: ${{ inputs.push-to-registry == 'yes' }}
|
||||
uses: docker/login-action@v3
|
||||
@@ -63,6 +67,7 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
id: build
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
@@ -78,6 +83,29 @@ jobs:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
|
||||
|
||||
- name: Parse OCI image name and digest
|
||||
id: parse-oci-segments
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
run: |
|
||||
oci_image="$(<"build/${{ matrix.asset }}-oci-image")"
|
||||
echo "oci-name=${oci_image%@*}" >> "$GITHUB_OUTPUT"
|
||||
echo "oci-digest=${oci_image#*@}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
# for pushing attestations to the registry
|
||||
- uses: docker/login-action@v3
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- uses: actions/attest-build-provenance@v1
|
||||
if: ${{ env.PERFORM_ATTESTATION == 'yes' }}
|
||||
with:
|
||||
subject-name: ${{ steps.parse-oci-segments.outputs.oci-name }}
|
||||
subject-digest: ${{ steps.parse-oci-segments.outputs.oci-digest }}
|
||||
push-to-registry: true
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
if: ${{ inputs.stage != 'release' || (matrix.asset != 'agent' && matrix.asset != 'coco-guest-components' && matrix.asset != 'pause-image') }}
|
||||
uses: actions/upload-artifact@v4
|
||||
@@ -91,11 +119,14 @@ jobs:
|
||||
runs-on: s390x
|
||||
needs: build-asset
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
@@ -136,9 +167,6 @@ jobs:
|
||||
runs-on: s390x
|
||||
needs: [build-asset, build-asset-boot-image-se]
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
2
.github/workflows/cargo-deny-runner.yaml
vendored
2
.github/workflows/cargo-deny-runner.yaml
vendored
@@ -13,7 +13,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
cargo-deny-runner:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
|
||||
19
.github/workflows/ci-coco-stability.yaml
vendored
Normal file
19
.github/workflows/ci-coco-stability.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
name: Kata Containers CoCo Stability Tests Weekly
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
kata-containers-ci-on-push:
|
||||
uses: ./.github/workflows/ci-weekly.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.sha }}
|
||||
pr-number: "weekly"
|
||||
tag: ${{ github.sha }}-weekly
|
||||
target-branch: ${{ github.ref_name }}
|
||||
secrets: inherit
|
||||
11
.github/workflows/ci-on-push.yaml
vendored
11
.github/workflows/ci-on-push.yaml
vendored
@@ -19,12 +19,21 @@ concurrency:
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
kata-containers-ci-on-push:
|
||||
skipper:
|
||||
if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
|
||||
uses: ./.github/workflows/gatekeeper-skipper.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.event.pull_request.head.sha }}
|
||||
target-branch: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
kata-containers-ci-on-push:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_build != 'yes' }}
|
||||
uses: ./.github/workflows/ci.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.event.pull_request.head.sha }}
|
||||
pr-number: ${{ github.event.pull_request.number }}
|
||||
tag: ${{ github.event.pull_request.number }}-${{ github.event.pull_request.head.sha }}
|
||||
target-branch: ${{ github.event.pull_request.base.ref }}
|
||||
skip-test: ${{ needs.skipper.outputs.skip_test }}
|
||||
secrets: inherit
|
||||
|
||||
86
.github/workflows/ci-weekly.yaml
vendored
Normal file
86
.github/workflows/ci-weekly.yaml
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
name: Run the CoCo Kata Containers Stability CI
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
commit-hash:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-amd64:
|
||||
uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
publish-kata-deploy-payload-amd64:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-amd64.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
build-and-publish-tee-confidential-unencrypted-image:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Docker build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
|
||||
push: true
|
||||
context: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/
|
||||
platforms: linux/amd64
|
||||
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
|
||||
|
||||
run-kata-coco-stability-tests:
|
||||
needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
|
||||
uses: ./.github/workflows/run-kata-coco-stability-tests.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
79
.github/workflows/ci.yaml
vendored
79
.github/workflows/ci.yaml
vendored
@@ -15,6 +15,10 @@ on:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
skip-test:
|
||||
required: false
|
||||
type: string
|
||||
default: no
|
||||
|
||||
jobs:
|
||||
build-kata-static-tarball-amd64:
|
||||
@@ -36,6 +40,25 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
build-kata-static-tarball-arm64:
|
||||
uses: ./.github/workflows/build-kata-static-tarball-arm64.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
publish-kata-deploy-payload-arm64:
|
||||
needs: build-kata-static-tarball-arm64
|
||||
uses: ./.github/workflows/publish-kata-deploy-payload-arm64.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-arm64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
build-kata-static-tarball-s390x:
|
||||
uses: ./.github/workflows/build-kata-static-tarball-s390x.yaml
|
||||
with:
|
||||
@@ -76,7 +99,7 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
build-and-publish-tee-confidential-unencrypted-image:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
@@ -112,35 +135,8 @@ jobs:
|
||||
platforms: linux/amd64, linux/s390x
|
||||
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
|
||||
|
||||
run-kata-deploy-tests-on-aks:
|
||||
# TODO: Reenable when Azure CI budget is secured (see #9939).
|
||||
if: false
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-kata-deploy-tests-on-aks.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
run-kata-deploy-tests-on-garm:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-kata-deploy-tests-on-garm.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
run-kata-monitor-tests:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/run-kata-monitor-tests.yaml
|
||||
with:
|
||||
@@ -149,6 +145,7 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
run-k8s-tests-on-aks:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
|
||||
with:
|
||||
@@ -161,21 +158,10 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
run-k8s-tests-on-garm:
|
||||
run-k8s-tests-on-amd64:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-k8s-tests-on-garm.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
secrets: inherit
|
||||
|
||||
run-k8s-tests-with-crio-on-garm:
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-k8s-tests-with-crio-on-garm.yaml
|
||||
uses: ./.github/workflows/run-k8s-tests-on-amd64.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
@@ -186,6 +172,7 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
run-kata-coco-tests:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
|
||||
uses: ./.github/workflows/run-kata-coco-tests.yaml
|
||||
with:
|
||||
@@ -198,6 +185,7 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
run-k8s-tests-on-zvsi:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: [publish-kata-deploy-payload-s390x, build-and-publish-tee-confidential-unencrypted-image]
|
||||
uses: ./.github/workflows/run-k8s-tests-on-zvsi.yaml
|
||||
with:
|
||||
@@ -210,6 +198,7 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
run-k8s-tests-on-ppc64le:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: publish-kata-deploy-payload-ppc64le
|
||||
uses: ./.github/workflows/run-k8s-tests-on-ppc64le.yaml
|
||||
with:
|
||||
@@ -221,6 +210,7 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
run-metrics-tests:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/run-metrics.yaml
|
||||
with:
|
||||
@@ -229,6 +219,7 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
run-basic-amd64-tests:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/basic-ci-amd64.yaml
|
||||
with:
|
||||
@@ -237,6 +228,7 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
run-cri-containerd-tests-s390x:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-s390x
|
||||
uses: ./.github/workflows/run-cri-containerd-tests-s390x.yaml
|
||||
with:
|
||||
@@ -245,6 +237,7 @@ jobs:
|
||||
target-branch: ${{ inputs.target-branch }}
|
||||
|
||||
run-cri-containerd-tests-ppc64le:
|
||||
if: ${{ inputs.skip-test != 'yes' }}
|
||||
needs: build-kata-static-tarball-ppc64le
|
||||
uses: ./.github/workflows/run-cri-containerd-tests-ppc64le.yaml
|
||||
with:
|
||||
|
||||
6
.github/workflows/cleanup-resources.yaml
vendored
6
.github/workflows/cleanup-resources.yaml
vendored
@@ -1,12 +1,12 @@
|
||||
name: Cleanup dangling Azure resources
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 */6 * * *"
|
||||
- cron: "0 0 * * *"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
cleanup-resources:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
@@ -27,5 +27,5 @@ jobs:
|
||||
- name: Cleanup resources
|
||||
env:
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
CLEANUP_AFTER_HOURS: 6 # Clean up resources created more than this many hours ago.
|
||||
CLEANUP_AFTER_HOURS: 24 # Clean up resources created more than this many hours ago.
|
||||
run: python3 tests/cleanup_resources.py
|
||||
|
||||
7
.github/workflows/commit-message-check.yaml
vendored
7
.github/workflows/commit-message-check.yaml
vendored
@@ -18,7 +18,7 @@ env:
|
||||
|
||||
jobs:
|
||||
commit-message-check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
name: Commit Message Check
|
||||
@@ -34,7 +34,10 @@ jobs:
|
||||
#
|
||||
# Revert "<original-subject-line>"
|
||||
#
|
||||
filter_out_pattern: '^Revert "'
|
||||
# The format of a re-re-vert commit as follows:
|
||||
#
|
||||
# Reapply "<original-subject-line>"
|
||||
filter_out_pattern: '^Revert "|^Reapply "'
|
||||
|
||||
- name: DCO Check
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
|
||||
2
.github/workflows/docs-url-alive-check.yaml
vendored
2
.github/workflows/docs-url-alive-check.yaml
vendored
@@ -5,7 +5,7 @@ on:
|
||||
name: Docs URL Alive Check
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-22.04
|
||||
# don't run this action on forks
|
||||
if: github.repository_owner == 'kata-containers'
|
||||
env:
|
||||
|
||||
52
.github/workflows/gatekeeper-skipper.yaml
vendored
Normal file
52
.github/workflows/gatekeeper-skipper.yaml
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
name: Skipper
|
||||
|
||||
# This workflow sets various "skip_*" output values that can be used to
|
||||
# determine what workflows/jobs are expected to be executed. Sample usage:
|
||||
#
|
||||
# skipper:
|
||||
# uses: ./.github/workflows/gatekeeper-skipper.yaml
|
||||
# with:
|
||||
# commit-hash: ${{ github.event.pull_request.head.sha }}
|
||||
# target-branch: ${{ github.event.pull_request.base.ref }}
|
||||
#
|
||||
# your-workflow:
|
||||
# needs: skipper
|
||||
# if: ${{ needs.skipper.outputs.skip_build != 'yes' }}
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
commit-hash:
|
||||
required: true
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
outputs:
|
||||
skip_build:
|
||||
value: ${{ jobs.skipper.outputs.skip_build }}
|
||||
skip_test:
|
||||
value: ${{ jobs.skipper.outputs.skip_test }}
|
||||
skip_static:
|
||||
value: ${{ jobs.skipper.outputs.skip_static }}
|
||||
|
||||
|
||||
jobs:
|
||||
skipper:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
skip_build: ${{ steps.skipper.outputs.skip_build }}
|
||||
skip_test: ${{ steps.skipper.outputs.skip_test }}
|
||||
skip_static: ${{ steps.skipper.outputs.skip_static }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
- id: skipper
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
run: |
|
||||
python3 tools/testing/gatekeeper/skips.py | tee -a "$GITHUB_OUTPUT"
|
||||
shell: /usr/bin/bash -x {0}
|
||||
44
.github/workflows/gatekeeper.yaml
vendored
Normal file
44
.github/workflows/gatekeeper.yaml
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
name: Gatekeeper
|
||||
|
||||
# Gatekeeper uses the "skips.py" to determine which job names/regexps are
|
||||
# required for given PR and waits for them to either complete or fail
|
||||
# reporting the status.
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
- reopened
|
||||
- labeled
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
gatekeeper:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0
|
||||
- id: gatekeeper
|
||||
env:
|
||||
TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COMMIT_HASH: ${{ github.event.pull_request.head.sha }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
#!/usr/bin/env bash -x
|
||||
mapfile -t lines < <(python3 tools/testing/gatekeeper/skips.py -t)
|
||||
export REQUIRED_JOBS="${lines[0]}"
|
||||
export REQUIRED_REGEXPS="${lines[1]}"
|
||||
export REQUIRED_LABELS="${lines[2]}"
|
||||
echo "REQUIRED_JOBS: $REQUIRED_JOBS"
|
||||
echo "REQUIRED_REGEXPS: $REQUIRED_REGEXPS"
|
||||
echo "REQUIRED_LABELS: $REQUIRED_LABELS"
|
||||
python3 tools/testing/gatekeeper/jobs.py
|
||||
exit $?
|
||||
shell: /usr/bin/bash -x {0}
|
||||
@@ -12,7 +12,7 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
kata-deploy-runtime-classes-check:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
@@ -13,7 +13,7 @@ on:
|
||||
|
||||
jobs:
|
||||
move-linked-issues-to-in-progress:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Install hub
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
|
||||
2
.github/workflows/payload-after-push.yaml
vendored
2
.github/workflows/payload-after-push.yaml
vendored
@@ -86,7 +86,7 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
publish-manifest:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [publish-kata-deploy-payload-amd64, publish-kata-deploy-payload-arm64, publish-kata-deploy-payload-s390x, publish-kata-deploy-payload-ppc64le]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
|
||||
@@ -24,7 +24,7 @@ on:
|
||||
|
||||
jobs:
|
||||
kata-payload:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
|
||||
@@ -26,9 +26,6 @@ jobs:
|
||||
kata-payload:
|
||||
runs-on: s390x
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
2
.github/workflows/release-amd64.yaml
vendored
2
.github/workflows/release-amd64.yaml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
|
||||
kata-deploy:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
|
||||
3
.github/workflows/release-s390x.yaml
vendored
3
.github/workflows/release-s390x.yaml
vendored
@@ -18,9 +18,6 @@ jobs:
|
||||
needs: build-kata-static-tarball-s390x
|
||||
runs-on: s390x
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
|
||||
- name: Login to Kata Containers docker.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
|
||||
14
.github/workflows/release.yaml
vendored
14
.github/workflows/release.yaml
vendored
@@ -4,7 +4,7 @@ on:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -46,7 +46,7 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
publish-multi-arch-images:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -78,7 +78,7 @@ jobs:
|
||||
|
||||
upload-multi-arch-static-tarball:
|
||||
needs: [build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -138,7 +138,7 @@ jobs:
|
||||
|
||||
upload-versions-yaml:
|
||||
needs: release
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -151,7 +151,7 @@ jobs:
|
||||
|
||||
upload-cargo-vendored-tarball:
|
||||
needs: release
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -164,7 +164,7 @@ jobs:
|
||||
|
||||
upload-libseccomp-tarball:
|
||||
needs: release
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -177,7 +177,7 @@ jobs:
|
||||
|
||||
publish-release:
|
||||
needs: [ build-and-push-assets-amd64, build-and-push-assets-arm64, build-and-push-assets-s390x, build-and-push-assets-ppc64le, publish-multi-arch-images, upload-multi-arch-static-tarball, upload-versions-yaml, upload-cargo-vendored-tarball, upload-libseccomp-tarball ]
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
@@ -29,9 +29,6 @@ jobs:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: ${HOME}/script/pre_action.sh ubuntu-2204
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
@@ -57,7 +54,3 @@ jobs:
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
|
||||
- name: Take a post-action for self-hosted runner
|
||||
if: always()
|
||||
run: ${HOME}/script/post_action.sh ubuntu-2204
|
||||
|
||||
6
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
6
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
@@ -47,14 +47,17 @@ jobs:
|
||||
vmm: clh
|
||||
instance-type: small
|
||||
genpolicy-pull-method: oci-distribution
|
||||
auto-generate-policy: yes
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
instance-type: small
|
||||
genpolicy-pull-method: containerd
|
||||
auto-generate-policy: yes
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
instance-type: normal
|
||||
runs-on: ubuntu-latest
|
||||
auto-generate-policy: yes
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@@ -66,6 +69,7 @@ jobs:
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
|
||||
GENPOLICY_PULL_METHOD: ${{ matrix.genpolicy-pull-method }}
|
||||
AUTO_GENERATE_POLICY: ${{ matrix.auto-generate-policy }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
name: CI | Run kubernetes tests on GARM
|
||||
name: CI | Run kubernetes tests on amd64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
@@ -23,7 +23,7 @@ on:
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
run-k8s-tests-amd64:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -33,19 +33,18 @@ jobs:
|
||||
- fc #firecracker
|
||||
- qemu
|
||||
- cloud-hypervisor
|
||||
container_runtime:
|
||||
- containerd
|
||||
snapshotter:
|
||||
- devmapper
|
||||
k8s:
|
||||
- k3s
|
||||
instance:
|
||||
- garm-ubuntu-2004
|
||||
- garm-ubuntu-2004-smaller
|
||||
include:
|
||||
- instance: garm-ubuntu-2004
|
||||
instance-type: normal
|
||||
- instance: garm-ubuntu-2004-smaller
|
||||
instance-type: small
|
||||
runs-on: ${{ matrix.instance }}
|
||||
- vmm: qemu
|
||||
container_runtime: crio
|
||||
snapshotter: ""
|
||||
k8s: k0s
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
@@ -53,9 +52,10 @@ jobs:
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
KUBERNETES_EXTRA_PARAMS: ${{ matrix.container_runtime != 'crio' && '' || '--cri-socket remote:unix:///var/run/crio/crio.sock --kubelet-extra-args --cgroup-driver="systemd"' }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
|
||||
K8S_TEST_HOST_TYPE: all
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -68,15 +68,20 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Configure CRI-O
|
||||
if: matrix.container_runtime == 'crio'
|
||||
run: bash tests/integration/kubernetes/gha-run.sh setup-crio
|
||||
|
||||
- name: Deploy ${{ matrix.k8s }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
|
||||
|
||||
- name: Configure the ${{ matrix.snapshotter }} snapshotter
|
||||
if: matrix.snapshotter != ''
|
||||
run: bash tests/integration/kubernetes/gha-run.sh configure-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-garm
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
@@ -93,10 +98,10 @@ jobs:
|
||||
- name: Archive artifacts ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: k8s-tests-garm-${{ matrix.vmm }}-${{ matrix.snapshotter }}-${{ matrix.k8s }}-${{ matrix.instance }}-${{ inputs.tag }}
|
||||
name: k8s-tests-${{ matrix.vmm }}-${{ matrix.snapshotter }}-${{ matrix.k8s }}-${{ matrix.instance }}-${{ inputs.tag }}
|
||||
path: /tmp/artifacts
|
||||
retention-days: 1
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup
|
||||
63
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
63
.github/workflows/run-k8s-tests-on-zvsi.yaml
vendored
@@ -28,8 +28,13 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
snapshotter:
|
||||
- overlayfs
|
||||
- devmapper
|
||||
- nydus
|
||||
vmm:
|
||||
- qemu
|
||||
- qemu-runtime-rs
|
||||
- qemu-coco-dev
|
||||
k8s:
|
||||
- k3s
|
||||
include:
|
||||
@@ -37,12 +42,23 @@ jobs:
|
||||
pull-type: default
|
||||
using-nfd: true
|
||||
deploy-cmd: configure-snapshotter
|
||||
vmm: qemu
|
||||
- snapshotter: nydus
|
||||
pull-type: guest-pull
|
||||
using-nfd: false
|
||||
deploy-cmd: deploy-snapshotter
|
||||
exclude:
|
||||
- snapshotter: overlayfs
|
||||
vmm: qemu
|
||||
- snapshotter: overlayfs
|
||||
vmm: qemu-coco-dev
|
||||
- snapshotter: devmapper
|
||||
vmm: qemu-runtime-rs
|
||||
- snapshotter: devmapper
|
||||
vmm: qemu-coco-dev
|
||||
- snapshotter: nydus
|
||||
vmm: qemu
|
||||
- snapshotter: nydus
|
||||
vmm: qemu-runtime-rs
|
||||
runs-on: s390x-large
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
@@ -52,7 +68,7 @@ jobs:
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HOST_OS: "ubuntu"
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: "k3s"
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: ${{ matrix.using-nfd }}
|
||||
@@ -60,10 +76,6 @@ jobs:
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
steps:
|
||||
- name: Take a pre-action for self-hosted runner
|
||||
run: |
|
||||
"${HOME}/script/pre_action.sh" ubuntu-2204
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
@@ -75,22 +87,55 @@ jobs:
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Set SNAPSHOTTER to empty if overlayfs
|
||||
run: echo "SNAPSHOTTER=" >> $GITHUB_ENV
|
||||
if: ${{ matrix.snapshotter == 'overlayfs' }}
|
||||
|
||||
- name: Set KBS and KBS_INGRESS if qemu-coco-dev
|
||||
run: |
|
||||
echo "KBS=true" >> $GITHUB_ENV
|
||||
echo "KBS_INGRESS=nodeport" >> $GITHUB_ENV
|
||||
if: ${{ matrix.vmm == 'qemu-coco-dev' }}
|
||||
|
||||
- name: Deploy ${{ matrix.k8s }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
|
||||
|
||||
# qemu-runtime-rs only works with overlayfs
|
||||
# See: https://github.com/kata-containers/kata-containers/issues/10066
|
||||
- name: Configure the ${{ matrix.snapshotter }} snapshotter
|
||||
run: bash tests/integration/kubernetes/gha-run.sh ${{ matrix.deploy-cmd }}
|
||||
if: ${{ matrix.snapshotter != 'overlayfs' }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-zvsi
|
||||
|
||||
- name: Uninstall previous `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh uninstall-kbs-client
|
||||
if: ${{ matrix.vmm == 'qemu-coco-dev' }}
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
if: ${{ matrix.vmm == 'qemu-coco-dev' }}
|
||||
|
||||
- name: Install `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
if: ${{ matrix.vmm == 'qemu-coco-dev' }}
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 60
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Take a post-action
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-zvsi
|
||||
|
||||
- name: Delete CoCo KBS
|
||||
if: always()
|
||||
run: |
|
||||
bash tests/integration/kubernetes/gha-run.sh cleanup-zvsi || true
|
||||
"${HOME}/script/post_action.sh" ubuntu-2204
|
||||
if [ "${KBS}" == "true" ]; then
|
||||
bash tests/integration/kubernetes/gha-run.sh delete-coco-kbs
|
||||
fi
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
name: CI | Run kubernetes tests, using CRI-O, on GARM
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
run-k8s-tests:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu
|
||||
k8s:
|
||||
- k0s
|
||||
instance:
|
||||
- garm-ubuntu-2204
|
||||
- garm-ubuntu-2204-smaller
|
||||
include:
|
||||
- instance: garm-ubuntu-2204
|
||||
instance-type: normal
|
||||
- instance: garm-ubuntu-2204-smaller
|
||||
instance-type: small
|
||||
- k8s: k0s
|
||||
k8s-extra-params: '--cri-socket remote:unix:///var/run/crio/crio.sock --kubelet-extra-args --cgroup-driver="systemd"'
|
||||
runs-on: ${{ matrix.instance }}
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBERNETES: ${{ matrix.k8s }}
|
||||
KUBERNETES_EXTRA_PARAMS: ${{ matrix.k8s-extra-params }}
|
||||
USING_NFD: "false"
|
||||
K8S_TEST_HOST_TYPE: ${{ matrix.instance-type }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Configure CRI-O
|
||||
run: bash tests/integration/kubernetes/gha-run.sh setup-crio
|
||||
|
||||
- name: Deploy ${{ matrix.k8s }}
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-k8s
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-garm
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-garm
|
||||
113
.github/workflows/run-kata-coco-stability-tests.yaml
vendored
Normal file
113
.github/workflows/run-kata-coco-stability-tests.yaml
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
name: CI | Run Kata CoCo k8s Stability Tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
registry:
|
||||
required: true
|
||||
type: string
|
||||
repo:
|
||||
required: true
|
||||
type: string
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
target-branch:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
jobs:
|
||||
# Generate jobs for testing CoCo on non-TEE environments
|
||||
run-stability-k8s-tests-coco-nontee:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm:
|
||||
- qemu-coco-dev
|
||||
snapshotter:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HOST_OS: ${{ matrix.host_os }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
# Some tests rely on that variable to run (or not)
|
||||
KBS: "true"
|
||||
# Set the KBS ingress handler (empty string disables handling)
|
||||
KBS_INGRESS: "aks"
|
||||
KUBERNETES: "vanilla"
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
SNAPSHOTTER: ${{ matrix.snapshotter }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Rebase atop of the latest target branch
|
||||
run: |
|
||||
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target-branch }}
|
||||
|
||||
- name: Download Azure CLI
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
|
||||
|
||||
- name: Log into the Azure account
|
||||
run: bash tests/integration/kubernetes/gha-run.sh login-azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Install `kubectl`
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kubectl
|
||||
|
||||
- name: Download credentials for the Kubernetes CLI to use them
|
||||
run: bash tests/integration/kubernetes/gha-run.sh get-cluster-credentials
|
||||
|
||||
- name: Deploy Snapshotter
|
||||
timeout-minutes: 5
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-snapshotter
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-aks
|
||||
|
||||
- name: Deploy CoCo KBS
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-coco-kbs
|
||||
|
||||
- name: Install `kbs-client`
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Run stability tests
|
||||
timeout-minutes: 300
|
||||
run: bash tests/stability/gha-stability-run.sh run-tests
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
|
||||
12
.github/workflows/run-kata-coco-tests.yaml
vendored
12
.github/workflows/run-kata-coco-tests.yaml
vendored
@@ -49,6 +49,8 @@ jobs:
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
ITA_KEY: ${{ secrets.ITA_KEY }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -82,7 +84,7 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 50
|
||||
timeout-minutes: 100
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
@@ -122,6 +124,7 @@ jobs:
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -143,7 +146,7 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
timeout-minutes: 50
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
@@ -181,6 +184,7 @@ jobs:
|
||||
PULL_TYPE: ${{ matrix.pull-type }}
|
||||
AUTHENTICATED_IMAGE_USER: ${{ secrets.AUTHENTICATED_IMAGE_USER }}
|
||||
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
|
||||
AUTO_GENERATE_POLICY: "yes"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@@ -214,7 +218,7 @@ jobs:
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kbs-client
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
timeout-minutes: 50
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
@@ -240,7 +244,7 @@ jobs:
|
||||
- nydus
|
||||
pull-type:
|
||||
- guest-pull
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
|
||||
@@ -37,7 +37,7 @@ jobs:
|
||||
include:
|
||||
- host_os: cbl-mariner
|
||||
vmm: clh
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
|
||||
@@ -15,8 +15,6 @@ on:
|
||||
|
||||
jobs:
|
||||
run-monitor:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -33,7 +31,7 @@ jobs:
|
||||
# TODO: enable with containerd when https://github.com/kata-containers/kata-containers/issues/9761 is fixed
|
||||
- container_engine: containerd
|
||||
vmm: qemu
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINER_ENGINE: ${{ matrix.container_engine }}
|
||||
#CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
|
||||
4
.github/workflows/run-runk-tests.yaml
vendored
4
.github/workflows/run-runk-tests.yaml
vendored
@@ -15,9 +15,7 @@ on:
|
||||
|
||||
jobs:
|
||||
run-runk:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2204-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
CONTAINERD_VERSION: lts
|
||||
steps:
|
||||
|
||||
2
.github/workflows/stale.yaml
vendored
2
.github/workflows/stale.yaml
vendored
@@ -6,7 +6,7 @@ on:
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/stale@v9
|
||||
with:
|
||||
|
||||
10
.github/workflows/static-checks-self-hosted.yaml
vendored
10
.github/workflows/static-checks-self-hosted.yaml
vendored
@@ -12,8 +12,16 @@ concurrency:
|
||||
|
||||
name: Static checks self-hosted
|
||||
jobs:
|
||||
build-checks:
|
||||
skipper:
|
||||
if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
|
||||
uses: ./.github/workflows/gatekeeper-skipper.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.event.pull_request.head.sha }}
|
||||
target-branch: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
build-checks:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
||||
24
.github/workflows/static-checks.yaml
vendored
24
.github/workflows/static-checks.yaml
vendored
@@ -12,8 +12,16 @@ concurrency:
|
||||
|
||||
name: Static checks
|
||||
jobs:
|
||||
skipper:
|
||||
uses: ./.github/workflows/gatekeeper-skipper.yaml
|
||||
with:
|
||||
commit-hash: ${{ github.event.pull_request.head.sha }}
|
||||
target-branch: ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
check-kernel-config-version:
|
||||
runs-on: ubuntu-latest
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout the code
|
||||
uses: actions/checkout@v4
|
||||
@@ -35,14 +43,16 @@ jobs:
|
||||
fi
|
||||
|
||||
build-checks:
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
uses: ./.github/workflows/build-checks.yaml
|
||||
with:
|
||||
instance: ubuntu-20.04
|
||||
instance: ubuntu-22.04
|
||||
|
||||
build-checks-depending-on-kvm:
|
||||
# TODO: Transition to free runner (see #9940).
|
||||
if: false
|
||||
runs-on: garm-ubuntu-2004-smaller
|
||||
runs-on: ubuntu-22.04
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -79,7 +89,9 @@ jobs:
|
||||
RUST_BACKTRACE: "1"
|
||||
|
||||
static-checks:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-22.04
|
||||
needs: skipper
|
||||
if: ${{ needs.skipper.outputs.skip_static != 'yes' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
||||
14
ci/README.md
14
ci/README.md
@@ -55,14 +55,14 @@ of a PR review), the following tests will be executed:
|
||||
- Run the following tests:
|
||||
- Tests depending on the generated tarball
|
||||
- Metrics (runs on bare-metal)
|
||||
- `docker` (runs on Azure small instances)
|
||||
- `nerdctl` (runs on Azure small instances)
|
||||
- `kata-monitor` (runs on Azure small instances)
|
||||
- `cri-containerd` (runs on Azure small instances)
|
||||
- `nydus` (runs on Azure small instances)
|
||||
- `vfio` (runs on Azure normal instances)
|
||||
- `docker` (runs on cost free runners)
|
||||
- `nerdctl` (runs on cost free runners)
|
||||
- `kata-monitor` (runs on cost free runners)
|
||||
- `cri-containerd` (runs on cost free runners)
|
||||
- `nydus` (runs on cost free runners)
|
||||
- `vfio` (runs on cost free runners)
|
||||
- Tests depending on the generated kata-deploy payload
|
||||
- kata-deploy (runs on Azure small instances)
|
||||
- kata-deploy (runs on cost free runners)
|
||||
- Tests are performed using different "Kubernetes flavors", such as k0s, k3s, rke2, and Azure Kubernetes Service (AKS).
|
||||
- Kubernetes (runs in Azure small and medium instances depending on what's required by each test, and on TEE bare-metal machines)
|
||||
- Tests are performed with different runtime engines, such as CRI-O and containerd.
|
||||
|
||||
@@ -23,11 +23,11 @@ workdir="$(mktemp -d --tmpdir build-libseccomp.XXXXX)"
|
||||
# Variables for libseccomp
|
||||
libseccomp_version="${LIBSECCOMP_VERSION:-""}"
|
||||
if [ -z "${libseccomp_version}" ]; then
|
||||
libseccomp_version=$(get_from_kata_deps ".externals.libseccomp.version")
|
||||
libseccomp_version=$(get_from_kata_deps ".externals.libseccomp.version")
|
||||
fi
|
||||
libseccomp_url="${LIBSECCOMP_URL:-""}"
|
||||
if [ -z "${libseccomp_url}" ]; then
|
||||
libseccomp_url=$(get_from_kata_deps ".externals.libseccomp.url")
|
||||
libseccomp_url=$(get_from_kata_deps ".externals.libseccomp.url")
|
||||
fi
|
||||
libseccomp_tarball="libseccomp-${libseccomp_version}.tar.gz"
|
||||
libseccomp_tarball_url="${libseccomp_url}/releases/download/v${libseccomp_version}/${libseccomp_tarball}"
|
||||
@@ -36,11 +36,11 @@ cflags="-O2"
|
||||
# Variables for gperf
|
||||
gperf_version="${GPERF_VERSION:-""}"
|
||||
if [ -z "${gperf_version}" ]; then
|
||||
gperf_version=$(get_from_kata_deps ".externals.gperf.version")
|
||||
gperf_version=$(get_from_kata_deps ".externals.gperf.version")
|
||||
fi
|
||||
gperf_url="${GPERF_URL:-""}"
|
||||
if [ -z "${gperf_url}" ]; then
|
||||
gperf_url=$(get_from_kata_deps ".externals.gperf.url")
|
||||
gperf_url=$(get_from_kata_deps ".externals.gperf.url")
|
||||
fi
|
||||
gperf_tarball="gperf-${gperf_version}.tar.gz"
|
||||
gperf_tarball_url="${gperf_url}/${gperf_tarball}"
|
||||
@@ -48,64 +48,64 @@ gperf_tarball_url="${gperf_url}/${gperf_tarball}"
|
||||
# We need to build the libseccomp library from sources to create a static library for the musl libc.
|
||||
# However, ppc64le and s390x have no musl targets in Rust. Hence, we do not set cflags for the musl libc.
|
||||
if ([ "${arch}" != "ppc64le" ] && [ "${arch}" != "s390x" ]); then
|
||||
# Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2
|
||||
cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2"
|
||||
# Set FORTIFY_SOURCE=1 because the musl-libc does not have some functions about FORTIFY_SOURCE=2
|
||||
cflags="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 -O2"
|
||||
fi
|
||||
|
||||
die() {
|
||||
msg="$*"
|
||||
echo "[Error] ${msg}" >&2
|
||||
exit 1
|
||||
msg="$*"
|
||||
echo "[Error] ${msg}" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
finish() {
|
||||
rm -rf "${workdir}"
|
||||
rm -rf "${workdir}"
|
||||
}
|
||||
|
||||
trap finish EXIT
|
||||
|
||||
build_and_install_gperf() {
|
||||
echo "Build and install gperf version ${gperf_version}"
|
||||
mkdir -p "${gperf_install_dir}"
|
||||
curl -sLO "${gperf_tarball_url}"
|
||||
tar -xf "${gperf_tarball}"
|
||||
pushd "gperf-${gperf_version}"
|
||||
# Unset $CC for configure, we will always use native for gperf
|
||||
CC= ./configure --prefix="${gperf_install_dir}"
|
||||
make
|
||||
make install
|
||||
export PATH=$PATH:"${gperf_install_dir}"/bin
|
||||
popd
|
||||
echo "Gperf installed successfully"
|
||||
echo "Build and install gperf version ${gperf_version}"
|
||||
mkdir -p "${gperf_install_dir}"
|
||||
curl -sLO "${gperf_tarball_url}"
|
||||
tar -xf "${gperf_tarball}"
|
||||
pushd "gperf-${gperf_version}"
|
||||
# Unset $CC for configure, we will always use native for gperf
|
||||
CC= ./configure --prefix="${gperf_install_dir}"
|
||||
make
|
||||
make install
|
||||
export PATH=$PATH:"${gperf_install_dir}"/bin
|
||||
popd
|
||||
echo "Gperf installed successfully"
|
||||
}
|
||||
|
||||
build_and_install_libseccomp() {
|
||||
echo "Build and install libseccomp version ${libseccomp_version}"
|
||||
mkdir -p "${libseccomp_install_dir}"
|
||||
curl -sLO "${libseccomp_tarball_url}"
|
||||
tar -xf "${libseccomp_tarball}"
|
||||
pushd "libseccomp-${libseccomp_version}"
|
||||
[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
|
||||
make
|
||||
make install
|
||||
popd
|
||||
echo "Libseccomp installed successfully"
|
||||
echo "Build and install libseccomp version ${libseccomp_version}"
|
||||
mkdir -p "${libseccomp_install_dir}"
|
||||
curl -sLO "${libseccomp_tarball_url}"
|
||||
tar -xf "${libseccomp_tarball}"
|
||||
pushd "libseccomp-${libseccomp_version}"
|
||||
[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
|
||||
make
|
||||
make install
|
||||
popd
|
||||
echo "Libseccomp installed successfully"
|
||||
}
|
||||
|
||||
main() {
|
||||
local libseccomp_install_dir="${1:-}"
|
||||
local gperf_install_dir="${2:-}"
|
||||
local libseccomp_install_dir="${1:-}"
|
||||
local gperf_install_dir="${2:-}"
|
||||
|
||||
if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then
|
||||
die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
|
||||
fi
|
||||
if [ -z "${libseccomp_install_dir}" ] || [ -z "${gperf_install_dir}" ]; then
|
||||
die "Usage: ${0} <libseccomp-install-dir> <gperf-install-dir>"
|
||||
fi
|
||||
|
||||
pushd "$workdir"
|
||||
# gperf is required for building the libseccomp.
|
||||
build_and_install_gperf
|
||||
build_and_install_libseccomp
|
||||
popd
|
||||
pushd "$workdir"
|
||||
# gperf is required for building the libseccomp.
|
||||
build_and_install_gperf
|
||||
build_and_install_libseccomp
|
||||
popd
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
||||
@@ -14,20 +14,38 @@ die() {
|
||||
exit 1
|
||||
}
|
||||
|
||||
function verify_yq_exists() {
|
||||
local yq_path=$1
|
||||
local yq_version=$2
|
||||
local expected="yq (https://github.com/mikefarah/yq/) version $yq_version"
|
||||
if [ -x "${yq_path}" ] && [ "$($yq_path --version)"X == "$expected"X ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Install the yq yaml query package from the mikefarah github repo
|
||||
# Install via binary download, as we may not have golang installed at this point
|
||||
function install_yq() {
|
||||
local yq_pkg="github.com/mikefarah/yq"
|
||||
local yq_version=v4.40.7
|
||||
local precmd=""
|
||||
local yq_path=""
|
||||
INSTALL_IN_GOPATH=${INSTALL_IN_GOPATH:-true}
|
||||
|
||||
if [ "${INSTALL_IN_GOPATH}" == "true" ];then
|
||||
if [ "${INSTALL_IN_GOPATH}" == "true" ]; then
|
||||
GOPATH=${GOPATH:-${HOME}/go}
|
||||
mkdir -p "${GOPATH}/bin"
|
||||
local yq_path="${GOPATH}/bin/yq"
|
||||
yq_path="${GOPATH}/bin/yq"
|
||||
else
|
||||
yq_path="/usr/local/bin/yq"
|
||||
fi
|
||||
if verify_yq_exists "$yq_path" "$yq_version"; then
|
||||
echo "yq is already installed in correct version"
|
||||
return
|
||||
fi
|
||||
if [ "${yq_path}" == "/usr/local/bin/yq" ]; then
|
||||
# Check if we need sudo to install yq
|
||||
if [ ! -w "/usr/local/bin" ]; then
|
||||
# Check if we have sudo privileges
|
||||
@@ -38,7 +56,6 @@ function install_yq() {
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
[ -x "${yq_path}" ] && [ "`${yq_path} --version`"X == "yq (https://github.com/mikefarah/yq/) version ${yq_version}"X ] && return
|
||||
|
||||
read -r -a sysInfo <<< "$(uname -sm)"
|
||||
|
||||
|
||||
@@ -16,9 +16,12 @@ REPO="quay.io/kata-containers/kata-deploy-ci"
|
||||
TAGS=$(skopeo list-tags "docker://$REPO")
|
||||
# Only amd64
|
||||
TAGS=$(echo "$TAGS" | jq '.Tags' | jq "map(select(endswith(\"$ARCH\")))" | jq -r '.[]')
|
||||
# Tags since $GOOD
|
||||
TAGS=$(echo "$TAGS" | sed -n -e "/$GOOD/,$$p")
|
||||
# Tags up to $BAD
|
||||
[ -n "$BAD" ] && TAGS=$(echo "$TAGS" | sed "/$BAD/q")
|
||||
# Sort by git
|
||||
SORTED=""
|
||||
[ -n "$BAD" ] && LOG_ARGS="$GOOD~1..$BAD" || LOG_ARGS="$GOOD~1.."
|
||||
for TAG in $(git log --merges --pretty=format:%H --reverse $LOG_ARGS); do
|
||||
[[ "$TAGS" =~ "$TAG" ]] && SORTED+="
|
||||
kata-containers-$TAG-$ARCH"
|
||||
done
|
||||
# Comma separated tags with repo
|
||||
echo "$TAGS" | sed -e "s@^@$REPO:@" | paste -s -d, -
|
||||
echo "$SORTED" | tail -n +2 | sed -e "s@^@$REPO:@" | paste -s -d, -
|
||||
|
||||
@@ -13,16 +13,11 @@ set -e
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
script_dir="$(dirname $0)"
|
||||
script_dir="$(realpath $(dirname $0))"
|
||||
webhook_dir="${script_dir}/../../../tools/testing/kata-webhook"
|
||||
source "${script_dir}/../lib.sh"
|
||||
KATA_RUNTIME=${KATA_RUNTIME:-kata-ci}
|
||||
|
||||
info "Creates the kata-webhook ConfigMap"
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" \
|
||||
envsubst < "${script_dir}/deployments/configmap_kata-webhook.yaml.in" \
|
||||
| oc apply -f -
|
||||
|
||||
pushd "${webhook_dir}" >/dev/null
|
||||
# Build and deploy the webhook
|
||||
#
|
||||
@@ -30,6 +25,12 @@ info "Builds the kata-webhook"
|
||||
./create-certs.sh
|
||||
info "Deploys the kata-webhook"
|
||||
oc apply -f deploy/
|
||||
|
||||
info "Override our KATA_RUNTIME ConfigMap"
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" \
|
||||
envsubst < "${script_dir}/deployments/configmap_kata-webhook.yaml.in" \
|
||||
| oc apply -f -
|
||||
|
||||
# Check the webhook was deployed and is working.
|
||||
RUNTIME_CLASS="${KATA_RUNTIME}" ./webhook-check.sh
|
||||
popd >/dev/null
|
||||
|
||||
@@ -499,19 +499,6 @@ If you do not want to install the respective QEMU version, the configuration fil
|
||||
|
||||
See the [static-build script for QEMU](../tools/packaging/static-build/qemu/build-static-qemu.sh) for a reference on how to get, setup, configure and build QEMU for Kata.
|
||||
|
||||
### Build a custom QEMU for aarch64/arm64 - REQUIRED
|
||||
> **Note:**
|
||||
>
|
||||
> - You should only do this step if you are on aarch64/arm64.
|
||||
> - You should include [Eric Auger's latest PCDIMM/NVDIMM patches](https://patchwork.kernel.org/cover/10647305/) which are
|
||||
> under upstream review for supporting NVDIMM on aarch64.
|
||||
>
|
||||
You could build the custom `qemu-system-aarch64` as required with the following command:
|
||||
```bash
|
||||
$ git clone https://github.com/kata-containers/tests.git
|
||||
$ script -fec 'sudo -E tests/.ci/install_qemu.sh'
|
||||
```
|
||||
|
||||
## Build `virtiofsd`
|
||||
|
||||
When using the file system type virtio-fs (default), `virtiofsd` is required
|
||||
|
||||
@@ -28,10 +28,22 @@ Bug fixes are released as part of `MINOR` or `MAJOR` releases only. `PATCH` is a
|
||||
|
||||
## Release Process
|
||||
|
||||
### Bump the `VERSION` file
|
||||
### Bump the `VERSION` and `Chart.yaml` file
|
||||
|
||||
When the `kata-containers/kata-containers` repository is ready for a new release,
|
||||
first create a PR to set the release in the `VERSION` file and have it merged.
|
||||
first create a PR to set the release in the [`VERSION`](./../VERSION) file and update the
|
||||
`version` and `appVersion` in the
|
||||
[`Chart.yaml`](./../tools/packaging/kata-deploy/helm-chart/kata-deploy/Chart.yaml) file and
|
||||
have it merged.
|
||||
|
||||
### Lock the `main` branch
|
||||
|
||||
In order to prevent any PRs getting merged during the release process, and slowing the release
|
||||
process down, by impacting the payload caches, we have recently trailed setting the `main`
|
||||
branch to read only whilst the release action runs.
|
||||
|
||||
> [!NOTE]
|
||||
> Admin permission is needed to complete this task.
|
||||
|
||||
### Check GitHub Actions
|
||||
|
||||
@@ -40,6 +52,9 @@ We make use of [GitHub actions](https://github.com/features/actions) in the
|
||||
file from the `kata-containers/kata-containers` repository to build and upload
|
||||
release artifacts.
|
||||
|
||||
> [!NOTE]
|
||||
> Write permissions to trigger the action.
|
||||
|
||||
The action is manually triggered and is responsible for generating a new
|
||||
release (including a new tag), pushing those to the
|
||||
`kata-containers/kata-containers` repository. The new release is initially
|
||||
@@ -59,6 +74,11 @@ If for some reason you need to cancel the workflow or re-run it entirely, go fir
|
||||
to the [Release page](https://github.com/kata-containers/kata-containers/releases) and
|
||||
delete the draft release from the previous run.
|
||||
|
||||
### Unlock the `main` branch
|
||||
|
||||
After the release process has concluded, either unlock the `main` branch, or ask
|
||||
an admin to do it.
|
||||
|
||||
### Improve the release notes
|
||||
|
||||
Release notes are auto-generated by the GitHub CLI tool used as part of our
|
||||
|
||||
@@ -50,7 +50,7 @@ We provide `Dragonball` Sandbox to enable built-in VMM by integrating VMM's func
|
||||
#### How To Support Async
|
||||
The kata-runtime is controlled by TOKIO_RUNTIME_WORKER_THREADS to run the OS thread, which is 2 threads by default. For TTRPC and container-related threads run in the `tokio` thread in a unified manner, and related dependencies need to be switched to Async, such as Timer, File, Netlink, etc. With the help of Async, we can easily support no-block I/O and timer. Currently, we only utilize Async for kata-runtime. The built-in VMM keeps the OS thread because it can ensure that the threads are controllable.
|
||||
|
||||
**For N tokio worker threads and M containers**
|
||||
**For N `tokio` worker threads and M containers**
|
||||
|
||||
- Sync runtime(both OS thread and `tokio` task are OS thread but without `tokio` worker thread) OS thread number: 4 + 12*M
|
||||
- Async runtime(only OS thread is OS thread) OS thread number: 2 + N
|
||||
@@ -103,7 +103,6 @@ In our case, there will be a variety of resources, and every resource has severa
|
||||
| `Cgroup V2` | | Stage 2 | 🚧 |
|
||||
| Hypervisor | `Dragonball` | Stage 1 | 🚧 |
|
||||
| | QEMU | Stage 2 | 🚫 |
|
||||
| | ACRN | Stage 3 | 🚫 |
|
||||
| | Cloud Hypervisor | Stage 3 | 🚫 |
|
||||
| | Firecracker | Stage 3 | 🚫 |
|
||||
|
||||
@@ -166,4 +165,4 @@ In our case, there will be a variety of resources, and every resource has severa
|
||||
|
||||
- What is the security boundary for the monolithic / "Built-in VMM" case?
|
||||
|
||||
It has the security boundary of virtualization. More details will be provided in next stage.
|
||||
It has the security boundary of virtualization. More details will be provided in next stage.
|
||||
|
||||
@@ -113,6 +113,13 @@ Next, the kata-agent's RPC module will handle the create container request which
|
||||
> **Notes:**
|
||||
> In this flow, `ImageService.pull_image()` parses the image metadata, looking for either the `io.kubernetes.cri.container-type: sandbox` or `io.kubernetes.cri-o.ContainerType: sandbox` (CRI-IO case) annotation, then it never calls the `image-rs.pull_image()` because the pause image is expected to already be inside the guest's filesystem, so instead `ImageService.unpack_pause_image()` is called.
|
||||
|
||||
## Using guest image pull with `nerdctl`
|
||||
|
||||
When running a workload, add the `--label io.kubernetes.cri.image-name=<image>` option e.g.:
|
||||
```sh
|
||||
nerdctl run --runtime io.containerd.kata.v2 --snapshotter nydus --label io.kubernetes.cri.image-name=docker.io/library/busybox:latest --rm docker.io/library/busybox:latest uname -r
|
||||
```
|
||||
|
||||
References:
|
||||
[1] [[RFC] Image management proposal for hosting sharing and peer pods](https://github.com/confidential-containers/confidential-containers/issues/137)
|
||||
[2] https://github.com/containerd/containerd/blob/main/docs/content-flow.md
|
||||
|
||||
@@ -60,7 +60,7 @@ So in guest, container rootfs=overlay(`lowerdir=rafs`, `upperdir=snapshotdir/fs`
|
||||
|
||||
> how to transfer the `rafs` info from `nydus-snapshotter` to the Kata Containers Containerd v2 shim?
|
||||
|
||||
By default, when creating `OCI` image container, `nydus-snapshotter` will return [`struct` Mount slice](https://github.com/containerd/containerd/blob/main/mount/mount.go#L21) below to containerd and containerd use them to mount rootfs
|
||||
By default, when creating `OCI` image container, `nydus-snapshotter` will return [`struct` Mount slice](https://github.com/containerd/containerd/blob/main/core/mount/mount.go#L30) below to containerd and containerd use them to mount rootfs
|
||||
|
||||
```
|
||||
[
|
||||
@@ -72,7 +72,7 @@ By default, when creating `OCI` image container, `nydus-snapshotter` will return
|
||||
]
|
||||
```
|
||||
|
||||
Then, we can append `rafs` info into `Options`, but if do this, containerd will mount failed, as containerd can not identify `rafs` info. Here, we can refer to [containerd mount helper](https://github.com/containerd/containerd/blob/main/mount/mount_linux.go#L42) and provide a binary called `nydus-overlayfs`. The `Mount` slice which `nydus-snapshotter` returned becomes
|
||||
Then, we can append `rafs` info into `Options`, but if do this, containerd will mount failed, as containerd can not identify `rafs` info. Here, we can refer to [containerd mount helper](https://github.com/containerd/containerd/blob/main/core/mount/mount_linux.go#L81) and provide a binary called `nydus-overlayfs`. The `Mount` slice which `nydus-snapshotter` returned becomes
|
||||
|
||||
```
|
||||
[
|
||||
|
||||
@@ -20,12 +20,6 @@
|
||||
for the VM rootfs. Refer to the following guide for additional configuration
|
||||
steps:
|
||||
- [Setup Kata containers with `firecracker`](how-to-use-kata-containers-with-firecracker.md)
|
||||
- `ACRN`
|
||||
|
||||
While `qemu` , `cloud-hypervisor` and `firecracker` work out of the box with installation of Kata,
|
||||
some additional configuration is needed in case of `ACRN`.
|
||||
Refer to the following guides for additional configuration steps:
|
||||
- [Kata Containers with ACRN Hypervisor](how-to-use-kata-containers-with-acrn.md)
|
||||
|
||||
## Confidential Containers Policy
|
||||
|
||||
@@ -52,4 +46,4 @@
|
||||
- [How to use EROFS to build rootfs in Kata Containers](how-to-use-erofs-build-rootfs.md)
|
||||
- [How to run Kata Containers with kinds of Block Volumes](how-to-run-kata-containers-with-kinds-of-Block-Volumes.md)
|
||||
- [How to use the Kata Agent Policy](how-to-use-the-kata-agent-policy.md)
|
||||
- [How to pull images in the guest](how-to-pull-images-in-guest-with-kata.md)
|
||||
- [How to pull images in the guest](how-to-pull-images-in-guest-with-kata.md)
|
||||
|
||||
@@ -137,7 +137,7 @@ snapshotter = "nydus"
|
||||
$ sudo systemctl restart containerd
|
||||
```
|
||||
|
||||
## Verification
|
||||
## Run pod in kata containers with pulling image in guest
|
||||
|
||||
To verify pulling images in a guest VM, please refer to the following commands:
|
||||
|
||||
@@ -148,8 +148,6 @@ apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: busybox
|
||||
annotations:
|
||||
io.containerd.cri.runtime-handler: kata-qemu
|
||||
spec:
|
||||
runtimeClassName: kata-qemu
|
||||
containers:
|
||||
@@ -163,9 +161,6 @@ NAME READY STATUS RESTARTS AGE
|
||||
busybox 1/1 Running 0 10s
|
||||
```
|
||||
|
||||
> **Notes:**
|
||||
> The `CRI Runtime Specific Snapshotter` is still an experimental feature. To pull images in the guest under the specific kata runtime (such as `kata-qemu`), we need to add the following annotation in metadata to each pod yaml: `io.containerd.cri.runtime-handler: kata-qemu`. By adding the annotation, we can ensure that the feature works as expected.
|
||||
|
||||
2. Verify that the pod's images have been successfully downloaded in the guest.
|
||||
If images intended for deployment are deleted prior to deploying with `nydus snapshotter`, the root filesystems required for the pod's images (including the pause image and the container image) should not be present on the host.
|
||||
```bash
|
||||
@@ -173,4 +168,145 @@ $ sandbox_id=$(ps -ef| grep containerd-shim-kata-v2| grep -oP '(?<=-id\s)[a-f0-9
|
||||
$ rootfs_count=$(find /run/kata-containers/shared/sandboxes/$sandbox_id -name rootfs -type d| grep -o "rootfs" | wc -l)
|
||||
$ echo $rootfs_count
|
||||
0
|
||||
```
|
||||
|
||||
## Run pod in kata containers with pulling large image in guest
|
||||
|
||||
Currently, the image pulled in the guest will be downloaded and unpacked in the `/run/kata-containers/image` directory. However, by default, in rootfs-confidential image, systemd allocates 50% of the available physical RAM to the `/run` directory using a `tmpfs` filesystem. As we all know, memory is valuable, especially for confidential containers. This means that if we run a kata container with the default configuration (where the default memory assigned for a VM is 2048 MiB), `/run` would be allocated around 1024 MiB. Consequently, we can only pull images up to 1024 MiB in the guest. So we can use a block volume from the host and use `dm-crypt` and `dm-integrity` to encrypt the block volume in the guest, providing a secure place to store downloaded container images.
|
||||
|
||||
### Create block volume with k8s
|
||||
|
||||
There are a lot of CSI Plugins that support block volumes: AWS EBS, Azure Disk, Open-Local and so on. But as an example, we use Local Persistent Volumes to use local disks as block storage with k8s cluster.
|
||||
|
||||
1. Create an empty disk image and attach the image to a loop device, such as `/dev/loop0`
|
||||
```bash
|
||||
$ loop_file="/tmp/trusted-image-storage.img"
|
||||
$ sudo dd if=/dev/zero of=$loop_file bs=1M count=2500
|
||||
$ sudo losetup /dev/loop0 $loop_file
|
||||
```
|
||||
|
||||
2. Create a Storage Class
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local-storage
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
```
|
||||
|
||||
3. Create Persistent Volume
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: trusted-block-pv
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
volumeMode: Block
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: local-storage
|
||||
local:
|
||||
path: /dev/loop0
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- NODE_NAME
|
||||
```
|
||||
|
||||
4. Create Persistent Volume Claim
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: trusted-pvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
volumeMode: Block
|
||||
storageClassName: local-storage
|
||||
```
|
||||
|
||||
5. Run a pod with pulling large image in guest
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: large-image-pod
|
||||
spec:
|
||||
runtimeClassName: kata-qemu
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- NODE_NAME
|
||||
volumes:
|
||||
- name: trusted-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: trusted-pvc
|
||||
containers:
|
||||
- name: app-container
|
||||
image: quay.io/confidential-containers/test-images:largeimage
|
||||
command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- sleep 6000
|
||||
volumeDevices:
|
||||
- devicePath: /dev/trusted_store
|
||||
name: trusted-image-storage
|
||||
```
|
||||
|
||||
5. Docker image size
|
||||
```bash
|
||||
docker image ls|grep "largeimage"
|
||||
quay.io/confidential-containers/test-images largeimage 00bc1f6c893a 4 months ago 2.15GB
|
||||
```
|
||||
|
||||
6. Check whether the device is encrypted and used by entering into the VM
|
||||
```bash
|
||||
$ lsblk --fs
|
||||
NAME FSTYPE LABEL UUID FSAVAIL FSUSE% MOUNTPOINT
|
||||
sda
|
||||
└─encrypted_disk_GsLDt
|
||||
178M 87% /run/kata-containers/image
|
||||
|
||||
$ cryptsetup status encrypted_disk_GsLDt
|
||||
/dev/mapper/encrypted_disk_GsLDt is active and is in use.
|
||||
type: LUKS2
|
||||
cipher: aes-xts-plain64
|
||||
keysize: 512 bits
|
||||
key location: keyring
|
||||
device: /dev/sda
|
||||
sector size: 4096
|
||||
offset: 32768 sectors
|
||||
size: 5087232 sectors
|
||||
mode: read/write
|
||||
|
||||
$ mount|grep "encrypted_disk_GsLDt"
|
||||
/dev/mapper/encrypted_disk_GsLDt on /run/kata-containers/image type ext4
|
||||
|
||||
$ du -h --max-depth=1 /run/kata-containers/image/
|
||||
16K /run/kata-containers/image/lost+found
|
||||
2.1G /run/kata-containers/image/layers
|
||||
60K /run/kata-containers/image/overlay
|
||||
2.1G /run/kata-containers/image/
|
||||
|
||||
$ free -m
|
||||
total used free shared buff/cache available
|
||||
Mem: 1989 52 43 0 1893 1904
|
||||
Swap: 0 0 0
|
||||
```
|
||||
@@ -88,19 +88,19 @@ However, if any of these components are absent, they must be built from the
|
||||
```
|
||||
$ # Assume that the project is cloned at $GOPATH/src/github.com/kata-containers
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers
|
||||
$ sudo -E PATH=$PATH make kernel-confidential-tarball
|
||||
$ sudo -E PATH=$PATH make rootfs-initrd-confidential-tarball
|
||||
$ make rootfs-initrd-confidential-tarball
|
||||
$ tar -tf build/kata-static-kernel-confidential.tar.xz | grep vmlinuz
|
||||
./opt/kata/share/kata-containers/vmlinuz-confidential.container
|
||||
./opt/kata/share/kata-containers/vmlinuz-6.1.62-121-confidential
|
||||
./opt/kata/share/kata-containers/vmlinuz-6.7-136-confidential
|
||||
$ kernel_version=6.7-136
|
||||
$ tar -tf build/kata-static-rootfs-initrd-confidential.tar.xz | grep initrd
|
||||
./opt/kata/share/kata-containers/kata-containers-initrd-confidential.img
|
||||
./opt/kata/share/kata-containers/kata-ubuntu-20.04-confidential.initrd
|
||||
$ mkdir artifacts
|
||||
$ tar -xvf build/kata-static-kernel-confidential.tar.xz -C artifacts ./opt/kata/share/kata-containers/vmlinuz-6.1.62-121-confidential
|
||||
$ tar -xvf build/kata-static-kernel-confidential.tar.xz -C artifacts ./opt/kata/share/kata-containers/vmlinuz-${kernel_version}-confidential
|
||||
$ tar -xvf build/kata-static-rootfs-initrd-confidential.tar.xz -C artifacts ./opt/kata/share/kata-containers/kata-ubuntu-20.04-confidential.initrd
|
||||
$ ls artifacts/opt/kata/share/kata-containers/
|
||||
kata-ubuntu-20.04-confidential.initrd vmlinuz-6.1.62-121-confidential
|
||||
kata-ubuntu-20.04-confidential.initrd vmlinuz-${kernel_version}-confidential
|
||||
```
|
||||
|
||||
3. Secure Image Generation Tool
|
||||
@@ -114,7 +114,7 @@ Here is an example of a native build from the source:
|
||||
|
||||
```
|
||||
$ sudo apt-get install gcc libglib2.0-dev libssl-dev libcurl4-openssl-dev
|
||||
$ tool_version=v2.25.0
|
||||
$ tool_version=v2.34.0
|
||||
$ git clone -b $tool_version https://github.com/ibm-s390-linux/s390-tools.git
|
||||
$ pushd s390-tools/genprotimg && make && sudo make install && popd
|
||||
$ rm -rf s390-tools
|
||||
@@ -125,14 +125,15 @@ $ rm -rf s390-tools
|
||||
A host key document is a public key employed for encrypting a secure image, which is
|
||||
subsequently decrypted using a corresponding private key during the VM bootstrap process.
|
||||
You can obtain the host key document either through IBM's designated
|
||||
[Resource Link](http://www.ibm.com/servers/resourcelink) or by requesting it from the
|
||||
[Resource Link](http://www.ibm.com/servers/resourcelink)(you need to log in to access it) or by requesting it from the
|
||||
cloud provider responsible for the IBM Z and LinuxONE instances where your workloads are intended to run.
|
||||
|
||||
To ensure security, it is essential to verify the authenticity and integrity of the host key document
|
||||
belonging to an authentic IBM machine. To achieve this, please additionally obtain the following
|
||||
certificates from the Resource Link:
|
||||
To ensure security, it is essential to verify the authenticity and integrity of the host
|
||||
key document belonging to an authentic IBM machine. To achieve this, please additionally
|
||||
obtain the following files from the Resource Link:
|
||||
|
||||
- IBM Z signing key certificate
|
||||
- IBM Z host key certificate revocation list
|
||||
- `DigiCert` intermediate CA certificate
|
||||
|
||||
These files will be used for verification during secure image construction in the next section.
|
||||
@@ -143,10 +144,11 @@ Assuming you have placed a host key document at `$HOME/host-key-document`:
|
||||
|
||||
- Host key document as `HKD-0000-0000000.crt`
|
||||
|
||||
and two certificates at `$HOME/certificates`:
|
||||
and two certificates and one revocation list at `$HOME/certificates`:
|
||||
|
||||
- IBM Z signing-key certificate as `ibm-z-host-key-signing-gen2.crt`
|
||||
- `DigiCert` intermediate CA certificate as `DigiCertCA.crt`
|
||||
- IBM Z signing-key certificate as `ibm-z-host-key-signing.crt`
|
||||
- IBM Z host key certificate revocation list as `ibm-z-host-key-gen2.crl`
|
||||
|
||||
you can construct a secure image using the following procedure:
|
||||
|
||||
@@ -154,7 +156,7 @@ you can construct a secure image using the following procedure:
|
||||
$ # Change a directory to the project root
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers
|
||||
$ host_key_document=$HOME/host-key-document/HKD-0000-0000000.crt
|
||||
$ kernel_image=artifacts/opt/kata/share/kata-containers/vmlinuz-6.1.62-121-confidential
|
||||
$ kernel_image=artifacts/opt/kata/share/kata-containers/vmlinuz-${kernel_version}-confidential
|
||||
$ initrd_image=artifacts/opt/kata/share/kata-containers/kata-ubuntu-20.04-confidential.initrd
|
||||
$ echo "panic=1 scsi_mod.scan=none swiotlb=262144 agent.log=debug" > parmfile
|
||||
$ genprotimg --host-key-document=${host_key_document} \
|
||||
@@ -173,11 +175,12 @@ In production, the image construction should incorporate the verification
|
||||
in the following manner:
|
||||
|
||||
```
|
||||
$ signcert=$HOME/certificates/ibm-z-host-key-signing-gen2.crt
|
||||
$ cacert=$HOME/certificates/DigiCertCA.crt
|
||||
$ signcert=$HOME/certificates/ibm-z-host-key-signing.crt
|
||||
$ crl=$HOME/certificates/ibm-z-host-key-gen2.crl
|
||||
$ genprotimg --host-key-document=${host_key_document} \
|
||||
--output=kata-containers-se.img --image=${kernel_image} --ramdisk=${initrd_image} \
|
||||
--cert=${cacert} --cert=${signcert} --parmfile=parmfile
|
||||
--cert=${cacert} --cert=${signcert} --crl=${crl} --parmfile=parmfile
|
||||
```
|
||||
|
||||
The steps with no verification, including the dependencies for the kernel and initrd,
|
||||
@@ -186,20 +189,20 @@ can be easily accomplished by issuing the following make target:
|
||||
```
|
||||
$ cd $GOPATH/src/github.com/kata-containers/kata-containers
|
||||
$ mkdir hkd_dir && cp $host_key_document hkd_dir
|
||||
$ sudo -E PATH=$PATH HKD_PATH=hkd_dir SE_KERNEL_PARAMS="agent.log=debug" \
|
||||
make boot-image-se-tarball
|
||||
$ HKD_PATH=hkd_dir SE_KERNEL_PARAMS="agent.log=debug" make boot-image-se-tarball
|
||||
$ ls build/kata-static-boot-image-se.tar.xz
|
||||
build/kata-static-boot-image-se.tar.xz
|
||||
```
|
||||
|
||||
`SE_KERNEL_PARAMS` could be used to add any extra kernel parameters. If no additional kernel configuration is required, this can be omitted.
|
||||
|
||||
In production, you could build an image by running the same command, but with two
|
||||
additional environment variables for key verification:
|
||||
In production, you could build an image by running the same command, but with the
|
||||
following environment variables for key verification:
|
||||
|
||||
```
|
||||
$ export SIGNING_KEY_CERT_PATH=$HOME/certificates/ibm-z-host-key-signing.crt
|
||||
$ export SIGNING_KEY_CERT_PATH=$HOME/certificates/ibm-z-host-key-signing-gen2.crt
|
||||
$ export INTERMEDIATE_CA_CERT_PATH=$HOME/certificates/DigiCertCA.crt
|
||||
$ export HOST_KEY_CRL_PATH=$HOME/certificates/ibm-z-host-key-gen2.crl
|
||||
```
|
||||
|
||||
To build an image on the `x86_64` platform, set the following environment variables together with the variables above before `make boot-image-se-tarball`:
|
||||
@@ -213,8 +216,9 @@ CROSS_BUILD=true TARGET_ARCH=s390x ARCH=s390x
|
||||
There still remains an opportunity to fine-tune the configuration file:
|
||||
|
||||
```
|
||||
$ export PATH=$PATH:/opt/kata/bin
|
||||
$ runtime_config_path=$(kata-runtime kata-env --json | jq -r '.Runtime.Config.Path')
|
||||
$ cp ${runtime_config_path} ${runtime_config_path}.old
|
||||
$ sudo cp ${runtime_config_path} ${runtime_config_path}.old
|
||||
$ # Make the following adjustment to the original config file
|
||||
$ diff ${runtime_config_path}.old ${runtime_config_path}
|
||||
16,17c16,17
|
||||
@@ -258,6 +262,13 @@ $ sudo $hypervisor_command -machine confidential-guest-support=pv0 \
|
||||
$ # Press ctrl + a + x to exit
|
||||
```
|
||||
|
||||
Unless the host key document is legitimate, you will encounter the following error message:
|
||||
|
||||
```
|
||||
qemu-system-s390x: KVM PV command 2 (KVM_PV_SET_SEC_PARMS) failed: header rc 108 rrc 5 IOCTL rc: -22
|
||||
Protected boot has failed: 0xa02
|
||||
```
|
||||
|
||||
If the hypervisor log does not indicate any errors, it provides assurance that the image
|
||||
has been successfully loaded, and a Virtual Machine (VM) initiated by the kata runtime
|
||||
will function properly.
|
||||
@@ -318,7 +329,7 @@ binary artifacts such as kernel, shim-v2, and more.
|
||||
This section will explain how to build a payload image
|
||||
(i.e., `kata-deploy`) for confidential containers. For the remaining instructions,
|
||||
please refer to the
|
||||
[documentation](https://github.com/confidential-containers/operator/blob/main/docs/how-to/INSTALL-CC-WITH-IBM-SE.md)
|
||||
[documentation](https://github.com/confidential-containers/confidential-containers/blob/main/guides/ibm-se.md)
|
||||
for confidential containers.
|
||||
|
||||
|
||||
@@ -327,12 +338,10 @@ $ cd $GOPATH/src/github.com/kata-containers/kata-containers
|
||||
$ host_key_document=$HOME/host-key-document/HKD-0000-0000000.crt
|
||||
$ mkdir hkd_dir && cp $host_key_document hkd_dir
|
||||
$ # kernel-confidential and rootfs-initrd-confidential are built automatically by the command below
|
||||
$ sudo -E PATH=$PATH HKD_PATH=hkd_dir SE_KERNEL_PARAMS="agent.log=debug" \
|
||||
make boot-image-se-tarball
|
||||
$ sudo -E PATH=$PATH make qemu-tarball
|
||||
$ sudo -E PATH=$PATH make virtiofsd-tarball
|
||||
$ # shim-v2 should be built after kernel due to dependency
|
||||
$ sudo -E PATH=$PATH make shim-v2-tarball
|
||||
$ HKD_PATH=hkd_dir SE_KERNEL_PARAMS="agent.log=debug" make boot-image-se-tarball
|
||||
$ make qemu-tarball
|
||||
$ make virtiofsd-tarball
|
||||
$ make shim-v2-tarball
|
||||
$ mkdir kata-artifacts
|
||||
$ build_dir=$(readlink -f build)
|
||||
$ cp -r $build_dir/*.tar.xz kata-artifacts
|
||||
@@ -340,6 +349,7 @@ $ ls -1 kata-artifacts
|
||||
kata-static-agent.tar.xz
|
||||
kata-static-boot-image-se.tar.xz
|
||||
kata-static-coco-guest-components.tar.xz
|
||||
kata-static-kernel-confidential-modules.tar.xz
|
||||
kata-static-kernel-confidential.tar.xz
|
||||
kata-static-pause-image.tar.xz
|
||||
kata-static-qemu.tar.xz
|
||||
@@ -349,14 +359,14 @@ kata-static-virtiofsd.tar.xz
|
||||
$ ./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
```
|
||||
|
||||
In production, the environment variables `SIGNING_KEY_CERT_PATH` and
|
||||
`INTERMEDIATE_CA_CERT_PATH` should be exported like the manual configuration.
|
||||
If a rootfs-image is required for other available runtime classes (e.g. `kata` and `kata-qemu`)
|
||||
without the Secure Execution functionality, please run the following command
|
||||
before running `kata-deploy-merge-builds.sh`:
|
||||
In production, the environment variables `SIGNING_KEY_CERT_PATH`, `INTERMEDIATE_CA_CERT_PATH`
|
||||
and `HOST_KEY_CRL_PATH` should be exported like the manual configuration.
|
||||
If a rootfs-image is required for other available runtime classes (e.g. `kata` and
|
||||
`kata-qemu`) without the Secure Execution functionality, please run the following
|
||||
command before running `kata-deploy-merge-builds.sh`:
|
||||
|
||||
```
|
||||
$ sudo -E PATH=$PATH make rootfs-image-tarball
|
||||
$ make rootfs-image-tarball
|
||||
```
|
||||
|
||||
At this point, you should have an archive file named `kata-static.tar.xz` at the project root,
|
||||
@@ -371,7 +381,7 @@ Build and push a payload image with the name `localhost:5000/build-kata-deploy`
|
||||
`latest` using the following:
|
||||
|
||||
```
|
||||
$ sudo -E PATH=$PATH ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh kata-static.tar.xz localhost:5000/build-kata-deploy latest
|
||||
$ ./tools/packaging/kata-deploy/local-build/kata-deploy-build-and-upload-payload.sh kata-static.tar.xz localhost:5000/build-kata-deploy latest
|
||||
... logs ...
|
||||
Pushing the image localhost:5000/build-kata-deploy:latest to the registry
|
||||
The push refers to repository [localhost:5000/build-kata-deploy]
|
||||
|
||||
@@ -35,6 +35,7 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.agent.enable_tracing` | `boolean` | enable tracing for the agent |
|
||||
| `io.katacontainers.config.agent.container_pipe_size` | uint32 | specify the size of the std(in/out) pipes created for containers |
|
||||
| `io.katacontainers.config.agent.kernel_modules` | string | the list of kernel modules and their parameters that will be loaded in the guest kernel. Semicolon separated list of kernel modules and their parameters. These modules will be loaded in the guest kernel using `modprobe`(8). E.g., `e1000e InterruptThrottleRate=3000,3000,3000 EEE=1; i915 enable_ppgtt=0` |
|
||||
| `io.katacontainers.config.agent.cdh_api_timeout` | uint32 | timeout in second for Confidential Data Hub (CDH) API service, default is `50` |
|
||||
|
||||
## Hypervisor Options
|
||||
| Key | Value Type | Comments |
|
||||
@@ -45,7 +46,6 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.hypervisor.block_device_cache_set` | `boolean` | cache-related options will be set to block devices or not |
|
||||
| `io.katacontainers.config.hypervisor.block_device_driver` | string | the driver to be used for block device, valid values are `virtio-blk`, `virtio-scsi`, `nvdimm`|
|
||||
| `io.katacontainers.config.hypervisor.cpu_features` | `string` | Comma-separated list of CPU features to pass to the CPU (QEMU) |
|
||||
| `io.katacontainers.config.hypervisor.ctlpath` (R) | `string` | Path to the `acrnctl` binary for the ACRN hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor |
|
||||
| `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` |
|
||||
| `io.katacontainers.config.hypervisor.default_vcpus` | float32| the default vCPUs assigned for a VM by the hypervisor |
|
||||
@@ -208,7 +208,6 @@ the configuration entry:
|
||||
|
||||
| Key | Config file entry | Comments |
|
||||
|-------| ----- | ----- |
|
||||
| `ctlpath` | `valid_ctlpaths` | Valid paths for `acrnctl` binary |
|
||||
| `entropy_source` | `valid_entropy_sources` | Valid entropy sources, e.g. `/dev/random` |
|
||||
| `file_mem_backend` | `valid_file_mem_backends` | Valid locations for the file-based memory backend root directory |
|
||||
| `jailer_path` | `valid_jailer_paths`| Valid paths for the jailer constraining the container VM (Firecracker) |
|
||||
|
||||
@@ -1,125 +0,0 @@
|
||||
# Kata Containers with ACRN
|
||||
|
||||
This document provides an overview on how to run Kata containers with ACRN hypervisor and device model.
|
||||
|
||||
## Introduction
|
||||
|
||||
ACRN is a flexible, lightweight Type-1 reference hypervisor built with real-time and safety-criticality in mind. ACRN uses an open source platform making it optimized to streamline embedded development.
|
||||
|
||||
Some of the key features being:
|
||||
|
||||
- Small footprint - Approx. 25K lines of code (LOC).
|
||||
- Real Time - Low latency, faster boot time, improves overall responsiveness with hardware.
|
||||
- Adaptability - Multi-OS support for guest operating systems like Linux, Android, RTOSes.
|
||||
- Rich I/O mediators - Allows sharing of various I/O devices across VMs.
|
||||
- Optimized for a variety of IoT (Internet of Things) and embedded device solutions.
|
||||
|
||||
Please refer to ACRN [documentation](https://projectacrn.github.io/latest/index.html) for more details on ACRN hypervisor and device model.
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
This document requires the presence of the ACRN hypervisor and Kata Containers on your system. Install using the instructions available through the following links:
|
||||
|
||||
- ACRN supported [Hardware](https://projectacrn.github.io/latest/hardware.html#supported-hardware).
|
||||
> **Note:** Please make sure to have a minimum of 4 logical processors (HT) or cores.
|
||||
- ACRN [software](https://projectacrn.github.io/latest/tutorials/run_kata_containers.html) setup.
|
||||
- For networking, ACRN supports either MACVTAP or TAP. If MACVTAP is not enabled in the Service OS, please follow the below steps to update the kernel:
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/projectacrn/acrn-kernel.git
|
||||
$ cd acrn-kernel
|
||||
$ cp kernel_config_sos .config
|
||||
$ sed -i "s/# CONFIG_MACVLAN is not set/CONFIG_MACVLAN=y/" .config
|
||||
$ sed -i '$ i CONFIG_MACVTAP=y' .config
|
||||
$ make clean && make olddefconfig && make && sudo make modules_install INSTALL_MOD_PATH=out/
|
||||
```
|
||||
Log in to the Service OS and update the kernel with MACVTAP support:
|
||||
|
||||
```sh
|
||||
$ sudo mount /dev/sda1 /mnt
|
||||
$ sudo scp -r <user name>@<host address>:<your workspace>/acrn-kernel/arch/x86/boot/bzImage /mnt/EFI/org.clearlinux/
|
||||
$ sudo scp -r <user name>@<host address>:<your workspace>/acrn-kernel/out/lib/modules/* /lib/modules/
|
||||
$ conf_file=$(sed -n '$ s/default //p' /mnt/loader/loader.conf).conf
|
||||
$ kernel_img=$(sed -n 2p /mnt/loader/entries/$conf_file | cut -d'/' -f4)
|
||||
$ sudo sed -i "s/$kernel_img/bzImage/g" /mnt/loader/entries/$conf_file
|
||||
$ sync && sudo umount /mnt && sudo reboot
|
||||
```
|
||||
- Kata Containers installation: Automated installation does not seem to be supported for Clear Linux, so please use [manual installation](../Developer-Guide.md) steps.
|
||||
|
||||
> **Note:** Create rootfs image and not initrd image.
|
||||
|
||||
In order to run Kata with ACRN, your container stack must provide block-based storage, such as device-mapper.
|
||||
|
||||
> **Note:** Currently, by design you can only launch one VM from Kata Containers using ACRN hypervisor (SDC scenario). Based on feedback from community we can increase number of VMs.
|
||||
|
||||
## Configure Docker
|
||||
|
||||
To configure Docker for device-mapper and Kata,
|
||||
|
||||
1. Stop Docker daemon if it is already running.
|
||||
|
||||
```bash
|
||||
$ sudo systemctl stop docker
|
||||
```
|
||||
|
||||
2. Set `/etc/docker/daemon.json` with the following contents.
|
||||
|
||||
```
|
||||
{
|
||||
"storage-driver": "devicemapper"
|
||||
}
|
||||
```
|
||||
|
||||
3. Restart docker.
|
||||
|
||||
```bash
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl restart docker
|
||||
```
|
||||
|
||||
4. Configure [Docker](../Developer-Guide.md#update-the-docker-systemd-unit-file) to use `kata-runtime`.
|
||||
|
||||
## Configure Kata Containers with ACRN
|
||||
|
||||
To configure Kata Containers with ACRN, copy the generated `configuration-acrn.toml` file when building the `kata-runtime` to either `/etc/kata-containers/configuration.toml` or `/usr/share/defaults/kata-containers/configuration.toml`.
|
||||
|
||||
The following command shows full paths to the `configuration.toml` files that the runtime loads. It will use the first path that exists. (Please make sure the kernel and image paths are set correctly in the `configuration.toml` file)
|
||||
|
||||
```bash
|
||||
$ sudo kata-runtime --show-default-config-paths
|
||||
```
|
||||
|
||||
>**Warning:** Please offline CPUs using [this](offline_cpu.sh) script, else VM launches will fail.
|
||||
|
||||
```bash
|
||||
$ sudo ./offline_cpu.sh
|
||||
```
|
||||
|
||||
Start an ACRN based Kata Container,
|
||||
|
||||
```bash
|
||||
$ sudo docker run -ti --runtime=kata-runtime busybox sh
|
||||
```
|
||||
|
||||
You will see ACRN(`acrn-dm`) is now running on your system, as well as a `kata-shim`. You should obtain an interactive shell prompt. Verify that all the Kata processes terminate once you exit the container.
|
||||
|
||||
```bash
|
||||
$ ps -ef | grep -E "kata|acrn"
|
||||
```
|
||||
|
||||
Validate ACRN hypervisor by using `kata-runtime kata-env`,
|
||||
|
||||
```sh
|
||||
$ kata-runtime kata-env | awk -v RS= '/\[Hypervisor\]/'
|
||||
[Hypervisor]
|
||||
MachineType = ""
|
||||
Version = "DM version is: 1.2-unstable-254577a6-dirty (daily tag:acrn-2019w27.4-140000p)
|
||||
Path = "/usr/bin/acrn-dm"
|
||||
BlockDeviceDriver = "virtio-blk"
|
||||
EntropySource = "/dev/urandom"
|
||||
Msize9p = 0
|
||||
MemorySlots = 10
|
||||
Debug = false
|
||||
UseVSock = false
|
||||
SharedFS = ""
|
||||
```
|
||||
@@ -18,7 +18,6 @@ for i in $(ls -d /sys/devices/system/cpu/cpu[1-9]*); do
|
||||
echo 0 > $i/online
|
||||
online=`cat $i/online`
|
||||
done
|
||||
echo $idx > /sys/class/vhm/acrn_vhm/offline_cpu
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ which hypervisors you may wish to investigate further.
|
||||
|
||||
| Hypervisor | Written in | Architectures | Type |
|
||||
|-|-|-|-|
|
||||
|[ACRN] | C | `x86_64` | Type 1 (bare metal) |
|
||||
|[Cloud Hypervisor] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) |
|
||||
|[Firecracker] | rust | `aarch64`, `x86_64` | Type 2 ([KVM]) |
|
||||
|[QEMU] | C | all | Type 2 ([KVM]) | `configuration-qemu.toml` |
|
||||
@@ -38,7 +37,6 @@ the hypervisors:
|
||||
|
||||
| Hypervisor | Summary | Features | Limitations | Container Creation speed | Memory density | Use cases | Comment |
|
||||
|-|-|-|-|-|-|-|-|
|
||||
|[ACRN] | Safety critical and real-time workloads | | | excellent | excellent | Embedded and IOT systems | For advanced users |
|
||||
|[Cloud Hypervisor] | Low latency, small memory footprint, small attack surface | Minimal | | excellent | excellent | High performance modern cloud workloads | |
|
||||
|[Firecracker] | Very slimline | Extremely minimal | Doesn't support all device types | excellent | excellent | Serverless / FaaS | |
|
||||
|[QEMU] | Lots of features | Lots | | good | good | Good option for most users | |
|
||||
@@ -57,7 +55,6 @@ are available, their default values and how each setting can be used.
|
||||
|
||||
| Hypervisor | Golang runtime config file | golang runtime short name | golang runtime default | rust runtime config file | rust runtime short name | rust runtime default |
|
||||
|-|-|-|-|-|-|-|
|
||||
| [ACRN] | [`configuration-acrn.toml`](../src/runtime/config/configuration-acrn.toml.in) | `acrn` | | | | |
|
||||
| [Cloud Hypervisor] | [`configuration-clh.toml`](../src/runtime/config/configuration-clh.toml.in) | `clh` | | [`configuration-cloud-hypervisor.toml`](../src/runtime-rs/config/configuration-cloud-hypervisor.toml.in) | `cloud-hypervisor` | |
|
||||
| [Firecracker] | [`configuration-fc.toml`](../src/runtime/config/configuration-fc.toml.in) | `fc` | | | | |
|
||||
| [QEMU] | [`configuration-qemu.toml`](../src/runtime/config/configuration-qemu.toml.in) | `qemu` | yes | [`configuration-qemu.toml`](../src/runtime-rs/config/configuration-qemu-runtime-rs.toml.in) | `qemu` | |
|
||||
@@ -93,10 +90,9 @@ are available, their default values and how each setting can be used.
|
||||
To switch the configured hypervisor, you only need to run a single command.
|
||||
See [the `kata-manager` documentation](../utils/README.md#choose-a-hypervisor) for further details.
|
||||
|
||||
[ACRN]: https://projectacrn.org
|
||||
[Cloud Hypervisor]: https://github.com/cloud-hypervisor/cloud-hypervisor
|
||||
[Firecracker]: https://github.com/firecracker-microvm/firecracker
|
||||
[KVM]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
|
||||
[QEMU]: http://www.qemu-project.org
|
||||
[QEMU]: http://www.qemu.org
|
||||
[`Dragonball`]: https://github.com/kata-containers/kata-containers/blob/main/src/dragonball
|
||||
[StratoVirt]: https://gitee.com/openeuler/stratovirt
|
||||
|
||||
@@ -83,6 +83,23 @@ $ make && sudo make install
|
||||
```
|
||||
After running the command above, the default config file `configuration.toml` will be installed under `/usr/share/defaults/kata-containers/`, the binary file `containerd-shim-kata-v2` will be installed under `/usr/local/bin/` .
|
||||
|
||||
### Install Shim Without Builtin Dragonball VMM
|
||||
|
||||
By default, runtime-rs includes the `Dragonball` VMM. To build without the built-in `Dragonball` hypervisor, use `make USE_BUILDIN_DB=false`:
|
||||
```bash
|
||||
$ cd kata-containers/src/runtime-rs
|
||||
$ make USE_BUILDIN_DB=false
|
||||
```
|
||||
After building, specify the desired hypervisor during installation using `HYPERVISOR`. For example, to use `qemu` or `cloud-hypervisor`:
|
||||
|
||||
```
|
||||
sudo make install HYPERVISOR=qemu
|
||||
```
|
||||
or
|
||||
```
|
||||
sudo make install HYPERVISOR=cloud-hypervisor
|
||||
```
|
||||
|
||||
### Build Kata Containers Kernel
|
||||
Follow the [Kernel installation guide](/tools/packaging/kernel/README.md).
|
||||
|
||||
|
||||
417
src/agent/Cargo.lock
generated
417
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -6,7 +6,8 @@ edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
oci = { path = "../libs/oci" }
|
||||
runtime-spec = { path = "../libs/runtime-spec" }
|
||||
oci-spec = { version = "0.6.8", features = ["runtime"] }
|
||||
rustjail = { path = "rustjail" }
|
||||
protocols = { path = "../libs/protocols", features = ["async", "with-serde"] }
|
||||
lazy_static = "1.3.0"
|
||||
@@ -19,7 +20,7 @@ serde_json = "1.0.39"
|
||||
scan_fmt = "0.2.3"
|
||||
scopeguard = "1.0.0"
|
||||
thiserror = "1.0.26"
|
||||
regex = "1.10.4"
|
||||
regex = "1.10.5"
|
||||
serial_test = "0.5.1"
|
||||
url = "2.5.0"
|
||||
derivative = "2.2.0"
|
||||
@@ -34,7 +35,7 @@ async-recursion = "0.3.2"
|
||||
futures = "0.3.30"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1.38.0", features = ["full"] }
|
||||
tokio = { version = "1.39.0", features = ["full"] }
|
||||
tokio-vsock = "0.3.4"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
|
||||
@@ -76,7 +77,7 @@ strum = "0.26.2"
|
||||
strum_macros = "0.26.2"
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "2c5ac6b01aafcb0be3875f5743c77d654a548146", default-features = false, optional = true }
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "v0.10.0", default-features = false, optional = true }
|
||||
|
||||
# Agent Policy
|
||||
regorus = { version = "0.1.4", default-features = false, features = [
|
||||
|
||||
@@ -159,7 +159,7 @@ vendor:
|
||||
|
||||
#TARGET test: run cargo tests
|
||||
test: $(GENERATED_FILES)
|
||||
@RUST_LIB_BACKTRACE=0 cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
@RUST_LIB_BACKTRACE=0 RUST_BACKTRACE=1 cargo test --all --target $(TRIPLE) $(EXTRA_RUSTFEATURES) -- --nocapture
|
||||
|
||||
##TARGET check: run test
|
||||
check: $(GENERATED_FILES) standard_rust_check
|
||||
|
||||
@@ -128,12 +128,16 @@ The kata agent has the ability to configure agent options in guest kernel comman
|
||||
| `agent.guest_components_rest_api` | `api-server-rest` configuration | Select the features that the API Server Rest attestation component will run with. Valid values are `all`, `attestation`, `resource` | string | `resource` |
|
||||
| `agent.guest_components_procs` | guest-components processes | Attestation-related processes that should be spawned as children of the guest. Valid values are `none`, `attestation-agent`, `confidential-data-hub` (implies `attestation-agent`), `api-server-rest` (implies `attestation-agent` and `confidential-data-hub`) | string | `api-server-rest` |
|
||||
| `agent.hotplug_timeout` | Hotplug timeout | Allow to configure hotplug timeout(seconds) of block devices | integer | `3` |
|
||||
| `agent.cdh_api_timeout` | Confidential Data Hub (CDH) API timeout | Allow to configure CDH API timeout(seconds) | integer | `50` |
|
||||
| `agent.https_proxy` | HTTPS proxy | Allow to configure `https_proxy` in the guest | string | `""` |
|
||||
| `agent.image_registry_auth` | Image registry credential URI | The URI to where image-rs can find the credentials for pulling images from private registries e.g. `file:///root/.docker/config.json` to read from a file in the guest image, or `kbs:///default/credentials/test` to get the file from the KBS| string | `""` |
|
||||
| `agent.enable_signature_verification` | Image security policy flag | Whether enable image security policy enforcement. If `true`, the resource indexed by URI `agent.image_policy_file` will be got to work as image pulling policy. | string | `""` |
|
||||
| `agent.image_policy_file` | Image security policy URI | The URI to where image-rs Typical policy URIs are like `file:///etc/image.json` to read from a file in the guest image, or `kbs:///default/security-policy/test` to get the file from the KBS| string | `""` |
|
||||
| `agent.log` | Log level | Allow the agent log level to be changed (produces more or less output) | string | `"info"` |
|
||||
| `agent.log_vport` | Log port | Allow to specify the `vsock` port to read logs | integer | `0` |
|
||||
| `agent.no_proxy` | NO proxy | Allow to configure `no_proxy` in the guest | string | `""` |
|
||||
| `agent.passfd_listener_port` | File descriptor passthrough IO listener port | Allow to set the file descriptor passthrough IO listener port | integer | `0` |
|
||||
| `agent.secure_image_storage_integrity` | Image storage integrity | Allow to use `dm-integrity` to protect the integrity of encrypted block volume | boolean | `false` |
|
||||
| `agent.server_addr` | Server address | Allow the ttRPC server address to be specified | string | `"vsock://-1:1024"` |
|
||||
| `agent.trace` | Trace mode | Allow to static tracing | boolean | `false` |
|
||||
| `systemd.unified_cgroup_hierarchy` | `Cgroup hierarchy` | Allow to setup v2 cgroups | boolean | `false` |
|
||||
@@ -144,7 +148,7 @@ The kata agent has the ability to configure agent options in guest kernel comman
|
||||
> The agent will fail to start if the configuration file is not present,
|
||||
> or if it can't be parsed properly.
|
||||
> - `agent.devmode`: true | false
|
||||
> - `agent.hotplug_timeout`: a whole number of seconds
|
||||
> - `agent.hotplug_timeout` and `agent.cdh_api_timeout`: a whole number of seconds
|
||||
> - `agent.log`: "critical"("fatal" | "panic") | "error" | "warn"("warning") | "info" | "debug"
|
||||
> - `agent.server_addr`: "{VSOCK_ADDR}:{VSOCK_PORT}"
|
||||
> - `agent.trace`: true | false
|
||||
|
||||
@@ -10,7 +10,8 @@ awaitgroup = "0.6.0"
|
||||
serde = "1.0.91"
|
||||
serde_json = "1.0.39"
|
||||
serde_derive = "1.0.91"
|
||||
oci = { path = "../../libs/oci" }
|
||||
runtime-spec = { path = "../../libs/runtime-spec" }
|
||||
oci-spec = { version = "0.6.8", features = ["runtime"] }
|
||||
protocols = { path ="../../libs/protocols" }
|
||||
kata-sys-util = { path = "../../libs/kata-sys-util" }
|
||||
caps = "0.5.0"
|
||||
@@ -44,6 +45,7 @@ xattr = "0.2.3"
|
||||
serial_test = "0.5.0"
|
||||
tempfile = "3.1.0"
|
||||
test-utils = { path = "../../libs/test-utils" }
|
||||
protocols = { path ="../../libs/protocols" }
|
||||
|
||||
[features]
|
||||
seccomp = ["libseccomp"]
|
||||
|
||||
@@ -10,17 +10,20 @@ use crate::log_child;
|
||||
use crate::sync::write_count;
|
||||
use anyhow::{anyhow, Result};
|
||||
use caps::{self, runtime, CapSet, Capability, CapsHashSet};
|
||||
use oci::LinuxCapabilities;
|
||||
use oci::{Capability as LinuxCapability, LinuxCapabilities};
|
||||
use oci_spec::runtime as oci;
|
||||
use std::collections::HashSet;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::str::FromStr;
|
||||
|
||||
fn to_capshashset(cfd_log: RawFd, caps: &[String]) -> CapsHashSet {
|
||||
fn to_capshashset(cfd_log: RawFd, capabilities: &Option<HashSet<LinuxCapability>>) -> CapsHashSet {
|
||||
let mut r = CapsHashSet::new();
|
||||
|
||||
let binding: HashSet<LinuxCapability> = HashSet::new();
|
||||
let caps = capabilities.as_ref().unwrap_or(&binding);
|
||||
for cap in caps.iter() {
|
||||
match Capability::from_str(cap) {
|
||||
match Capability::from_str(&format!("CAP_{}", cap)) {
|
||||
Err(_) => {
|
||||
log_child!(cfd_log, "{} is not a cap", cap);
|
||||
log_child!(cfd_log, "{} is not a cap", &cap.to_string());
|
||||
continue;
|
||||
}
|
||||
Ok(c) => r.insert(c),
|
||||
@@ -48,33 +51,33 @@ pub fn reset_effective() -> Result<()> {
|
||||
pub fn drop_privileges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
|
||||
let all = get_all_caps();
|
||||
|
||||
for c in all.difference(&to_capshashset(cfd_log, caps.bounding.as_ref())) {
|
||||
for c in all.difference(&to_capshashset(cfd_log, caps.bounding())) {
|
||||
caps::drop(None, CapSet::Bounding, *c).map_err(|e| anyhow!(e.to_string()))?;
|
||||
}
|
||||
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Effective,
|
||||
&to_capshashset(cfd_log, caps.effective.as_ref()),
|
||||
&to_capshashset(cfd_log, caps.effective()),
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Permitted,
|
||||
&to_capshashset(cfd_log, caps.permitted.as_ref()),
|
||||
&to_capshashset(cfd_log, caps.permitted()),
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Inheritable,
|
||||
&to_capshashset(cfd_log, caps.inheritable.as_ref()),
|
||||
&to_capshashset(cfd_log, caps.inheritable()),
|
||||
)
|
||||
.map_err(|e| anyhow!(e.to_string()))?;
|
||||
|
||||
let _ = caps::set(
|
||||
None,
|
||||
CapSet::Ambient,
|
||||
&to_capshashset(cfd_log, caps.ambient.as_ref()),
|
||||
&to_capshashset(cfd_log, caps.ambient()),
|
||||
)
|
||||
.map_err(|_| log_child!(cfd_log, "failed to set ambient capability"));
|
||||
|
||||
|
||||
@@ -23,9 +23,10 @@ use crate::container::DEFAULT_DEVICES;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use libc::{self, pid_t};
|
||||
use oci::{
|
||||
LinuxBlockIo, LinuxCpu, LinuxDevice, LinuxDeviceCgroup, LinuxHugepageLimit, LinuxMemory,
|
||||
LinuxNetwork, LinuxPids, LinuxResources, Spec,
|
||||
LinuxBlockIo, LinuxCpu, LinuxDevice, LinuxDeviceCgroup, LinuxDeviceCgroupBuilder,
|
||||
LinuxHugepageLimit, LinuxMemory, LinuxNetwork, LinuxPids, LinuxResources, Spec,
|
||||
};
|
||||
use oci_spec::runtime as oci;
|
||||
|
||||
use protobuf::MessageField;
|
||||
use protocols::agent::{
|
||||
@@ -72,7 +73,7 @@ pub struct Manager {
|
||||
// set_resource is used to set resources by cgroup controller.
|
||||
macro_rules! set_resource {
|
||||
($cont:ident, $func:ident, $res:ident, $field:ident) => {
|
||||
let resource_value = $res.$field.unwrap_or(0);
|
||||
let resource_value = $res.$field().unwrap_or(0);
|
||||
if resource_value != 0 {
|
||||
$cont.$func(resource_value)?;
|
||||
}
|
||||
@@ -95,38 +96,40 @@ impl CgroupManager for Manager {
|
||||
let pod_res = &mut cgroups::Resources::default();
|
||||
|
||||
// set cpuset and cpu resources
|
||||
if let Some(cpu) = &r.cpu {
|
||||
if let Some(cpu) = &r.cpu() {
|
||||
set_cpu_resources(&self.cgroup, cpu)?;
|
||||
}
|
||||
|
||||
// set memory resources
|
||||
if let Some(memory) = &r.memory {
|
||||
if let Some(memory) = &r.memory() {
|
||||
set_memory_resources(&self.cgroup, memory, update)?;
|
||||
}
|
||||
|
||||
// set pids resources
|
||||
if let Some(pids_resources) = &r.pids {
|
||||
if let Some(pids_resources) = &r.pids() {
|
||||
set_pids_resources(&self.cgroup, pids_resources)?;
|
||||
}
|
||||
|
||||
// set block_io resources
|
||||
if let Some(blkio) = &r.block_io {
|
||||
if let Some(blkio) = &r.block_io() {
|
||||
set_block_io_resources(&self.cgroup, blkio, res);
|
||||
}
|
||||
|
||||
// set hugepages resources
|
||||
if !r.hugepage_limits.is_empty() {
|
||||
set_hugepages_resources(&self.cgroup, &r.hugepage_limits, res);
|
||||
if let Some(hugepage_limits) = r.hugepage_limits() {
|
||||
set_hugepages_resources(&self.cgroup, hugepage_limits, res);
|
||||
}
|
||||
|
||||
// set network resources
|
||||
if let Some(network) = &r.network {
|
||||
if let Some(network) = &r.network() {
|
||||
set_network_resources(&self.cgroup, network, res);
|
||||
}
|
||||
|
||||
// set devices resources
|
||||
if !self.devcg_allowed_all {
|
||||
set_devices_resources(&self.cgroup, &r.devices, res, pod_res);
|
||||
if let Some(devices) = r.devices() {
|
||||
set_devices_resources(&self.cgroup, devices, res, pod_res);
|
||||
}
|
||||
}
|
||||
debug!(
|
||||
sl(),
|
||||
@@ -301,7 +304,7 @@ fn set_network_resources(
|
||||
|
||||
// set classid
|
||||
// description can be found at https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/net_cls.html
|
||||
let class_id = network.class_id.unwrap_or(0) as u64;
|
||||
let class_id = network.class_id().unwrap_or(0) as u64;
|
||||
if class_id != 0 {
|
||||
res.network.class_id = Some(class_id);
|
||||
}
|
||||
@@ -309,10 +312,11 @@ fn set_network_resources(
|
||||
// set network priorities
|
||||
// description can be found at https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/net_prio.html
|
||||
let mut priorities = vec![];
|
||||
for p in network.priorities.iter() {
|
||||
let interface_priority = network.priorities().clone().unwrap_or_default();
|
||||
for p in interface_priority.iter() {
|
||||
priorities.push(NetworkPriority {
|
||||
name: p.name.clone(),
|
||||
priority: p.priority as u64,
|
||||
name: p.name().clone(),
|
||||
priority: p.priority() as u64,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -351,17 +355,18 @@ fn set_hugepages_resources(
|
||||
let hugetlb_controller = cg.controller_of::<HugeTlbController>();
|
||||
|
||||
for l in hugepage_limits.iter() {
|
||||
if hugetlb_controller.is_some() && hugetlb_controller.unwrap().size_supported(&l.page_size)
|
||||
if hugetlb_controller.is_some() && hugetlb_controller.unwrap().size_supported(l.page_size())
|
||||
{
|
||||
let hr = HugePageResource {
|
||||
size: l.page_size.clone(),
|
||||
limit: l.limit,
|
||||
size: l.page_size().clone(),
|
||||
limit: l.limit() as u64,
|
||||
};
|
||||
limits.push(hr);
|
||||
} else {
|
||||
warn!(
|
||||
sl(),
|
||||
"{} page size support cannot be verified, dropping requested limit", l.page_size
|
||||
"{} page size support cannot be verified, dropping requested limit",
|
||||
l.page_size()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -375,29 +380,47 @@ fn set_block_io_resources(
|
||||
) {
|
||||
info!(sl(), "cgroup manager set block io");
|
||||
|
||||
res.blkio.weight = blkio.weight;
|
||||
res.blkio.leaf_weight = blkio.leaf_weight;
|
||||
res.blkio.weight = blkio.weight();
|
||||
res.blkio.leaf_weight = blkio.leaf_weight();
|
||||
|
||||
let mut blk_device_resources = vec![];
|
||||
for d in blkio.weight_device.iter() {
|
||||
let default_weight_device = vec![];
|
||||
let weight_device = blkio
|
||||
.weight_device()
|
||||
.as_ref()
|
||||
.unwrap_or(&default_weight_device);
|
||||
for d in weight_device.iter() {
|
||||
let dr = BlkIoDeviceResource {
|
||||
major: d.blk.major as u64,
|
||||
minor: d.blk.minor as u64,
|
||||
weight: blkio.weight,
|
||||
leaf_weight: blkio.leaf_weight,
|
||||
major: d.major() as u64,
|
||||
minor: d.minor() as u64,
|
||||
weight: blkio.weight(),
|
||||
leaf_weight: blkio.leaf_weight(),
|
||||
};
|
||||
blk_device_resources.push(dr);
|
||||
}
|
||||
res.blkio.weight_device = blk_device_resources;
|
||||
|
||||
res.blkio.throttle_read_bps_device =
|
||||
build_blk_io_device_throttle_resource(&blkio.throttle_read_bps_device);
|
||||
res.blkio.throttle_write_bps_device =
|
||||
build_blk_io_device_throttle_resource(&blkio.throttle_write_bps_device);
|
||||
res.blkio.throttle_read_iops_device =
|
||||
build_blk_io_device_throttle_resource(&blkio.throttle_read_iops_device);
|
||||
res.blkio.throttle_write_iops_device =
|
||||
build_blk_io_device_throttle_resource(&blkio.throttle_write_iops_device);
|
||||
res.blkio.throttle_read_bps_device = build_blk_io_device_throttle_resource(
|
||||
blkio.throttle_read_bps_device().as_ref().unwrap_or(&vec![]),
|
||||
);
|
||||
res.blkio.throttle_write_bps_device = build_blk_io_device_throttle_resource(
|
||||
blkio
|
||||
.throttle_write_bps_device()
|
||||
.as_ref()
|
||||
.unwrap_or(&vec![]),
|
||||
);
|
||||
res.blkio.throttle_read_iops_device = build_blk_io_device_throttle_resource(
|
||||
blkio
|
||||
.throttle_read_iops_device()
|
||||
.as_ref()
|
||||
.unwrap_or(&vec![]),
|
||||
);
|
||||
res.blkio.throttle_write_iops_device = build_blk_io_device_throttle_resource(
|
||||
blkio
|
||||
.throttle_write_iops_device()
|
||||
.as_ref()
|
||||
.unwrap_or(&vec![]),
|
||||
);
|
||||
}
|
||||
|
||||
fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCpu) -> Result<()> {
|
||||
@@ -405,19 +428,19 @@ fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCpu) -> Result<()> {
|
||||
|
||||
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
|
||||
|
||||
if !cpu.cpus.is_empty() {
|
||||
if let Err(e) = cpuset_controller.set_cpus(&cpu.cpus) {
|
||||
if let Some(cpus) = cpu.cpus() {
|
||||
if let Err(e) = cpuset_controller.set_cpus(cpus) {
|
||||
warn!(sl(), "write cpuset failed: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
if !cpu.mems.is_empty() {
|
||||
cpuset_controller.set_mems(&cpu.mems)?;
|
||||
if let Some(mems) = cpu.mems() {
|
||||
cpuset_controller.set_mems(mems)?;
|
||||
}
|
||||
|
||||
let cpu_controller: &CpuController = cg.controller_of().unwrap();
|
||||
|
||||
if let Some(shares) = cpu.shares {
|
||||
if let Some(shares) = cpu.shares() {
|
||||
let shares = if cg.v2() {
|
||||
convert_shares_to_v2_value(shares)
|
||||
} else {
|
||||
@@ -449,12 +472,12 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
|
||||
|
||||
// If the memory update is set to -1 we should also
|
||||
// set swap to -1, it means unlimited memory.
|
||||
let mut swap = memory.swap.unwrap_or(0);
|
||||
if memory.limit == Some(-1) {
|
||||
let mut swap = memory.swap().unwrap_or(0);
|
||||
if memory.limit() == Some(-1) {
|
||||
swap = -1;
|
||||
}
|
||||
|
||||
if memory.limit.is_some() && swap != 0 {
|
||||
if memory.limit().is_some() && swap != 0 {
|
||||
let memstat = get_memory_stats(cg)
|
||||
.into_option()
|
||||
.ok_or_else(|| anyhow!("failed to get the cgroup memory stats"))?;
|
||||
@@ -475,7 +498,7 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
|
||||
} else {
|
||||
set_resource!(mem_controller, set_limit, memory, limit);
|
||||
swap = if cg.v2() {
|
||||
convert_memory_swap_to_v2_value(swap, memory.limit.unwrap_or(0))?
|
||||
convert_memory_swap_to_v2_value(swap, memory.limit().unwrap_or(0))?
|
||||
} else {
|
||||
swap
|
||||
};
|
||||
@@ -488,7 +511,7 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
|
||||
set_resource!(mem_controller, set_kmem_limit, memory, kernel);
|
||||
set_resource!(mem_controller, set_tcp_limit, memory, kernel_tcp);
|
||||
|
||||
if let Some(swappiness) = memory.swappiness {
|
||||
if let Some(swappiness) = memory.swappiness() {
|
||||
if (0..=100).contains(&swappiness) {
|
||||
mem_controller.set_swappiness(swappiness)?;
|
||||
} else {
|
||||
@@ -499,7 +522,7 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
|
||||
}
|
||||
}
|
||||
|
||||
if memory.disable_oom_killer.unwrap_or(false) {
|
||||
if memory.disable_oom_killer().unwrap_or(false) {
|
||||
mem_controller.disable_oom_killer()?;
|
||||
}
|
||||
|
||||
@@ -509,8 +532,8 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
|
||||
fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
|
||||
info!(sl(), "cgroup manager set pids");
|
||||
let pid_controller: &PidController = cg.controller_of().unwrap();
|
||||
let v = if pids.limit > 0 {
|
||||
MaxValue::Value(pids.limit)
|
||||
let v = if pids.limit() > 0 {
|
||||
MaxValue::Value(pids.limit())
|
||||
} else {
|
||||
MaxValue::Max
|
||||
};
|
||||
@@ -525,9 +548,9 @@ fn build_blk_io_device_throttle_resource(
|
||||
let mut blk_io_device_throttle_resources = vec![];
|
||||
for d in input.iter() {
|
||||
let tr = BlkIoDeviceThrottleResource {
|
||||
major: d.blk.major as u64,
|
||||
minor: d.blk.minor as u64,
|
||||
rate: d.rate,
|
||||
major: d.major() as u64,
|
||||
minor: d.minor() as u64,
|
||||
rate: d.rate(),
|
||||
};
|
||||
blk_io_device_throttle_resources.push(tr);
|
||||
}
|
||||
@@ -536,13 +559,20 @@ fn build_blk_io_device_throttle_resource(
|
||||
}
|
||||
|
||||
fn linux_device_cgroup_to_device_resource(d: &LinuxDeviceCgroup) -> Option<DeviceResource> {
|
||||
let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
|
||||
let dev_type = match DeviceType::from_char(d.typ().unwrap_or_default().as_str().chars().next())
|
||||
{
|
||||
Some(t) => t,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let mut permissions: Vec<DevicePermissions> = vec![];
|
||||
for p in d.access.chars().collect::<Vec<char>>() {
|
||||
for p in d
|
||||
.access()
|
||||
.as_ref()
|
||||
.unwrap_or(&"".to_owned())
|
||||
.chars()
|
||||
.collect::<Vec<char>>()
|
||||
{
|
||||
match p {
|
||||
'r' => permissions.push(DevicePermissions::Read),
|
||||
'w' => permissions.push(DevicePermissions::Write),
|
||||
@@ -552,10 +582,10 @@ fn linux_device_cgroup_to_device_resource(d: &LinuxDeviceCgroup) -> Option<Devic
|
||||
}
|
||||
|
||||
Some(DeviceResource {
|
||||
allow: d.allow,
|
||||
allow: d.allow(),
|
||||
devtype: dev_type,
|
||||
major: d.major.unwrap_or(0),
|
||||
minor: d.minor.unwrap_or(0),
|
||||
major: d.major().unwrap_or(0),
|
||||
minor: d.minor().unwrap_or(0),
|
||||
access: permissions,
|
||||
})
|
||||
}
|
||||
@@ -592,58 +622,64 @@ lazy_static! {
|
||||
pub static ref DEFAULT_ALLOWED_DEVICES: Vec<LinuxDeviceCgroup> = {
|
||||
vec![
|
||||
// all mknod to all char devices
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: "c".to_string(),
|
||||
major: Some(WILDCARD),
|
||||
minor: Some(WILDCARD),
|
||||
access: "m".to_string(),
|
||||
},
|
||||
LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(WILDCARD)
|
||||
.minor(WILDCARD)
|
||||
.access("m")
|
||||
.build()
|
||||
.unwrap(),
|
||||
|
||||
// all mknod to all block devices
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: "b".to_string(),
|
||||
major: Some(WILDCARD),
|
||||
minor: Some(WILDCARD),
|
||||
access: "m".to_string(),
|
||||
},
|
||||
LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::B)
|
||||
.major(WILDCARD)
|
||||
.minor(WILDCARD)
|
||||
.access("m")
|
||||
.build()
|
||||
.unwrap(),
|
||||
|
||||
// all read/write/mknod to char device /dev/console
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: "c".to_string(),
|
||||
major: Some(5),
|
||||
minor: Some(1),
|
||||
access: "rwm".to_string(),
|
||||
},
|
||||
LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(5)
|
||||
.minor(1)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap(),
|
||||
|
||||
// all read/write/mknod to char device /dev/pts/<N>
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: "c".to_string(),
|
||||
major: Some(136),
|
||||
minor: Some(WILDCARD),
|
||||
access: "rwm".to_string(),
|
||||
},
|
||||
LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(136)
|
||||
.minor(WILDCARD)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap(),
|
||||
|
||||
// all read/write/mknod to char device /dev/ptmx
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: "c".to_string(),
|
||||
major: Some(5),
|
||||
minor: Some(2),
|
||||
access: "rwm".to_string(),
|
||||
},
|
||||
LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(5)
|
||||
.minor(2)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap(),
|
||||
|
||||
// all read/write/mknod to char device /dev/net/tun
|
||||
LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: "c".to_string(),
|
||||
major: Some(10),
|
||||
minor: Some(200),
|
||||
access: "rwm".to_string(),
|
||||
},
|
||||
LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(10)
|
||||
.minor(200)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap(),
|
||||
]
|
||||
};
|
||||
}
|
||||
@@ -688,9 +724,20 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> MessageField<CpuUsage> {
|
||||
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
|
||||
let stat = cpu_controller.cpu().stat;
|
||||
let h = lines_to_map(&stat);
|
||||
let usage_in_usermode = *h.get("user_usec").unwrap_or(&0);
|
||||
let usage_in_kernelmode = *h.get("system_usec").unwrap_or(&0);
|
||||
let total_usage = *h.get("usage_usec").unwrap_or(&0);
|
||||
// All fields in CpuUsage are expressed in nanoseconds (ns).
|
||||
//
|
||||
// For cgroup v1 (cpuacct controller):
|
||||
// kata-agent reads the cpuacct.stat file, which reports the number of ticks
|
||||
// consumed by the processes in the cgroup. It then converts these ticks to nanoseconds.
|
||||
// Ref: https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
|
||||
//
|
||||
// For cgroup v2 (cpu controller):
|
||||
// kata-agent reads the cpu.stat file, which reports the time consumed by the
|
||||
// processes in the cgroup in microseconds (us). It then converts microseconds to nanoseconds.
|
||||
// Ref: https://www.kernel.org/doc/Documentation/cgroup-v2.txt, section 5-1-1. CPU Interface Files
|
||||
let usage_in_usermode = *h.get("user_usec").unwrap_or(&0) * 1000;
|
||||
let usage_in_kernelmode = *h.get("system_usec").unwrap_or(&0) * 1000;
|
||||
let total_usage = *h.get("usage_usec").unwrap_or(&0) * 1000;
|
||||
let percpu_usage = vec![];
|
||||
|
||||
MessageField::some(CpuUsage {
|
||||
@@ -1218,19 +1265,24 @@ impl Manager {
|
||||
|
||||
/// Check if OCI spec contains a rule of allowed all devices.
|
||||
fn has_allowed_all_devices_rule(spec: &Spec) -> bool {
|
||||
let linux = match spec.linux.as_ref() {
|
||||
let linux = match spec.linux().as_ref() {
|
||||
Some(linux) => linux,
|
||||
None => return false,
|
||||
};
|
||||
let resources = match linux.resources.as_ref() {
|
||||
let resources = match linux.resources().as_ref() {
|
||||
Some(resource) => resource,
|
||||
None => return false,
|
||||
};
|
||||
|
||||
resources
|
||||
.devices
|
||||
.iter()
|
||||
.find(|dev| rule_for_all_devices(dev))
|
||||
.map(|dev| dev.allow)
|
||||
.devices()
|
||||
.as_ref()
|
||||
.and_then(|devices| {
|
||||
devices
|
||||
.iter()
|
||||
.find(|dev| rule_for_all_devices(dev))
|
||||
.map(|dev| dev.allow())
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
@@ -1254,7 +1306,7 @@ fn default_allowed_devices() -> Vec<DeviceResource> {
|
||||
|
||||
/// Convert LinuxDevice to DeviceResource.
|
||||
fn linux_device_to_device_resource(d: &LinuxDevice) -> Option<DeviceResource> {
|
||||
let dev_type = match DeviceType::from_char(d.r#type.chars().next()) {
|
||||
let dev_type = match DeviceType::from_char(d.typ().as_str().chars().next()) {
|
||||
Some(t) => t,
|
||||
None => return None,
|
||||
};
|
||||
@@ -1268,8 +1320,8 @@ fn linux_device_to_device_resource(d: &LinuxDevice) -> Option<DeviceResource> {
|
||||
Some(DeviceResource {
|
||||
allow: true,
|
||||
devtype: dev_type,
|
||||
major: d.major,
|
||||
minor: d.minor,
|
||||
major: d.major(),
|
||||
minor: d.minor(),
|
||||
access: permissions,
|
||||
})
|
||||
}
|
||||
@@ -1328,7 +1380,11 @@ mod tests {
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use cgroups::devices::{DevicePermissions, DeviceType};
|
||||
use oci::{Linux, LinuxDeviceCgroup, LinuxResources, Spec};
|
||||
use oci::{
|
||||
LinuxBuilder, LinuxDeviceCgroup, LinuxDeviceCgroupBuilder, LinuxDeviceType,
|
||||
LinuxResourcesBuilder, SpecBuilder,
|
||||
};
|
||||
use oci_spec::runtime as oci;
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
use super::default_allowed_devices;
|
||||
@@ -1423,21 +1479,22 @@ mod tests {
|
||||
container_devices_list: Vec<String>,
|
||||
}
|
||||
|
||||
let allow_all = LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: String::new(),
|
||||
major: Some(0),
|
||||
minor: Some(0),
|
||||
access: String::from("rwm"),
|
||||
};
|
||||
|
||||
let deny_all = LinuxDeviceCgroup {
|
||||
allow: false,
|
||||
r#type: String::new(),
|
||||
major: Some(0),
|
||||
minor: Some(0),
|
||||
access: String::from("rwm"),
|
||||
};
|
||||
let allow_all = LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(LinuxDeviceType::A)
|
||||
.major(0)
|
||||
.minor(0)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap();
|
||||
let deny_all = LinuxDeviceCgroupBuilder::default()
|
||||
.allow(false)
|
||||
.typ(LinuxDeviceType::A)
|
||||
.major(0)
|
||||
.minor(0)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
@@ -1490,16 +1547,20 @@ mod tests {
|
||||
let mut managers = Vec::with_capacity(tc.devices.len());
|
||||
|
||||
for cid in 0..tc.devices.len() {
|
||||
let spec = Spec {
|
||||
linux: Some(Linux {
|
||||
resources: Some(LinuxResources {
|
||||
devices: tc.devices[cid].clone(),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
};
|
||||
let spec = SpecBuilder::default()
|
||||
.linux(
|
||||
LinuxBuilder::default()
|
||||
.resources(
|
||||
LinuxResourcesBuilder::default()
|
||||
.devices(tc.devices[cid].clone())
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.build()
|
||||
.unwrap();
|
||||
managers.push(
|
||||
Manager::new(&tc.cpath[cid], &spec, Some(sandbox.devcg_info.clone())).unwrap(),
|
||||
);
|
||||
|
||||
@@ -11,6 +11,7 @@ use anyhow::Result;
|
||||
use cgroups::freezer::FreezerState;
|
||||
use libc::{self, pid_t};
|
||||
use oci::{LinuxResources, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::string::String;
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use core::fmt::Debug;
|
||||
use oci::{LinuxDeviceCgroup, LinuxResources};
|
||||
use oci_spec::runtime::{LinuxDeviceCgroup, LinuxDeviceType, LinuxResources};
|
||||
use protocols::agent::CgroupStats;
|
||||
use std::any::Any;
|
||||
|
||||
@@ -75,15 +75,20 @@ impl Debug for dyn Manager + Send + Sync {
|
||||
///
|
||||
/// The formats representing all devices between OCI spec and cgroups-rs
|
||||
/// are different.
|
||||
/// - OCI spec: major: 0, minor: 0, type: "", access: "rwm";
|
||||
/// - OCI spec: major: Some(0), minor: Some(0), type: Some(A), access: Some("rwm");
|
||||
/// - Cgroups-rs: major: -1, minor: -1, type: "a", access: "rwm";
|
||||
/// - Linux: a *:* rwm
|
||||
#[inline]
|
||||
fn rule_for_all_devices(dev_cgroup: &LinuxDeviceCgroup) -> bool {
|
||||
dev_cgroup.major.unwrap_or(0) == 0
|
||||
&& dev_cgroup.minor.unwrap_or(0) == 0
|
||||
&& (dev_cgroup.r#type.as_str() == "" || dev_cgroup.r#type.as_str() == "a")
|
||||
&& dev_cgroup.access.contains('r')
|
||||
&& dev_cgroup.access.contains('w')
|
||||
&& dev_cgroup.access.contains('m')
|
||||
let cgrp_access = dev_cgroup.access().clone().unwrap_or_default();
|
||||
let dev_type = dev_cgroup
|
||||
.typ()
|
||||
.as_ref()
|
||||
.map_or(LinuxDeviceType::default(), |x| *x);
|
||||
dev_cgroup.major().unwrap_or(0) == 0
|
||||
&& dev_cgroup.minor().unwrap_or(0) == 0
|
||||
&& dev_type == LinuxDeviceType::A
|
||||
&& cgrp_access.contains('r')
|
||||
&& cgrp_access.contains('w')
|
||||
&& cgrp_access.contains('m')
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ use anyhow::{anyhow, Result};
|
||||
use cgroups::freezer::FreezerState;
|
||||
use libc::{self, pid_t};
|
||||
use oci::LinuxResources;
|
||||
use oci_spec::runtime as oci;
|
||||
use std::any::Any;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryInto;
|
||||
|
||||
@@ -8,6 +8,7 @@ use super::transformer::Transformer;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use oci::{LinuxCpu, LinuxResources};
|
||||
use oci_spec::runtime as oci;
|
||||
use zbus::zvariant::Value;
|
||||
|
||||
const BASIC_SYSTEMD_VERSION: &str = "242";
|
||||
@@ -25,7 +26,7 @@ impl Transformer for Cpu {
|
||||
cgroup_hierarchy: &CgroupHierarchy,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(cpu_resources) = &r.cpu {
|
||||
if let Some(cpu_resources) = &r.cpu() {
|
||||
match cgroup_hierarchy {
|
||||
CgroupHierarchy::Legacy => {
|
||||
Self::legacy_apply(cpu_resources, properties, systemd_version)?
|
||||
@@ -50,7 +51,7 @@ impl Cpu {
|
||||
properties: &mut Properties,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(shares) = cpu_resources.shares {
|
||||
if let Some(shares) = cpu_resources.shares() {
|
||||
// Minimum value of CPUShares should be 2, see https://github.com/systemd/systemd/blob/d19434fbf81db04d03c8cffa87821f754a86635b/src/basic/cgroup-util.h#L122
|
||||
let shares = match shares {
|
||||
0 => 1024,
|
||||
@@ -60,14 +61,14 @@ impl Cpu {
|
||||
properties.push(("CPUShares", Value::U64(shares)));
|
||||
}
|
||||
|
||||
if let Some(period) = cpu_resources.period {
|
||||
if let Some(period) = cpu_resources.period() {
|
||||
if period != 0 && systemd_version >= BASIC_SYSTEMD_VERSION {
|
||||
properties.push(("CPUQuotaPeriodUSec", Value::U64(period)));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(quota) = cpu_resources.quota {
|
||||
let period = cpu_resources.period.unwrap_or(DEFAULT_CPUQUOTAPERIOD);
|
||||
if let Some(quota) = cpu_resources.quota() {
|
||||
let period = cpu_resources.period().unwrap_or(DEFAULT_CPUQUOTAPERIOD);
|
||||
if period != 0 {
|
||||
let cpu_quota_per_sec_usec = resolve_cpuquota(quota, period);
|
||||
properties.push(("CPUQuotaPerSecUSec", Value::U64(cpu_quota_per_sec_usec)));
|
||||
@@ -86,19 +87,19 @@ impl Cpu {
|
||||
properties: &mut Properties,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(shares) = cpu_resources.shares {
|
||||
if let Some(shares) = cpu_resources.shares() {
|
||||
let weight = shares_to_weight(shares).unwrap();
|
||||
properties.push(("CPUWeight", Value::U64(weight)));
|
||||
}
|
||||
|
||||
if let Some(period) = cpu_resources.period {
|
||||
if let Some(period) = cpu_resources.period() {
|
||||
if period != 0 && systemd_version >= BASIC_SYSTEMD_VERSION {
|
||||
properties.push(("CPUQuotaPeriodUSec", Value::U64(period)));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(quota) = cpu_resources.quota {
|
||||
let period = cpu_resources.period.unwrap_or(DEFAULT_CPUQUOTAPERIOD);
|
||||
if let Some(quota) = cpu_resources.quota() {
|
||||
let period = cpu_resources.period().unwrap_or(DEFAULT_CPUQUOTAPERIOD);
|
||||
if period != 0 {
|
||||
let cpu_quota_per_sec_usec = resolve_cpuquota(quota, period);
|
||||
properties.push(("CPUQuotaPerSecUSec", Value::U64(cpu_quota_per_sec_usec)));
|
||||
|
||||
@@ -10,6 +10,7 @@ use super::transformer::Transformer;
|
||||
use anyhow::{bail, Result};
|
||||
use bit_vec::BitVec;
|
||||
use oci::{LinuxCpu, LinuxResources};
|
||||
use oci_spec::runtime as oci;
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
use zbus::zvariant::Value;
|
||||
|
||||
@@ -24,7 +25,7 @@ impl Transformer for CpuSet {
|
||||
_: &CgroupHierarchy,
|
||||
systemd_version: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(cpuset_resources) = &r.cpu {
|
||||
if let Some(cpuset_resources) = &r.cpu() {
|
||||
Self::apply(cpuset_resources, properties, systemd_version)?;
|
||||
}
|
||||
|
||||
@@ -45,15 +46,13 @@ impl CpuSet {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let cpus = cpuset_resources.cpus.as_str();
|
||||
if !cpus.is_empty() {
|
||||
let cpus_vec: BitMask = cpus.try_into()?;
|
||||
if let Some(cpus) = cpuset_resources.cpus().as_ref() {
|
||||
let cpus_vec: BitMask = cpus.as_str().try_into()?;
|
||||
properties.push(("AllowedCPUs", Value::Array(cpus_vec.0.into())));
|
||||
}
|
||||
|
||||
let mems = cpuset_resources.mems.as_str();
|
||||
if !mems.is_empty() {
|
||||
let mems_vec: BitMask = mems.try_into()?;
|
||||
if let Some(mems) = cpuset_resources.mems().as_ref() {
|
||||
let mems_vec: BitMask = mems.as_str().try_into()?;
|
||||
properties.push(("AllowedMemoryNodes", Value::Array(mems_vec.0.into())));
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ use super::transformer::Transformer;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use oci::{LinuxMemory, LinuxResources};
|
||||
use oci_spec::runtime as oci;
|
||||
use zbus::zvariant::Value;
|
||||
|
||||
pub struct Memory {}
|
||||
@@ -20,7 +21,7 @@ impl Transformer for Memory {
|
||||
cgroup_hierarchy: &CgroupHierarchy,
|
||||
_: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(memory_resources) = &r.memory {
|
||||
if let Some(memory_resources) = &r.memory() {
|
||||
match cgroup_hierarchy {
|
||||
CgroupHierarchy::Legacy => Self::legacy_apply(memory_resources, properties)?,
|
||||
CgroupHierarchy::Unified => Self::unified_apply(memory_resources, properties)?,
|
||||
@@ -35,7 +36,7 @@ impl Memory {
|
||||
// v1:
|
||||
// memory.limit <-> MemoryLimit
|
||||
fn legacy_apply(memory_resources: &LinuxMemory, properties: &mut Properties) -> Result<()> {
|
||||
if let Some(limit) = memory_resources.limit {
|
||||
if let Some(limit) = memory_resources.limit() {
|
||||
let limit = match limit {
|
||||
1..=i64::MAX => limit as u64,
|
||||
0 => u64::MAX,
|
||||
@@ -52,7 +53,7 @@ impl Memory {
|
||||
// memory.max <-> MemoryMax
|
||||
// memory.swap & memory.limit <-> MemorySwapMax
|
||||
fn unified_apply(memory_resources: &LinuxMemory, properties: &mut Properties) -> Result<()> {
|
||||
if let Some(limit) = memory_resources.limit {
|
||||
if let Some(limit) = memory_resources.limit() {
|
||||
let limit = match limit {
|
||||
1..=i64::MAX => limit as u64,
|
||||
0 => u64::MAX,
|
||||
@@ -61,7 +62,7 @@ impl Memory {
|
||||
properties.push(("MemoryMax", Value::U64(limit)));
|
||||
}
|
||||
|
||||
if let Some(reservation) = memory_resources.reservation {
|
||||
if let Some(reservation) = memory_resources.reservation() {
|
||||
let reservation = match reservation {
|
||||
1..=i64::MAX => reservation as u64,
|
||||
0 => u64::MAX,
|
||||
@@ -70,11 +71,11 @@ impl Memory {
|
||||
properties.push(("MemoryLow", Value::U64(reservation)));
|
||||
}
|
||||
|
||||
let swap = match memory_resources.swap {
|
||||
let swap = match memory_resources.swap() {
|
||||
Some(0) => u64::MAX,
|
||||
Some(1..=i64::MAX) => match memory_resources.limit {
|
||||
Some(1..=i64::MAX) => match memory_resources.limit() {
|
||||
Some(1..=i64::MAX) => {
|
||||
(memory_resources.limit.unwrap() - memory_resources.swap.unwrap()) as u64
|
||||
(memory_resources.limit().unwrap() - memory_resources.swap().unwrap()) as u64
|
||||
}
|
||||
_ => bail!("invalid memory.limit when memory.swap specified"),
|
||||
},
|
||||
@@ -93,18 +94,21 @@ mod tests {
|
||||
use super::Memory;
|
||||
use super::Properties;
|
||||
use super::Value;
|
||||
use oci_spec::runtime as oci;
|
||||
|
||||
#[test]
|
||||
fn test_unified_memory() {
|
||||
let memory_resources = oci::LinuxMemory {
|
||||
limit: Some(736870912),
|
||||
reservation: Some(536870912),
|
||||
swap: Some(536870912),
|
||||
kernel: Some(0),
|
||||
kernel_tcp: Some(0),
|
||||
swappiness: Some(0),
|
||||
disable_oom_killer: Some(false),
|
||||
};
|
||||
let memory_resources = oci::LinuxMemoryBuilder::default()
|
||||
.limit(736870912)
|
||||
.reservation(536870912)
|
||||
.swap(536870912)
|
||||
.kernel(0)
|
||||
.kernel_tcp(0)
|
||||
.swappiness(0u64)
|
||||
.disable_oom_killer(false)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let mut properties: Properties = vec![];
|
||||
|
||||
assert_eq!(
|
||||
|
||||
@@ -9,6 +9,7 @@ use super::transformer::Transformer;
|
||||
|
||||
use anyhow::Result;
|
||||
use oci::{LinuxPids, LinuxResources};
|
||||
use oci_spec::runtime as oci;
|
||||
use zbus::zvariant::Value;
|
||||
|
||||
pub struct Pids {}
|
||||
@@ -20,7 +21,7 @@ impl Transformer for Pids {
|
||||
_: &CgroupHierarchy,
|
||||
_: &str,
|
||||
) -> Result<()> {
|
||||
if let Some(pids_resources) = &r.pids {
|
||||
if let Some(pids_resources) = &r.pids() {
|
||||
Self::apply(pids_resources, properties)?;
|
||||
}
|
||||
|
||||
@@ -31,8 +32,8 @@ impl Transformer for Pids {
|
||||
// pids.limit <-> TasksMax
|
||||
impl Pids {
|
||||
fn apply(pids_resources: &LinuxPids, properties: &mut Properties) -> Result<()> {
|
||||
let limit = if pids_resources.limit > 0 {
|
||||
pids_resources.limit as u64
|
||||
let limit = if pids_resources.limit() > 0 {
|
||||
pids_resources.limit() as u64
|
||||
} else {
|
||||
u64::MAX
|
||||
};
|
||||
@@ -47,10 +48,13 @@ mod tests {
|
||||
use super::Pids;
|
||||
use super::Properties;
|
||||
use super::Value;
|
||||
use oci_spec::runtime as oci;
|
||||
|
||||
#[test]
|
||||
fn test_subsystem_workflow() {
|
||||
let pids_resources = oci::LinuxPids { limit: 0 };
|
||||
let mut pids_resources = oci::LinuxPids::default();
|
||||
pids_resources.set_limit(0 as i64);
|
||||
|
||||
let mut properties: Properties = vec![];
|
||||
|
||||
assert_eq!(true, Pids::apply(&pids_resources, &mut properties).is_ok());
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
use super::super::common::{CgroupHierarchy, Properties};
|
||||
use anyhow::Result;
|
||||
use oci::LinuxResources;
|
||||
use oci_spec::runtime as oci;
|
||||
|
||||
pub trait Transformer {
|
||||
fn apply(
|
||||
|
||||
@@ -5,8 +5,10 @@
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use libc::pid_t;
|
||||
use oci::{ContainerState, LinuxDevice, LinuxIdMapping};
|
||||
use oci::{Linux, LinuxNamespace, LinuxResources, Spec};
|
||||
use oci::{Linux, LinuxDevice, LinuxIdMapping, LinuxNamespace, LinuxResources, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use runtime_spec as spec;
|
||||
use spec::{ContainerState, State as OCIState};
|
||||
use std::clone::Clone;
|
||||
use std::ffi::CString;
|
||||
use std::fmt::Display;
|
||||
@@ -51,7 +53,6 @@ use std::os::unix::io::AsRawFd;
|
||||
|
||||
use protobuf::MessageField;
|
||||
|
||||
use oci::State as OCIState;
|
||||
use regex::Regex;
|
||||
use std::collections::HashMap;
|
||||
use std::os::unix::io::FromRawFd;
|
||||
@@ -130,82 +131,88 @@ lazy_static! {
|
||||
m.insert("user", CloneFlags::CLONE_NEWUSER);
|
||||
m.insert("ipc", CloneFlags::CLONE_NEWIPC);
|
||||
m.insert("pid", CloneFlags::CLONE_NEWPID);
|
||||
m.insert("network", CloneFlags::CLONE_NEWNET);
|
||||
m.insert("mount", CloneFlags::CLONE_NEWNS);
|
||||
m.insert("net", CloneFlags::CLONE_NEWNET);
|
||||
m.insert("mnt", CloneFlags::CLONE_NEWNS);
|
||||
m.insert("uts", CloneFlags::CLONE_NEWUTS);
|
||||
m.insert("cgroup", CloneFlags::CLONE_NEWCGROUP);
|
||||
m
|
||||
};
|
||||
|
||||
// type to name hashmap, better to be in NAMESPACES
|
||||
pub static ref TYPETONAME: HashMap<&'static str, &'static str> = {
|
||||
pub static ref TYPETONAME: HashMap<oci::LinuxNamespaceType, &'static str> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert("ipc", "ipc");
|
||||
m.insert("user", "user");
|
||||
m.insert("pid", "pid");
|
||||
m.insert("network", "net");
|
||||
m.insert("mount", "mnt");
|
||||
m.insert("cgroup", "cgroup");
|
||||
m.insert("uts", "uts");
|
||||
m.insert(oci::LinuxNamespaceType::Ipc, "ipc");
|
||||
m.insert(oci::LinuxNamespaceType::User, "user");
|
||||
m.insert(oci::LinuxNamespaceType::Pid, "pid");
|
||||
m.insert(oci::LinuxNamespaceType::Network, "net");
|
||||
m.insert(oci::LinuxNamespaceType::Mount, "mnt");
|
||||
m.insert(oci::LinuxNamespaceType::Cgroup, "cgroup");
|
||||
m.insert(oci::LinuxNamespaceType::Uts, "uts");
|
||||
m
|
||||
};
|
||||
|
||||
pub static ref DEFAULT_DEVICES: Vec<LinuxDevice> = {
|
||||
vec![
|
||||
LinuxDevice {
|
||||
path: "/dev/null".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 3,
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
},
|
||||
LinuxDevice {
|
||||
path: "/dev/zero".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 5,
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
},
|
||||
LinuxDevice {
|
||||
path: "/dev/full".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 7,
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
},
|
||||
LinuxDevice {
|
||||
path: "/dev/tty".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 5,
|
||||
minor: 0,
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
},
|
||||
LinuxDevice {
|
||||
path: "/dev/urandom".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 9,
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
},
|
||||
LinuxDevice {
|
||||
path: "/dev/random".to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 1,
|
||||
minor: 8,
|
||||
file_mode: Some(0o666),
|
||||
uid: Some(0xffffffff),
|
||||
gid: Some(0xffffffff),
|
||||
},
|
||||
oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from("/dev/null"))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(1)
|
||||
.minor(3)
|
||||
.file_mode(0o066_u32)
|
||||
.uid(0xffffffff_u32)
|
||||
.gid(0xffffffff_u32)
|
||||
.build()
|
||||
.unwrap(),
|
||||
oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from("/dev/zero"))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(1)
|
||||
.minor(5)
|
||||
.file_mode(0o066_u32)
|
||||
.uid(0xffffffff_u32)
|
||||
.gid(0xffffffff_u32)
|
||||
.build()
|
||||
.unwrap(),
|
||||
oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from("/dev/full"))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(1)
|
||||
.minor(7)
|
||||
.file_mode(0o066_u32)
|
||||
.uid(0xffffffff_u32)
|
||||
.gid(0xffffffff_u32)
|
||||
.build()
|
||||
.unwrap(),
|
||||
oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from("/dev/tty"))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(5)
|
||||
.minor(0)
|
||||
.file_mode(0o066_u32)
|
||||
.uid(0xffffffff_u32)
|
||||
.gid(0xffffffff_u32)
|
||||
.build()
|
||||
.unwrap(),
|
||||
oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from("/dev/urandom"))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(1)
|
||||
.minor(9)
|
||||
.file_mode(0o066_u32)
|
||||
.uid(0xffffffff_u32)
|
||||
.gid(0xffffffff_u32)
|
||||
.build()
|
||||
.unwrap(),
|
||||
oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from("/dev/random"))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(1)
|
||||
.minor(8)
|
||||
.file_mode(0o066_u32)
|
||||
.uid(0xffffffff_u32)
|
||||
.gid(0xffffffff_u32)
|
||||
.build()
|
||||
.unwrap(),
|
||||
]
|
||||
};
|
||||
|
||||
@@ -402,7 +409,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
|
||||
let buf = read_sync(crfd)?;
|
||||
let state_str = std::str::from_utf8(&buf)?;
|
||||
let mut state: oci::State = serde_json::from_str(state_str)?;
|
||||
let mut state: OCIState = serde_json::from_str(state_str)?;
|
||||
log_child!(cfd_log, "notify parent to send cgroup manager");
|
||||
write_sync(cwfd, SYNC_SUCCESS, "")?;
|
||||
|
||||
@@ -416,16 +423,16 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
#[cfg(feature = "standard-oci-runtime")]
|
||||
let csocket_fd = console::setup_console_socket(&std::env::var(CONSOLE_SOCKET_FD)?)?;
|
||||
|
||||
let p = if spec.process.is_some() {
|
||||
spec.process.as_ref().unwrap()
|
||||
let p = if spec.process().is_some() {
|
||||
spec.process().as_ref().unwrap()
|
||||
} else {
|
||||
return Err(anyhow!("didn't find process in Spec"));
|
||||
};
|
||||
|
||||
if spec.linux.is_none() {
|
||||
if spec.linux().is_none() {
|
||||
return Err(anyhow!(MissingLinux));
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
let linux = spec.linux().as_ref().unwrap();
|
||||
|
||||
// get namespace vector to join/new
|
||||
let nses = get_namespaces(linux);
|
||||
@@ -435,25 +442,30 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
let mut to_join = Vec::new();
|
||||
|
||||
for ns in &nses {
|
||||
let s = NAMESPACES.get(&ns.r#type.as_str());
|
||||
let ns_type = ns.typ().to_string();
|
||||
let s = NAMESPACES.get(&ns_type.as_str());
|
||||
if s.is_none() {
|
||||
return Err(anyhow!(InvalidNamespace));
|
||||
}
|
||||
let s = s.unwrap();
|
||||
|
||||
if ns.path.is_empty() {
|
||||
if ns
|
||||
.path()
|
||||
.as_ref()
|
||||
.map_or(true, |p| p.as_os_str().is_empty())
|
||||
{
|
||||
// skip the pidns since it has been done in parent process.
|
||||
if *s != CloneFlags::CLONE_NEWPID {
|
||||
to_new.set(*s, true);
|
||||
}
|
||||
} else {
|
||||
let fd =
|
||||
fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
|
||||
let fd = fcntl::open(ns.path().as_ref().unwrap(), OFlag::O_CLOEXEC, Mode::empty())
|
||||
.map_err(|e| {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"cannot open type: {} path: {}",
|
||||
ns.r#type.clone(),
|
||||
ns.path.clone()
|
||||
&ns.typ().to_string(),
|
||||
ns.path().as_ref().unwrap().display()
|
||||
);
|
||||
log_child!(cfd_log, "error is : {:?}", e);
|
||||
e
|
||||
@@ -469,21 +481,23 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
userns = true;
|
||||
}
|
||||
|
||||
if p.oom_score_adj.is_some() {
|
||||
log_child!(cfd_log, "write oom score {}", p.oom_score_adj.unwrap());
|
||||
if p.oom_score_adj().is_some() {
|
||||
log_child!(cfd_log, "write oom score {}", p.oom_score_adj().unwrap());
|
||||
fs::write(
|
||||
"/proc/self/oom_score_adj",
|
||||
p.oom_score_adj.unwrap().to_string().as_bytes(),
|
||||
p.oom_score_adj().unwrap().to_string().as_bytes(),
|
||||
)?;
|
||||
}
|
||||
|
||||
// set rlimit
|
||||
for rl in p.rlimits.iter() {
|
||||
let default_rlimits = Vec::new();
|
||||
let process_rlimits = p.rlimits().as_ref().unwrap_or(&default_rlimits);
|
||||
for rl in process_rlimits.iter() {
|
||||
log_child!(cfd_log, "set resource limit: {:?}", rl);
|
||||
setrlimit(
|
||||
Resource::from_str(&rl.r#type)?,
|
||||
Rlim::from_raw(rl.soft),
|
||||
Rlim::from_raw(rl.hard),
|
||||
Resource::from_str(&rl.typ().to_string())?,
|
||||
Rlim::from_raw(rl.soft()),
|
||||
Rlim::from_raw(rl.hard()),
|
||||
)?;
|
||||
}
|
||||
|
||||
@@ -565,12 +579,17 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
}
|
||||
|
||||
if to_new.contains(CloneFlags::CLONE_NEWUTS) {
|
||||
unistd::sethostname(&spec.hostname)?;
|
||||
unistd::sethostname(
|
||||
spec.hostname()
|
||||
.as_ref()
|
||||
.map_or("".to_string(), |x| x.clone()),
|
||||
)?;
|
||||
}
|
||||
|
||||
let rootfs = spec.root.as_ref().unwrap().path.as_str();
|
||||
log_child!(cfd_log, "setup rootfs {}", rootfs);
|
||||
let root = fs::canonicalize(rootfs)?;
|
||||
let rootfs = spec.root().as_ref().unwrap().path().display().to_string();
|
||||
|
||||
log_child!(cfd_log, "setup rootfs {}", &rootfs);
|
||||
let root = fs::canonicalize(&rootfs)?;
|
||||
let rootfs = root.to_str().unwrap();
|
||||
|
||||
if to_new.contains(CloneFlags::CLONE_NEWNS) {
|
||||
@@ -605,15 +624,22 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
// CreateContainer Hooks:
|
||||
// before pivot_root after prestart, createruntime
|
||||
state.pid = std::process::id() as i32;
|
||||
state.status = oci::ContainerState::Created;
|
||||
if let Some(hooks) = spec.hooks.as_ref() {
|
||||
state.status = spec::ContainerState::Created;
|
||||
if let Some(hooks) = spec.hooks().as_ref() {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
"create_container hooks {:?}",
|
||||
hooks.create_container
|
||||
hooks.create_container()
|
||||
);
|
||||
let mut create_container_states = HookStates::new();
|
||||
create_container_states.execute_hooks(&hooks.create_container, Some(state.clone()))?;
|
||||
create_container_states.execute_hooks(
|
||||
hooks
|
||||
.create_container()
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.as_slice(),
|
||||
Some(state.clone()),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -627,7 +653,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
}
|
||||
|
||||
// setup sysctl
|
||||
set_sysctls(&linux.sysctl)?;
|
||||
set_sysctls(&linux.sysctl().clone().unwrap_or_default())?;
|
||||
unistd::chdir("/")?;
|
||||
}
|
||||
|
||||
@@ -635,14 +661,14 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
mount::finish_rootfs(cfd_log, &spec, &oci_process)?;
|
||||
}
|
||||
|
||||
if !oci_process.cwd.is_empty() {
|
||||
unistd::chdir(oci_process.cwd.as_str())?;
|
||||
if !oci_process.cwd().as_os_str().is_empty() {
|
||||
unistd::chdir(oci_process.cwd().display().to_string().as_str())?;
|
||||
}
|
||||
|
||||
let guser = &oci_process.user;
|
||||
let guser = &oci_process.user();
|
||||
|
||||
let uid = Uid::from_raw(guser.uid);
|
||||
let gid = Gid::from_raw(guser.gid);
|
||||
let uid = Uid::from_raw(guser.uid());
|
||||
let gid = Gid::from_raw(guser.gid());
|
||||
|
||||
// only change stdio devices owner when user
|
||||
// isn't root.
|
||||
@@ -652,9 +678,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
|
||||
setid(uid, gid)?;
|
||||
|
||||
if !guser.additional_gids.is_empty() {
|
||||
let gids: Vec<Gid> = guser
|
||||
.additional_gids
|
||||
if let Some(additional_gids) = guser.additional_gids() {
|
||||
let gids: Vec<Gid> = additional_gids
|
||||
.iter()
|
||||
.map(|gid| Gid::from_raw(*gid))
|
||||
.collect();
|
||||
@@ -671,12 +696,17 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
}
|
||||
|
||||
// NoNewPrivileges
|
||||
if oci_process.no_new_privileges {
|
||||
if oci_process.no_new_privileges().unwrap_or_default() {
|
||||
capctl::prctl::set_no_new_privs().map_err(|_| anyhow!("cannot set no new privileges"))?;
|
||||
}
|
||||
|
||||
// Set SELinux label
|
||||
if !oci_process.selinux_label.is_empty() {
|
||||
if !oci_process
|
||||
.selinux_label()
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.is_empty()
|
||||
{
|
||||
if !selinux_enabled {
|
||||
return Err(anyhow!(
|
||||
"SELinux label for the process is provided but SELinux is not enabled on the running kernel"
|
||||
@@ -684,12 +714,18 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
}
|
||||
|
||||
log_child!(cfd_log, "Set SELinux label to the container process");
|
||||
selinux::set_exec_label(&oci_process.selinux_label)?;
|
||||
let default_label = String::new();
|
||||
selinux::set_exec_label(
|
||||
oci_process
|
||||
.selinux_label()
|
||||
.as_ref()
|
||||
.unwrap_or(&default_label),
|
||||
)?;
|
||||
}
|
||||
|
||||
// Log unknown seccomp system calls in advance before the log file descriptor closes.
|
||||
#[cfg(feature = "seccomp")]
|
||||
if let Some(ref scmp) = linux.seccomp {
|
||||
if let Some(ref scmp) = linux.seccomp() {
|
||||
if let Some(syscalls) = seccomp::get_unknown_syscalls(scmp) {
|
||||
log_child!(cfd_log, "unknown seccomp system calls: {:?}", syscalls);
|
||||
}
|
||||
@@ -699,20 +735,21 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
// before dropping capabilities because the calling thread
|
||||
// must have the CAP_SYS_ADMIN.
|
||||
#[cfg(feature = "seccomp")]
|
||||
if !oci_process.no_new_privileges {
|
||||
if let Some(ref scmp) = linux.seccomp {
|
||||
if !oci_process.no_new_privileges().unwrap_or_default() {
|
||||
if let Some(ref scmp) = linux.seccomp() {
|
||||
seccomp::init_seccomp(scmp)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Drop capabilities
|
||||
if oci_process.capabilities.is_some() {
|
||||
let c = oci_process.capabilities.as_ref().unwrap();
|
||||
if oci_process.capabilities().is_some() {
|
||||
let c = oci_process.capabilities().as_ref().unwrap();
|
||||
capabilities::drop_privileges(cfd_log, c)?;
|
||||
}
|
||||
|
||||
let args = oci_process.args.to_vec();
|
||||
let env = oci_process.env.to_vec();
|
||||
let default_vec = Vec::new();
|
||||
let args = oci_process.args().as_ref().unwrap_or(&default_vec).to_vec();
|
||||
let env = oci_process.env().as_ref().unwrap_or(&default_vec).to_vec();
|
||||
|
||||
let mut fifofd = -1;
|
||||
if init {
|
||||
@@ -734,7 +771,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
|
||||
if env::var_os(HOME_ENV_KEY).is_none() {
|
||||
// try to set "HOME" env by uid
|
||||
if let Ok(Some(user)) = User::from_uid(Uid::from_raw(guser.uid)) {
|
||||
if let Ok(Some(user)) = User::from_uid(Uid::from_raw(guser.uid())) {
|
||||
if let Ok(user_home_dir) = user.dir.into_os_string().into_string() {
|
||||
env::set_var(HOME_ENV_KEY, user_home_dir);
|
||||
}
|
||||
@@ -758,7 +795,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
let _ = unistd::close(crfd);
|
||||
let _ = unistd::close(cwfd);
|
||||
|
||||
if oci_process.terminal {
|
||||
if oci_process.terminal().unwrap_or_default() {
|
||||
cfg_if::cfg_if! {
|
||||
if #[cfg(feature = "standard-oci-runtime")] {
|
||||
if let Some(csocket_fd) = csocket_fd {
|
||||
@@ -791,10 +828,17 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
// * should be run after container is created and before container is started (before user-specific command is executed)
|
||||
// * spec details: https://github.com/opencontainers/runtime-spec/blob/c1662686cff159595277b79322d0272f5182941b/config.md#startcontainer-hooks
|
||||
state.pid = std::process::id() as i32;
|
||||
state.status = oci::ContainerState::Created;
|
||||
if let Some(hooks) = spec.hooks.as_ref() {
|
||||
state.status = spec::ContainerState::Created;
|
||||
if let Some(hooks) = spec.hooks().as_ref() {
|
||||
let mut start_container_states = HookStates::new();
|
||||
start_container_states.execute_hooks(&hooks.start_container, Some(state))?;
|
||||
start_container_states.execute_hooks(
|
||||
hooks
|
||||
.start_container()
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.as_slice(),
|
||||
Some(state),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -802,8 +846,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
|
||||
// do_exec as possible in order to reduce the amount of
|
||||
// system calls in the seccomp profiles.
|
||||
#[cfg(feature = "seccomp")]
|
||||
if oci_process.no_new_privileges {
|
||||
if let Some(ref scmp) = linux.seccomp {
|
||||
if oci_process.no_new_privileges().unwrap_or_default() {
|
||||
if let Some(ref scmp) = linux.seccomp() {
|
||||
seccomp::init_seccomp(scmp)?;
|
||||
}
|
||||
}
|
||||
@@ -869,8 +913,8 @@ impl BaseContainer for LinuxContainer {
|
||||
0
|
||||
};
|
||||
|
||||
let root = match oci.root.as_ref() {
|
||||
Some(s) => s.path.as_str(),
|
||||
let root = match oci.root().as_ref() {
|
||||
Some(s) => s.path().display().to_string(),
|
||||
None => return Err(anyhow!("Unable to get root path: oci.root is none")),
|
||||
};
|
||||
|
||||
@@ -881,12 +925,12 @@ impl BaseContainer for LinuxContainer {
|
||||
};
|
||||
|
||||
Ok(OCIState {
|
||||
version: oci.version.clone(),
|
||||
version: oci.version().clone(),
|
||||
id: self.id(),
|
||||
status,
|
||||
pid,
|
||||
bundle,
|
||||
annotations: oci.annotations.clone(),
|
||||
annotations: oci.annotations().clone().unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -920,14 +964,10 @@ impl BaseContainer for LinuxContainer {
|
||||
fn set(&mut self, r: LinuxResources) -> Result<()> {
|
||||
self.cgroup_manager.as_ref().set(&r, true)?;
|
||||
|
||||
self.config
|
||||
.spec
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.linux
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.resources = Some(r);
|
||||
if let Some(linux) = self.config.spec.as_mut().unwrap().linux_mut() {
|
||||
linux.set_resources(Some(r));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -956,23 +996,23 @@ impl BaseContainer for LinuxContainer {
|
||||
}
|
||||
|
||||
let spec = self.config.spec.as_ref().unwrap();
|
||||
if spec.linux.is_none() {
|
||||
if spec.linux().is_none() {
|
||||
return Err(anyhow!("no linux config"));
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
let linux = spec.linux().as_ref().unwrap();
|
||||
|
||||
if p.oci.capabilities.is_none() {
|
||||
if p.oci.capabilities().is_none() {
|
||||
// No capabilities, inherit from container process
|
||||
let process = spec
|
||||
.process
|
||||
.process()
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("no process config"))?;
|
||||
p.oci.capabilities = Some(
|
||||
p.oci.set_capabilities(Some(
|
||||
process
|
||||
.capabilities
|
||||
.capabilities()
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow!("missing process capabilities"))?,
|
||||
);
|
||||
));
|
||||
}
|
||||
|
||||
let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;
|
||||
@@ -1247,17 +1287,33 @@ impl BaseContainer for LinuxContainer {
|
||||
// * should be executed after the container is deleted but before the delete operation returns
|
||||
// * the executable file is in agent namespace
|
||||
// * should also be executed in agent namespace.
|
||||
if let Some(hooks) = spec.hooks.as_ref() {
|
||||
if let Some(hooks) = spec.hooks().as_ref() {
|
||||
info!(self.logger, "guest Poststop hook");
|
||||
let mut hook_states = HookStates::new();
|
||||
hook_states.execute_hooks(&hooks.poststop, Some(st))?;
|
||||
hook_states.execute_hooks(
|
||||
hooks.poststop().clone().unwrap_or_default().as_slice(),
|
||||
Some(st),
|
||||
)?;
|
||||
}
|
||||
|
||||
self.status.transition(ContainerState::Stopped);
|
||||
mount::umount2(
|
||||
spec.root.as_ref().unwrap().path.as_str(),
|
||||
spec.root()
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.path()
|
||||
.display()
|
||||
.to_string()
|
||||
.as_str(),
|
||||
MntFlags::MNT_DETACH,
|
||||
)?;
|
||||
)
|
||||
.or_else(|e| {
|
||||
if e.ne(&nix::Error::EINVAL) {
|
||||
return Err(anyhow!(e));
|
||||
}
|
||||
warn!(self.logger, "rootfs not mounted");
|
||||
Ok(())
|
||||
})?;
|
||||
fs::remove_dir_all(&self.root)?;
|
||||
|
||||
let cgm = self.cgroup_manager.as_mut();
|
||||
@@ -1300,10 +1356,13 @@ impl BaseContainer for LinuxContainer {
|
||||
// * should be executed after the container is started but before the delete operation returns
|
||||
// * the executable file is in agent namespace
|
||||
// * should also be executed in agent namespace.
|
||||
if let Some(hooks) = spec.hooks.as_ref() {
|
||||
if let Some(hooks) = spec.hooks().as_ref() {
|
||||
info!(self.logger, "guest Poststart hook");
|
||||
let mut hook_states = HookStates::new();
|
||||
hook_states.execute_hooks(&hooks.poststart, Some(st))?;
|
||||
hook_states.execute_hooks(
|
||||
hooks.poststart().clone().unwrap_or_default().as_slice(),
|
||||
Some(st),
|
||||
)?;
|
||||
}
|
||||
|
||||
unistd::close(fd)?;
|
||||
@@ -1351,21 +1410,26 @@ fn do_exec(args: &[String]) -> ! {
|
||||
pub fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Result<()> {
|
||||
info!(logger, "updating namespaces");
|
||||
let linux = spec
|
||||
.linux
|
||||
.linux_mut()
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
|
||||
|
||||
let namespaces = linux.namespaces.as_mut_slice();
|
||||
for namespace in namespaces.iter_mut() {
|
||||
if TYPETONAME.contains_key(namespace.r#type.as_str()) {
|
||||
let ns_path = format!(
|
||||
"/proc/{}/ns/{}",
|
||||
init_pid,
|
||||
TYPETONAME.get(namespace.r#type.as_str()).unwrap()
|
||||
);
|
||||
if let Some(namespaces) = linux.namespaces_mut().as_mut() {
|
||||
for namespace in namespaces.iter_mut() {
|
||||
if TYPETONAME.contains_key(&namespace.typ()) {
|
||||
let ns_path = format!(
|
||||
"/proc/{}/ns/{}",
|
||||
init_pid,
|
||||
TYPETONAME.get(&namespace.typ()).unwrap()
|
||||
);
|
||||
|
||||
if namespace.path.is_empty() {
|
||||
namespace.path = ns_path;
|
||||
if namespace
|
||||
.path()
|
||||
.as_ref()
|
||||
.map_or(true, |p| p.as_os_str().is_empty())
|
||||
{
|
||||
namespace.set_path(Some(PathBuf::from(&ns_path)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1374,24 +1438,28 @@ pub fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> R
|
||||
}
|
||||
|
||||
fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<PidNs> {
|
||||
for ns in &linux.namespaces {
|
||||
if ns.r#type == "pid" {
|
||||
if ns.path.is_empty() {
|
||||
return Ok(PidNs::new(true, None));
|
||||
}
|
||||
|
||||
let fd =
|
||||
fcntl::open(ns.path.as_str(), OFlag::O_RDONLY, Mode::empty()).map_err(|e| {
|
||||
let linux_namespaces = linux.namespaces().clone().unwrap_or_default();
|
||||
for ns in &linux_namespaces {
|
||||
if &ns.typ().to_string() == "pid" {
|
||||
let fd = match ns.path() {
|
||||
None => return Ok(PidNs::new(true, None)),
|
||||
Some(ns_path) => fcntl::open(
|
||||
ns_path.display().to_string().as_str(),
|
||||
OFlag::O_RDONLY,
|
||||
Mode::empty(),
|
||||
)
|
||||
.map_err(|e| {
|
||||
error!(
|
||||
logger,
|
||||
"cannot open type: {} path: {}",
|
||||
ns.r#type.clone(),
|
||||
ns.path.clone()
|
||||
&ns.typ().to_string(),
|
||||
ns_path.display()
|
||||
);
|
||||
error!(logger, "error is : {:?}", e);
|
||||
|
||||
e
|
||||
})?;
|
||||
})?,
|
||||
};
|
||||
|
||||
return Ok(PidNs::new(true, Some(fd)));
|
||||
}
|
||||
@@ -1402,18 +1470,25 @@ fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<PidNs> {
|
||||
|
||||
fn is_userns_enabled(linux: &Linux) -> bool {
|
||||
linux
|
||||
.namespaces
|
||||
.namespaces()
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.any(|ns| ns.r#type == "user" && ns.path.is_empty())
|
||||
.any(|ns| &ns.typ().to_string() == "user" && ns.path().is_none())
|
||||
}
|
||||
|
||||
fn get_namespaces(linux: &Linux) -> Vec<LinuxNamespace> {
|
||||
linux
|
||||
.namespaces
|
||||
.namespaces()
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.map(|ns| LinuxNamespace {
|
||||
r#type: ns.r#type.clone(),
|
||||
path: ns.path.clone(),
|
||||
.map(|ns| {
|
||||
let mut namespace = LinuxNamespace::default();
|
||||
namespace.set_typ(ns.typ());
|
||||
namespace.set_path(ns.path().clone());
|
||||
|
||||
namespace
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@@ -1455,8 +1530,11 @@ async fn join_namespaces(
|
||||
) -> Result<()> {
|
||||
let logger = logger.new(o!("action" => "join-namespaces"));
|
||||
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
let res = linux.resources.as_ref();
|
||||
let linux = spec
|
||||
.linux()
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
|
||||
let res = linux.resources().as_ref();
|
||||
|
||||
let userns = is_userns_enabled(linux);
|
||||
|
||||
@@ -1494,17 +1572,11 @@ async fn join_namespaces(
|
||||
|
||||
if userns {
|
||||
info!(logger, "setup uid/gid mappings");
|
||||
let uid_mappings = linux.uid_mappings().clone().unwrap_or_default();
|
||||
let gid_mappings = linux.gid_mappings().clone().unwrap_or_default();
|
||||
// setup uid/gid mappings
|
||||
write_mappings(
|
||||
&logger,
|
||||
&format!("/proc/{}/uid_map", p.pid),
|
||||
&linux.uid_mappings,
|
||||
)?;
|
||||
write_mappings(
|
||||
&logger,
|
||||
&format!("/proc/{}/gid_map", p.pid),
|
||||
&linux.gid_mappings,
|
||||
)?;
|
||||
write_mappings(&logger, &format!("/proc/{}/uid_map", p.pid), &uid_mappings)?;
|
||||
write_mappings(&logger, &format!("/proc/{}/gid_map", p.pid), &gid_mappings)?;
|
||||
}
|
||||
|
||||
// apply cgroups
|
||||
@@ -1534,10 +1606,13 @@ async fn join_namespaces(
|
||||
// * should be executed during the start operation, and before the container command is executed
|
||||
// * the executable file is in agent namespace
|
||||
// * should also be executed in agent namespace.
|
||||
if let Some(hooks) = spec.hooks.as_ref() {
|
||||
if let Some(hooks) = spec.hooks().as_ref() {
|
||||
info!(logger, "guest Prestart hook");
|
||||
let mut hook_states = HookStates::new();
|
||||
hook_states.execute_hooks(&hooks.prestart, Some(st.clone()))?;
|
||||
hook_states.execute_hooks(
|
||||
hooks.prestart().clone().unwrap_or_default().as_slice(),
|
||||
Some(st.clone()),
|
||||
)?;
|
||||
}
|
||||
|
||||
// notify child run prestart hooks completed
|
||||
@@ -1554,8 +1629,8 @@ async fn join_namespaces(
|
||||
fn write_mappings(logger: &Logger, path: &str, maps: &[LinuxIdMapping]) -> Result<()> {
|
||||
let data = maps
|
||||
.iter()
|
||||
.filter(|m| m.size != 0)
|
||||
.map(|m| format!("{} {} {}\n", m.container_id, m.host_id, m.size))
|
||||
.filter(|m| m.size() != 0)
|
||||
.map(|m| format!("{} {} {}\n", m.container_id(), m.host_id(), m.size()))
|
||||
.collect::<Vec<_>>()
|
||||
.join("");
|
||||
|
||||
@@ -1624,18 +1699,24 @@ impl LinuxContainer {
|
||||
.context(format!("Cannot change owner of container {} root", id))?;
|
||||
|
||||
let spec = config.spec.as_ref().unwrap();
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
let linux_cgroups_path = spec
|
||||
.linux()
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.cgroups_path()
|
||||
.as_ref()
|
||||
.map_or(String::new(), |cgrp| cgrp.display().to_string());
|
||||
let cpath = if config.use_systemd_cgroup {
|
||||
if linux.cgroups_path.len() == 2 {
|
||||
if linux_cgroups_path.len() == 2 {
|
||||
format!("system.slice:kata_agent:{}", id.as_str())
|
||||
} else {
|
||||
linux.cgroups_path.clone()
|
||||
linux_cgroups_path.clone()
|
||||
}
|
||||
} else if linux.cgroups_path.is_empty() {
|
||||
} else if linux_cgroups_path.is_empty() {
|
||||
format!("/{}", id.as_str())
|
||||
} else {
|
||||
// if we have a systemd cgroup path we need to convert it to a fs cgroup path
|
||||
linux.cgroups_path.replace(':', "/")
|
||||
linux_cgroups_path.replace(':', "/")
|
||||
};
|
||||
|
||||
let cgroup_manager: Box<dyn Manager + Send + Sync> = if config.use_systemd_cgroup {
|
||||
@@ -1708,7 +1789,8 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::process::Process;
|
||||
use nix::unistd::Uid;
|
||||
use oci::{LinuxDeviceCgroup, Root};
|
||||
use oci::{LinuxBuilder, LinuxDeviceCgroupBuilder, LinuxResourcesBuilder, Root, SpecBuilder};
|
||||
use oci_spec::runtime as oci;
|
||||
use std::fs;
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
@@ -1777,10 +1859,10 @@ mod tests {
|
||||
let ns = NAMESPACES.get("pid");
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = NAMESPACES.get("network");
|
||||
let ns = NAMESPACES.get("net");
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = NAMESPACES.get("mount");
|
||||
let ns = NAMESPACES.get("mnt");
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = NAMESPACES.get("uts");
|
||||
@@ -1795,25 +1877,25 @@ mod tests {
|
||||
lazy_static::initialize(&TYPETONAME);
|
||||
assert_eq!(TYPETONAME.len(), 7);
|
||||
|
||||
let ns = TYPETONAME.get("user");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::User);
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = TYPETONAME.get("ipc");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::Ipc);
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = TYPETONAME.get("pid");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::Pid);
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = TYPETONAME.get("network");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::Network);
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = TYPETONAME.get("mount");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::Mount);
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = TYPETONAME.get("uts");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::Uts);
|
||||
assert!(ns.is_some());
|
||||
|
||||
let ns = TYPETONAME.get("cgroup");
|
||||
let ns = TYPETONAME.get(&oci::LinuxNamespaceType::Cgroup);
|
||||
assert!(ns.is_some());
|
||||
}
|
||||
|
||||
@@ -1823,21 +1905,18 @@ mod tests {
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards");
|
||||
|
||||
let root = Root {
|
||||
path: String::from("/tmp"),
|
||||
..Default::default()
|
||||
};
|
||||
let mut root = Root::default();
|
||||
root.set_path(String::from("/tmp").into());
|
||||
|
||||
let linux_resources = LinuxResources {
|
||||
devices: vec![LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: String::new(),
|
||||
major: None,
|
||||
minor: None,
|
||||
access: String::from("rwm"),
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
let linux_resources = LinuxResourcesBuilder::default()
|
||||
.devices(vec![LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap()])
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let cgroups_path = format!(
|
||||
"/{}/dummycontainer{}",
|
||||
@@ -1845,15 +1924,18 @@ mod tests {
|
||||
since_the_epoch.as_millis()
|
||||
);
|
||||
|
||||
let spec = Spec {
|
||||
linux: Some(Linux {
|
||||
cgroups_path,
|
||||
resources: Some(linux_resources),
|
||||
..Default::default()
|
||||
}),
|
||||
root: Some(root),
|
||||
..Default::default()
|
||||
};
|
||||
let mut spec = SpecBuilder::default()
|
||||
.linux(
|
||||
LinuxBuilder::default()
|
||||
.cgroups_path(cgroups_path)
|
||||
.resources(linux_resources)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.root(root)
|
||||
.build()
|
||||
.unwrap();
|
||||
spec.set_process(None);
|
||||
|
||||
CreateOpts {
|
||||
cgroup_name: "".to_string(),
|
||||
@@ -1959,7 +2041,14 @@ mod tests {
|
||||
#[test]
|
||||
fn test_linuxcontainer_oci_state_no_root_parent() {
|
||||
let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
|
||||
c.config.spec.as_mut().unwrap().root.as_mut().unwrap().path = "/".to_string();
|
||||
c.config
|
||||
.spec
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.root_mut()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.set_path("/".to_string().into());
|
||||
c.oci_state()
|
||||
});
|
||||
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
|
||||
@@ -2032,21 +2121,25 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn test_linuxcontainer_start() {
|
||||
let (c, _dir) = new_linux_container();
|
||||
let mut oci_process = oci::Process::default();
|
||||
oci_process.set_capabilities(None);
|
||||
let ret = c
|
||||
.unwrap()
|
||||
.start(Process::new(&sl(), &oci::Process::default(), "123", true, 1, None).unwrap())
|
||||
.start(Process::new(&sl(), &oci_process, "123", true, 1, None).unwrap())
|
||||
.await;
|
||||
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
|
||||
assert!(format!("{:?}", ret).contains("no process config"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_linuxcontainer_run() {
|
||||
let (c, _dir) = new_linux_container();
|
||||
let mut oci_process = oci::Process::default();
|
||||
oci_process.set_capabilities(None);
|
||||
let ret = c
|
||||
.unwrap()
|
||||
.run(Process::new(&sl(), &oci::Process::default(), "123", true, 1, None).unwrap())
|
||||
.run(Process::new(&sl(), &oci_process, "123", true, 1, None).unwrap())
|
||||
.await;
|
||||
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
|
||||
assert!(format!("{:?}", ret).contains("no process config"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -13,6 +13,7 @@ use nix::sys::stat::{self, Mode, SFlag};
|
||||
use nix::unistd::{self, Gid, Uid};
|
||||
use nix::NixPath;
|
||||
use oci::{LinuxDevice, Mount, Process, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::mem::MaybeUninit;
|
||||
@@ -172,24 +173,33 @@ pub fn init_rootfs(
|
||||
lazy_static::initialize(&LINUXDEVICETYPE);
|
||||
|
||||
let linux = &spec
|
||||
.linux
|
||||
.linux()
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Could not get linux configuration from spec"))?;
|
||||
|
||||
let mut flags = MsFlags::MS_REC;
|
||||
match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
|
||||
let default_propagation = String::new();
|
||||
match PROPAGATION.get(
|
||||
&linux
|
||||
.rootfs_propagation()
|
||||
.as_ref()
|
||||
.unwrap_or(&default_propagation)
|
||||
.as_str(),
|
||||
) {
|
||||
Some(fl) => flags |= *fl,
|
||||
None => flags |= MsFlags::MS_SLAVE,
|
||||
}
|
||||
|
||||
let label = &linux.mount_label;
|
||||
let default_mntlabel = String::new();
|
||||
let label = linux.mount_label().as_ref().unwrap_or(&default_mntlabel);
|
||||
|
||||
let root = spec
|
||||
.root
|
||||
.root()
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
|
||||
.and_then(|r| {
|
||||
fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
|
||||
fs::canonicalize(r.path().display().to_string().as_str())
|
||||
.context("Could not canonicalize rootfs path")
|
||||
})?;
|
||||
|
||||
let rootfs = (*root)
|
||||
@@ -209,13 +219,13 @@ pub fn init_rootfs(
|
||||
)?;
|
||||
|
||||
let mut bind_mount_dev = false;
|
||||
for m in &spec.mounts {
|
||||
let default_mnts = vec![];
|
||||
for m in spec.mounts().as_ref().unwrap_or(&default_mnts) {
|
||||
let (mut flags, pgflags, data) = parse_mount(m);
|
||||
if !m.destination.starts_with('/') || m.destination.contains("..") {
|
||||
return Err(anyhow!(
|
||||
"the mount destination {} is invalid",
|
||||
m.destination
|
||||
));
|
||||
|
||||
let mount_dest = &m.destination().display().to_string();
|
||||
if !mount_dest.starts_with('/') || mount_dest.contains("..") {
|
||||
return Err(anyhow!("the mount destination {} is invalid", mount_dest));
|
||||
}
|
||||
|
||||
// From https://github.com/opencontainers/runtime-spec/blob/main/config.md#mounts
|
||||
@@ -223,35 +233,37 @@ pub fn init_rootfs(
|
||||
// bind may be only specified in the oci spec options -> flags update r#type
|
||||
let m = &{
|
||||
let mut mbind = m.clone();
|
||||
if mbind.r#type.is_empty() && flags & MsFlags::MS_BIND == MsFlags::MS_BIND {
|
||||
mbind.r#type = "bind".to_string();
|
||||
if mbind.typ().is_none() && flags & MsFlags::MS_BIND == MsFlags::MS_BIND {
|
||||
mbind.set_typ(Some("bind".to_string()));
|
||||
}
|
||||
mbind
|
||||
};
|
||||
|
||||
if m.r#type == "cgroup" {
|
||||
let default_typ = String::new();
|
||||
let mount_typ = m.typ().as_ref().unwrap_or(&default_typ);
|
||||
if mount_typ == "cgroup" {
|
||||
mount_cgroups(cfd_log, m, rootfs, flags, &data, cpath, mounts)?;
|
||||
} else {
|
||||
if m.destination == "/dev" {
|
||||
if m.r#type == "bind" {
|
||||
if mount_dest.clone().as_str() == "/dev" {
|
||||
if mount_typ == "bind" {
|
||||
bind_mount_dev = true;
|
||||
}
|
||||
flags &= !MsFlags::MS_RDONLY;
|
||||
}
|
||||
|
||||
if m.r#type == "bind" {
|
||||
if mount_typ == "bind" {
|
||||
check_proc_mount(m)?;
|
||||
}
|
||||
|
||||
// If the destination already exists and is not a directory, we bail
|
||||
// out This is to avoid mounting through a symlink or similar -- which
|
||||
// has been a "fun" attack scenario in the past.
|
||||
if m.r#type == "proc" || m.r#type == "sysfs" {
|
||||
if let Ok(meta) = fs::symlink_metadata(&m.destination) {
|
||||
if mount_typ == "proc" || mount_typ == "sysfs" {
|
||||
if let Ok(meta) = fs::symlink_metadata(mount_dest) {
|
||||
if !meta.is_dir() {
|
||||
return Err(anyhow!(
|
||||
"Mount point {} must be ordinary directory: got {:?}",
|
||||
m.destination,
|
||||
&mount_dest,
|
||||
meta.file_type()
|
||||
));
|
||||
}
|
||||
@@ -263,8 +275,8 @@ pub fn init_rootfs(
|
||||
// effective.
|
||||
// first check that we have non-default options required before attempting a
|
||||
// remount
|
||||
if m.r#type == "bind" && !pgflags.is_empty() {
|
||||
let dest = secure_join(rootfs, &m.destination);
|
||||
if mount_typ == "bind" && !pgflags.is_empty() {
|
||||
let dest = secure_join(rootfs, mount_dest);
|
||||
mount(
|
||||
None::<&str>,
|
||||
dest.as_str(),
|
||||
@@ -282,9 +294,11 @@ pub fn init_rootfs(
|
||||
// in case the /dev directory was binded mount from guest,
|
||||
// then there's no need to create devices nodes and symlinks
|
||||
// in /dev.
|
||||
let default_devs = Vec::new();
|
||||
let linux_devices = linux.devices().as_ref().unwrap_or(&default_devs);
|
||||
if !bind_mount_dev {
|
||||
default_symlinks()?;
|
||||
create_devices(&linux.devices, bind_device)?;
|
||||
create_devices(linux_devices, bind_device)?;
|
||||
ensure_ptmx()?;
|
||||
}
|
||||
|
||||
@@ -308,17 +322,19 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
|
||||
"/proc/net/dev",
|
||||
];
|
||||
|
||||
let mount_dest = m.destination().display().to_string();
|
||||
for i in valid_destinations.iter() {
|
||||
if m.destination.as_str() == *i {
|
||||
if mount_dest == *i {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
if m.destination == PROC_PATH {
|
||||
if mount_dest == PROC_PATH {
|
||||
// only allow a mount on-top of proc if it's source is "proc"
|
||||
unsafe {
|
||||
let mut stats = MaybeUninit::<libc::statfs>::uninit();
|
||||
if m.source
|
||||
let mount_source = m.source().as_ref().unwrap().display().to_string();
|
||||
if mount_source
|
||||
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
|
||||
.is_ok()
|
||||
{
|
||||
@@ -331,15 +347,15 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
|
||||
|
||||
return Err(anyhow!(format!(
|
||||
"{} cannot be mounted to {} because it is not of type proc",
|
||||
m.source, m.destination
|
||||
&mount_source, &mount_dest
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
if m.destination.starts_with(PROC_PATH) {
|
||||
if mount_dest.starts_with(PROC_PATH) {
|
||||
return Err(anyhow!(format!(
|
||||
"{} cannot be mounted because it is inside /proc",
|
||||
m.destination
|
||||
&mount_dest
|
||||
)));
|
||||
}
|
||||
|
||||
@@ -351,12 +367,11 @@ fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) ->
|
||||
unistd::chdir(rootfs)?;
|
||||
|
||||
// https://github.com/opencontainers/runc/blob/09ddc63afdde16d5fb859a1d3ab010bd45f08497/libcontainer/rootfs_linux.go#L287
|
||||
let bm = Mount {
|
||||
source: "cgroup".to_string(),
|
||||
r#type: "cgroup2".to_string(),
|
||||
destination: m.destination.clone(),
|
||||
options: Vec::new(),
|
||||
};
|
||||
|
||||
let mut bm = oci::Mount::default();
|
||||
bm.set_source(Some(PathBuf::from("cgroup")));
|
||||
bm.set_typ(Some("cgroup2".to_string()));
|
||||
bm.set_destination(m.destination().clone());
|
||||
|
||||
let mount_flags: MsFlags = flags;
|
||||
|
||||
@@ -365,7 +380,11 @@ fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) ->
|
||||
unistd::chdir(&olddir)?;
|
||||
|
||||
if flags.contains(MsFlags::MS_RDONLY) {
|
||||
let dest = format!("{}{}", rootfs, m.destination.as_str());
|
||||
let dest = format!(
|
||||
"{}{}",
|
||||
rootfs,
|
||||
m.destination().display().to_string().as_str()
|
||||
);
|
||||
mount(
|
||||
Some(dest.as_str()),
|
||||
dest.as_str(),
|
||||
@@ -390,13 +409,13 @@ fn mount_cgroups(
|
||||
if cgroups::hierarchies::is_cgroup2_unified_mode() {
|
||||
return mount_cgroups_v2(cfd_log, m, rootfs, flags);
|
||||
}
|
||||
|
||||
let mount_dest = m.destination().display().to_string();
|
||||
// mount tmpfs
|
||||
let ctm = Mount {
|
||||
source: "tmpfs".to_string(),
|
||||
r#type: "tmpfs".to_string(),
|
||||
destination: m.destination.clone(),
|
||||
options: Vec::new(),
|
||||
};
|
||||
let mut ctm = oci::Mount::default();
|
||||
ctm.set_source(Some(PathBuf::from("tmpfs")));
|
||||
ctm.set_typ(Some("tmpfs".to_string()));
|
||||
ctm.set_destination(m.destination().clone());
|
||||
|
||||
let cflags = MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
|
||||
mount_from(cfd_log, &ctm, rootfs, cflags, "", "")?;
|
||||
@@ -421,12 +440,12 @@ fn mount_cgroups(
|
||||
&mount[..]
|
||||
};
|
||||
|
||||
let destination = format!("{}/{}", m.destination.as_str(), base);
|
||||
let destination = format!("{}/{}", &mount_dest, base);
|
||||
|
||||
if srcs.contains(source) {
|
||||
// already mounted, xxx,yyy style cgroup
|
||||
if key != base {
|
||||
let src = format!("{}/{}", m.destination.as_str(), key);
|
||||
let src = format!("{}/{}", &mount_dest, key);
|
||||
unix::fs::symlink(destination.as_str(), &src[1..])?;
|
||||
}
|
||||
|
||||
@@ -437,12 +456,10 @@ fn mount_cgroups(
|
||||
|
||||
log_child!(cfd_log, "mount destination: {}", destination.as_str());
|
||||
|
||||
let bm = Mount {
|
||||
source: source.to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
destination: destination.clone(),
|
||||
options: Vec::new(),
|
||||
};
|
||||
let mut bm = oci::Mount::default();
|
||||
bm.set_source(Some(PathBuf::from(source)));
|
||||
bm.set_typ(Some("bind".to_string()));
|
||||
bm.set_destination(PathBuf::from(destination.clone()));
|
||||
|
||||
let mut mount_flags: MsFlags = flags | MsFlags::MS_REC | MsFlags::MS_BIND;
|
||||
if key.contains("systemd") {
|
||||
@@ -451,7 +468,7 @@ fn mount_cgroups(
|
||||
mount_from(cfd_log, &bm, rootfs, mount_flags, "", "")?;
|
||||
|
||||
if key != base {
|
||||
let src = format!("{}/{}", m.destination.as_str(), key);
|
||||
let src = format!("{}/{}", &mount_dest, key);
|
||||
unix::fs::symlink(destination.as_str(), &src[1..]).map_err(|e| {
|
||||
log_child!(
|
||||
cfd_log,
|
||||
@@ -469,7 +486,7 @@ fn mount_cgroups(
|
||||
unistd::chdir(&olddir)?;
|
||||
|
||||
if flags.contains(MsFlags::MS_RDONLY) {
|
||||
let dest = format!("{}{}", rootfs, m.destination.as_str());
|
||||
let dest = format!("{}{}", rootfs, &mount_dest);
|
||||
mount(
|
||||
Some(dest.as_str()),
|
||||
dest.as_str(),
|
||||
@@ -710,7 +727,9 @@ fn parse_mount(m: &Mount) -> (MsFlags, MsFlags, String) {
|
||||
let mut pgflags = MsFlags::empty();
|
||||
let mut data = Vec::new();
|
||||
|
||||
for o in &m.options {
|
||||
let default_options = Vec::new();
|
||||
let mount_options = m.options().as_ref().unwrap_or(&default_options);
|
||||
for o in mount_options {
|
||||
if let Some(v) = OPTIONS.get(o.as_str()) {
|
||||
let (clear, fl) = *v;
|
||||
if clear {
|
||||
@@ -783,10 +802,13 @@ fn mount_from(
|
||||
label: &str,
|
||||
) -> Result<()> {
|
||||
let mut d = String::from(data);
|
||||
let dest = secure_join(rootfs, &m.destination);
|
||||
let mount_dest = m.destination().display().to_string();
|
||||
let mount_typ = m.typ().as_ref().unwrap();
|
||||
let dest = secure_join(rootfs, &mount_dest);
|
||||
|
||||
let src = if m.r#type.as_str() == "bind" {
|
||||
let src = fs::canonicalize(m.source.as_str())?;
|
||||
let mount_source = m.source().as_ref().unwrap().display().to_string();
|
||||
let src = if mount_typ == "bind" {
|
||||
let src = fs::canonicalize(&mount_source)?;
|
||||
let dir = if src.is_dir() {
|
||||
Path::new(&dest)
|
||||
} else {
|
||||
@@ -822,11 +844,10 @@ fn mount_from(
|
||||
src.to_str().unwrap().to_string()
|
||||
} else {
|
||||
let _ = fs::create_dir_all(&dest);
|
||||
if m.r#type.as_str() == "cgroup2" {
|
||||
if mount_typ == "cgroup2" {
|
||||
"cgroup2".to_string()
|
||||
} else {
|
||||
let tmp = PathBuf::from(&m.source);
|
||||
tmp.to_str().unwrap().to_string()
|
||||
mount_source.to_string()
|
||||
}
|
||||
};
|
||||
|
||||
@@ -839,11 +860,16 @@ fn mount_from(
|
||||
let mut use_xattr = false;
|
||||
if !label.is_empty() {
|
||||
if selinux::is_enabled()? {
|
||||
let device = Path::new(&m.source)
|
||||
let device = m
|
||||
.source()
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.file_name()
|
||||
.ok_or_else(|| anyhow!("invalid device source path: {}", &m.source))?
|
||||
.ok_or_else(|| anyhow!("invalid device source path: {}", &mount_source))?
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to convert device source path: {}", &m.source))?;
|
||||
.ok_or_else(|| {
|
||||
anyhow!("failed to convert device source path: {}", &mount_source)
|
||||
})?;
|
||||
|
||||
match device {
|
||||
// SELinux does not support labeling of /proc or /sys
|
||||
@@ -869,7 +895,7 @@ fn mount_from(
|
||||
mount(
|
||||
Some(src.as_str()),
|
||||
dest.as_str(),
|
||||
Some(m.r#type.as_str()),
|
||||
Some(mount_typ.as_str()),
|
||||
flags,
|
||||
Some(d.as_str()),
|
||||
)
|
||||
@@ -924,9 +950,7 @@ fn default_symlinks() -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn dev_rel_path(path: &str) -> Option<&Path> {
|
||||
let path = Path::new(path);
|
||||
|
||||
fn dev_rel_path(path: &PathBuf) -> Option<&Path> {
|
||||
if !path.starts_with("/dev")
|
||||
|| path == Path::new("/dev")
|
||||
|| path.components().any(|c| c == Component::ParentDir)
|
||||
@@ -940,12 +964,17 @@ fn create_devices(devices: &[LinuxDevice], bind: bool) -> Result<()> {
|
||||
let op: fn(&LinuxDevice, &Path) -> Result<()> = if bind { bind_dev } else { mknod_dev };
|
||||
let old = stat::umask(Mode::from_bits_truncate(0o000));
|
||||
for dev in DEFAULT_DEVICES.iter() {
|
||||
let path = Path::new(&dev.path[1..]);
|
||||
let dev_path = dev.path().display().to_string();
|
||||
let path = Path::new(&dev_path[1..]);
|
||||
op(dev, path).context(format!("Creating container device {:?}", dev))?;
|
||||
}
|
||||
for dev in devices {
|
||||
let path = dev_rel_path(&dev.path).ok_or_else(|| {
|
||||
let msg = format!("{} is not a valid device path", dev.path);
|
||||
let dev_path = &dev.path();
|
||||
let path = dev_rel_path(dev_path).ok_or_else(|| {
|
||||
let msg = format!(
|
||||
"{} is not a valid device path",
|
||||
&dev.path().display().to_string().as_str()
|
||||
);
|
||||
anyhow!(msg)
|
||||
})?;
|
||||
if let Some(dir) = path.parent() {
|
||||
@@ -974,7 +1003,7 @@ lazy_static! {
|
||||
}
|
||||
|
||||
fn mknod_dev(dev: &LinuxDevice, relpath: &Path) -> Result<()> {
|
||||
let f = match LINUXDEVICETYPE.get(dev.r#type.as_str()) {
|
||||
let f = match LINUXDEVICETYPE.get(dev.typ().as_str()) {
|
||||
Some(v) => v,
|
||||
None => return Err(anyhow!("invalid spec".to_string())),
|
||||
};
|
||||
@@ -982,14 +1011,14 @@ fn mknod_dev(dev: &LinuxDevice, relpath: &Path) -> Result<()> {
|
||||
stat::mknod(
|
||||
relpath,
|
||||
*f,
|
||||
Mode::from_bits_truncate(dev.file_mode.unwrap_or(0)),
|
||||
nix::sys::stat::makedev(dev.major as u64, dev.minor as u64),
|
||||
Mode::from_bits_truncate(dev.file_mode().unwrap_or(0)),
|
||||
nix::sys::stat::makedev(dev.major() as u64, dev.minor() as u64),
|
||||
)?;
|
||||
|
||||
unistd::chown(
|
||||
relpath,
|
||||
Some(Uid::from_raw(dev.uid.unwrap_or(0) as uid_t)),
|
||||
Some(Gid::from_raw(dev.gid.unwrap_or(0) as uid_t)),
|
||||
Some(Uid::from_raw(dev.uid().unwrap_or(0) as uid_t)),
|
||||
Some(Gid::from_raw(dev.gid().unwrap_or(0) as uid_t)),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
@@ -1005,7 +1034,7 @@ fn bind_dev(dev: &LinuxDevice, relpath: &Path) -> Result<()> {
|
||||
unistd::close(fd)?;
|
||||
|
||||
mount(
|
||||
Some(&*dev.path),
|
||||
Some(dev.path()),
|
||||
relpath,
|
||||
None::<&str>,
|
||||
MsFlags::MS_BIND,
|
||||
@@ -1019,30 +1048,34 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec, process: &Process) -> Result<(
|
||||
log_child!(cfd_log, "old cwd: {}", olddir.to_str().unwrap());
|
||||
unistd::chdir("/")?;
|
||||
|
||||
if !process.cwd.is_empty() {
|
||||
let process_cwd = process.cwd().display().to_string();
|
||||
if process_cwd.is_empty() {
|
||||
// Although the process.cwd string can be unclean/malicious (../../dev, etc),
|
||||
// we are running on our own mount namespace and we just chrooted into the
|
||||
// container's root. It's safe to create CWD from there.
|
||||
log_child!(cfd_log, "Creating CWD {}", process.cwd.as_str());
|
||||
log_child!(cfd_log, "Creating CWD {}", process_cwd.as_str());
|
||||
// Unconditionally try to create CWD, create_dir_all will not fail if
|
||||
// it already exists.
|
||||
fs::create_dir_all(process.cwd.as_str())?;
|
||||
fs::create_dir_all(process_cwd.as_str())?;
|
||||
}
|
||||
|
||||
if spec.linux.is_some() {
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
for path in linux.masked_paths.iter() {
|
||||
if spec.linux().is_some() {
|
||||
let linux = spec.linux().as_ref().unwrap();
|
||||
let linux_masked_paths = linux.masked_paths().clone().unwrap_or_default();
|
||||
for path in linux_masked_paths.iter() {
|
||||
mask_path(path)?;
|
||||
}
|
||||
|
||||
for path in linux.readonly_paths.iter() {
|
||||
let ro_paths = vec![];
|
||||
let linux_readonly_paths = linux.readonly_paths().as_ref().unwrap_or(&ro_paths);
|
||||
for path in linux_readonly_paths.iter() {
|
||||
readonly_path(path)?;
|
||||
}
|
||||
}
|
||||
|
||||
for m in spec.mounts.iter() {
|
||||
if m.destination == "/dev" {
|
||||
let default_mnts = vec![];
|
||||
let spec_mounts = spec.mounts().as_ref().unwrap_or(&default_mnts);
|
||||
for m in spec_mounts.iter() {
|
||||
let mount_dest = m.destination().display().to_string();
|
||||
if &mount_dest == "/dev" {
|
||||
let (flags, _, _) = parse_mount(m);
|
||||
if flags.contains(MsFlags::MS_RDONLY) {
|
||||
mount(
|
||||
@@ -1056,7 +1089,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec, process: &Process) -> Result<(
|
||||
}
|
||||
}
|
||||
|
||||
if spec.root.as_ref().unwrap().readonly {
|
||||
if spec.root().as_ref().unwrap().readonly().unwrap_or_default() {
|
||||
let flags = MsFlags::MS_BIND | MsFlags::MS_RDONLY | MsFlags::MS_NODEV | MsFlags::MS_REMOUNT;
|
||||
|
||||
mount(Some("/"), "/", None::<&str>, flags, None::<&str>)?;
|
||||
@@ -1125,7 +1158,6 @@ fn check_paths(path: &str) -> Result<()> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::assert_result;
|
||||
use std::fs::create_dir;
|
||||
use std::fs::create_dir_all;
|
||||
use std::fs::remove_dir_all;
|
||||
@@ -1134,6 +1166,7 @@ mod tests {
|
||||
use std::os::unix::fs;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use tempfile::tempdir;
|
||||
use test_utils::assert_result;
|
||||
use test_utils::skip_if_not_root;
|
||||
|
||||
#[test]
|
||||
@@ -1153,7 +1186,7 @@ mod tests {
|
||||
);
|
||||
|
||||
// there is no spec.Root, should fail
|
||||
spec.linux = Some(oci::Linux::default());
|
||||
spec.set_linux(Some(oci::Linux::default()));
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(
|
||||
ret.is_err(),
|
||||
@@ -1165,10 +1198,10 @@ mod tests {
|
||||
let ret = create_dir(rootfs.path().join("dev"));
|
||||
assert!(ret.is_ok(), "Got: {:?}", ret);
|
||||
|
||||
spec.root = Some(oci::Root {
|
||||
path: rootfs.path().to_str().unwrap().to_string(),
|
||||
readonly: false,
|
||||
});
|
||||
let mut oci_root = oci::Root::default();
|
||||
oci_root.set_path(rootfs.path().to_path_buf());
|
||||
oci_root.set_readonly(Some(false));
|
||||
spec.set_root(Some(oci_root));
|
||||
|
||||
// there is no spec.mounts, but should pass
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
@@ -1176,13 +1209,16 @@ mod tests {
|
||||
let _ = remove_dir_all(rootfs.path().join("dev"));
|
||||
let _ = create_dir(rootfs.path().join("dev"));
|
||||
|
||||
if spec.mounts().is_none() {
|
||||
spec.set_mounts(Some(Vec::new()));
|
||||
}
|
||||
// Adding bad mount point to spec.mounts
|
||||
spec.mounts.push(oci::Mount {
|
||||
destination: "error".into(),
|
||||
r#type: "bind".into(),
|
||||
source: "error".into(),
|
||||
options: vec!["shared".into(), "rw".into(), "dev".into()],
|
||||
});
|
||||
let mut oci_mount = oci::Mount::default();
|
||||
oci_mount.set_destination("error".into());
|
||||
oci_mount.set_typ(Some("bind".to_string()));
|
||||
oci_mount.set_source(Some("error".into()));
|
||||
oci_mount.set_options(Some(vec!["shared".into(), "rw".into(), "dev".into()]));
|
||||
spec.mounts_mut().as_mut().unwrap().push(oci_mount);
|
||||
|
||||
// destination doesn't start with /, should fail
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
@@ -1191,31 +1227,31 @@ mod tests {
|
||||
"Should fail: destination doesn't start with '/'. Got: {:?}",
|
||||
ret
|
||||
);
|
||||
spec.mounts.pop();
|
||||
spec.mounts_mut().as_mut().unwrap().pop();
|
||||
let _ = remove_dir_all(rootfs.path().join("dev"));
|
||||
let _ = create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// mounting a cgroup
|
||||
spec.mounts.push(oci::Mount {
|
||||
destination: "/cgroup".into(),
|
||||
r#type: "cgroup".into(),
|
||||
source: "/cgroup".into(),
|
||||
options: vec!["shared".into()],
|
||||
});
|
||||
let mut oci_mount = oci::Mount::default();
|
||||
oci_mount.set_destination("/cgroup".into());
|
||||
oci_mount.set_typ(Some("cgroup".into()));
|
||||
oci_mount.set_source(Some("/cgroup".into()));
|
||||
oci_mount.set_options(Some(vec!["shared".into()]));
|
||||
spec.mounts_mut().as_mut().unwrap().push(oci_mount);
|
||||
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
spec.mounts.pop();
|
||||
spec.mounts_mut().as_mut().unwrap().pop();
|
||||
let _ = remove_dir_all(rootfs.path().join("dev"));
|
||||
let _ = create_dir(rootfs.path().join("dev"));
|
||||
|
||||
// mounting /dev
|
||||
spec.mounts.push(oci::Mount {
|
||||
destination: "/dev".into(),
|
||||
r#type: "bind".into(),
|
||||
source: "/dev".into(),
|
||||
options: vec!["shared".into()],
|
||||
});
|
||||
let mut oci_mount = oci::Mount::default();
|
||||
oci_mount.set_destination("/dev".into());
|
||||
oci_mount.set_typ(Some("bind".into()));
|
||||
oci_mount.set_source(Some("/dev".into()));
|
||||
oci_mount.set_options(Some(vec!["shared".into()]));
|
||||
spec.mounts_mut().as_mut().unwrap().push(oci_mount);
|
||||
|
||||
let ret = init_rootfs(stdout_fd, &spec, &cpath, &mounts, true);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
@@ -1225,12 +1261,13 @@ mod tests {
|
||||
#[serial(chdir)]
|
||||
fn test_mount_cgroups() {
|
||||
let stdout_fd = std::io::stdout().as_raw_fd();
|
||||
let mount = oci::Mount {
|
||||
destination: "/cgroups".to_string(),
|
||||
r#type: "cgroup".to_string(),
|
||||
source: "/cgroups".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
|
||||
let mut mount = oci::Mount::default();
|
||||
mount.set_destination("/cgroup".into());
|
||||
mount.set_typ(Some("cgroup".into()));
|
||||
mount.set_source(Some("/cgroup".into()));
|
||||
mount.set_options(Some(vec!["shared".into()]));
|
||||
|
||||
let tempdir = tempdir().unwrap();
|
||||
let rootfs = tempdir.path().to_str().unwrap().to_string();
|
||||
let flags = MsFlags::MS_RDONLY;
|
||||
@@ -1310,19 +1347,27 @@ mod tests {
|
||||
let stdout_fd = std::io::stdout().as_raw_fd();
|
||||
let mut spec = oci::Spec::default();
|
||||
|
||||
spec.linux = Some(oci::Linux::default());
|
||||
spec.linux.as_mut().unwrap().masked_paths = vec!["/tmp".to_string()];
|
||||
spec.linux.as_mut().unwrap().readonly_paths = vec!["/tmp".to_string()];
|
||||
spec.root = Some(oci::Root {
|
||||
path: "/tmp".to_string(),
|
||||
readonly: true,
|
||||
});
|
||||
spec.mounts = vec![oci::Mount {
|
||||
destination: "/dev".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/dev".to_string(),
|
||||
options: vec!["ro".to_string(), "shared".to_string()],
|
||||
}];
|
||||
spec.set_linux(Some(oci::Linux::default()));
|
||||
spec.linux_mut()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.set_masked_paths(Some(vec!["/tmp".to_string()]));
|
||||
spec.linux_mut()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.set_readonly_paths(Some(vec!["/tmp".to_string()]));
|
||||
|
||||
let mut oci_root = oci::Root::default();
|
||||
oci_root.set_path(PathBuf::from("/tmp"));
|
||||
oci_root.set_readonly(Some(true));
|
||||
spec.set_root(Some(oci_root));
|
||||
|
||||
let mut oci_mount = oci::Mount::default();
|
||||
oci_mount.set_destination("/dev".into());
|
||||
oci_mount.set_typ(Some("bind".into()));
|
||||
oci_mount.set_source(Some("/dev".into()));
|
||||
oci_mount.set_options(Some(vec!["shared".into()]));
|
||||
spec.set_mounts(Some(vec![oci_mount]));
|
||||
|
||||
let ret = finish_rootfs(stdout_fd, &spec, &oci::Process::default());
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
@@ -1346,15 +1391,16 @@ mod tests {
|
||||
skip_if_not_root!();
|
||||
|
||||
let path = "/dev/fifo-test";
|
||||
let dev = oci::LinuxDevice {
|
||||
path: path.to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 0,
|
||||
minor: 0,
|
||||
file_mode: Some(0660),
|
||||
uid: Some(unistd::getuid().as_raw()),
|
||||
gid: Some(unistd::getgid().as_raw()),
|
||||
};
|
||||
let dev = oci::LinuxDeviceBuilder::default()
|
||||
.path(PathBuf::from(path))
|
||||
.typ(oci::LinuxDeviceType::C)
|
||||
.major(0)
|
||||
.minor(0)
|
||||
.file_mode(0660 as u32)
|
||||
.uid(unistd::getuid().as_raw())
|
||||
.gid(unistd::getgid().as_raw())
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let ret = mknod_dev(&dev, Path::new(path));
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
@@ -1370,6 +1416,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_mount_from() {
|
||||
#[derive(Debug)]
|
||||
#[allow(dead_code)]
|
||||
struct TestData<'a> {
|
||||
source: &'a str,
|
||||
destination: &'a str,
|
||||
@@ -1444,12 +1491,11 @@ mod tests {
|
||||
std::fs::write(&source_path, []).unwrap();
|
||||
}
|
||||
|
||||
let mount = Mount {
|
||||
source: source_path,
|
||||
destination: d.destination.to_string(),
|
||||
r#type: d.r#type.to_string(),
|
||||
options: vec![],
|
||||
};
|
||||
let mut mount = oci::Mount::default();
|
||||
mount.set_destination(d.destination.into());
|
||||
mount.set_typ(Some("bind".into()));
|
||||
mount.set_source(Some(source_path.into()));
|
||||
mount.set_options(Some(vec![]));
|
||||
|
||||
let result = mount_from(
|
||||
wfd,
|
||||
@@ -1524,30 +1570,27 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_check_proc_mount() {
|
||||
let mount = oci::Mount {
|
||||
destination: "/proc".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/test".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
let mut mount = oci::Mount::default();
|
||||
mount.set_destination("/proc".into());
|
||||
mount.set_typ(Some("bind".into()));
|
||||
mount.set_source(Some("/test".into()));
|
||||
mount.set_options(Some(vec!["shared".to_string()]));
|
||||
|
||||
assert!(check_proc_mount(&mount).is_err());
|
||||
|
||||
let mount = oci::Mount {
|
||||
destination: "/proc/cpuinfo".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/test".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
let mut mount = oci::Mount::default();
|
||||
mount.set_destination("/proc/cpuinfo".into());
|
||||
mount.set_typ(Some("bind".into()));
|
||||
mount.set_source(Some("/test".into()));
|
||||
mount.set_options(Some(vec!["shared".to_string()]));
|
||||
|
||||
assert!(check_proc_mount(&mount).is_ok());
|
||||
|
||||
let mount = oci::Mount {
|
||||
destination: "/proc/test".to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: "/test".to_string(),
|
||||
options: vec!["shared".to_string()],
|
||||
};
|
||||
let mut mount = oci::Mount::default();
|
||||
mount.set_destination("/proc/test".into());
|
||||
mount.set_typ(Some("bind".into()));
|
||||
mount.set_source(Some("/test".into()));
|
||||
mount.set_options(Some(vec!["shared".to_string()]));
|
||||
|
||||
assert!(check_proc_mount(&mount).is_err());
|
||||
}
|
||||
@@ -1755,22 +1798,37 @@ mod tests {
|
||||
#[test]
|
||||
fn test_dev_rel_path() {
|
||||
// Valid device paths
|
||||
assert_eq!(dev_rel_path("/dev/sda").unwrap(), Path::new("dev/sda"));
|
||||
assert_eq!(dev_rel_path("//dev/sda").unwrap(), Path::new("dev/sda"));
|
||||
assert_eq!(
|
||||
dev_rel_path("/dev/vfio/99").unwrap(),
|
||||
dev_rel_path(&PathBuf::from("/dev/sda")).unwrap(),
|
||||
Path::new("dev/sda")
|
||||
);
|
||||
assert_eq!(
|
||||
dev_rel_path(&PathBuf::from("//dev/sda")).unwrap(),
|
||||
Path::new("dev/sda")
|
||||
);
|
||||
assert_eq!(
|
||||
dev_rel_path(&PathBuf::from("/dev/vfio/99")).unwrap(),
|
||||
Path::new("dev/vfio/99")
|
||||
);
|
||||
assert_eq!(dev_rel_path("/dev/...").unwrap(), Path::new("dev/..."));
|
||||
assert_eq!(dev_rel_path("/dev/a..b").unwrap(), Path::new("dev/a..b"));
|
||||
assert_eq!(dev_rel_path("/dev//foo").unwrap(), Path::new("dev/foo"));
|
||||
assert_eq!(
|
||||
dev_rel_path(&PathBuf::from("/dev/...")).unwrap(),
|
||||
Path::new("dev/...")
|
||||
);
|
||||
assert_eq!(
|
||||
dev_rel_path(&PathBuf::from("/dev/a..b")).unwrap(),
|
||||
Path::new("dev/a..b")
|
||||
);
|
||||
assert_eq!(
|
||||
dev_rel_path(&PathBuf::from("/dev//foo")).unwrap(),
|
||||
Path::new("dev/foo")
|
||||
);
|
||||
|
||||
// Bad device paths
|
||||
assert!(dev_rel_path("/devfoo").is_none());
|
||||
assert!(dev_rel_path("/etc/passwd").is_none());
|
||||
assert!(dev_rel_path("/dev/../etc/passwd").is_none());
|
||||
assert!(dev_rel_path("dev/foo").is_none());
|
||||
assert!(dev_rel_path("").is_none());
|
||||
assert!(dev_rel_path("/dev").is_none());
|
||||
assert!(dev_rel_path(&PathBuf::from("/devfoo")).is_none());
|
||||
assert!(dev_rel_path(&PathBuf::from("/etc/passwd")).is_none());
|
||||
assert!(dev_rel_path(&PathBuf::from("/dev/../etc/passwd")).is_none());
|
||||
assert!(dev_rel_path(&PathBuf::from("dev/foo")).is_none());
|
||||
assert!(dev_rel_path(&PathBuf::from("")).is_none());
|
||||
assert!(dev_rel_path(&PathBuf::from("/dev")).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ use nix::unistd::{self, Pid};
|
||||
use nix::Result;
|
||||
|
||||
use oci::Process as OCIProcess;
|
||||
use oci_spec::runtime as oci;
|
||||
use slog::Logger;
|
||||
|
||||
use crate::pipestream::PipeStream;
|
||||
@@ -147,7 +148,7 @@ impl Process {
|
||||
exit_tx: Some(exit_tx),
|
||||
exit_rx: Some(exit_rx),
|
||||
extra_files: Vec::new(),
|
||||
tty: ocip.terminal,
|
||||
tty: ocip.terminal().unwrap_or_default(),
|
||||
term_master: None,
|
||||
parent_stdin: None,
|
||||
parent_stdout: None,
|
||||
|
||||
@@ -5,9 +5,11 @@
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use libseccomp::*;
|
||||
use oci::{LinuxSeccomp, LinuxSeccompArg};
|
||||
use std::str::FromStr;
|
||||
|
||||
use oci::{LinuxSeccomp, LinuxSeccompArg};
|
||||
use oci_spec::runtime as oci;
|
||||
|
||||
fn get_filter_attr_from_flag(flag: &str) -> Result<ScmpFilterAttr> {
|
||||
match flag {
|
||||
"SECCOMP_FILTER_FLAG_TSYNC" => Ok(ScmpFilterAttr::CtlTsync),
|
||||
@@ -22,19 +24,15 @@ fn get_rule_conditions(args: &[LinuxSeccompArg]) -> Result<Vec<ScmpArgCompare>>
|
||||
let mut conditions: Vec<ScmpArgCompare> = Vec::new();
|
||||
|
||||
for arg in args {
|
||||
if arg.op.is_empty() {
|
||||
return Err(anyhow!("seccomp opreator is required"));
|
||||
}
|
||||
|
||||
let mut op = ScmpCompareOp::from_str(&arg.op)?;
|
||||
let mut value = arg.value;
|
||||
let mut op = ScmpCompareOp::from_str(&arg.op().to_string())?;
|
||||
let mut value = arg.value();
|
||||
// For SCMP_CMP_MASKED_EQ, arg.value is the mask and arg.value_two is the value
|
||||
if op == ScmpCompareOp::MaskedEqual(u64::default()) {
|
||||
op = ScmpCompareOp::MaskedEqual(arg.value);
|
||||
value = arg.value_two;
|
||||
op = ScmpCompareOp::MaskedEqual(arg.value());
|
||||
value = arg.value_two().unwrap_or(0);
|
||||
}
|
||||
|
||||
let cond = ScmpArgCompare::new(arg.index, op, value);
|
||||
let cond = ScmpArgCompare::new(arg.index() as u32, op, value);
|
||||
|
||||
conditions.push(cond);
|
||||
}
|
||||
@@ -44,9 +42,9 @@ fn get_rule_conditions(args: &[LinuxSeccompArg]) -> Result<Vec<ScmpArgCompare>>
|
||||
|
||||
pub fn get_unknown_syscalls(scmp: &LinuxSeccomp) -> Option<Vec<String>> {
|
||||
let mut unknown_syscalls: Vec<String> = Vec::new();
|
||||
|
||||
for syscall in &scmp.syscalls {
|
||||
for name in &syscall.names {
|
||||
let scmp_syscalls = scmp.syscalls().clone().unwrap_or_default();
|
||||
for syscall in scmp_syscalls.iter() {
|
||||
for name in syscall.names().iter() {
|
||||
if ScmpSyscall::from_name(name).is_err() {
|
||||
unknown_syscalls.push(name.to_string());
|
||||
}
|
||||
@@ -63,14 +61,15 @@ pub fn get_unknown_syscalls(scmp: &LinuxSeccomp) -> Option<Vec<String>> {
|
||||
// init_seccomp creates a seccomp filter and loads it for the current process
|
||||
// including all the child processes.
|
||||
pub fn init_seccomp(scmp: &LinuxSeccomp) -> Result<()> {
|
||||
let def_action = ScmpAction::from_str(scmp.default_action.as_str(), Some(libc::EPERM))?;
|
||||
let def_action = ScmpAction::from_str(&scmp.default_action().to_string(), Some(libc::EPERM))?;
|
||||
|
||||
// Create a new filter context
|
||||
let mut filter = ScmpFilterContext::new_filter(def_action)?;
|
||||
|
||||
// Add extra architectures
|
||||
for arch in &scmp.architectures {
|
||||
let scmp_arch = ScmpArch::from_str(arch)?;
|
||||
let architectures = scmp.architectures().clone().unwrap_or_default();
|
||||
for arch in architectures {
|
||||
let scmp_arch = ScmpArch::from_str(&arch.to_string())?;
|
||||
filter.add_arch(scmp_arch)?;
|
||||
}
|
||||
|
||||
@@ -78,17 +77,23 @@ pub fn init_seccomp(scmp: &LinuxSeccomp) -> Result<()> {
|
||||
filter.set_ctl_nnp(false)?;
|
||||
|
||||
// Add a rule for each system call
|
||||
for syscall in &scmp.syscalls {
|
||||
if syscall.names.is_empty() {
|
||||
let scmp_syscalls = scmp.syscalls().clone().unwrap_or_default();
|
||||
for syscall in scmp_syscalls {
|
||||
if syscall.names().is_empty() {
|
||||
return Err(anyhow!("syscall name is required"));
|
||||
}
|
||||
|
||||
let action = ScmpAction::from_str(&syscall.action, Some(syscall.errno_ret as i32))?;
|
||||
let action = ScmpAction::from_str(
|
||||
&syscall.action().to_string(),
|
||||
syscall
|
||||
.errno_ret()
|
||||
.map_or(Some(libc::EPERM), |x| Some(x as i32)),
|
||||
)?;
|
||||
if action == def_action {
|
||||
continue;
|
||||
}
|
||||
|
||||
for name in &syscall.names {
|
||||
for name in syscall.names() {
|
||||
let syscall_num = match ScmpSyscall::from_name(name) {
|
||||
Ok(num) => num,
|
||||
Err(_) => {
|
||||
@@ -98,18 +103,20 @@ pub fn init_seccomp(scmp: &LinuxSeccomp) -> Result<()> {
|
||||
}
|
||||
};
|
||||
|
||||
if syscall.args.is_empty() {
|
||||
if syscall.args().is_none() {
|
||||
filter.add_rule(action, syscall_num)?;
|
||||
} else {
|
||||
let conditions = get_rule_conditions(&syscall.args)?;
|
||||
let syscall_args = syscall.args().clone().unwrap_or_default();
|
||||
let conditions = get_rule_conditions(&syscall_args)?;
|
||||
filter.add_rule_conditional(action, syscall_num, &conditions)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set filter attributes for each seccomp flag
|
||||
for flag in &scmp.flags {
|
||||
let scmp_attr = get_filter_attr_from_flag(flag)?;
|
||||
let flags = scmp.flags().clone().unwrap_or_default();
|
||||
for flag in flags {
|
||||
let scmp_attr = get_filter_attr_from_flag(&flag.to_string())?;
|
||||
filter.set_filter_attr(scmp_attr, 1)?;
|
||||
}
|
||||
|
||||
@@ -123,6 +130,7 @@ pub fn init_seccomp(scmp: &LinuxSeccomp) -> Result<()> {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use libc::{dup3, process_vm_readv, EPERM, O_CLOEXEC};
|
||||
use oci_spec::runtime as oci;
|
||||
use std::io::Error;
|
||||
use std::ptr::null;
|
||||
use test_utils::skip_if_not_root;
|
||||
@@ -233,21 +241,23 @@ mod tests {
|
||||
if cfg!(target_endian = "little") {
|
||||
// For little-endian architectures
|
||||
arch = vec![
|
||||
"SCMP_ARCH_X86".to_string(),
|
||||
"SCMP_ARCH_X32".to_string(),
|
||||
"SCMP_ARCH_X86_64".to_string(),
|
||||
"SCMP_ARCH_AARCH64".to_string(),
|
||||
"SCMP_ARCH_ARM".to_string(),
|
||||
"SCMP_ARCH_PPC64LE".to_string(),
|
||||
"SCMP_ARCH_X86".parse::<oci::Arch>().unwrap(),
|
||||
"SCMP_ARCH_X32".parse::<oci::Arch>().unwrap(),
|
||||
"SCMP_ARCH_X86_64".parse::<oci::Arch>().unwrap(),
|
||||
"SCMP_ARCH_AARCH64".parse::<oci::Arch>().unwrap(),
|
||||
"SCMP_ARCH_ARM".parse::<oci::Arch>().unwrap(),
|
||||
"SCMP_ARCH_PPC64LE".parse::<oci::Arch>().unwrap(),
|
||||
];
|
||||
} else {
|
||||
// For big-endian architectures
|
||||
arch = vec!["SCMP_ARCH_S390X".to_string()];
|
||||
arch = vec!["SCMP_ARCH_S390X".parse::<oci::Arch>().unwrap()];
|
||||
}
|
||||
|
||||
scmp.architectures.append(&mut arch);
|
||||
let mut archs = scmp.architectures().clone().unwrap();
|
||||
archs.append(&mut arch);
|
||||
scmp.set_architectures(Some(archs));
|
||||
|
||||
init_seccomp(&scmp).unwrap();
|
||||
assert!(init_seccomp(&scmp).is_ok());
|
||||
|
||||
// Basic syscall with simple rule
|
||||
syscall_assert!(unsafe { dup3(0, 1, O_CLOEXEC) }, -EPERM);
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use oci::Spec;
|
||||
use oci_spec::runtime::Spec;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
|
||||
pub struct CreateOpts {
|
||||
|
||||
@@ -6,19 +6,26 @@
|
||||
use crate::container::Config;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use oci::{Linux, LinuxIdMapping, LinuxNamespace, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use regex::Regex;
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
use std::path::{Component, PathBuf};
|
||||
|
||||
fn get_linux(oci: &Spec) -> Result<&Linux> {
|
||||
oci.linux
|
||||
oci.linux()
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Unable to get Linux section from Spec"))
|
||||
}
|
||||
|
||||
fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
|
||||
let nstype = match oci::LinuxNamespaceType::try_from(key) {
|
||||
Ok(ns_type) => ns_type,
|
||||
Err(_e) => return false,
|
||||
};
|
||||
|
||||
for ns in nses {
|
||||
if ns.r#type.as_str() == key {
|
||||
if ns.typ() == nstype {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -73,12 +80,13 @@ fn rootfs(root: &str) -> Result<()> {
|
||||
}
|
||||
|
||||
fn hostname(oci: &Spec) -> Result<()> {
|
||||
if oci.hostname.is_empty() {
|
||||
if oci.hostname().is_none() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let linux = get_linux(oci)?;
|
||||
if !contain_namespace(&linux.namespaces, "uts") {
|
||||
let default_vec = vec![];
|
||||
if !contain_namespace(linux.namespaces().as_ref().unwrap_or(&default_vec), "uts") {
|
||||
return Err(anyhow!("Linux namespace does not contain uts"));
|
||||
}
|
||||
|
||||
@@ -90,26 +98,30 @@ fn security(oci: &Spec) -> Result<()> {
|
||||
let label_pattern = r".*_u:.*_r:.*_t:s[0-9]|1[0-5].*";
|
||||
let label_regex = Regex::new(label_pattern)?;
|
||||
|
||||
if let Some(ref process) = oci.process {
|
||||
if !process.selinux_label.is_empty() && !label_regex.is_match(&process.selinux_label) {
|
||||
let default_vec = vec![];
|
||||
if let Some(process) = oci.process().as_ref() {
|
||||
if process.selinux_label().is_some()
|
||||
&& !label_regex.is_match(process.selinux_label().as_ref().unwrap())
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"SELinux label for the process is invalid format: {}",
|
||||
&process.selinux_label
|
||||
"SELinux label for the process is invalid format: {:?}",
|
||||
&process.selinux_label()
|
||||
));
|
||||
}
|
||||
}
|
||||
if !linux.mount_label.is_empty() && !label_regex.is_match(&linux.mount_label) {
|
||||
if linux.mount_label().is_some() && !label_regex.is_match(linux.mount_label().as_ref().unwrap())
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"SELinux label for the mount is invalid format: {}",
|
||||
&linux.mount_label
|
||||
linux.mount_label().as_ref().unwrap()
|
||||
));
|
||||
}
|
||||
|
||||
if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
|
||||
if linux.masked_paths().is_none() && linux.readonly_paths().is_none() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !contain_namespace(&linux.namespaces, "mount") {
|
||||
if !contain_namespace(linux.namespaces().as_ref().unwrap_or(&default_vec), "mnt") {
|
||||
return Err(anyhow!("Linux namespace does not contain mount"));
|
||||
}
|
||||
|
||||
@@ -118,7 +130,7 @@ fn security(oci: &Spec) -> Result<()> {
|
||||
|
||||
fn idmapping(maps: &[LinuxIdMapping]) -> Result<()> {
|
||||
for map in maps {
|
||||
if map.size > 0 {
|
||||
if map.size() > 0 {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
@@ -129,18 +141,22 @@ fn idmapping(maps: &[LinuxIdMapping]) -> Result<()> {
|
||||
fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
if contain_namespace(&linux.namespaces, "user") {
|
||||
let default_vec = vec![];
|
||||
if contain_namespace(linux.namespaces().as_ref().unwrap_or(&default_vec), "user") {
|
||||
let user_ns = PathBuf::from("/proc/self/ns/user");
|
||||
if !user_ns.exists() {
|
||||
return Err(anyhow!("user namespace not supported!"));
|
||||
}
|
||||
// check if idmappings is correct, at least I saw idmaps
|
||||
// with zero size was passed to agent
|
||||
idmapping(&linux.uid_mappings).context("idmapping uid")?;
|
||||
idmapping(&linux.gid_mappings).context("idmapping gid")?;
|
||||
let default_vec2 = vec![];
|
||||
idmapping(linux.uid_mappings().as_ref().unwrap_or(&default_vec2))
|
||||
.context("idmapping uid")?;
|
||||
idmapping(linux.gid_mappings().as_ref().unwrap_or(&default_vec2))
|
||||
.context("idmapping gid")?;
|
||||
} else {
|
||||
// no user namespace but idmap
|
||||
if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
|
||||
if !linux.uid_mappings().is_none() || !linux.gid_mappings().is_none() {
|
||||
return Err(anyhow!("No user namespace, but uid or gid mapping exists"));
|
||||
}
|
||||
}
|
||||
@@ -151,7 +167,11 @@ fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
fn cgroupnamespace(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
if contain_namespace(&linux.namespaces, "cgroup") {
|
||||
let default_vec = vec![];
|
||||
if contain_namespace(
|
||||
linux.namespaces().as_ref().unwrap_or(&default_vec),
|
||||
"cgroup",
|
||||
) {
|
||||
let path = PathBuf::from("/proc/self/ns/cgroup");
|
||||
if !path.exists() {
|
||||
return Err(anyhow!("cgroup unsupported!"));
|
||||
@@ -178,9 +198,13 @@ lazy_static! {
|
||||
fn sysctl(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
for (key, _) in linux.sysctl.iter() {
|
||||
let default_hash = HashMap::new();
|
||||
let sysctl_hash = linux.sysctl().as_ref().unwrap_or(&default_hash);
|
||||
let default_vec = vec![];
|
||||
let linux_namespaces = linux.namespaces().as_ref().unwrap_or(&default_vec);
|
||||
for (key, _) in sysctl_hash.iter() {
|
||||
if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") {
|
||||
if contain_namespace(&linux.namespaces, "ipc") {
|
||||
if contain_namespace(linux_namespaces, "ipc") {
|
||||
continue;
|
||||
} else {
|
||||
return Err(anyhow!("Linux namespace does not contain ipc"));
|
||||
@@ -192,7 +216,7 @@ fn sysctl(oci: &Spec) -> Result<()> {
|
||||
continue;
|
||||
}
|
||||
|
||||
if contain_namespace(&linux.namespaces, "uts") {
|
||||
if contain_namespace(linux_namespaces, "uts") {
|
||||
if key == "kernel.domainname" {
|
||||
continue;
|
||||
}
|
||||
@@ -210,11 +234,12 @@ fn sysctl(oci: &Spec) -> Result<()> {
|
||||
fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
if !contain_namespace(&linux.namespaces, "user") {
|
||||
let default_ns = vec![];
|
||||
if !contain_namespace(linux.namespaces().as_ref().unwrap_or(&default_ns), "user") {
|
||||
return Err(anyhow!("Linux namespace is missing user"));
|
||||
}
|
||||
|
||||
if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
|
||||
if linux.uid_mappings().is_none() || linux.gid_mappings().is_none() {
|
||||
return Err(anyhow!(
|
||||
"Rootless containers require at least one UID/GID mapping"
|
||||
));
|
||||
@@ -225,7 +250,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
|
||||
|
||||
fn has_idmapping(maps: &[LinuxIdMapping], id: u32) -> bool {
|
||||
for map in maps {
|
||||
if id >= map.container_id && id < map.container_id + map.size {
|
||||
if id >= map.container_id() && id < map.container_id() + map.size() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -235,8 +260,12 @@ fn has_idmapping(maps: &[LinuxIdMapping], id: u32) -> bool {
|
||||
fn rootless_euid_mount(oci: &Spec) -> Result<()> {
|
||||
let linux = get_linux(oci)?;
|
||||
|
||||
for mnt in oci.mounts.iter() {
|
||||
for opt in mnt.options.iter() {
|
||||
let default_mounts = vec![];
|
||||
let oci_mounts = oci.mounts().as_ref().unwrap_or(&default_mounts);
|
||||
for mnt in oci_mounts.iter() {
|
||||
let default_options = vec![];
|
||||
let mnt_options = mnt.options().as_ref().unwrap_or(&default_options);
|
||||
for opt in mnt_options.iter() {
|
||||
if opt.starts_with("uid=") || opt.starts_with("gid=") {
|
||||
let fields: Vec<&str> = opt.split('=').collect();
|
||||
|
||||
@@ -249,11 +278,15 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
|
||||
.parse::<u32>()
|
||||
.context(format!("parse field {}", &fields[1]))?;
|
||||
|
||||
if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
|
||||
if opt.starts_with("uid=")
|
||||
&& !has_idmapping(linux.uid_mappings().as_ref().unwrap_or(&vec![]), id)
|
||||
{
|
||||
return Err(anyhow!("uid of {} does not have a valid mapping", id));
|
||||
}
|
||||
|
||||
if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
|
||||
if opt.starts_with("gid=")
|
||||
&& !has_idmapping(linux.gid_mappings().as_ref().unwrap_or(&vec![]), id)
|
||||
{
|
||||
return Err(anyhow!("gid of {} does not have a valid mapping", id));
|
||||
}
|
||||
}
|
||||
@@ -275,16 +308,16 @@ pub fn validate(conf: &Config) -> Result<()> {
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Invalid config spec"))?;
|
||||
|
||||
if oci.linux.is_none() {
|
||||
if oci.linux().is_none() {
|
||||
return Err(anyhow!("oci Linux is none"));
|
||||
}
|
||||
|
||||
let root = match oci.root.as_ref() {
|
||||
Some(v) => v.path.as_str(),
|
||||
let root = match oci.root().as_ref() {
|
||||
Some(v) => v.path().display().to_string(),
|
||||
None => return Err(anyhow!("oci root is none")),
|
||||
};
|
||||
|
||||
rootfs(root).context("rootfs")?;
|
||||
rootfs(&root).context("rootfs")?;
|
||||
hostname(oci).context("hostname")?;
|
||||
security(oci).context("security")?;
|
||||
usernamespace(oci).context("usernamespace")?;
|
||||
@@ -301,19 +334,22 @@ pub fn validate(conf: &Config) -> Result<()> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use oci::{Mount, Process};
|
||||
use oci::{LinuxIdMappingBuilder, LinuxNamespaceBuilder, LinuxNamespaceType, Process, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
|
||||
#[test]
|
||||
fn test_namespace() {
|
||||
let namespaces = [
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.path("/sys/cgroups/uts")
|
||||
.build()
|
||||
.unwrap(),
|
||||
];
|
||||
|
||||
assert_eq!(contain_namespace(&namespaces, "net"), true);
|
||||
@@ -347,24 +383,27 @@ mod tests {
|
||||
fn test_hostname() {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
hostname(&spec).unwrap();
|
||||
assert!(hostname(&spec).is_ok());
|
||||
|
||||
spec.hostname = "a.test.com".to_owned();
|
||||
hostname(&spec).unwrap_err();
|
||||
spec.set_hostname(Some("a.test.com".to_owned()));
|
||||
assert!(hostname(&spec).is_ok());
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
let namespaces = vec![
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.path("/sys/cgroups/uts")
|
||||
.build()
|
||||
.unwrap(),
|
||||
];
|
||||
spec.linux = Some(linux);
|
||||
hostname(&spec).unwrap();
|
||||
linux.set_namespaces(Some(namespaces));
|
||||
spec.set_linux(Some(linux));
|
||||
assert!(hostname(&spec).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -372,88 +411,89 @@ mod tests {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
let linux = Linux::default();
|
||||
spec.linux = Some(linux);
|
||||
spec.set_linux(Some(linux));
|
||||
security(&spec).unwrap();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.masked_paths.push("/test".to_owned());
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
linux.set_masked_paths(Some(vec!["/test".to_owned()]));
|
||||
let namespaces = vec![
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.path("/sys/cgroups/uts")
|
||||
.build()
|
||||
.unwrap(),
|
||||
];
|
||||
spec.linux = Some(linux);
|
||||
linux.set_namespaces(Some(namespaces));
|
||||
spec.set_linux(Some(linux));
|
||||
security(&spec).unwrap_err();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.masked_paths.push("/test".to_owned());
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "mount".to_owned(),
|
||||
path: "/sys/cgroups/mount".to_owned(),
|
||||
},
|
||||
linux.set_masked_paths(Some(vec!["/test".to_owned()]));
|
||||
let namespaces = vec![
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Mount)
|
||||
.path("/sys/cgroups/mount")
|
||||
.build()
|
||||
.unwrap(),
|
||||
];
|
||||
spec.linux = Some(linux);
|
||||
security(&spec).unwrap();
|
||||
linux.set_namespaces(Some(namespaces));
|
||||
spec.set_linux(Some(linux));
|
||||
assert!(security(&spec).is_ok());
|
||||
|
||||
// SELinux
|
||||
let valid_label = "system_u:system_r:container_t:s0:c123,c456";
|
||||
let mut process = Process::default();
|
||||
process.selinux_label = valid_label.to_string();
|
||||
spec.process = Some(process);
|
||||
process.set_selinux_label(Some(valid_label.to_string()));
|
||||
spec.set_process(Some(process));
|
||||
security(&spec).unwrap();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.mount_label = valid_label.to_string();
|
||||
spec.linux = Some(linux);
|
||||
linux.set_mount_label(Some(valid_label.to_string()));
|
||||
spec.set_linux(Some(linux));
|
||||
security(&spec).unwrap();
|
||||
|
||||
let invalid_label = "system_u:system_r:container_t";
|
||||
let mut process = Process::default();
|
||||
process.selinux_label = invalid_label.to_string();
|
||||
spec.process = Some(process);
|
||||
process.set_selinux_label(Some(invalid_label.to_string()));
|
||||
spec.set_process(Some(process));
|
||||
security(&spec).unwrap_err();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.mount_label = invalid_label.to_string();
|
||||
spec.linux = Some(linux);
|
||||
linux.set_mount_label(Some(valid_label.to_string()));
|
||||
spec.set_linux(Some(linux));
|
||||
security(&spec).unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_usernamespace() {
|
||||
let mut spec = Spec::default();
|
||||
usernamespace(&spec).unwrap_err();
|
||||
assert!(usernamespace(&spec).is_ok());
|
||||
|
||||
let linux = Linux::default();
|
||||
spec.linux = Some(linux);
|
||||
spec.set_linux(Some(linux));
|
||||
usernamespace(&spec).unwrap();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.uid_mappings = vec![LinuxIdMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 0,
|
||||
}];
|
||||
spec.linux = Some(linux);
|
||||
usernamespace(&spec).unwrap_err();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.uid_mappings = vec![LinuxIdMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 100,
|
||||
}];
|
||||
spec.linux = Some(linux);
|
||||
let uidmap = LinuxIdMappingBuilder::default()
|
||||
.container_id(0u32)
|
||||
.host_id(1000u32)
|
||||
.size(0u32)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
linux.set_uid_mappings(Some(vec![uidmap]));
|
||||
spec.set_linux(Some(linux));
|
||||
usernamespace(&spec).unwrap_err();
|
||||
}
|
||||
|
||||
@@ -467,62 +507,73 @@ mod tests {
|
||||
|
||||
// Test case: without user namespace
|
||||
let linux = Linux::default();
|
||||
spec.linux = Some(linux);
|
||||
spec.set_linux(Some(linux));
|
||||
rootless_euid_mapping(&spec).unwrap_err();
|
||||
|
||||
// Test case: without user namespace
|
||||
let linux = spec.linux.as_mut().unwrap();
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
},
|
||||
let linux = spec.linux_mut().as_mut().unwrap();
|
||||
let namespaces = vec![
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.path("/sys/cgroups/uts")
|
||||
.build()
|
||||
.unwrap(),
|
||||
];
|
||||
linux.set_namespaces(Some(namespaces));
|
||||
rootless_euid_mapping(&spec).unwrap_err();
|
||||
|
||||
let linux = spec.linux.as_mut().unwrap();
|
||||
linux.namespaces = vec![
|
||||
LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: "user".to_owned(),
|
||||
path: "/sys/cgroups/user".to_owned(),
|
||||
},
|
||||
let linux = spec.linux_mut().as_mut().unwrap();
|
||||
let namespaces = vec![
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::User)
|
||||
.path("/sys/cgroups/user")
|
||||
.build()
|
||||
.unwrap(),
|
||||
];
|
||||
linux.uid_mappings = vec![LinuxIdMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 1000,
|
||||
}];
|
||||
linux.gid_mappings = vec![LinuxIdMapping {
|
||||
container_id: 0,
|
||||
host_id: 1000,
|
||||
size: 1000,
|
||||
}];
|
||||
linux.set_namespaces(Some(namespaces));
|
||||
|
||||
let uidmap = LinuxIdMappingBuilder::default()
|
||||
.container_id(0u32)
|
||||
.host_id(1000u32)
|
||||
.size(1000u32)
|
||||
.build()
|
||||
.unwrap();
|
||||
let gidmap = LinuxIdMappingBuilder::default()
|
||||
.container_id(0u32)
|
||||
.host_id(1000u32)
|
||||
.size(1000u32)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
linux.set_uid_mappings(Some(vec![uidmap]));
|
||||
linux.set_gid_mappings(Some(vec![gidmap]));
|
||||
rootless_euid_mapping(&spec).unwrap();
|
||||
|
||||
spec.mounts.push(Mount {
|
||||
destination: "/app".to_owned(),
|
||||
r#type: "tmpfs".to_owned(),
|
||||
source: "".to_owned(),
|
||||
options: vec!["uid=10000".to_owned()],
|
||||
});
|
||||
let mut oci_mount = oci::Mount::default();
|
||||
oci_mount.set_destination("/app".into());
|
||||
oci_mount.set_typ(Some("tmpfs".to_owned()));
|
||||
oci_mount.set_source(Some("".into()));
|
||||
oci_mount.set_options(Some(vec!["uid=10000".to_owned()]));
|
||||
spec.mounts_mut().as_mut().unwrap().push(oci_mount);
|
||||
rootless_euid_mount(&spec).unwrap_err();
|
||||
|
||||
spec.mounts = vec![
|
||||
(Mount {
|
||||
destination: "/app".to_owned(),
|
||||
r#type: "tmpfs".to_owned(),
|
||||
source: "".to_owned(),
|
||||
options: vec!["uid=500".to_owned(), "gid=500".to_owned()],
|
||||
}),
|
||||
];
|
||||
let mut oci_mount = oci::Mount::default();
|
||||
oci_mount.set_destination("/app".into());
|
||||
oci_mount.set_typ(Some("tmpfs".to_owned()));
|
||||
oci_mount.set_source(Some("".into()));
|
||||
oci_mount.set_options(Some(vec!["uid=500".to_owned(), "gid=500".to_owned()]));
|
||||
spec.set_mounts(Some(vec![oci_mount]));
|
||||
|
||||
rootless_euid(&spec).unwrap();
|
||||
}
|
||||
|
||||
@@ -531,25 +582,34 @@ mod tests {
|
||||
let mut spec = Spec::default();
|
||||
|
||||
let mut linux = Linux::default();
|
||||
linux.namespaces = vec![LinuxNamespace {
|
||||
r#type: "net".to_owned(),
|
||||
path: "/sys/cgroups/net".to_owned(),
|
||||
}];
|
||||
linux
|
||||
.sysctl
|
||||
.insert("kernel.domainname".to_owned(), "test.com".to_owned());
|
||||
spec.linux = Some(linux);
|
||||
let namespaces = vec![LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Network)
|
||||
.path("/sys/cgroups/net")
|
||||
.build()
|
||||
.unwrap()];
|
||||
linux.set_namespaces(Some(namespaces));
|
||||
|
||||
let mut sysctl_hash = HashMap::new();
|
||||
sysctl_hash.insert("kernel.domainname".to_owned(), "test.com".to_owned());
|
||||
linux.set_sysctl(Some(sysctl_hash));
|
||||
|
||||
spec.set_linux(Some(linux));
|
||||
sysctl(&spec).unwrap_err();
|
||||
|
||||
spec.linux
|
||||
spec.linux_mut()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.namespaces
|
||||
.push(LinuxNamespace {
|
||||
r#type: "uts".to_owned(),
|
||||
path: "/sys/cgroups/uts".to_owned(),
|
||||
});
|
||||
sysctl(&spec).unwrap();
|
||||
.namespaces_mut()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.push(
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::User)
|
||||
.path("/sys/cgroups/user")
|
||||
.build()
|
||||
.unwrap(),
|
||||
);
|
||||
assert!(sysctl(&spec).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -569,7 +629,7 @@ mod tests {
|
||||
validate(&config).unwrap_err();
|
||||
|
||||
let linux = Linux::default();
|
||||
config.spec.as_mut().unwrap().linux = Some(linux);
|
||||
config.spec.as_mut().unwrap().set_linux(Some(linux));
|
||||
validate(&config).unwrap_err();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,83 +7,219 @@
|
||||
// Confidential Data Hub is a service running inside guest to provide resource related APIs.
|
||||
// https://github.com/confidential-containers/guest-components/tree/main/confidential-data-hub
|
||||
|
||||
use anyhow::Result;
|
||||
use crate::AGENT_CONFIG;
|
||||
use anyhow::{bail, Context, Result};
|
||||
use derivative::Derivative;
|
||||
use protocols::{
|
||||
sealed_secret, sealed_secret_ttrpc_async, sealed_secret_ttrpc_async::SealedSecretServiceClient,
|
||||
confidential_data_hub, confidential_data_hub_ttrpc_async,
|
||||
confidential_data_hub_ttrpc_async::{SealedSecretServiceClient, SecureMountServiceClient},
|
||||
};
|
||||
|
||||
use crate::CDH_SOCKET_URI;
|
||||
use std::fs;
|
||||
use std::os::unix::fs::symlink;
|
||||
use std::path::Path;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
// Nanoseconds
|
||||
const CDH_UNSEAL_TIMEOUT: i64 = 50 * 1000 * 1000 * 1000;
|
||||
lazy_static! {
|
||||
static ref CDH_API_TIMEOUT: i64 = AGENT_CONFIG.cdh_api_timeout.as_nanos() as i64;
|
||||
pub static ref CDH_CLIENT: OnceCell<CDHClient> = OnceCell::new();
|
||||
}
|
||||
|
||||
const SEALED_SECRET_PREFIX: &str = "sealed.";
|
||||
|
||||
// Convenience function to obtain the scope logger.
|
||||
fn sl() -> slog::Logger {
|
||||
slog_scope::logger().new(o!("subsystem" => "cdh"))
|
||||
}
|
||||
|
||||
#[derive(Derivative)]
|
||||
#[derivative(Clone, Debug)]
|
||||
pub struct CDHClient {
|
||||
#[derivative(Debug = "ignore")]
|
||||
sealed_secret_client: SealedSecretServiceClient,
|
||||
#[derivative(Debug = "ignore")]
|
||||
secure_mount_client: SecureMountServiceClient,
|
||||
}
|
||||
|
||||
impl CDHClient {
|
||||
pub fn new() -> Result<Self> {
|
||||
let client = ttrpc::asynchronous::Client::connect(CDH_SOCKET_URI)?;
|
||||
pub fn new(cdh_socket_uri: &str) -> Result<Self> {
|
||||
let client = ttrpc::asynchronous::Client::connect(cdh_socket_uri)?;
|
||||
let sealed_secret_client =
|
||||
sealed_secret_ttrpc_async::SealedSecretServiceClient::new(client);
|
||||
|
||||
confidential_data_hub_ttrpc_async::SealedSecretServiceClient::new(client.clone());
|
||||
let secure_mount_client =
|
||||
confidential_data_hub_ttrpc_async::SecureMountServiceClient::new(client);
|
||||
Ok(CDHClient {
|
||||
sealed_secret_client,
|
||||
secure_mount_client,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn unseal_secret_async(&self, sealed_secret: &str) -> Result<Vec<u8>> {
|
||||
let mut input = sealed_secret::UnsealSecretInput::new();
|
||||
let mut input = confidential_data_hub::UnsealSecretInput::new();
|
||||
input.set_secret(sealed_secret.into());
|
||||
|
||||
let unsealed_secret = self
|
||||
.sealed_secret_client
|
||||
.unseal_secret(ttrpc::context::with_timeout(CDH_UNSEAL_TIMEOUT), &input)
|
||||
.unseal_secret(ttrpc::context::with_timeout(*CDH_API_TIMEOUT), &input)
|
||||
.await?;
|
||||
Ok(unsealed_secret.plaintext)
|
||||
}
|
||||
|
||||
pub async fn unseal_env(&self, env: &str) -> Result<String> {
|
||||
if let Some((key, value)) = env.split_once('=') {
|
||||
if value.starts_with(SEALED_SECRET_PREFIX) {
|
||||
let unsealed_value = self.unseal_secret_async(value).await?;
|
||||
let unsealed_env = format!("{}={}", key, std::str::from_utf8(&unsealed_value)?);
|
||||
|
||||
return Ok(unsealed_env);
|
||||
}
|
||||
}
|
||||
|
||||
Ok((*env.to_owned()).to_string())
|
||||
pub async fn secure_mount(
|
||||
&self,
|
||||
volume_type: &str,
|
||||
options: &std::collections::HashMap<String, String>,
|
||||
flags: Vec<String>,
|
||||
mount_point: &str,
|
||||
) -> Result<()> {
|
||||
let req = confidential_data_hub::SecureMountRequest {
|
||||
volume_type: volume_type.to_string(),
|
||||
options: options.clone(),
|
||||
flags,
|
||||
mount_point: mount_point.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
self.secure_mount_client
|
||||
.secure_mount(ttrpc::context::with_timeout(*CDH_API_TIMEOUT), &req)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn init_cdh_client(cdh_socket_uri: &str) -> Result<()> {
|
||||
CDH_CLIENT
|
||||
.get_or_try_init(|| async {
|
||||
CDHClient::new(cdh_socket_uri).context("Failed to create CDH Client")
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if the CDH client is initialized
|
||||
pub async fn is_cdh_client_initialized() -> bool {
|
||||
CDH_CLIENT.get().is_some() // Returns true if CDH_CLIENT is initialized, false otherwise
|
||||
}
|
||||
|
||||
pub async fn unseal_env(env: &str) -> Result<String> {
|
||||
let cdh_client = CDH_CLIENT
|
||||
.get()
|
||||
.expect("Confidential Data Hub not initialized");
|
||||
|
||||
if let Some((key, value)) = env.split_once('=') {
|
||||
if value.starts_with(SEALED_SECRET_PREFIX) {
|
||||
let unsealed_value = cdh_client.unseal_secret_async(value).await?;
|
||||
let unsealed_env = format!("{}={}", key, std::str::from_utf8(&unsealed_value)?);
|
||||
|
||||
return Ok(unsealed_env);
|
||||
}
|
||||
}
|
||||
Ok((*env.to_owned()).to_string())
|
||||
}
|
||||
|
||||
pub async fn unseal_file(path: &str) -> Result<()> {
|
||||
let cdh_client = CDH_CLIENT
|
||||
.get()
|
||||
.expect("Confidential Data Hub not initialized");
|
||||
|
||||
if !Path::new(path).exists() {
|
||||
bail!("sealed secret file {:?} does not exist", path);
|
||||
}
|
||||
|
||||
// Iterate over all entries to handle the sealed secret file.
|
||||
// For example, the directory is as follows:
|
||||
// The secret directory in the guest: /run/kata-containers/shared/containers/21bbf0d932b70263d65d7052ecfd72ee46de03f766650cb378e93852ddb30a54-5063be11b6800f96-sealed-secret-target/:
|
||||
// - ..2024_09_30_02_55_58.2237819815
|
||||
// - ..data -> ..2024_09_30_02_55_58.2237819815
|
||||
// - secret -> ..2024_09_30_02_55_58.2237819815/secret
|
||||
//
|
||||
// The directory "..2024_09_30_02_55_58.2237819815":
|
||||
// - secret
|
||||
for entry in fs::read_dir(path)? {
|
||||
let entry = entry?;
|
||||
let entry_type = entry.file_type()?;
|
||||
if !entry_type.is_symlink() && !entry_type.is_file() {
|
||||
debug!(
|
||||
sl(),
|
||||
"skipping sealed source entry {:?} because its file type is {:?}",
|
||||
entry,
|
||||
entry_type
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
let target_path = fs::canonicalize(&entry.path())?;
|
||||
info!(sl(), "sealed source entry target path: {:?}", target_path);
|
||||
|
||||
// Skip if the target path is not a file (e.g., it's a symlink pointing to the secret file).
|
||||
if !target_path.is_file() {
|
||||
debug!(sl(), "sealed source is not a file: {:?}", target_path);
|
||||
continue;
|
||||
}
|
||||
|
||||
let secret_name = entry.file_name();
|
||||
let contents = fs::read_to_string(&target_path)?;
|
||||
if contents.starts_with(SEALED_SECRET_PREFIX) {
|
||||
// Get the directory name of the sealed secret file
|
||||
let dir_name = target_path
|
||||
.parent()
|
||||
.and_then(|p| p.file_name())
|
||||
.map(|name| name.to_string_lossy().to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
// Create the unsealed file name in the same directory, which will be written the unsealed data.
|
||||
let unsealed_filename = format!("{}.unsealed", target_path.to_string_lossy());
|
||||
// Create the unsealed file symlink, which is used for reading the unsealed data in the container.
|
||||
let unsealed_filename_symlink =
|
||||
format!("{}/{}.unsealed", dir_name, secret_name.to_string_lossy());
|
||||
|
||||
// Unseal the secret and write it to the unsealed file
|
||||
let unsealed_value = cdh_client.unseal_secret_async(&contents).await?;
|
||||
fs::write(&unsealed_filename, unsealed_value)?;
|
||||
|
||||
// Remove the original sealed symlink and create a symlink to the unsealed file
|
||||
fs::remove_file(&entry.path())?;
|
||||
symlink(unsealed_filename_symlink, &entry.path())?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn secure_mount(
|
||||
volume_type: &str,
|
||||
options: &std::collections::HashMap<String, String>,
|
||||
flags: Vec<String>,
|
||||
mount_point: &str,
|
||||
) -> Result<()> {
|
||||
let cdh_client = CDH_CLIENT
|
||||
.get()
|
||||
.expect("Confidential Data Hub not initialized");
|
||||
|
||||
cdh_client
|
||||
.secure_mount(volume_type, options, flags, mount_point)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(feature = "sealed-secret")]
|
||||
mod tests {
|
||||
use crate::cdh::CDHClient;
|
||||
use crate::cdh::CDH_ADDR;
|
||||
use anyhow::anyhow;
|
||||
use super::*;
|
||||
use async_trait::async_trait;
|
||||
use protocols::{sealed_secret, sealed_secret_ttrpc_async};
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Write};
|
||||
use std::sync::Arc;
|
||||
use tempfile::tempdir;
|
||||
use test_utils::skip_if_not_root;
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
|
||||
struct TestService;
|
||||
|
||||
#[async_trait]
|
||||
impl sealed_secret_ttrpc_async::SealedSecretService for TestService {
|
||||
impl confidential_data_hub_ttrpc_async::SealedSecretService for TestService {
|
||||
async fn unseal_secret(
|
||||
&self,
|
||||
_ctx: &::ttrpc::asynchronous::TtrpcContext,
|
||||
_req: sealed_secret::UnsealSecretInput,
|
||||
) -> ttrpc::error::Result<sealed_secret::UnsealSecretOutput> {
|
||||
let mut output = sealed_secret::UnsealSecretOutput::new();
|
||||
_req: confidential_data_hub::UnsealSecretInput,
|
||||
) -> ttrpc::error::Result<confidential_data_hub::UnsealSecretOutput> {
|
||||
let mut output = confidential_data_hub::UnsealSecretOutput::new();
|
||||
output.set_plaintext("unsealed".into());
|
||||
Ok(output)
|
||||
}
|
||||
@@ -101,17 +237,17 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn start_ttrpc_server() {
|
||||
fn start_ttrpc_server(cdh_socket_uri: String) {
|
||||
tokio::spawn(async move {
|
||||
let ss = Box::new(TestService {})
|
||||
as Box<dyn sealed_secret_ttrpc_async::SealedSecretService + Send + Sync>;
|
||||
as Box<dyn confidential_data_hub_ttrpc_async::SealedSecretService + Send + Sync>;
|
||||
let ss = Arc::new(ss);
|
||||
let ss_service = sealed_secret_ttrpc_async::create_sealed_secret_service(ss);
|
||||
let ss_service = confidential_data_hub_ttrpc_async::create_sealed_secret_service(ss);
|
||||
|
||||
remove_if_sock_exist(CDH_ADDR).unwrap();
|
||||
remove_if_sock_exist(&cdh_socket_uri).unwrap();
|
||||
|
||||
let mut server = ttrpc::asynchronous::Server::new()
|
||||
.bind(CDH_ADDR)
|
||||
.bind(&cdh_socket_uri)
|
||||
.unwrap()
|
||||
.register_service(ss_service);
|
||||
|
||||
@@ -127,23 +263,58 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_unseal_env() {
|
||||
async fn test_sealed_secret() {
|
||||
skip_if_not_root!();
|
||||
let test_dir = tempdir().expect("failed to create tmpdir");
|
||||
let test_dir_path = test_dir.path();
|
||||
let cdh_sock_uri = &format!(
|
||||
"unix://{}",
|
||||
test_dir_path.join("cdh.sock").to_str().unwrap()
|
||||
);
|
||||
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
let _guard = rt.enter();
|
||||
start_ttrpc_server();
|
||||
start_ttrpc_server(cdh_sock_uri.to_string());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
init_cdh_client(cdh_sock_uri).await.unwrap();
|
||||
|
||||
let cc = Some(CDHClient::new().unwrap());
|
||||
let cdh_client = cc.as_ref().ok_or(anyhow!("get cdh_client failed")).unwrap();
|
||||
// Test sealed secret as env vars
|
||||
let sealed_env = String::from("key=sealed.testdata");
|
||||
let unsealed_env = cdh_client.unseal_env(&sealed_env).await.unwrap();
|
||||
let unsealed_env = unseal_env(&sealed_env).await.unwrap();
|
||||
assert_eq!(unsealed_env, String::from("key=unsealed"));
|
||||
let normal_env = String::from("key=testdata");
|
||||
let unchanged_env = cdh_client.unseal_env(&normal_env).await.unwrap();
|
||||
let unchanged_env = unseal_env(&normal_env).await.unwrap();
|
||||
assert_eq!(unchanged_env, String::from("key=testdata"));
|
||||
|
||||
// Test sealed secret as files
|
||||
let sealed_dir = test_dir_path.join("..test");
|
||||
fs::create_dir(&sealed_dir).unwrap();
|
||||
let sealed_filename = sealed_dir.join("secret");
|
||||
let mut sealed_file = File::create(sealed_filename.clone()).unwrap();
|
||||
sealed_file.write_all(b"sealed.testdata").unwrap();
|
||||
let secret_symlink = test_dir_path.join("secret");
|
||||
symlink(&sealed_filename, &secret_symlink).unwrap();
|
||||
|
||||
unseal_file(test_dir_path.to_str().unwrap()).await.unwrap();
|
||||
|
||||
let unsealed_filename = test_dir_path.join("secret");
|
||||
let mut unsealed_file = File::open(unsealed_filename.clone()).unwrap();
|
||||
let mut contents = String::new();
|
||||
unsealed_file.read_to_string(&mut contents).unwrap();
|
||||
assert_eq!(contents, String::from("unsealed"));
|
||||
fs::remove_file(sealed_filename).unwrap();
|
||||
fs::remove_file(unsealed_filename).unwrap();
|
||||
|
||||
let normal_filename = test_dir_path.join("secret");
|
||||
let mut normal_file = File::create(normal_filename.clone()).unwrap();
|
||||
normal_file.write_all(b"testdata").unwrap();
|
||||
unseal_file(test_dir_path.to_str().unwrap()).await.unwrap();
|
||||
let mut contents = String::new();
|
||||
let mut normal_file = File::open(normal_filename.clone()).unwrap();
|
||||
normal_file.read_to_string(&mut contents).unwrap();
|
||||
assert_eq!(contents, String::from("testdata"));
|
||||
fs::remove_file(normal_filename).unwrap();
|
||||
|
||||
rt.shutdown_background();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -11,7 +11,7 @@ use nix::libc::{STDERR_FILENO, STDIN_FILENO, STDOUT_FILENO};
|
||||
use nix::pty::{openpty, OpenptyResult};
|
||||
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType, VsockAddr};
|
||||
use nix::sys::stat::Mode;
|
||||
use nix::sys::wait;
|
||||
use nix::sys::{signal, wait};
|
||||
use nix::unistd::{self, close, dup2, fork, setsid, ForkResult, Pid};
|
||||
use rustjail::pipestream::PipeStream;
|
||||
use slog::Logger;
|
||||
@@ -178,6 +178,13 @@ async fn run_in_parent<T: AsyncRead + AsyncWrite>(
|
||||
);
|
||||
}
|
||||
res = tokio::io::copy(&mut socket_reader, &mut master_writer) => {
|
||||
// the shell run in child may not be exited, in some scenes
|
||||
// eg. directly Ctrl-C in the host to terminate the kata-runtime process
|
||||
// that will block this task,while waiting for the child to exit.
|
||||
//
|
||||
let _ = signal::kill(child_pid, Some(signal::Signal::SIGKILL))
|
||||
.map_err(|e| warn!(logger, "kill child shell process {:?}", e));
|
||||
|
||||
info!(
|
||||
logger,
|
||||
"socket closed: {:?}", res
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
342
src/agent/src/device/block_device_handler.rs
Normal file
342
src/agent/src/device/block_device_handler.rs
Normal file
@@ -0,0 +1,342 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2024 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::ccw;
|
||||
use crate::device::{
|
||||
pcipath_to_sysfs, DeviceContext, DeviceHandler, DeviceInfo, SpecUpdate, BLOCK,
|
||||
};
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::linux_abi::CCW_ROOT_BUS_PATH;
|
||||
use crate::linux_abi::{create_pci_root_bus_path, SYSFS_DIR, SYSTEM_DEV_PATH};
|
||||
use crate::pci;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::{DRIVER_BLK_CCW_TYPE, DRIVER_BLK_MMIO_TYPE, DRIVER_BLK_PCI_TYPE};
|
||||
use protocols::agent::Device;
|
||||
use regex::Regex;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkPciDeviceHandler {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkCcwDeviceHandler {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkMmioDeviceHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for VirtioBlkPciDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_BLK_PCI_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
let pcipath = pci::Path::from_str(&device.id)?;
|
||||
let vm_path = get_virtio_blk_pci_device_name(ctx.sandbox, &pcipath).await?;
|
||||
|
||||
Ok(DeviceInfo::new(&vm_path, true)
|
||||
.context("New device info")?
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for VirtioBlkCcwDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_BLK_CCW_TYPE]
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
let ccw_device = ccw::Device::from_str(&device.id)?;
|
||||
let vm_path = get_virtio_blk_ccw_device_name(ctx.sandbox, &ccw_device).await?;
|
||||
|
||||
Ok(DeviceInfo::new(&vm_path, true)
|
||||
.context("New device info")?
|
||||
.into())
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
async fn device_handler(
|
||||
&self,
|
||||
_device: &Device,
|
||||
_ctx: &mut DeviceContext,
|
||||
) -> Result<SpecUpdate> {
|
||||
Err(anyhow!("CCW is only supported on s390x"))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for VirtioBlkMmioDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_BLK_MMIO_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
if device.vm_path.is_empty() {
|
||||
return Err(anyhow!("Invalid path for virtio mmio blk device"));
|
||||
}
|
||||
if !Path::new(&device.vm_path).exists() {
|
||||
get_virtio_blk_mmio_device_name(ctx.sandbox, &device.vm_path.to_string())
|
||||
.await
|
||||
.context("failed to get mmio device name")?;
|
||||
}
|
||||
|
||||
Ok(DeviceInfo::new(device.vm_path(), true)
|
||||
.context("New device info")?
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn get_virtio_blk_pci_device_name(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
pcipath: &pci::Path,
|
||||
) -> Result<String> {
|
||||
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
|
||||
let sysfs_rel_path = pcipath_to_sysfs(&root_bus_sysfs, pcipath)?;
|
||||
let matcher = VirtioBlkPciMatcher::new(&sysfs_rel_path);
|
||||
|
||||
let uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
Ok(format!("{}/{}", SYSTEM_DEV_PATH, &uev.devname))
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn get_virtio_blk_mmio_device_name(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
devpath: &str,
|
||||
) -> Result<()> {
|
||||
let devname = devpath
|
||||
.strip_prefix("/dev/")
|
||||
.ok_or_else(|| anyhow!("Storage source '{}' must start with /dev/", devpath))?;
|
||||
|
||||
let matcher = VirtioBlkMmioMatcher::new(devname);
|
||||
let uev = wait_for_uevent(sandbox, matcher)
|
||||
.await
|
||||
.context("failed to wait for uevent")?;
|
||||
if uev.devname != devname {
|
||||
return Err(anyhow!(
|
||||
"Unexpected device name {} for mmio device (expected {})",
|
||||
uev.devname,
|
||||
devname
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[instrument]
|
||||
pub async fn get_virtio_blk_ccw_device_name(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
device: &ccw::Device,
|
||||
) -> Result<String> {
|
||||
let matcher = VirtioBlkCCWMatcher::new(CCW_ROOT_BUS_PATH, device);
|
||||
let uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
let devname = uev.devname;
|
||||
Path::new(SYSTEM_DEV_PATH)
|
||||
.join(&devname)
|
||||
.to_str()
|
||||
.map(String::from)
|
||||
.ok_or_else(|| anyhow!("CCW device name {} is not valid UTF-8", &devname))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkPciMatcher {
|
||||
rex: Regex,
|
||||
}
|
||||
|
||||
impl VirtioBlkPciMatcher {
|
||||
pub fn new(relpath: &str) -> VirtioBlkPciMatcher {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
let re = format!(r"^{}{}/virtio[0-9]+/block/", root_bus, relpath);
|
||||
|
||||
VirtioBlkPciMatcher {
|
||||
rex: Regex::new(&re).expect("BUG: failed to compile VirtioBlkPciMatcher regex"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for VirtioBlkPciMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.subsystem == BLOCK && self.rex.is_match(&uev.devpath) && !uev.devname.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkMmioMatcher {
|
||||
suffix: String,
|
||||
}
|
||||
|
||||
impl VirtioBlkMmioMatcher {
|
||||
pub fn new(devname: &str) -> VirtioBlkMmioMatcher {
|
||||
VirtioBlkMmioMatcher {
|
||||
suffix: format!(r"/block/{}", devname),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for VirtioBlkMmioMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.subsystem == BLOCK && uev.devpath.ends_with(&self.suffix) && !uev.devname.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkCCWMatcher {
|
||||
rex: Regex,
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
impl VirtioBlkCCWMatcher {
|
||||
pub fn new(root_bus_path: &str, device: &ccw::Device) -> Self {
|
||||
let re = format!(
|
||||
r"^{}/0\.[0-3]\.[0-9a-f]{{1,4}}/{}/virtio[0-9]+/block/",
|
||||
root_bus_path, device
|
||||
);
|
||||
VirtioBlkCCWMatcher {
|
||||
rex: Regex::new(&re).expect("BUG: failed to compile VirtioBlkCCWMatcher regex"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
impl UeventMatcher for VirtioBlkCCWMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.action == "add" && self.rex.is_match(&uev.devpath) && !uev.devname.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_virtio_blk_matcher() {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
let devname = "vda";
|
||||
|
||||
let mut uev_a = crate::uevent::Uevent::default();
|
||||
let relpath_a = "/0000:00:0a.0";
|
||||
uev_a.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev_a.subsystem = BLOCK.to_string();
|
||||
uev_a.devname = devname.to_string();
|
||||
uev_a.devpath = format!("{}{}/virtio4/block/{}", root_bus, relpath_a, devname);
|
||||
let matcher_a = VirtioBlkPciMatcher::new(relpath_a);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
let relpath_b = "/0000:00:0a.0/0000:00:0b.0";
|
||||
uev_b.devpath = format!("{}{}/virtio0/block/{}", root_bus, relpath_b, devname);
|
||||
let matcher_b = VirtioBlkPciMatcher::new(relpath_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
assert!(!matcher_b.is_match(&uev_a));
|
||||
assert!(!matcher_a.is_match(&uev_b));
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[tokio::test]
|
||||
async fn test_virtio_blk_ccw_matcher() {
|
||||
let root_bus = CCW_ROOT_BUS_PATH;
|
||||
let subsystem = "block";
|
||||
let devname = "vda";
|
||||
let relpath = "0.0.0002";
|
||||
|
||||
let mut uev = crate::uevent::Uevent::default();
|
||||
uev.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev.subsystem = subsystem.to_string();
|
||||
uev.devname = devname.to_string();
|
||||
uev.devpath = format!(
|
||||
"{}/0.0.0001/{}/virtio1/{}/{}",
|
||||
root_bus, relpath, subsystem, devname
|
||||
);
|
||||
|
||||
// Valid path
|
||||
let device = ccw::Device::from_str(relpath).unwrap();
|
||||
let matcher = VirtioBlkCCWMatcher::new(root_bus, &device);
|
||||
assert!(matcher.is_match(&uev));
|
||||
|
||||
// Invalid paths
|
||||
uev.devpath = format!(
|
||||
"{}/0.0.0001/0.0.0003/virtio1/{}/{}",
|
||||
root_bus, subsystem, devname
|
||||
);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
|
||||
uev.devpath = format!("0.0.0001/{}/virtio1/{}/{}", relpath, subsystem, devname);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
|
||||
uev.devpath = format!(
|
||||
"{}/0.0.0001/{}/virtio/{}/{}",
|
||||
root_bus, relpath, subsystem, devname
|
||||
);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
|
||||
uev.devpath = format!("{}/0.0.0001/{}/virtio1", root_bus, relpath);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
|
||||
uev.devpath = format!(
|
||||
"{}/1.0.0001/{}/virtio1/{}/{}",
|
||||
root_bus, relpath, subsystem, devname
|
||||
);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
|
||||
uev.devpath = format!(
|
||||
"{}/0.4.0001/{}/virtio1/{}/{}",
|
||||
root_bus, relpath, subsystem, devname
|
||||
);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
|
||||
uev.devpath = format!(
|
||||
"{}/0.0.10000/{}/virtio1/{}/{}",
|
||||
root_bus, relpath, subsystem, devname
|
||||
);
|
||||
assert!(!matcher.is_match(&uev));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_virtio_blk_mmio_matcher() {
|
||||
let devname_a = "vda";
|
||||
let devname_b = "vdb";
|
||||
let mut uev_a = crate::uevent::Uevent::default();
|
||||
uev_a.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev_a.subsystem = BLOCK.to_string();
|
||||
uev_a.devname = devname_a.to_string();
|
||||
uev_a.devpath = format!(
|
||||
"/sys/devices/virtio-mmio-cmdline/virtio-mmio.0/virtio0/block/{}",
|
||||
devname_a
|
||||
);
|
||||
let matcher_a = VirtioBlkMmioMatcher::new(devname_a);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
uev_b.devpath = format!(
|
||||
"/sys/devices/virtio-mmio-cmdline/virtio-mmio.4/virtio4/block/{}",
|
||||
devname_b
|
||||
);
|
||||
let matcher_b = VirtioBlkMmioMatcher::new(devname_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
assert!(!matcher_b.is_match(&uev_a));
|
||||
assert!(!matcher_a.is_match(&uev_b));
|
||||
}
|
||||
}
|
||||
1113
src/agent/src/device/mod.rs
Normal file
1113
src/agent/src/device/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
114
src/agent/src/device/network_device_handler.rs
Normal file
114
src/agent/src/device/network_device_handler.rs
Normal file
@@ -0,0 +1,114 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2024 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
use crate::device::pcipath_to_sysfs;
|
||||
use crate::linux_abi::*;
|
||||
use crate::pci;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
use anyhow::{anyhow, Result};
|
||||
use regex::Regex;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
pub async fn wait_for_net_interface(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
pcipath: &pci::Path,
|
||||
) -> Result<()> {
|
||||
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
|
||||
let sysfs_rel_path = pcipath_to_sysfs(&root_bus_sysfs, pcipath)?;
|
||||
|
||||
let matcher = NetPciMatcher::new(&sysfs_rel_path);
|
||||
|
||||
// Check if the interface is already added in case network is cold-plugged
|
||||
// or the uevent loop is started before network is added.
|
||||
// We check for the pci deive in the sysfs directory for network devices.
|
||||
let pattern = format!(
|
||||
r"[./]+{}/[a-z0-9/]*net/[a-z0-9/]*",
|
||||
matcher.devpath.as_str()
|
||||
);
|
||||
let re = Regex::new(&pattern).expect("BUG: Failed to compile regex for NetPciMatcher");
|
||||
|
||||
for entry in fs::read_dir(SYSFS_NET_PATH)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
let target_path = fs::read_link(path)?;
|
||||
let target_path_str = target_path
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("Expected symlink in dir {}", SYSFS_NET_PATH))?;
|
||||
|
||||
if re.is_match(target_path_str) {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
let _uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct NetPciMatcher {
|
||||
devpath: String,
|
||||
}
|
||||
|
||||
impl NetPciMatcher {
|
||||
pub fn new(relpath: &str) -> NetPciMatcher {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
|
||||
NetPciMatcher {
|
||||
devpath: format!("{}{}", root_bus, relpath),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for NetPciMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.devpath.starts_with(self.devpath.as_str())
|
||||
&& uev.subsystem == "net"
|
||||
&& !uev.interface.is_empty()
|
||||
&& uev.action == "add"
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_net_pci_matcher() {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
let relpath_a = "/0000:00:02.0/0000:01:01.0";
|
||||
|
||||
let mut uev_a = crate::uevent::Uevent::default();
|
||||
uev_a.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev_a.devpath = format!("{}{}", root_bus, relpath_a);
|
||||
uev_a.subsystem = String::from("net");
|
||||
uev_a.interface = String::from("eth0");
|
||||
let matcher_a = NetPciMatcher::new(relpath_a);
|
||||
println!("Matcher a : {}", matcher_a.devpath);
|
||||
|
||||
let relpath_b = "/0000:00:02.0/0000:01:02.0";
|
||||
let mut uev_b = uev_a.clone();
|
||||
uev_b.devpath = format!("{}{}", root_bus, relpath_b);
|
||||
let matcher_b = NetPciMatcher::new(relpath_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
assert!(!matcher_b.is_match(&uev_a));
|
||||
assert!(!matcher_a.is_match(&uev_b));
|
||||
|
||||
let relpath_c = "/0000:00:02.0/0000:01:03.0";
|
||||
let net_substr = "/net/eth0";
|
||||
let mut uev_c = uev_a.clone();
|
||||
uev_c.devpath = format!("{}{}{}", root_bus, relpath_c, net_substr);
|
||||
let matcher_c = NetPciMatcher::new(relpath_c);
|
||||
|
||||
assert!(matcher_c.is_match(&uev_c));
|
||||
assert!(!matcher_a.is_match(&uev_c));
|
||||
assert!(!matcher_b.is_match(&uev_c));
|
||||
}
|
||||
}
|
||||
83
src/agent/src/device/nvdimm_device_handler.rs
Normal file
83
src/agent/src/device/nvdimm_device_handler.rs
Normal file
@@ -0,0 +1,83 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2024 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::device::{DeviceContext, DeviceHandler, DeviceInfo, SpecUpdate, BLOCK};
|
||||
use crate::linux_abi::ACPI_DEV_PATH;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::DRIVER_NVDIMM_TYPE;
|
||||
use protocols::agent::Device;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioNvdimmDeviceHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for VirtioNvdimmDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_NVDIMM_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
if device.vm_path.is_empty() {
|
||||
return Err(anyhow!("Invalid path for nvdimm device"));
|
||||
}
|
||||
Ok(DeviceInfo::new(device.vm_path(), true)
|
||||
.context("New device info")?
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn wait_for_pmem_device(sandbox: &Arc<Mutex<Sandbox>>, devpath: &str) -> Result<()> {
|
||||
let devname = match devpath.strip_prefix("/dev/") {
|
||||
Some(dev) => dev,
|
||||
None => {
|
||||
return Err(anyhow!(
|
||||
"Storage source '{}' must start with /dev/",
|
||||
devpath
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let matcher = PmemBlockMatcher::new(devname);
|
||||
let uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
if uev.devname != devname {
|
||||
return Err(anyhow!(
|
||||
"Unexpected device name {} for pmem device (expected {})",
|
||||
uev.devname,
|
||||
devname
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PmemBlockMatcher {
|
||||
suffix: String,
|
||||
}
|
||||
|
||||
impl PmemBlockMatcher {
|
||||
pub fn new(devname: &str) -> PmemBlockMatcher {
|
||||
let suffix = format!(r"/block/{}", devname);
|
||||
|
||||
PmemBlockMatcher { suffix }
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for PmemBlockMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.subsystem == BLOCK
|
||||
&& uev.devpath.starts_with(ACPI_DEV_PATH)
|
||||
&& uev.devpath.ends_with(&self.suffix)
|
||||
&& !uev.devname.is_empty()
|
||||
}
|
||||
}
|
||||
140
src/agent/src/device/scsi_device_handler.rs
Normal file
140
src/agent/src/device/scsi_device_handler.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2024 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::device::{DeviceContext, DeviceHandler, DeviceInfo, SpecUpdate, BLOCK};
|
||||
use crate::linux_abi::*;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::DRIVER_SCSI_TYPE;
|
||||
use protocols::agent::Device;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ScsiDeviceHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for ScsiDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_SCSI_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
let vm_path = get_scsi_device_name(ctx.sandbox, &device.id).await?;
|
||||
|
||||
Ok(DeviceInfo::new(&vm_path, true)
|
||||
.context("New device info")?
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn get_scsi_device_name(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
scsi_addr: &str,
|
||||
) -> Result<String> {
|
||||
let matcher = ScsiBlockMatcher::new(scsi_addr);
|
||||
|
||||
scan_scsi_bus(scsi_addr)?;
|
||||
let uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
Ok(format!("{}/{}", SYSTEM_DEV_PATH, &uev.devname))
|
||||
}
|
||||
|
||||
// FIXME: This matcher is only correct if the guest has at most one
|
||||
// SCSI host.
|
||||
#[derive(Debug)]
|
||||
pub struct ScsiBlockMatcher {
|
||||
search: String,
|
||||
}
|
||||
|
||||
impl ScsiBlockMatcher {
|
||||
pub fn new(scsi_addr: &str) -> ScsiBlockMatcher {
|
||||
let search = format!(r"/0:0:{}/block/", scsi_addr);
|
||||
|
||||
ScsiBlockMatcher { search }
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for ScsiBlockMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.subsystem == BLOCK && uev.devpath.contains(&self.search) && !uev.devname.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
|
||||
#[instrument]
|
||||
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
|
||||
let tokens: Vec<&str> = scsi_addr.split(':').collect();
|
||||
if tokens.len() != 2 {
|
||||
return Err(anyhow!(
|
||||
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
|
||||
scsi_addr
|
||||
));
|
||||
}
|
||||
|
||||
// Scan scsi host passing in the channel, SCSI id and LUN.
|
||||
// Channel is always 0 because we have only one SCSI controller.
|
||||
let scan_data = &format!("0 {} {}", tokens[0], tokens[1]);
|
||||
|
||||
for entry in fs::read_dir(SYSFS_SCSI_HOST_PATH)? {
|
||||
let host = entry?.file_name();
|
||||
|
||||
let host_str = host.to_str().ok_or_else(|| {
|
||||
anyhow!(
|
||||
"failed to convert directory entry to unicode for file {:?}",
|
||||
host
|
||||
)
|
||||
})?;
|
||||
|
||||
let scan_path = PathBuf::from(&format!("{}/{}/{}", SYSFS_SCSI_HOST_PATH, host_str, "scan"));
|
||||
|
||||
fs::write(scan_path, scan_data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_scsi_block_matcher() {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
let devname = "sda";
|
||||
|
||||
let mut uev_a = crate::uevent::Uevent::default();
|
||||
let addr_a = "0:0";
|
||||
uev_a.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev_a.subsystem = BLOCK.to_string();
|
||||
uev_a.devname = devname.to_string();
|
||||
uev_a.devpath = format!(
|
||||
"{}/0000:00:00.0/virtio0/host0/target0:0:0/0:0:{}/block/sda",
|
||||
root_bus, addr_a
|
||||
);
|
||||
let matcher_a = ScsiBlockMatcher::new(addr_a);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
let addr_b = "2:0";
|
||||
uev_b.devpath = format!(
|
||||
"{}/0000:00:00.0/virtio0/host0/target0:0:2/0:0:{}/block/sdb",
|
||||
root_bus, addr_b
|
||||
);
|
||||
let matcher_b = ScsiBlockMatcher::new(addr_b);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
assert!(!matcher_b.is_match(&uev_a));
|
||||
assert!(!matcher_a.is_match(&uev_b));
|
||||
}
|
||||
}
|
||||
455
src/agent/src/device/vfio_device_handler.rs
Normal file
455
src/agent/src/device/vfio_device_handler.rs
Normal file
@@ -0,0 +1,455 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2024 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::ap;
|
||||
use crate::device::{pcipath_to_sysfs, DevUpdate, DeviceContext, DeviceHandler, SpecUpdate};
|
||||
use crate::linux_abi::*;
|
||||
use crate::pci;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::uevent::{wait_for_uevent, Uevent, UeventMatcher};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::{DRIVER_VFIO_AP_TYPE, DRIVER_VFIO_PCI_GK_TYPE, DRIVER_VFIO_PCI_TYPE};
|
||||
use protocols::agent::Device;
|
||||
use slog::Logger;
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VfioPciDeviceHandler {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VfioApDeviceHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for VfioPciDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_VFIO_PCI_GK_TYPE, DRIVER_VFIO_PCI_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
let vfio_in_guest = device.type_ != DRIVER_VFIO_PCI_GK_TYPE;
|
||||
let mut pci_fixups = Vec::<(pci::Address, pci::Address)>::new();
|
||||
let mut group = None;
|
||||
|
||||
for opt in device.options.iter() {
|
||||
let (host, pcipath) = split_vfio_pci_option(opt)
|
||||
.ok_or_else(|| anyhow!("Malformed VFIO PCI option {:?}", opt))?;
|
||||
let host =
|
||||
pci::Address::from_str(host).context("Bad host PCI address in VFIO option {:?}")?;
|
||||
let pcipath = pci::Path::from_str(pcipath)?;
|
||||
|
||||
let guestdev = wait_for_pci_device(ctx.sandbox, &pcipath).await?;
|
||||
if vfio_in_guest {
|
||||
pci_driver_override(ctx.logger, SYSFS_BUS_PCI_PATH, guestdev, "vfio-pci")?;
|
||||
|
||||
// Devices must have an IOMMU group to be usable via VFIO
|
||||
let devgroup = pci_iommu_group(SYSFS_BUS_PCI_PATH, guestdev)?
|
||||
.ok_or_else(|| anyhow!("{} has no IOMMU group", guestdev))?;
|
||||
|
||||
if let Some(g) = group {
|
||||
if g != devgroup {
|
||||
return Err(anyhow!("{} is not in guest IOMMU group {}", guestdev, g));
|
||||
}
|
||||
}
|
||||
|
||||
group = Some(devgroup);
|
||||
}
|
||||
|
||||
// collect PCI address mapping for both vfio-pci-gk and vfio-pci device
|
||||
pci_fixups.push((host, guestdev));
|
||||
}
|
||||
|
||||
let dev_update = if vfio_in_guest {
|
||||
// If there are any devices at all, logic above ensures that group is not None
|
||||
let group = group.ok_or_else(|| anyhow!("failed to get VFIO group"))?;
|
||||
|
||||
let vm_path = get_vfio_pci_device_name(group, ctx.sandbox).await?;
|
||||
|
||||
Some(DevUpdate::new(&vm_path, &vm_path)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(SpecUpdate {
|
||||
dev: dev_update,
|
||||
pci: pci_fixups,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl DeviceHandler for VfioApDeviceHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_VFIO_AP_TYPE]
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[instrument]
|
||||
async fn device_handler(&self, device: &Device, ctx: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
// Force AP bus rescan
|
||||
fs::write(AP_SCANS_PATH, "1")?;
|
||||
for apqn in device.options.iter() {
|
||||
wait_for_ap_device(ctx.sandbox, ap::Address::from_str(apqn)?).await?;
|
||||
}
|
||||
let dev_update = Some(DevUpdate::new(Z9_CRYPT_DEV_PATH, Z9_CRYPT_DEV_PATH)?);
|
||||
Ok(SpecUpdate {
|
||||
dev: dev_update,
|
||||
pci: Vec::new(),
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
#[instrument]
|
||||
async fn device_handler(&self, _: &Device, _: &mut DeviceContext) -> Result<SpecUpdate> {
|
||||
Err(anyhow!("VFIO-AP is only supported on s390x"))
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_vfio_pci_device_name(
|
||||
grp: IommuGroup,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<String> {
|
||||
let matcher = VfioMatcher::new(grp);
|
||||
|
||||
let uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
Ok(format!("{}/{}", SYSTEM_DEV_PATH, &uev.devname))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VfioMatcher {
|
||||
syspath: String,
|
||||
}
|
||||
|
||||
impl VfioMatcher {
|
||||
pub fn new(grp: IommuGroup) -> VfioMatcher {
|
||||
VfioMatcher {
|
||||
syspath: format!("/devices/virtual/vfio/{}", grp),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for VfioMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.devpath == self.syspath
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[derive(Debug)]
|
||||
pub struct ApMatcher {
|
||||
syspath: String,
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
impl ApMatcher {
|
||||
pub fn new(address: ap::Address) -> ApMatcher {
|
||||
ApMatcher {
|
||||
syspath: format!(
|
||||
"{}/card{:02x}/{}",
|
||||
AP_ROOT_BUS_PATH, address.adapter_id, address
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
impl UeventMatcher for ApMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.action == "add" && uev.devpath == self.syspath
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PciMatcher {
|
||||
devpath: String,
|
||||
}
|
||||
|
||||
impl PciMatcher {
|
||||
pub fn new(relpath: &str) -> Result<PciMatcher> {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
Ok(PciMatcher {
|
||||
devpath: format!("{}{}", root_bus, relpath),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl UeventMatcher for PciMatcher {
|
||||
fn is_match(&self, uev: &Uevent) -> bool {
|
||||
uev.devpath == self.devpath
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[instrument]
|
||||
async fn wait_for_ap_device(sandbox: &Arc<Mutex<Sandbox>>, address: ap::Address) -> Result<()> {
|
||||
let matcher = ApMatcher::new(address);
|
||||
wait_for_uevent(sandbox, matcher).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn wait_for_pci_device(
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
pcipath: &pci::Path,
|
||||
) -> Result<pci::Address> {
|
||||
let root_bus_sysfs = format!("{}{}", SYSFS_DIR, create_pci_root_bus_path());
|
||||
let sysfs_rel_path = pcipath_to_sysfs(&root_bus_sysfs, pcipath)?;
|
||||
let matcher = PciMatcher::new(&sysfs_rel_path)?;
|
||||
|
||||
let uev = wait_for_uevent(sandbox, matcher).await?;
|
||||
|
||||
let addr = uev
|
||||
.devpath
|
||||
.rsplit('/')
|
||||
.next()
|
||||
.ok_or_else(|| anyhow!("Bad device path {:?} in uevent", &uev.devpath))?;
|
||||
let addr = pci::Address::from_str(addr)?;
|
||||
Ok(addr)
|
||||
}
|
||||
|
||||
// Represents an IOMMU group
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct IommuGroup(u32);
|
||||
|
||||
impl fmt::Display for IommuGroup {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the IOMMU group of a PCI device
|
||||
#[instrument]
|
||||
fn pci_iommu_group<T>(syspci: T, dev: pci::Address) -> Result<Option<IommuGroup>>
|
||||
where
|
||||
T: AsRef<OsStr> + std::fmt::Debug,
|
||||
{
|
||||
let syspci = Path::new(&syspci);
|
||||
let grouppath = syspci
|
||||
.join("devices")
|
||||
.join(dev.to_string())
|
||||
.join("iommu_group");
|
||||
|
||||
match fs::read_link(&grouppath) {
|
||||
// Device has no group
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
|
||||
Err(e) => Err(anyhow!("Error reading link {:?}: {}", &grouppath, e)),
|
||||
Ok(group) => {
|
||||
if let Some(group) = group.file_name() {
|
||||
if let Some(group) = group.to_str() {
|
||||
if let Ok(group) = group.parse::<u32>() {
|
||||
return Ok(Some(IommuGroup(group)));
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(anyhow!(
|
||||
"Unexpected IOMMU group link {:?} => {:?}",
|
||||
grouppath,
|
||||
group
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn split_vfio_pci_option(opt: &str) -> Option<(&str, &str)> {
|
||||
let mut tokens = opt.split('=');
|
||||
let hostbdf = tokens.next()?;
|
||||
let path = tokens.next()?;
|
||||
if tokens.next().is_some() {
|
||||
None
|
||||
} else {
|
||||
Some((hostbdf, path))
|
||||
}
|
||||
}
|
||||
|
||||
// Force a given PCI device to bind to the given driver, does
|
||||
// basically the same thing as
|
||||
// driverctl set-override <PCI address> <driver>
|
||||
#[instrument]
|
||||
pub fn pci_driver_override<T, U>(
|
||||
logger: &Logger,
|
||||
syspci: T,
|
||||
dev: pci::Address,
|
||||
drv: U,
|
||||
) -> Result<()>
|
||||
where
|
||||
T: AsRef<OsStr> + std::fmt::Debug,
|
||||
U: AsRef<OsStr> + std::fmt::Debug,
|
||||
{
|
||||
let syspci = Path::new(&syspci);
|
||||
let drv = drv.as_ref();
|
||||
info!(logger, "rebind_pci_driver: {} => {:?}", dev, drv);
|
||||
|
||||
let devpath = syspci.join("devices").join(dev.to_string());
|
||||
let overridepath = &devpath.join("driver_override");
|
||||
|
||||
fs::write(overridepath, drv.as_bytes())?;
|
||||
|
||||
let drvpath = &devpath.join("driver");
|
||||
let need_unbind = match fs::read_link(drvpath) {
|
||||
Ok(d) if d.file_name() == Some(drv) => return Ok(()), // Nothing to do
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => false, // No current driver
|
||||
Err(e) => return Err(anyhow!("Error checking driver on {}: {}", dev, e)),
|
||||
Ok(_) => true, // Current driver needs unbinding
|
||||
};
|
||||
if need_unbind {
|
||||
let unbindpath = &drvpath.join("unbind");
|
||||
fs::write(unbindpath, dev.to_string())?;
|
||||
}
|
||||
let probepath = syspci.join("drivers_probe");
|
||||
fs::write(probepath, dev.to_string())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_vfio_matcher() {
|
||||
let grpa = IommuGroup(1);
|
||||
let grpb = IommuGroup(22);
|
||||
|
||||
let mut uev_a = crate::uevent::Uevent::default();
|
||||
uev_a.action = crate::linux_abi::U_EVENT_ACTION_ADD.to_string();
|
||||
uev_a.devname = format!("vfio/{}", grpa);
|
||||
uev_a.devpath = format!("/devices/virtual/vfio/{}", grpa);
|
||||
let matcher_a = VfioMatcher::new(grpa);
|
||||
|
||||
let mut uev_b = uev_a.clone();
|
||||
uev_b.devpath = format!("/devices/virtual/vfio/{}", grpb);
|
||||
let matcher_b = VfioMatcher::new(grpb);
|
||||
|
||||
assert!(matcher_a.is_match(&uev_a));
|
||||
assert!(matcher_b.is_match(&uev_b));
|
||||
assert!(!matcher_b.is_match(&uev_a));
|
||||
assert!(!matcher_a.is_match(&uev_b));
|
||||
}
|
||||
#[test]
|
||||
fn test_split_vfio_pci_option() {
|
||||
assert_eq!(
|
||||
split_vfio_pci_option("0000:01:00.0=02/01"),
|
||||
Some(("0000:01:00.0", "02/01"))
|
||||
);
|
||||
assert_eq!(split_vfio_pci_option("0000:01:00.0=02/01=rubbish"), None);
|
||||
assert_eq!(split_vfio_pci_option("0000:01:00.0"), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pci_driver_override() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let testdir = tempdir().expect("failed to create tmpdir");
|
||||
let syspci = testdir.path(); // Path to mock /sys/bus/pci
|
||||
|
||||
let dev0 = pci::Address::new(0, 0, pci::SlotFn::new(0, 0).unwrap());
|
||||
let dev0path = syspci.join("devices").join(dev0.to_string());
|
||||
let dev0drv = dev0path.join("driver");
|
||||
let dev0override = dev0path.join("driver_override");
|
||||
|
||||
let drvapath = syspci.join("drivers").join("drv_a");
|
||||
let drvaunbind = drvapath.join("unbind");
|
||||
|
||||
let probepath = syspci.join("drivers_probe");
|
||||
|
||||
// Start mocking dev0 as being unbound
|
||||
fs::create_dir_all(&dev0path).unwrap();
|
||||
|
||||
pci_driver_override(&logger, syspci, dev0, "drv_a").unwrap();
|
||||
assert_eq!(fs::read_to_string(&dev0override).unwrap(), "drv_a");
|
||||
assert_eq!(fs::read_to_string(&probepath).unwrap(), dev0.to_string());
|
||||
|
||||
// Now mock dev0 already being attached to drv_a
|
||||
fs::create_dir_all(&drvapath).unwrap();
|
||||
std::os::unix::fs::symlink(&drvapath, dev0drv).unwrap();
|
||||
std::fs::remove_file(&probepath).unwrap();
|
||||
|
||||
pci_driver_override(&logger, syspci, dev0, "drv_a").unwrap(); // no-op
|
||||
assert_eq!(fs::read_to_string(&dev0override).unwrap(), "drv_a");
|
||||
assert!(!probepath.exists());
|
||||
|
||||
// Now try binding to a different driver
|
||||
pci_driver_override(&logger, syspci, dev0, "drv_b").unwrap();
|
||||
assert_eq!(fs::read_to_string(&dev0override).unwrap(), "drv_b");
|
||||
assert_eq!(fs::read_to_string(&probepath).unwrap(), dev0.to_string());
|
||||
assert_eq!(fs::read_to_string(drvaunbind).unwrap(), dev0.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pci_iommu_group() {
|
||||
let testdir = tempdir().expect("failed to create tmpdir"); // mock /sys
|
||||
let syspci = testdir.path().join("bus").join("pci");
|
||||
|
||||
// Mock dev0, which has no group
|
||||
let dev0 = pci::Address::new(0, 0, pci::SlotFn::new(0, 0).unwrap());
|
||||
let dev0path = syspci.join("devices").join(dev0.to_string());
|
||||
|
||||
fs::create_dir_all(dev0path).unwrap();
|
||||
|
||||
// Test dev0
|
||||
assert!(pci_iommu_group(&syspci, dev0).unwrap().is_none());
|
||||
|
||||
// Mock dev1, which is in group 12
|
||||
let dev1 = pci::Address::new(0, 1, pci::SlotFn::new(0, 0).unwrap());
|
||||
let dev1path = syspci.join("devices").join(dev1.to_string());
|
||||
let dev1group = dev1path.join("iommu_group");
|
||||
|
||||
fs::create_dir_all(&dev1path).unwrap();
|
||||
std::os::unix::fs::symlink("../../../kernel/iommu_groups/12", dev1group).unwrap();
|
||||
|
||||
// Test dev1
|
||||
assert_eq!(
|
||||
pci_iommu_group(&syspci, dev1).unwrap(),
|
||||
Some(IommuGroup(12))
|
||||
);
|
||||
|
||||
// Mock dev2, which has a bogus group (dir instead of symlink)
|
||||
let dev2 = pci::Address::new(0, 2, pci::SlotFn::new(0, 0).unwrap());
|
||||
let dev2path = syspci.join("devices").join(dev2.to_string());
|
||||
let dev2group = dev2path.join("iommu_group");
|
||||
|
||||
fs::create_dir_all(dev2group).unwrap();
|
||||
|
||||
// Test dev2
|
||||
assert!(pci_iommu_group(&syspci, dev2).is_err());
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[tokio::test]
|
||||
async fn test_vfio_ap_matcher() {
|
||||
let subsystem = "ap";
|
||||
let card = "0a";
|
||||
let relpath = format!("{}.0001", card);
|
||||
|
||||
let mut uev = Uevent::default();
|
||||
uev.action = U_EVENT_ACTION_ADD.to_string();
|
||||
uev.subsystem = subsystem.to_string();
|
||||
uev.devpath = format!("{}/card{}/{}", AP_ROOT_BUS_PATH, card, relpath);
|
||||
|
||||
let ap_address = ap::Address::from_str(&relpath).unwrap();
|
||||
let matcher = ApMatcher::new(ap_address);
|
||||
|
||||
assert!(matcher.is_match(&uev));
|
||||
|
||||
let mut uev_remove = uev.clone();
|
||||
uev_remove.action = U_EVENT_ACTION_REMOVE.to_string();
|
||||
assert!(!matcher.is_match(&uev_remove));
|
||||
|
||||
let mut uev_other_device = uev.clone();
|
||||
uev_other_device.devpath = format!("{}/card{}/{}.0002", AP_ROOT_BUS_PATH, card, card);
|
||||
assert!(!matcher.is_match(&uev_other_device));
|
||||
}
|
||||
}
|
||||
@@ -15,12 +15,13 @@ use std::sync::Arc;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use image_rs::image::ImageClient;
|
||||
use kata_sys_util::validate::verify_id;
|
||||
use oci_spec::runtime as oci;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::rpc::CONTAINER_BASE;
|
||||
use crate::AGENT_CONFIG;
|
||||
|
||||
const KATA_IMAGE_WORK_DIR: &str = "/run/kata-containers/image/";
|
||||
pub const KATA_IMAGE_WORK_DIR: &str = "/run/kata-containers/image/";
|
||||
const CONFIG_JSON: &str = "config.json";
|
||||
const KATA_PAUSE_BUNDLE: &str = "/pause_bundle";
|
||||
|
||||
@@ -56,13 +57,27 @@ impl ImageService {
|
||||
pub fn new() -> Self {
|
||||
let mut image_client = ImageClient::new(PathBuf::from(KATA_IMAGE_WORK_DIR));
|
||||
#[cfg(feature = "guest-pull")]
|
||||
if !AGENT_CONFIG.image_registry_auth.is_empty() {
|
||||
let registry_auth = &AGENT_CONFIG.image_registry_auth;
|
||||
debug!(sl(), "Set registry auth file {:?}", registry_auth);
|
||||
image_client.config.file_paths.auth_file = registry_auth.clone();
|
||||
image_client.config.auth = true;
|
||||
}
|
||||
{
|
||||
if !AGENT_CONFIG.image_registry_auth.is_empty() {
|
||||
let registry_auth = &AGENT_CONFIG.image_registry_auth;
|
||||
debug!(sl(), "Set registry auth file {:?}", registry_auth);
|
||||
image_client.config.file_paths.auth_file = registry_auth.clone();
|
||||
image_client.config.auth = true;
|
||||
}
|
||||
|
||||
let enable_signature_verification = &AGENT_CONFIG.enable_signature_verification;
|
||||
debug!(
|
||||
sl(),
|
||||
"Enable image signature verification: {:?}", enable_signature_verification
|
||||
);
|
||||
image_client.config.security_validate = *enable_signature_verification;
|
||||
|
||||
if !AGENT_CONFIG.image_policy_file.is_empty() {
|
||||
let image_policy_file = &AGENT_CONFIG.image_policy_file;
|
||||
debug!(sl(), "Use imagepolicy file {:?}", image_policy_file);
|
||||
image_client.config.file_paths.policy_path = image_policy_file.clone();
|
||||
}
|
||||
}
|
||||
Self { image_client }
|
||||
}
|
||||
|
||||
@@ -85,7 +100,7 @@ impl ImageService {
|
||||
})?)
|
||||
.context("load image config file")?;
|
||||
|
||||
let image_oci_process = image_oci.process.ok_or_else(|| {
|
||||
let image_oci_process = image_oci.process().as_ref().ok_or_else(|| {
|
||||
anyhow!("The guest pause image config does not contain a process specification. Please check the pause image.")
|
||||
})?;
|
||||
info!(
|
||||
@@ -95,11 +110,12 @@ impl ImageService {
|
||||
);
|
||||
|
||||
// Ensure that the args vector is not empty before accessing its elements.
|
||||
let args = image_oci_process.args;
|
||||
// Check the number of arguments.
|
||||
if args.is_empty() {
|
||||
let args = if let Some(args_vec) = image_oci_process.args() {
|
||||
args_vec
|
||||
} else {
|
||||
bail!("The number of args should be greater than or equal to one! Please check the pause image.");
|
||||
}
|
||||
};
|
||||
|
||||
let pause_bundle = scoped_join(CONTAINER_BASE, cid)?;
|
||||
fs::create_dir_all(&pause_bundle)?;
|
||||
|
||||
@@ -90,6 +90,7 @@ pub const SYSFS_MEMORY_HOTPLUG_PROBE_PATH: &str = "/sys/devices/system/memory/pr
|
||||
pub const SYSFS_MEMORY_ONLINE_PATH: &str = "/sys/devices/system/memory";
|
||||
|
||||
pub const SYSFS_SCSI_HOST_PATH: &str = "/sys/class/scsi_host";
|
||||
pub const SYSFS_NET_PATH: &str = "/sys/class/net";
|
||||
|
||||
pub const SYSFS_BUS_PCI_PATH: &str = "/sys/bus/pci";
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate capctl;
|
||||
extern crate oci;
|
||||
extern crate prometheus;
|
||||
extern crate protocols;
|
||||
extern crate regex;
|
||||
@@ -60,7 +59,6 @@ mod util;
|
||||
mod version;
|
||||
mod watcher;
|
||||
|
||||
use cdh::CDHClient;
|
||||
use config::GuestComponentsProcs;
|
||||
use mount::{cgroups_mount, general_mount};
|
||||
use sandbox::Sandbox;
|
||||
@@ -409,7 +407,6 @@ async fn start_sandbox(
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
sandbox.lock().await.sender = Some(tx);
|
||||
|
||||
let mut cdh_client = None;
|
||||
let gc_procs = config.guest_components_procs;
|
||||
if gc_procs != GuestComponentsProcs::None {
|
||||
if !attestation_binaries_available(logger, &gc_procs) {
|
||||
@@ -418,18 +415,12 @@ async fn start_sandbox(
|
||||
"attestation binaries requested for launch not available"
|
||||
);
|
||||
} else {
|
||||
cdh_client = init_attestation_components(logger, config)?;
|
||||
init_attestation_components(logger, config).await?;
|
||||
}
|
||||
}
|
||||
|
||||
// vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(
|
||||
sandbox.clone(),
|
||||
config.server_addr.as_str(),
|
||||
init_mode,
|
||||
cdh_client,
|
||||
)
|
||||
.await?;
|
||||
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode).await?;
|
||||
|
||||
server.start().await?;
|
||||
|
||||
@@ -460,10 +451,10 @@ fn attestation_binaries_available(logger: &Logger, procs: &GuestComponentsProcs)
|
||||
// and the corresponding procs are enabled in the agent configuration. the process will be
|
||||
// launched in the background and the function will return immediately.
|
||||
// If the CDH is started, a CDH client will be instantiated and returned.
|
||||
fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<Option<CDHClient>> {
|
||||
async fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<()> {
|
||||
// skip launch of any guest-component
|
||||
if config.guest_components_procs == GuestComponentsProcs::None {
|
||||
return Ok(None);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
debug!(logger, "spawning attestation-agent process {}", AA_PATH);
|
||||
@@ -478,7 +469,7 @@ fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<
|
||||
|
||||
// skip launch of confidential-data-hub and api-server-rest
|
||||
if config.guest_components_procs == GuestComponentsProcs::AttestationAgent {
|
||||
return Ok(None);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let ocicrypt_config = serde_json::json!({
|
||||
@@ -506,11 +497,12 @@ fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<
|
||||
)
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", CDH_PATH, e))?;
|
||||
|
||||
let cdh_client = CDHClient::new().context("Failed to create CDH Client")?;
|
||||
// initialize cdh client
|
||||
cdh::init_cdh_client(CDH_SOCKET_URI).await?;
|
||||
|
||||
// skip launch of api-server-rest
|
||||
if config.guest_components_procs == GuestComponentsProcs::ConfidentialDataHub {
|
||||
return Ok(Some(cdh_client));
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let features = config.guest_components_rest_api;
|
||||
@@ -527,7 +519,7 @@ fn init_attestation_components(logger: &Logger, config: &AgentConfig) -> Result<
|
||||
)
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", API_SERVER_PATH, e))?;
|
||||
|
||||
Ok(Some(cdh_client))
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn wait_for_path_to_exist(logger: &Logger, path: &str, timeout_secs: i32) -> Result<()> {
|
||||
@@ -608,11 +600,7 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result
|
||||
|
||||
#[cfg(feature = "agent-policy")]
|
||||
async fn initialize_policy() -> Result<()> {
|
||||
AGENT_POLICY
|
||||
.lock()
|
||||
.await
|
||||
.initialize("/etc/kata-opa/default-policy.rego")
|
||||
.await
|
||||
AGENT_POLICY.lock().await.initialize().await
|
||||
}
|
||||
|
||||
// The Rust standard library had suppressed the default SIGPIPE behavior,
|
||||
|
||||
@@ -95,16 +95,30 @@ impl Handle {
|
||||
let mut new_link = None;
|
||||
if link.name() != iface.name {
|
||||
if let Ok(link) = self.find_link(LinkFilter::Name(iface.name.as_str())).await {
|
||||
// Bring down interface if it is UP
|
||||
if link.is_up() {
|
||||
self.enable_link(link.index(), false).await?;
|
||||
}
|
||||
|
||||
// update the existing interface name with a temporary name, otherwise
|
||||
// it would failed to udpate this interface with an existing name.
|
||||
let mut request = self.handle.link().set(link.index());
|
||||
request.message_mut().header = link.header.clone();
|
||||
let link_name = link.name();
|
||||
let temp_name = link_name.clone() + "_temp";
|
||||
|
||||
request
|
||||
.name(format!("{}_temp", link.name()))
|
||||
.up()
|
||||
.name(temp_name.clone())
|
||||
.execute()
|
||||
.await?;
|
||||
.await
|
||||
.map_err(|err| {
|
||||
anyhow!(
|
||||
"Failed to rename interface {} to {}with error: {}",
|
||||
link_name,
|
||||
temp_name,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
new_link = Some(link);
|
||||
}
|
||||
@@ -120,14 +134,33 @@ impl Handle {
|
||||
.arp(iface.raw_flags & libc::IFF_NOARP as u32 == 0)
|
||||
.up()
|
||||
.execute()
|
||||
.await?;
|
||||
.await
|
||||
.map_err(|err| {
|
||||
anyhow!(
|
||||
"Failure in LinkSetRequest for interface {}: {}",
|
||||
iface.name.as_str(),
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
// swap the updated iface's name.
|
||||
if let Some(nlink) = new_link {
|
||||
let mut request = self.handle.link().set(nlink.index());
|
||||
request.message_mut().header = nlink.header.clone();
|
||||
|
||||
request.name(link.name()).up().execute().await?;
|
||||
request
|
||||
.name(link.name())
|
||||
.up()
|
||||
.execute()
|
||||
.await
|
||||
.map_err(|err| {
|
||||
anyhow!(
|
||||
"Error swapping back interface name {} to {}: {}",
|
||||
nlink.name().as_str(),
|
||||
link.name(),
|
||||
err
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -5,13 +5,13 @@
|
||||
|
||||
use anyhow::Result;
|
||||
use protobuf::MessageDyn;
|
||||
use slog::Drain;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
use crate::rpc::ttrpc_error;
|
||||
use crate::AGENT_POLICY;
|
||||
use crate::{AGENT_CONFIG, AGENT_POLICY};
|
||||
|
||||
static POLICY_LOG_FILE: &str = "/tmp/policy.txt";
|
||||
static POLICY_DEFAULT_FILE: &str = "/etc/kata-opa/default-policy.rego";
|
||||
|
||||
/// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
@@ -86,8 +86,8 @@ impl AgentPolicy {
|
||||
}
|
||||
|
||||
/// Initialize regorus.
|
||||
pub async fn initialize(&mut self, default_policy_file: &str) -> Result<()> {
|
||||
if sl!().is_enabled(slog::Level::Debug) {
|
||||
pub async fn initialize(&mut self) -> Result<()> {
|
||||
if AGENT_CONFIG.log_level.as_usize() >= slog::Level::Debug.as_usize() {
|
||||
self.log_file = Some(
|
||||
tokio::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
@@ -99,12 +99,16 @@ impl AgentPolicy {
|
||||
debug!(sl!(), "policy: log file: {}", POLICY_LOG_FILE);
|
||||
}
|
||||
|
||||
// Check if policy file has been set via AgentConfig
|
||||
// If empty, use default file.
|
||||
let mut default_policy_file = AGENT_CONFIG.policy_file.clone();
|
||||
if default_policy_file.is_empty() {
|
||||
default_policy_file = POLICY_DEFAULT_FILE.to_string();
|
||||
}
|
||||
info!(sl!(), "default policy: {default_policy_file}");
|
||||
|
||||
self.engine.add_policy_from_file(default_policy_file)?;
|
||||
self.engine.set_input_json("{}")?;
|
||||
self.allow_failures = match self.allow_request("AllowRequestsFailingPolicy", "{}").await {
|
||||
Ok((allowed, _prints)) => allowed,
|
||||
Err(_) => false,
|
||||
};
|
||||
self.update_allow_failures_flag().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -116,8 +120,18 @@ impl AgentPolicy {
|
||||
let query = format!("data.agent_policy.{ep}");
|
||||
self.engine.set_input_json(ep_input)?;
|
||||
|
||||
let mut allow = self.engine.eval_bool_query(query, false)?;
|
||||
let mut allow = match self.engine.eval_bool_query(query, false) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
if !self.allow_failures {
|
||||
return Err(e);
|
||||
}
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if !allow && self.allow_failures {
|
||||
warn!(sl!(), "policy: ignoring error for {ep}");
|
||||
allow = true;
|
||||
}
|
||||
|
||||
@@ -134,6 +148,7 @@ impl AgentPolicy {
|
||||
self.engine = Self::new_engine();
|
||||
self.engine
|
||||
.add_policy("agent_policy".to_string(), policy.to_string())?;
|
||||
self.update_allow_failures_flag().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -160,4 +175,20 @@ impl AgentPolicy {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_allow_failures_flag(&mut self) -> Result<()> {
|
||||
self.allow_failures = match self.allow_request("AllowRequestsFailingPolicy", "{}").await {
|
||||
Ok((allowed, _prints)) => {
|
||||
if allowed {
|
||||
warn!(
|
||||
sl!(),
|
||||
"policy: AllowRequestsFailingPolicy is enabled - will ignore errors"
|
||||
);
|
||||
}
|
||||
allowed
|
||||
}
|
||||
Err(_) => false,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,11 +8,13 @@ use rustjail::{pipestream::PipeStream, process::StreamType};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::{CString, OsStr};
|
||||
use std::fmt::Debug;
|
||||
use std::io;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use ttrpc::{
|
||||
self,
|
||||
@@ -22,7 +24,8 @@ use ttrpc::{
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use cgroups::freezer::FreezerState;
|
||||
use oci::{LinuxNamespace, Root, Spec};
|
||||
use oci::{Hooks, LinuxNamespace, Spec};
|
||||
use oci_spec::runtime as oci;
|
||||
use protobuf::MessageField;
|
||||
use protocols::agent::{
|
||||
AddSwapRequest, AgentDetails, CopyFileRequest, GetIPTablesRequest, GetIPTablesResponse,
|
||||
@@ -52,8 +55,12 @@ use nix::sys::{stat, statfs};
|
||||
use nix::unistd::{self, Pid};
|
||||
use rustjail::process::ProcessOperations;
|
||||
|
||||
use crate::device::{add_devices, get_virtio_blk_pci_device_name, update_env_pci};
|
||||
use crate::cdh;
|
||||
use crate::device::block_device_handler::get_virtio_blk_pci_device_name;
|
||||
use crate::device::network_device_handler::wait_for_net_interface;
|
||||
use crate::device::{add_devices, update_env_pci};
|
||||
use crate::features::get_build_features;
|
||||
use crate::image::KATA_IMAGE_WORK_DIR;
|
||||
use crate::linux_abi::*;
|
||||
use crate::metrics::get_metrics;
|
||||
use crate::mount::baremount;
|
||||
@@ -64,6 +71,7 @@ use crate::pci;
|
||||
use crate::random;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::storage::{add_storages, update_ephemeral_mounts, STORAGE_HANDLERS};
|
||||
use crate::util;
|
||||
use crate::version::{AGENT_VERSION, API_VERSION};
|
||||
use crate::AGENT_CONFIG;
|
||||
|
||||
@@ -76,8 +84,6 @@ use crate::policy::{do_set_policy, is_allowed};
|
||||
#[cfg(feature = "guest-pull")]
|
||||
use crate::image;
|
||||
|
||||
use crate::cdh::CDHClient;
|
||||
|
||||
use opentelemetry::global;
|
||||
use tracing::span;
|
||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
@@ -100,7 +106,7 @@ use kata_types::k8s;
|
||||
|
||||
pub const CONTAINER_BASE: &str = "/run/kata-containers";
|
||||
const MODPROBE_PATH: &str = "/sbin/modprobe";
|
||||
|
||||
const TRUSTED_IMAGE_STORAGE_DEVICE: &str = "/dev/trusted_store";
|
||||
/// the iptables seriers binaries could appear either in /sbin
|
||||
/// or /usr/sbin, we need to check both of them
|
||||
const USR_IPTABLES_SAVE: &str = "/usr/sbin/iptables-save";
|
||||
@@ -173,7 +179,6 @@ impl<T> OptionToTtrpcResult<T> for Option<T> {
|
||||
pub struct AgentService {
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
init_mode: bool,
|
||||
cdh_client: Option<CDHClient>,
|
||||
}
|
||||
|
||||
impl AgentService {
|
||||
@@ -194,11 +199,10 @@ impl AgentService {
|
||||
|
||||
kata_sys_util::validate::verify_id(&cid)?;
|
||||
|
||||
let mut oci_spec = req.OCI.clone();
|
||||
let use_sandbox_pidns = req.sandbox_pidns();
|
||||
|
||||
let mut oci = match oci_spec.as_mut() {
|
||||
Some(spec) => rustjail::grpc_to_oci(spec),
|
||||
let mut oci = match req.OCI.into_option() {
|
||||
Some(spec) => spec.into(),
|
||||
None => {
|
||||
error!(sl(), "no oci spec in the create container request!");
|
||||
return Err(anyhow!(nix::Error::EINVAL));
|
||||
@@ -218,23 +222,9 @@ impl AgentService {
|
||||
// updates the devices listed in the OCI spec, so that they actually
|
||||
// match real devices inside the VM. This step is necessary since we
|
||||
// cannot predict everything from the caller.
|
||||
add_devices(&req.devices, &mut oci, &self.sandbox).await?;
|
||||
add_devices(&sl(), &req.devices, &mut oci, &self.sandbox).await?;
|
||||
|
||||
if let Some(cdh) = self.cdh_client.as_ref() {
|
||||
let process = oci
|
||||
.process
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain process field"))?;
|
||||
|
||||
for env in process.env.iter_mut() {
|
||||
match cdh.unseal_env(env).await {
|
||||
Ok(unsealed_env) => *env = unsealed_env.to_string(),
|
||||
Err(e) => {
|
||||
warn!(sl(), "Failed to unseal secret: {}", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
cdh_handler(&mut oci).await?;
|
||||
|
||||
// Both rootfs and volumes (invoked with --volume for instance) will
|
||||
// be processed the same way. The idea is to always mount any provided
|
||||
@@ -263,7 +253,13 @@ impl AgentService {
|
||||
// systemd: "[slice]:[prefix]:[name]"
|
||||
// fs: "/path_a/path_b"
|
||||
// If agent is init we can't use systemd cgroup mode, no matter what the host tells us
|
||||
let cgroups_path = oci.linux.as_ref().map_or("", |linux| &linux.cgroups_path);
|
||||
let cgroups_path = &oci
|
||||
.linux()
|
||||
.as_ref()
|
||||
.and_then(|linux| linux.cgroups_path().as_ref())
|
||||
.map(|cgrps_path| cgrps_path.display().to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
let use_systemd_cgroup = if self.init_mode {
|
||||
false
|
||||
} else {
|
||||
@@ -291,8 +287,8 @@ impl AgentService {
|
||||
|
||||
let pipe_size = AGENT_CONFIG.container_pipe_size;
|
||||
|
||||
let p = if let Some(p) = oci.process {
|
||||
Process::new(&sl(), &p, cid.as_str(), true, pipe_size, proc_io)?
|
||||
let p = if let Some(p) = oci.process() {
|
||||
Process::new(&sl(), p, cid.as_str(), true, pipe_size, proc_io)?
|
||||
} else {
|
||||
info!(sl(), "no process configurations!");
|
||||
return Err(anyhow!(nix::Error::EINVAL));
|
||||
@@ -408,8 +404,7 @@ impl AgentService {
|
||||
update_env_pci(&mut process.Env, &sandbox.pcimap)?;
|
||||
|
||||
let pipe_size = AGENT_CONFIG.container_pipe_size;
|
||||
let ocip = rustjail::process_grpc_to_oci(&process);
|
||||
|
||||
let ocip = process.into();
|
||||
let p = Process::new(&sl(), &ocip, exec_id.as_str(), false, pipe_size, proc_io)?;
|
||||
|
||||
let ctr = sandbox
|
||||
@@ -759,7 +754,7 @@ impl agent_ttrpc::AgentService for AgentService {
|
||||
.get_container(&req.container_id)
|
||||
.map_ttrpc_err(ttrpc::Code::INVALID_ARGUMENT, "invalid container id")?;
|
||||
if let Some(res) = req.resources.as_ref() {
|
||||
let oci_res = rustjail::resources_grpc_to_oci(res);
|
||||
let oci_res = res.clone().into();
|
||||
ctr.set(oci_res).map_ttrpc_err(same)?;
|
||||
}
|
||||
|
||||
@@ -940,6 +935,17 @@ impl agent_ttrpc::AgentService for AgentService {
|
||||
"empty update interface request",
|
||||
)?;
|
||||
|
||||
// For network devices passed on the pci bus, check for the network interface
|
||||
// to be available first.
|
||||
if !interface.pciPath.is_empty() {
|
||||
let pcipath = pci::Path::from_str(&interface.pciPath)
|
||||
.map_ttrpc_err(|e| format!("Unexpected pci-path for network interface: {:?}", e))?;
|
||||
|
||||
wait_for_net_interface(&self.sandbox, &pcipath)
|
||||
.await
|
||||
.map_ttrpc_err(|e| format!("interface not available: {:?}", e))?;
|
||||
}
|
||||
|
||||
self.sandbox
|
||||
.lock()
|
||||
.await
|
||||
@@ -1626,12 +1632,10 @@ pub async fn start(
|
||||
s: Arc<Mutex<Sandbox>>,
|
||||
server_address: &str,
|
||||
init_mode: bool,
|
||||
cdh_client: Option<CDHClient>,
|
||||
) -> Result<TtrpcServer> {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: s,
|
||||
init_mode,
|
||||
cdh_client,
|
||||
}) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
|
||||
let aservice = agent_ttrpc::create_agent_service(Arc::new(agent_service));
|
||||
|
||||
@@ -1668,41 +1672,51 @@ fn update_container_namespaces(
|
||||
sandbox_pidns: bool,
|
||||
) -> Result<()> {
|
||||
let linux = spec
|
||||
.linux
|
||||
.linux_mut()
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!(ERR_NO_LINUX_FIELD))?;
|
||||
|
||||
let namespaces = linux.namespaces.as_mut_slice();
|
||||
for namespace in namespaces.iter_mut() {
|
||||
if namespace.r#type == NSTYPEIPC {
|
||||
namespace.path = sandbox.shared_ipcns.path.clone();
|
||||
continue;
|
||||
if let Some(namespaces) = linux.namespaces_mut() {
|
||||
for namespace in namespaces.iter_mut() {
|
||||
if namespace.typ().to_string() == NSTYPEIPC {
|
||||
namespace.set_path(if !sandbox.shared_ipcns.path.is_empty() {
|
||||
Some(PathBuf::from(&sandbox.shared_ipcns.path))
|
||||
} else {
|
||||
None
|
||||
});
|
||||
continue;
|
||||
}
|
||||
if namespace.typ().to_string() == NSTYPEUTS {
|
||||
namespace.set_path(if !sandbox.shared_utsns.path.is_empty() {
|
||||
Some(PathBuf::from(&sandbox.shared_utsns.path))
|
||||
} else {
|
||||
None
|
||||
});
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if namespace.r#type == NSTYPEUTS {
|
||||
namespace.path = sandbox.shared_utsns.path.clone();
|
||||
continue;
|
||||
|
||||
// update pid namespace
|
||||
let mut pid_ns = LinuxNamespace::default();
|
||||
pid_ns.set_typ(oci::LinuxNamespaceType::try_from(NSTYPEPID).unwrap());
|
||||
|
||||
// Use shared pid ns if useSandboxPidns has been set in either
|
||||
// the create_sandbox request or create_container request.
|
||||
// Else set this to empty string so that a new pid namespace is
|
||||
// created for the container.
|
||||
if sandbox_pidns {
|
||||
if let Some(ref pidns) = &sandbox.sandbox_pidns {
|
||||
if !pidns.path.is_empty() {
|
||||
pid_ns.set_path(Some(PathBuf::from(&pidns.path)));
|
||||
}
|
||||
} else if !sandbox.containers.is_empty() {
|
||||
return Err(anyhow!(ERR_NO_SANDBOX_PIDNS));
|
||||
}
|
||||
}
|
||||
|
||||
namespaces.push(pid_ns);
|
||||
}
|
||||
|
||||
// update pid namespace
|
||||
let mut pid_ns = LinuxNamespace {
|
||||
r#type: NSTYPEPID.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Use shared pid ns if useSandboxPidns has been set in either
|
||||
// the create_sandbox request or create_container request.
|
||||
// Else set this to empty string so that a new pid namespace is
|
||||
// created for the container.
|
||||
if sandbox_pidns {
|
||||
if let Some(ref pidns) = &sandbox.sandbox_pidns {
|
||||
pid_ns.path = String::from(pidns.path.as_str());
|
||||
} else {
|
||||
return Err(anyhow!(ERR_NO_SANDBOX_PIDNS));
|
||||
}
|
||||
}
|
||||
|
||||
linux.namespaces.push(pid_ns);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1737,11 +1751,18 @@ async fn remove_container_resources(sandbox: &mut Sandbox, cid: &str) -> Result<
|
||||
|
||||
fn append_guest_hooks(s: &Sandbox, oci: &mut Spec) -> Result<()> {
|
||||
if let Some(ref guest_hooks) = s.hooks {
|
||||
let mut hooks = oci.hooks.take().unwrap_or_default();
|
||||
hooks.prestart.append(&mut guest_hooks.prestart.clone());
|
||||
hooks.poststart.append(&mut guest_hooks.poststart.clone());
|
||||
hooks.poststop.append(&mut guest_hooks.poststop.clone());
|
||||
oci.hooks = Some(hooks);
|
||||
if let Some(hooks) = oci.hooks_mut() {
|
||||
util::merge(hooks.poststart_mut(), guest_hooks.prestart());
|
||||
util::merge(hooks.poststart_mut(), guest_hooks.poststart());
|
||||
util::merge(hooks.poststop_mut(), guest_hooks.poststop());
|
||||
} else {
|
||||
let _oci_hooks = oci.set_hooks(Some(Hooks::default()));
|
||||
if let Some(hooks) = oci.hooks_mut() {
|
||||
hooks.set_prestart(guest_hooks.prestart().clone());
|
||||
hooks.set_poststart(guest_hooks.poststart().clone());
|
||||
hooks.set_poststop(guest_hooks.poststop().clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -1941,7 +1962,7 @@ async fn do_add_swap(sandbox: &Arc<Mutex<Sandbox>>, req: &AddSwapRequest) -> Res
|
||||
// - container rootfs bind mounted at /<CONTAINER_BASE>/<cid>/rootfs
|
||||
// - modify container spec root to point to /<CONTAINER_BASE>/<cid>/rootfs
|
||||
pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
|
||||
let spec_root = if let Some(sr) = &spec.root {
|
||||
let spec_root = if let Some(sr) = &spec.root() {
|
||||
sr
|
||||
} else {
|
||||
return Err(anyhow!(nix::Error::EINVAL));
|
||||
@@ -1950,7 +1971,7 @@ pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(cid);
|
||||
let config_path = bundle_path.join("config.json");
|
||||
let rootfs_path = bundle_path.join("rootfs");
|
||||
let spec_root_path = Path::new(&spec_root.path);
|
||||
let spec_root_path = spec_root.path();
|
||||
|
||||
let rootfs_exists = Path::new(&rootfs_path).exists();
|
||||
info!(
|
||||
@@ -1970,15 +1991,10 @@ pub fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
|
||||
)?;
|
||||
}
|
||||
|
||||
let rootfs_path_name = rootfs_path
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow!("failed to convert rootfs to unicode"))?
|
||||
.to_string();
|
||||
|
||||
spec.root = Some(Root {
|
||||
path: rootfs_path_name,
|
||||
readonly: spec_root.readonly,
|
||||
});
|
||||
let mut oci_root = oci::Root::default();
|
||||
oci_root.set_path(rootfs_path);
|
||||
oci_root.set_readonly(spec_root.readonly());
|
||||
spec.set_root(Some(oci_root));
|
||||
|
||||
let _ = spec.save(
|
||||
config_path
|
||||
@@ -2036,6 +2052,76 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn cdh_handler(oci: &mut Spec) -> Result<()> {
|
||||
if !cdh::is_cdh_client_initialized().await {
|
||||
return Ok(());
|
||||
}
|
||||
let process = oci
|
||||
.process_mut()
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain process field"))?;
|
||||
if let Some(envs) = process.env_mut().as_mut() {
|
||||
for env in envs.iter_mut() {
|
||||
match cdh::unseal_env(env).await {
|
||||
Ok(unsealed_env) => *env = unsealed_env.to_string(),
|
||||
Err(e) => {
|
||||
warn!(sl(), "Failed to unseal secret: {}", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mounts = oci
|
||||
.mounts_mut()
|
||||
.as_mut()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain mounts field"))?;
|
||||
|
||||
for m in mounts.iter_mut() {
|
||||
if m.destination().starts_with("/sealed") {
|
||||
info!(
|
||||
sl(),
|
||||
"sealed mount destination: {:?} source: {:?}",
|
||||
m.destination(),
|
||||
m.source()
|
||||
);
|
||||
if let Some(source_str) = m.source().as_ref().and_then(|p| p.to_str()) {
|
||||
cdh::unseal_file(source_str).await?;
|
||||
} else {
|
||||
warn!(sl(), "Failed to unseal: Mount source is None or invalid");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let linux = oci
|
||||
.linux()
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
|
||||
|
||||
if let Some(devices) = linux.devices() {
|
||||
for specdev in devices.iter() {
|
||||
if specdev.path().as_path().to_str() == Some(TRUSTED_IMAGE_STORAGE_DEVICE) {
|
||||
let dev_major_minor = format!("{}:{}", specdev.major(), specdev.minor());
|
||||
let secure_storage_integrity = AGENT_CONFIG.secure_storage_integrity.to_string();
|
||||
info!(
|
||||
sl(),
|
||||
"trusted_store device major:min {}, enable data integrity {}",
|
||||
dev_major_minor,
|
||||
secure_storage_integrity
|
||||
);
|
||||
|
||||
let options = std::collections::HashMap::from([
|
||||
("deviceId".to_string(), dev_major_minor),
|
||||
("encryptType".to_string(), "LUKS".to_string()),
|
||||
("dataIntegrity".to_string(), secure_storage_integrity),
|
||||
]);
|
||||
cdh::secure_mount("BlockDevice", &options, vec![], KATA_IMAGE_WORK_DIR).await?;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(dead_code)]
|
||||
mod tests {
|
||||
@@ -2045,7 +2131,11 @@ mod tests {
|
||||
use crate::{namespace::Namespace, protocols::agent_ttrpc_async::AgentService as _};
|
||||
use nix::mount;
|
||||
use nix::sched::{unshare, CloneFlags};
|
||||
use oci::{Hook, Hooks, Linux, LinuxDeviceCgroup, LinuxNamespace, LinuxResources};
|
||||
use oci::{
|
||||
HookBuilder, HooksBuilder, Linux, LinuxBuilder, LinuxDeviceCgroupBuilder, LinuxNamespace,
|
||||
LinuxNamespaceBuilder, LinuxResourcesBuilder, SpecBuilder,
|
||||
};
|
||||
use oci_spec::runtime::{LinuxNamespaceType, Root};
|
||||
use tempfile::{tempdir, TempDir};
|
||||
use test_utils::{assert_result, skip_if_not_root};
|
||||
use ttrpc::{r#async::TtrpcContext, MessageHeader};
|
||||
@@ -2072,21 +2162,17 @@ mod tests {
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards");
|
||||
|
||||
let root = Root {
|
||||
path: String::from("/"),
|
||||
..Default::default()
|
||||
};
|
||||
let mut root = Root::default();
|
||||
root.set_path(PathBuf::from("/"));
|
||||
|
||||
let linux_resources = LinuxResources {
|
||||
devices: vec![LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: String::new(),
|
||||
major: None,
|
||||
minor: None,
|
||||
access: String::from("rwm"),
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
let linux_resources = LinuxResourcesBuilder::default()
|
||||
.devices(vec![LinuxDeviceCgroupBuilder::default()
|
||||
.allow(true)
|
||||
.access("rwm")
|
||||
.build()
|
||||
.unwrap()])
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let cgroups_path = format!(
|
||||
"/{}/dummycontainer{}",
|
||||
@@ -2094,15 +2180,17 @@ mod tests {
|
||||
since_the_epoch.as_millis()
|
||||
);
|
||||
|
||||
let spec = Spec {
|
||||
linux: Some(Linux {
|
||||
cgroups_path,
|
||||
resources: Some(linux_resources),
|
||||
..Default::default()
|
||||
}),
|
||||
root: Some(root),
|
||||
..Default::default()
|
||||
};
|
||||
let spec = SpecBuilder::default()
|
||||
.linux(
|
||||
LinuxBuilder::default()
|
||||
.cgroups_path(cgroups_path)
|
||||
.resources(linux_resources)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.root(root)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
CreateOpts {
|
||||
cgroup_name: "".to_string(),
|
||||
@@ -2160,18 +2248,18 @@ mod tests {
|
||||
async fn test_append_guest_hooks() {
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
s.hooks = Some(Hooks {
|
||||
prestart: vec![Hook {
|
||||
path: "foo".to_string(),
|
||||
..Default::default()
|
||||
}],
|
||||
..Default::default()
|
||||
});
|
||||
let mut oci = Spec {
|
||||
..Default::default()
|
||||
};
|
||||
let hooks = HooksBuilder::default()
|
||||
.prestart(vec![HookBuilder::default()
|
||||
.path(PathBuf::from("foo"))
|
||||
.build()
|
||||
.unwrap()])
|
||||
.build()
|
||||
.unwrap();
|
||||
s.hooks = Some(hooks);
|
||||
|
||||
let mut oci = Spec::default();
|
||||
append_guest_hooks(&s, &mut oci).unwrap();
|
||||
assert_eq!(s.hooks, oci.hooks);
|
||||
assert_eq!(s.hooks, oci.hooks().clone());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -2182,7 +2270,6 @@ mod tests {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let req = protocols::agent::UpdateInterfaceRequest::default();
|
||||
@@ -2200,7 +2287,6 @@ mod tests {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let req = protocols::agent::UpdateRoutesRequest::default();
|
||||
@@ -2218,7 +2304,6 @@ mod tests {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let req = protocols::agent::AddARPNeighborsRequest::default();
|
||||
@@ -2357,7 +2442,6 @@ mod tests {
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let result = agent_service
|
||||
@@ -2399,30 +2483,32 @@ mod tests {
|
||||
has_linux_in_spec: true,
|
||||
sandbox_pidns_path: Some("sharedpidns"),
|
||||
namespaces: vec![
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEIPC.to_string(),
|
||||
path: "ipcpath".to_string(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEUTS.to_string(),
|
||||
path: "utspath".to_string(),
|
||||
},
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Ipc)
|
||||
.path("ipcpath")
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.path("utspath")
|
||||
.build()
|
||||
.unwrap(),
|
||||
],
|
||||
use_sandbox_pidns: false,
|
||||
result: Ok(()),
|
||||
expected_namespaces: vec![
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEIPC.to_string(),
|
||||
path: "".to_string(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEUTS.to_string(),
|
||||
path: "".to_string(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEPID.to_string(),
|
||||
path: "".to_string(),
|
||||
},
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Ipc)
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Pid)
|
||||
.build()
|
||||
.unwrap(),
|
||||
],
|
||||
}
|
||||
}
|
||||
@@ -2435,45 +2521,39 @@ mod tests {
|
||||
TestData {
|
||||
use_sandbox_pidns: true,
|
||||
expected_namespaces: vec![
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEIPC.to_string(),
|
||||
path: "".to_string(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEUTS.to_string(),
|
||||
path: "".to_string(),
|
||||
},
|
||||
LinuxNamespace {
|
||||
r#type: NSTYPEPID.to_string(),
|
||||
path: "sharedpidns".to_string(),
|
||||
},
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Ipc)
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Uts)
|
||||
.build()
|
||||
.unwrap(),
|
||||
LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Pid)
|
||||
.path("sharedpidns")
|
||||
.build()
|
||||
.unwrap(),
|
||||
],
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
namespaces: vec![],
|
||||
use_sandbox_pidns: true,
|
||||
expected_namespaces: vec![LinuxNamespace {
|
||||
r#type: NSTYPEPID.to_string(),
|
||||
path: "sharedpidns".to_string(),
|
||||
}],
|
||||
expected_namespaces: vec![LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Pid)
|
||||
.path("sharedpidns")
|
||||
.build()
|
||||
.unwrap()],
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
namespaces: vec![],
|
||||
use_sandbox_pidns: false,
|
||||
expected_namespaces: vec![LinuxNamespace {
|
||||
r#type: NSTYPEPID.to_string(),
|
||||
path: "".to_string(),
|
||||
}],
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
namespaces: vec![],
|
||||
sandbox_pidns_path: None,
|
||||
use_sandbox_pidns: true,
|
||||
result: Err(anyhow!(ERR_NO_SANDBOX_PIDNS)),
|
||||
expected_namespaces: vec![],
|
||||
expected_namespaces: vec![LinuxNamespaceBuilder::default()
|
||||
.typ(LinuxNamespaceType::Pid)
|
||||
.build()
|
||||
.unwrap()],
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
@@ -2495,11 +2575,11 @@ mod tests {
|
||||
}
|
||||
|
||||
let mut oci = Spec::default();
|
||||
oci.set_linux(None);
|
||||
if d.has_linux_in_spec {
|
||||
oci.linux = Some(Linux {
|
||||
namespaces: d.namespaces.clone(),
|
||||
..Default::default()
|
||||
});
|
||||
let mut linux = Linux::default();
|
||||
linux.set_namespaces(Some(d.namespaces.clone()));
|
||||
oci.set_linux(Some(linux));
|
||||
}
|
||||
|
||||
let result = update_container_namespaces(&sandbox, &mut oci, d.use_sandbox_pidns);
|
||||
@@ -2507,8 +2587,13 @@ mod tests {
|
||||
let msg = format!("{}, result: {:?}", msg, result);
|
||||
|
||||
assert_result!(d.result, result, msg);
|
||||
if let Some(linux) = oci.linux {
|
||||
assert_eq!(d.expected_namespaces, linux.namespaces, "{}", msg);
|
||||
if let Some(linux) = oci.linux() {
|
||||
assert_eq!(
|
||||
d.expected_namespaces,
|
||||
linux.namespaces().clone().unwrap(),
|
||||
"{}",
|
||||
msg
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2847,7 +2932,6 @@ OtherField:other
|
||||
let agent_service = Box::new(AgentService {
|
||||
sandbox: Arc::new(Mutex::new(sandbox)),
|
||||
init_mode: true,
|
||||
cdh_client: None,
|
||||
});
|
||||
|
||||
let ctx = mk_ttrpc_context();
|
||||
|
||||
@@ -9,7 +9,7 @@ use std::fmt::{Debug, Formatter};
|
||||
use std::fs;
|
||||
use std::os::fd::FromRawFd;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
@@ -24,6 +24,7 @@ use nix::fcntl::{self, OFlag};
|
||||
use nix::sched::{setns, unshare, CloneFlags};
|
||||
use nix::sys::stat::Mode;
|
||||
use oci::{Hook, Hooks};
|
||||
use oci_spec::runtime as oci;
|
||||
use protocols::agent::{OnlineCPUMemRequest, SharedMount};
|
||||
use regex::Regex;
|
||||
use rustjail::cgroups::{self as rustjail_cgroups, DevicesCgroupInfo};
|
||||
@@ -319,16 +320,21 @@ impl Sandbox {
|
||||
let guest_cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
|
||||
|
||||
for (_, ctr) in self.containers.iter() {
|
||||
if let Some(spec) = ctr.config.spec.as_ref() {
|
||||
if let Some(linux) = spec.linux.as_ref() {
|
||||
if let Some(resources) = linux.resources.as_ref() {
|
||||
if let Some(cpus) = resources.cpu.as_ref() {
|
||||
info!(self.logger, "updating {}", ctr.id.as_str());
|
||||
ctr.cgroup_manager
|
||||
.update_cpuset_path(guest_cpuset.as_str(), &cpus.cpus)?;
|
||||
}
|
||||
}
|
||||
match ctr
|
||||
.config
|
||||
.spec
|
||||
.as_ref()
|
||||
.and_then(|spec| spec.linux().as_ref())
|
||||
.and_then(|linux| linux.resources().as_ref())
|
||||
.and_then(|resources| resources.cpu().as_ref())
|
||||
.and_then(|cpus| cpus.cpus().as_ref())
|
||||
{
|
||||
Some(cpu_set) => {
|
||||
info!(self.logger, "updating {}", ctr.id.as_str());
|
||||
ctr.cgroup_manager
|
||||
.update_cpuset_path(guest_cpuset.as_str(), cpu_set)?;
|
||||
}
|
||||
None => continue,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -339,15 +345,16 @@ impl Sandbox {
|
||||
pub fn add_hooks(&mut self, dir: &str) -> Result<()> {
|
||||
let mut hooks = Hooks::default();
|
||||
if let Ok(hook) = self.find_hooks(dir, "prestart") {
|
||||
hooks.prestart = hook;
|
||||
hooks.set_prestart(Some(hook));
|
||||
}
|
||||
if let Ok(hook) = self.find_hooks(dir, "poststart") {
|
||||
hooks.poststart = hook;
|
||||
hooks.set_poststart(Some(hook));
|
||||
}
|
||||
if let Ok(hook) = self.find_hooks(dir, "poststop") {
|
||||
hooks.poststop = hook;
|
||||
hooks.set_poststop(Some(hook));
|
||||
}
|
||||
self.hooks = Some(hooks);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -365,16 +372,13 @@ impl Sandbox {
|
||||
}
|
||||
|
||||
let name = entry.file_name();
|
||||
let hook = Hook {
|
||||
path: Path::new(hook_path)
|
||||
.join(hook_type)
|
||||
.join(&name)
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_owned(),
|
||||
args: vec![name.to_str().unwrap().to_owned(), hook_type.to_owned()],
|
||||
..Default::default()
|
||||
};
|
||||
let mut hook = oci::Hook::default();
|
||||
hook.set_path(PathBuf::from(hook_path).join(hook_type).join(&name));
|
||||
hook.set_args(Some(vec![
|
||||
name.to_str().unwrap().to_owned(),
|
||||
hook_type.to_owned(),
|
||||
]));
|
||||
|
||||
info!(
|
||||
self.logger,
|
||||
"found {} hook {:?} mode {:o}",
|
||||
@@ -382,6 +386,7 @@ impl Sandbox {
|
||||
hook,
|
||||
entry.metadata()?.permissions().mode()
|
||||
);
|
||||
|
||||
hooks.push(hook);
|
||||
}
|
||||
|
||||
@@ -662,7 +667,8 @@ mod tests {
|
||||
use crate::mount::baremount;
|
||||
use anyhow::{anyhow, Error};
|
||||
use nix::mount::MsFlags;
|
||||
use oci::{Linux, LinuxDeviceCgroup, LinuxResources, Root, Spec};
|
||||
use oci::{Linux, LinuxBuilder, LinuxDeviceCgroup, LinuxResources, Root, Spec, SpecBuilder};
|
||||
use oci_spec::runtime as oci;
|
||||
use rustjail::container::LinuxContainer;
|
||||
use rustjail::process::Process;
|
||||
use rustjail::specconv::CreateOpts;
|
||||
@@ -836,21 +842,15 @@ mod tests {
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards");
|
||||
|
||||
let root = Root {
|
||||
path: String::from("/"),
|
||||
..Default::default()
|
||||
};
|
||||
let mut root = Root::default();
|
||||
root.set_path(PathBuf::from("/"));
|
||||
|
||||
let linux_resources = LinuxResources {
|
||||
devices: vec![LinuxDeviceCgroup {
|
||||
allow: true,
|
||||
r#type: String::new(),
|
||||
major: None,
|
||||
minor: None,
|
||||
access: String::from("rwm"),
|
||||
}],
|
||||
..Default::default()
|
||||
};
|
||||
let mut cgroup = LinuxDeviceCgroup::default();
|
||||
cgroup.set_allow(true);
|
||||
cgroup.set_access(Some(String::from("rwm")));
|
||||
|
||||
let mut linux_resources = LinuxResources::default();
|
||||
linux_resources.set_devices(Some(vec![cgroup]));
|
||||
|
||||
let cgroups_path = format!(
|
||||
"/{}/dummycontainer{}",
|
||||
@@ -858,15 +858,17 @@ mod tests {
|
||||
since_the_epoch.as_millis()
|
||||
);
|
||||
|
||||
let spec = Spec {
|
||||
linux: Some(Linux {
|
||||
cgroups_path,
|
||||
resources: Some(linux_resources),
|
||||
..Default::default()
|
||||
}),
|
||||
root: Some(root),
|
||||
..Default::default()
|
||||
};
|
||||
let spec = SpecBuilder::default()
|
||||
.linux(
|
||||
LinuxBuilder::default()
|
||||
.cgroups_path(cgroups_path)
|
||||
.resources(linux_resources)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.root(root)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
CreateOpts {
|
||||
cgroup_name: "".to_string(),
|
||||
@@ -977,9 +979,18 @@ mod tests {
|
||||
|
||||
assert!(s.add_hooks(tmpdir_path).is_ok());
|
||||
assert!(s.hooks.is_some());
|
||||
assert!(s.hooks.as_ref().unwrap().prestart.len() == 1);
|
||||
assert!(s.hooks.as_ref().unwrap().poststart.is_empty());
|
||||
assert!(s.hooks.as_ref().unwrap().poststop.is_empty());
|
||||
assert!(s.hooks.as_ref().unwrap().prestart().clone().unwrap().len() == 1);
|
||||
// As we don't create poststart/xxx, the poststart will be none
|
||||
assert!(s.hooks.as_ref().unwrap().poststart().clone().is_none());
|
||||
// poststop path is created but as the problem of file perm is rejected.
|
||||
assert!(s
|
||||
.hooks
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.poststop()
|
||||
.clone()
|
||||
.unwrap()
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -4,20 +4,25 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::storage::{new_device, StorageContext, StorageHandler};
|
||||
use anyhow::Result;
|
||||
use kata_types::device::DRIVER_WATCHABLE_BIND_TYPE;
|
||||
use kata_types::mount::StorageDevice;
|
||||
use protocols::agent::Storage;
|
||||
use std::iter;
|
||||
use std::sync::Arc;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::storage::{new_device, StorageContext, StorageHandler};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BindWatcherHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for BindWatcherHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_WATCHABLE_BIND_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
|
||||
@@ -11,24 +11,36 @@ use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::{
|
||||
DRIVER_BLK_CCW_TYPE, DRIVER_BLK_MMIO_TYPE, DRIVER_BLK_PCI_TYPE, DRIVER_NVDIMM_TYPE,
|
||||
DRIVER_SCSI_TYPE,
|
||||
};
|
||||
use kata_types::mount::StorageDevice;
|
||||
use protocols::agent::Storage;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::device::{
|
||||
get_scsi_device_name, get_virtio_blk_pci_device_name, get_virtio_mmio_device_name,
|
||||
wait_for_pmem_device,
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::ccw;
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::device::block_device_handler::get_virtio_blk_ccw_device_name;
|
||||
use crate::device::block_device_handler::{
|
||||
get_virtio_blk_mmio_device_name, get_virtio_blk_pci_device_name,
|
||||
};
|
||||
use crate::device::nvdimm_device_handler::wait_for_pmem_device;
|
||||
use crate::device::scsi_device_handler::get_scsi_device_name;
|
||||
use crate::pci;
|
||||
use crate::storage::{common_storage_handler, new_device, StorageContext, StorageHandler};
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::{ccw, device::get_virtio_blk_ccw_device_name};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkMmioHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioBlkMmioHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_BLK_MMIO_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
@@ -36,7 +48,7 @@ impl StorageHandler for VirtioBlkMmioHandler {
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
if !Path::new(&storage.source).exists() {
|
||||
get_virtio_mmio_device_name(ctx.sandbox, &storage.source)
|
||||
get_virtio_blk_mmio_device_name(ctx.sandbox, &storage.source)
|
||||
.await
|
||||
.context("failed to get mmio device name")?;
|
||||
}
|
||||
@@ -50,6 +62,11 @@ pub struct VirtioBlkPciHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioBlkPciHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_BLK_PCI_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
@@ -81,6 +98,11 @@ pub struct VirtioBlkCcwHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioBlkCcwHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_BLK_CCW_TYPE]
|
||||
}
|
||||
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
@@ -111,6 +133,11 @@ pub struct ScsiHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for ScsiHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_SCSI_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
@@ -131,6 +158,11 @@ pub struct PmemHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for PmemHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_NVDIMM_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
|
||||
@@ -20,13 +20,14 @@ use slog::Logger;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::device::{DRIVER_EPHEMERAL_TYPE, FS_TYPE_HUGETLB};
|
||||
use crate::mount::baremount;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::storage::{
|
||||
common_storage_handler, new_device, parse_options, StorageContext, StorageHandler, MODE_SETGID,
|
||||
};
|
||||
use kata_types::device::DRIVER_EPHEMERAL_TYPE;
|
||||
|
||||
const FS_TYPE_HUGETLB: &str = "hugetlbfs";
|
||||
const FS_GID_EQ: &str = "fsgid=";
|
||||
const SYS_FS_HUGEPAGES_PREFIX: &str = "/sys/kernel/mm/hugepages";
|
||||
|
||||
@@ -35,6 +36,11 @@ pub struct EphemeralHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for EphemeralHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_EPHEMERAL_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
|
||||
@@ -8,18 +8,23 @@ use std::fs;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::storage::{common_storage_handler, new_device, StorageContext, StorageHandler};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::device::{DRIVER_9P_TYPE, DRIVER_OVERLAYFS_TYPE, DRIVER_VIRTIOFS_TYPE};
|
||||
use kata_types::mount::StorageDevice;
|
||||
use protocols::agent::Storage;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::storage::{common_storage_handler, new_device, StorageContext, StorageHandler};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct OverlayfsHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for OverlayfsHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_OVERLAYFS_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
@@ -61,6 +66,11 @@ pub struct Virtio9pHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for Virtio9pHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_9P_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
@@ -77,6 +87,11 @@ pub struct VirtioFsHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioFsHandler {
|
||||
#[instrument]
|
||||
fn driver_types(&self) -> &[&str] {
|
||||
&[DRIVER_VIRTIOFS_TYPE]
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user