mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-03-14 16:52:18 +00:00
Compare commits
624 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6c2a2a14fe | ||
|
|
87e6a61cf7 | ||
|
|
8de1f8e19f | ||
|
|
06308fbf96 | ||
|
|
4089d48b45 | ||
|
|
1dadbb337b | ||
|
|
424de1cbfa | ||
|
|
f1b4b95b3f | ||
|
|
135c166b8e | ||
|
|
cf6169de4c | ||
|
|
159bc2713f | ||
|
|
3bd72b9a0d | ||
|
|
0eda83fa52 | ||
|
|
2b3eb00db7 | ||
|
|
7f89d291ee | ||
|
|
69b820a9bd | ||
|
|
981f0a1f0f | ||
|
|
d9c9040474 | ||
|
|
64392c9a87 | ||
|
|
548e4c1667 | ||
|
|
a1eab1248c | ||
|
|
cf8f8bc2cf | ||
|
|
18b378dbb4 | ||
|
|
6be56addd8 | ||
|
|
1ad87faaf4 | ||
|
|
35b7b0379c | ||
|
|
1f0cc490bd | ||
|
|
0ad5dff3b8 | ||
|
|
5e6cecf01a | ||
|
|
9fb0eb4076 | ||
|
|
7fc822896c | ||
|
|
b588c1128c | ||
|
|
7ee7ca2b31 | ||
|
|
3dbbbc88ac | ||
|
|
6ae4951a8c | ||
|
|
e18a425fbb | ||
|
|
e14424011c | ||
|
|
58e8eed807 | ||
|
|
6e784fb6b3 | ||
|
|
b07dfbe213 | ||
|
|
6329c7e290 | ||
|
|
d5d0befc61 | ||
|
|
87b3f6a63c | ||
|
|
53ea36d3f5 | ||
|
|
d0ac25f5c5 | ||
|
|
0ffc9c02a2 | ||
|
|
560e21d8e3 | ||
|
|
87b8164add | ||
|
|
6b3c63570e | ||
|
|
5e5e78cd6f | ||
|
|
4f1d631af1 | ||
|
|
4662a7a942 | ||
|
|
ee15a389de | ||
|
|
b71443a7ae | ||
|
|
a533c974f9 | ||
|
|
d0b69651ce | ||
|
|
8f38dcb850 | ||
|
|
f462c69484 | ||
|
|
622bd4e370 | ||
|
|
72c9f62b70 | ||
|
|
fd33309475 | ||
|
|
e36c2b6249 | ||
|
|
5ad3eba8b1 | ||
|
|
29eb2c02d9 | ||
|
|
bedd536461 | ||
|
|
d788d4af2f | ||
|
|
51c665a09c | ||
|
|
36431de30f | ||
|
|
9ebb91f94c | ||
|
|
f7fd2c1dfc | ||
|
|
c0668ef7eb | ||
|
|
0a33d27c30 | ||
|
|
48465d0547 | ||
|
|
cb7cc1d708 | ||
|
|
6e508ae322 | ||
|
|
91916ed118 | ||
|
|
d1d49675a0 | ||
|
|
3416e104b8 | ||
|
|
495b9825e5 | ||
|
|
da8d4a4584 | ||
|
|
03aceccb1b | ||
|
|
70e3dc5acb | ||
|
|
bbe0db55ac | ||
|
|
bea936b4a8 | ||
|
|
3b212ec8ed | ||
|
|
08b87a4eab | ||
|
|
911ab9c306 | ||
|
|
d70ed93173 | ||
|
|
13943fb81b | ||
|
|
e865359f4e | ||
|
|
47c28923d7 | ||
|
|
212229df83 | ||
|
|
f1573b4747 | ||
|
|
d7873e5251 | ||
|
|
c60adedf99 | ||
|
|
9c02722d46 | ||
|
|
75def881e5 | ||
|
|
5316839165 | ||
|
|
7cc29708a4 | ||
|
|
c3a8ce53e6 | ||
|
|
f64041e686 | ||
|
|
57001431b4 | ||
|
|
b4092023bf | ||
|
|
acd0a75efd | ||
|
|
4e3a1ebcaf | ||
|
|
83b020f4a3 | ||
|
|
f45ee1fe1d | ||
|
|
1f4facdfe9 | ||
|
|
d955f9dcf8 | ||
|
|
039fde2b66 | ||
|
|
eac38d1a05 | ||
|
|
bb4f0a9263 | ||
|
|
af413550dd | ||
|
|
badba8058c | ||
|
|
c888facd24 | ||
|
|
de1fe7bed0 | ||
|
|
412e8554f3 | ||
|
|
d5483aaf7c | ||
|
|
3b1af40b16 | ||
|
|
331e35bc1a | ||
|
|
aee71b16f1 | ||
|
|
0d5a9eaeff | ||
|
|
6dcd164c4e | ||
|
|
cf15777edd | ||
|
|
7f12e27a68 | ||
|
|
67748bde6c | ||
|
|
b6218beef6 | ||
|
|
dfb38245e7 | ||
|
|
4417641803 | ||
|
|
9d4ec379b1 | ||
|
|
a7f01b4456 | ||
|
|
bb644ee3ed | ||
|
|
aef93c7aaf | ||
|
|
15d1b2431c | ||
|
|
61340c3d63 | ||
|
|
6d07df4b15 | ||
|
|
7e553e6707 | ||
|
|
edf51c83c0 | ||
|
|
b64891c5f5 | ||
|
|
0e9a8f22ca | ||
|
|
d8953498c6 | ||
|
|
cfba372f17 | ||
|
|
f218a3104e | ||
|
|
81980388d4 | ||
|
|
624d3c063a | ||
|
|
2a084ecbef | ||
|
|
bc685665c6 | ||
|
|
723c44a7c4 | ||
|
|
d053f848b4 | ||
|
|
32d347aa25 | ||
|
|
1ec85d7485 | ||
|
|
3930a62c41 | ||
|
|
516468815e | ||
|
|
b975c27793 | ||
|
|
6ed57d1e9a | ||
|
|
72cbcf040b | ||
|
|
c52d090522 | ||
|
|
a39fd6c066 | ||
|
|
7031b5db07 | ||
|
|
a89c9cd620 | ||
|
|
30f504e962 | ||
|
|
4746fa3daa | ||
|
|
cc922be5ec | ||
|
|
7bf994827d | ||
|
|
dcdb3b067f | ||
|
|
2defdcc598 | ||
|
|
473b0d3a31 | ||
|
|
0a8208c670 | ||
|
|
03d1fa67b1 | ||
|
|
eb463b38ec | ||
|
|
ebc86091d1 | ||
|
|
a2d731ad26 | ||
|
|
d1a6296221 | ||
|
|
498f7c0549 | ||
|
|
f8a5255cf7 | ||
|
|
86f705d98b | ||
|
|
43fe5d1b90 | ||
|
|
54f6a78500 | ||
|
|
5cdf981a2b | ||
|
|
c932369f42 | ||
|
|
034d7aab87 | ||
|
|
fac8ccf5cd | ||
|
|
ab5f603ffa | ||
|
|
36d53dd2af | ||
|
|
1e8fe131bd | ||
|
|
729b2dd611 | ||
|
|
311671abb5 | ||
|
|
aeaec9dae9 | ||
|
|
e664969862 | ||
|
|
6b48ac63ba | ||
|
|
a0ebfbf18a | ||
|
|
baabfa9f1f | ||
|
|
98ba211a34 | ||
|
|
5333618d70 | ||
|
|
37f34781d1 | ||
|
|
d3c5422379 | ||
|
|
71a9f67781 | ||
|
|
84badd89d7 | ||
|
|
b098960442 | ||
|
|
24bf637835 | ||
|
|
b23c5ed155 | ||
|
|
5a20d8dcaf | ||
|
|
d0abf45ed1 | ||
|
|
553fd79ea9 | ||
|
|
194120b679 | ||
|
|
863283716d | ||
|
|
3c319d8d4c | ||
|
|
5b5caf8908 | ||
|
|
5f5e05a77f | ||
|
|
0bee9f199d | ||
|
|
2df6cb7609 | ||
|
|
4559caf619 | ||
|
|
b365bef570 | ||
|
|
83e866a37d | ||
|
|
bde0e72da5 | ||
|
|
44eb19841a | ||
|
|
d5d9f03e85 | ||
|
|
7144acb2a5 | ||
|
|
66db5b5350 | ||
|
|
7164ced4dc | ||
|
|
c36572418f | ||
|
|
8c03deac3a | ||
|
|
4fbe0a3a53 | ||
|
|
7e1b1949d4 | ||
|
|
6c867d9e86 | ||
|
|
6163c35657 | ||
|
|
fa35afa982 | ||
|
|
b2ff97aa01 | ||
|
|
ebdae7cfdf | ||
|
|
845eeb4d7b | ||
|
|
e2755a47b8 | ||
|
|
1163fc9de2 | ||
|
|
3958a39d07 | ||
|
|
7d1c48c881 | ||
|
|
e719423262 | ||
|
|
1e15369e59 | ||
|
|
5dbe88330f | ||
|
|
d424f3c595 | ||
|
|
cf8899f260 | ||
|
|
3b45060b61 | ||
|
|
9bb8451df5 | ||
|
|
64fdb98704 | ||
|
|
a81ad3b587 | ||
|
|
542012c8be | ||
|
|
5979f3790b | ||
|
|
006ecce49a | ||
|
|
6ad16d4977 | ||
|
|
4e812009f5 | ||
|
|
3230dec950 | ||
|
|
29855ed0c6 | ||
|
|
025596b289 | ||
|
|
e1a69c0c92 | ||
|
|
1a6b27bf6a | ||
|
|
a536d4a7bf | ||
|
|
ad6e53c399 | ||
|
|
7ffc0c1225 | ||
|
|
35d6d86ab5 | ||
|
|
f764248095 | ||
|
|
2205fb9d05 | ||
|
|
11631c681a | ||
|
|
7923de8999 | ||
|
|
e2c31fce23 | ||
|
|
2fc5f0e2e0 | ||
|
|
c0171ea0a7 | ||
|
|
58f9a57c20 | ||
|
|
07694ef3ae | ||
|
|
d8439dba89 | ||
|
|
bda83cee5d | ||
|
|
badff23c71 | ||
|
|
27c02367f9 | ||
|
|
a0a524efc2 | ||
|
|
f5e9985afe | ||
|
|
f910c66d6f | ||
|
|
1b21a46246 | ||
|
|
1a94aad44f | ||
|
|
2d13e2d71c | ||
|
|
b77d69aeee | ||
|
|
743291c6c4 | ||
|
|
a71d35c764 | ||
|
|
6328181762 | ||
|
|
f74b7aba18 | ||
|
|
8933d54428 | ||
|
|
8a584589ff | ||
|
|
21f5b65233 | ||
|
|
69f05cf9e6 | ||
|
|
87d41b3dfa | ||
|
|
28e5e9c86e | ||
|
|
ff8d7e7e41 | ||
|
|
1b111a9aab | ||
|
|
684a6e1a55 | ||
|
|
99711f107f | ||
|
|
7c857d38c1 | ||
|
|
28e171bf73 | ||
|
|
91e1e612c3 | ||
|
|
cddcde1d40 | ||
|
|
7edc7172c0 | ||
|
|
b3901c46d6 | ||
|
|
8a2c201719 | ||
|
|
5a1b5d3672 | ||
|
|
ad413d1646 | ||
|
|
1512560111 | ||
|
|
bee1a628bd | ||
|
|
431c3630f2 | ||
|
|
62e328ca5c | ||
|
|
458e1bc712 | ||
|
|
1cc1c81c9a | ||
|
|
1a5f90dc3f | ||
|
|
51cd99c927 | ||
|
|
3b883bf5a7 | ||
|
|
f9dec11a8f | ||
|
|
53af71cfd0 | ||
|
|
a435d36fe1 | ||
|
|
a79a3a8e1d | ||
|
|
3c32875046 | ||
|
|
08dfaa97aa | ||
|
|
63b8534b41 | ||
|
|
e8f8641988 | ||
|
|
68b9acfd02 | ||
|
|
f89abcbad8 | ||
|
|
c9742d6fa9 | ||
|
|
78522c5802 | ||
|
|
288296dacd | ||
|
|
731e7c763f | ||
|
|
d74639d8c6 | ||
|
|
02cc4fe9db | ||
|
|
607c87ef94 | ||
|
|
40e678164a | ||
|
|
8353aae41a | ||
|
|
6ad5d7112e | ||
|
|
5261e3a60c | ||
|
|
9cc6b5f461 | ||
|
|
9d285c6226 | ||
|
|
87568ed985 | ||
|
|
39192c6084 | ||
|
|
0e157be6f2 | ||
|
|
a274333248 | ||
|
|
69535b8089 | ||
|
|
9e1710674a | ||
|
|
61a8eabf8e | ||
|
|
6222bd9103 | ||
|
|
187a72d381 | ||
|
|
3b957c7ec3 | ||
|
|
b8b73939ea | ||
|
|
0c84270357 | ||
|
|
6520dfee37 | ||
|
|
ff22790617 | ||
|
|
a5d4e33880 | ||
|
|
5e937fa622 | ||
|
|
b0bea47c53 | ||
|
|
73c57b9a19 | ||
|
|
e941b3a094 | ||
|
|
ba8a8fcbf2 | ||
|
|
57b932c127 | ||
|
|
e0bcb39ee7 | ||
|
|
03478ad064 | ||
|
|
c8fcd29d9b | ||
|
|
901c192251 | ||
|
|
5d6199f9bc | ||
|
|
20f1f62a2a | ||
|
|
ede1dae65d | ||
|
|
662f87539e | ||
|
|
f28af98ac6 | ||
|
|
8a22b5f075 | ||
|
|
9792ac49fe | ||
|
|
24564a8499 | ||
|
|
c5a87eed29 | ||
|
|
6daeb08e69 | ||
|
|
3aa6c77a01 | ||
|
|
37641a5430 | ||
|
|
61cbae6c39 | ||
|
|
e35b4cc9fb | ||
|
|
eff98f5795 | ||
|
|
5ae7a74846 | ||
|
|
46f04d762f | ||
|
|
314aec73d4 | ||
|
|
4703434b12 | ||
|
|
350f3f70b7 | ||
|
|
d7f04a64a0 | ||
|
|
bdde6aa948 | ||
|
|
91a0b3b406 | ||
|
|
9ea161577f | ||
|
|
a578266b26 | ||
|
|
3c1044d9d5 | ||
|
|
78262695d2 | ||
|
|
f6197f60b4 | ||
|
|
ce926439f0 | ||
|
|
5385ddc560 | ||
|
|
6177a0db3e | ||
|
|
1093e71cc5 | ||
|
|
a45900324d | ||
|
|
ea198fddcc | ||
|
|
8f7ef41c14 | ||
|
|
6293c17bde | ||
|
|
ad8c96b6c0 | ||
|
|
2213660bf3 | ||
|
|
7f1226ae2b | ||
|
|
cdf04e5018 | ||
|
|
7a3b55ce67 | ||
|
|
c1bd527163 | ||
|
|
6efd684a46 | ||
|
|
5b82268d2c | ||
|
|
ff4cfcd8a2 | ||
|
|
471e23cb12 | ||
|
|
3f309fad01 | ||
|
|
d03685004e | ||
|
|
0749022f8c | ||
|
|
c8ac56569a | ||
|
|
81775ab1b3 | ||
|
|
717f775f30 | ||
|
|
b9f100b391 | ||
|
|
19a86aa072 | ||
|
|
a40ea43413 | ||
|
|
eb82e0d4b9 | ||
|
|
a56f96bb2b | ||
|
|
5ce0b4743f | ||
|
|
068e535b9d | ||
|
|
7204b991e7 | ||
|
|
b8abd6bfee | ||
|
|
0f022d5771 | ||
|
|
344921849c | ||
|
|
507a89bb32 | ||
|
|
ef6c0be984 | ||
|
|
20a523f81b | ||
|
|
4d0b319a8b | ||
|
|
8d1e1d4b0a | ||
|
|
3fa936e492 | ||
|
|
f62a88f179 | ||
|
|
6f552b010c | ||
|
|
4e883fc5be | ||
|
|
b11d618a3f | ||
|
|
56fdeb1247 | ||
|
|
4a5ab38f16 | ||
|
|
d4eba36980 | ||
|
|
b7c9867d60 | ||
|
|
2e9853c761 | ||
|
|
7c4b597816 | ||
|
|
589672d510 | ||
|
|
6a680e241b | ||
|
|
fb4f7a002c | ||
|
|
0ae987973b | ||
|
|
4a207a16f9 | ||
|
|
2c8f83424d | ||
|
|
1fc715bc65 | ||
|
|
e1a4040a6c | ||
|
|
6a59e227b6 | ||
|
|
e91f5edba0 | ||
|
|
8b8aef09af | ||
|
|
56767001cb | ||
|
|
a84773652c | ||
|
|
99ba86a1b2 | ||
|
|
7f3b309997 | ||
|
|
fde22d6bce | ||
|
|
9465a04963 | ||
|
|
df8d144119 | ||
|
|
f90570aef0 | ||
|
|
c3637039f4 | ||
|
|
bc4919f9b2 | ||
|
|
f9e332c6db | ||
|
|
cfd662fee9 | ||
|
|
d36c3395c0 | ||
|
|
b5be8a4a8f | ||
|
|
f2e00c95c0 | ||
|
|
8979552527 | ||
|
|
1bbcbafa67 | ||
|
|
f66c68a2bf | ||
|
|
4dd828414f | ||
|
|
ad47d1b9f8 | ||
|
|
788c562a95 | ||
|
|
6742f3a898 | ||
|
|
5eacecffc3 | ||
|
|
8ed1595f96 | ||
|
|
6123d0db2c | ||
|
|
8653be71b2 | ||
|
|
6a76bf92cb | ||
|
|
72743851c1 | ||
|
|
9f6d4892c8 | ||
|
|
6f73a72839 | ||
|
|
3615d73433 | ||
|
|
34779491e0 | ||
|
|
f3738beaca | ||
|
|
b87ed27416 | ||
|
|
124e390333 | ||
|
|
db77c9a438 | ||
|
|
13715db1f8 | ||
|
|
e149a3c783 | ||
|
|
630634c5df | ||
|
|
228b30f31c | ||
|
|
81f99543ec | ||
|
|
38a7b5325f | ||
|
|
a0fd41fd37 | ||
|
|
ae6e8d2b38 | ||
|
|
309e232553 | ||
|
|
f95a7896b1 | ||
|
|
14025baafe | ||
|
|
b629f6a822 | ||
|
|
59fdd69b85 | ||
|
|
5dddd7c5d1 | ||
|
|
bad3ac84b0 | ||
|
|
87d99a71ec | ||
|
|
545de5042a | ||
|
|
62aa6750ec | ||
|
|
fe07ac662d | ||
|
|
dd422ccb69 | ||
|
|
114542e2ba | ||
|
|
371a118ad0 | ||
|
|
e64edf41e5 | ||
|
|
67a6fff4f7 | ||
|
|
c3f21c36f3 | ||
|
|
01450deb6a | ||
|
|
8430068058 | ||
|
|
bbd3c1b6ab | ||
|
|
7153b51578 | ||
|
|
8c662916ab | ||
|
|
5f7da301fd | ||
|
|
fad801d0fb | ||
|
|
55e2f0955b | ||
|
|
556e663fce | ||
|
|
98c1217093 | ||
|
|
8e7d9926e4 | ||
|
|
e2ee769783 | ||
|
|
2011e3d72a | ||
|
|
8e09e04f48 | ||
|
|
935432c36d | ||
|
|
2ee2cd307b | ||
|
|
88eaff5330 | ||
|
|
c09e268a1b | ||
|
|
25d80fcec2 | ||
|
|
4687f2bf9d | ||
|
|
6a7a323656 | ||
|
|
ac5f5353ba | ||
|
|
950b89ffac | ||
|
|
7729d82e6e | ||
|
|
26d525fcf3 | ||
|
|
b4852c8544 | ||
|
|
8ccc1e5c93 | ||
|
|
f50d2b0664 | ||
|
|
687596ae41 | ||
|
|
620b945975 | ||
|
|
d50f3888af | ||
|
|
ce14f26d82 | ||
|
|
419f8a5db7 | ||
|
|
6c91af0a26 | ||
|
|
5a9829996c | ||
|
|
59f4731bb2 | ||
|
|
468f017e21 | ||
|
|
b9535fb187 | ||
|
|
7a854507cc | ||
|
|
cfc90fad84 | ||
|
|
64f013f3bf | ||
|
|
8f4b1df9cf | ||
|
|
9b3dc572ae | ||
|
|
2c8dfde168 | ||
|
|
b9b8ccca0c | ||
|
|
150e54d02b | ||
|
|
3ae02f9202 | ||
|
|
22d4e4c5a6 | ||
|
|
a864d0e349 | ||
|
|
788d2a254e | ||
|
|
e8917d7321 | ||
|
|
8db43eae44 | ||
|
|
3fed61e7a4 | ||
|
|
b34dda4ca6 | ||
|
|
6787c63900 | ||
|
|
6e5679bc46 | ||
|
|
62080f83cb | ||
|
|
02d99caf6d | ||
|
|
9824206820 | ||
|
|
61e4032b08 | ||
|
|
a24dbdc781 | ||
|
|
dacdf7c282 | ||
|
|
f5d1957174 | ||
|
|
304b9d9146 | ||
|
|
eed3c7c046 | ||
|
|
7319cff77a | ||
|
|
2a957d41c8 | ||
|
|
75a294b74b | ||
|
|
b69cdb5c21 | ||
|
|
ee17097e88 | ||
|
|
f63673838b | ||
|
|
6924d14df5 | ||
|
|
9e048c8ee0 | ||
|
|
2935aeb7d7 | ||
|
|
02031e29aa | ||
|
|
107fae033b | ||
|
|
8c75c2f4bd | ||
|
|
49723a9ecf | ||
|
|
dc67d902eb | ||
|
|
3f38f75918 | ||
|
|
438fe3b829 | ||
|
|
bd08d745f4 | ||
|
|
3ffd48bc16 | ||
|
|
7f961461bd | ||
|
|
bb2ef4ca34 | ||
|
|
063f7aa7cb | ||
|
|
b6282f7053 | ||
|
|
1af03b9b32 | ||
|
|
4cecd62370 | ||
|
|
c4094f62c9 | ||
|
|
b9a63d66a4 | ||
|
|
1ab99bd6bb | ||
|
|
f6a51a8a78 | ||
|
|
4e352a73ee | ||
|
|
89b622dcb8 | ||
|
|
8c9d08e872 | ||
|
|
283f809dda | ||
|
|
a65291ad72 | ||
|
|
46b81dd7d2 | ||
|
|
c4771d9e89 | ||
|
|
a88212e2c5 | ||
|
|
883b4db380 | ||
|
|
6822029c81 | ||
|
|
ae55893deb | ||
|
|
ce54e43ebe | ||
|
|
ceb5c69ee8 | ||
|
|
fbc2a91ab5 | ||
|
|
307cfc8f7a | ||
|
|
aedc586e14 | ||
|
|
310e069f73 | ||
|
|
ed23b47c71 | ||
|
|
2be342023b | ||
|
|
6ca34f949e | ||
|
|
6c68924230 | ||
|
|
f72cb2fc12 | ||
|
|
07810bf71f |
4
.github/workflows/PR-wip-checks.yaml
vendored
4
.github/workflows/PR-wip-checks.yaml
vendored
@@ -9,6 +9,10 @@ on:
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
pr_wip_check:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
4
.github/workflows/add-backport-label.yaml
vendored
4
.github/workflows/add-backport-label.yaml
vendored
@@ -10,6 +10,10 @@ on:
|
||||
- labeled
|
||||
- unlabeled
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-issues:
|
||||
if: ${{ github.event.label.name != 'auto-backport' }}
|
||||
|
||||
4
.github/workflows/add-issues-to-project.yaml
vendored
4
.github/workflows/add-issues-to-project.yaml
vendored
@@ -11,6 +11,10 @@ on:
|
||||
- opened
|
||||
- reopened
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
add-new-issues-to-backlog:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
4
.github/workflows/add-pr-sizing-label.yaml
vendored
4
.github/workflows/add-pr-sizing-label.yaml
vendored
@@ -12,6 +12,10 @@ on:
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
add-pr-size-label:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
4
.github/workflows/auto-backport.yaml
vendored
4
.github/workflows/auto-backport.yaml
vendored
@@ -2,6 +2,10 @@ on:
|
||||
pull_request_target:
|
||||
types: ["labeled", "closed"]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
backport:
|
||||
name: Backport PR
|
||||
|
||||
@@ -99,7 +99,7 @@ jobs:
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
|
||||
@@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for arm64
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
stage:
|
||||
required: false
|
||||
type: string
|
||||
default: test
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
@@ -29,6 +33,8 @@ jobs:
|
||||
- rootfs-initrd
|
||||
- shim-v2
|
||||
- virtiofsd
|
||||
stage:
|
||||
- ${{ inputs.stage }}
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
@@ -83,7 +89,7 @@ jobs:
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
|
||||
@@ -2,6 +2,10 @@ name: CI | Build kata-static tarball for s390x
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
stage:
|
||||
required: false
|
||||
type: string
|
||||
default: test
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
@@ -25,6 +29,8 @@ jobs:
|
||||
- rootfs-initrd
|
||||
- shim-v2
|
||||
- virtiofsd
|
||||
stage:
|
||||
- ${{ inputs.stage }}
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
@@ -80,7 +86,7 @@ jobs:
|
||||
path: kata-artifacts
|
||||
- name: merge-artifacts
|
||||
run: |
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts
|
||||
./tools/packaging/kata-deploy/local-build/kata-deploy-merge-builds.sh kata-artifacts versions.yaml
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
|
||||
5
.github/workflows/cargo-deny-runner.yaml
vendored
5
.github/workflows/cargo-deny-runner.yaml
vendored
@@ -7,6 +7,11 @@ on:
|
||||
- reopened
|
||||
- synchronize
|
||||
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
cargo-deny-runner:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -14,26 +14,26 @@ jobs:
|
||||
measured_rootfs:
|
||||
- no
|
||||
asset:
|
||||
- cc-cloud-hypervisor
|
||||
- cc-qemu
|
||||
- cc-virtiofsd
|
||||
- cc-sev-kernel
|
||||
- cc-sev-ovmf
|
||||
- cc-x86_64-ovmf
|
||||
- cc-snp-qemu
|
||||
- cc-sev-rootfs-initrd
|
||||
- cc-tdx-qemu
|
||||
- cloud-hypervisor
|
||||
- qemu
|
||||
- virtiofsd
|
||||
- kernel-sev
|
||||
- ovmf-sev
|
||||
- ovmf
|
||||
- qemu-snp-experimental
|
||||
- qemu-tdx-experimental
|
||||
- rootfs-initrd-sev
|
||||
- cc-tdx-td-shim
|
||||
- cc-tdx-tdvf
|
||||
- tdvf
|
||||
include:
|
||||
- measured_rootfs: yes
|
||||
asset: cc-kernel
|
||||
asset: kernel
|
||||
- measured_rootfs: yes
|
||||
asset: cc-tdx-kernel
|
||||
asset: kernel-tdx-experimental
|
||||
- measured_rootfs: yes
|
||||
asset: cc-rootfs-image
|
||||
- measured_rootfs: yes
|
||||
asset: cc-tdx-rootfs-image
|
||||
asset: rootfs-image-tdx
|
||||
steps:
|
||||
- name: Login to Kata Containers quay.io
|
||||
uses: docker/login-action@v2
|
||||
@@ -47,7 +47,7 @@ jobs:
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
USE_CACHE="no" make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
@@ -108,7 +108,7 @@ jobs:
|
||||
|
||||
- name: Build cc-shim-v2
|
||||
run: |
|
||||
make cc-shim-v2-tarball
|
||||
USE_CACHE="no" make cc-shim-v2-tarball
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
|
||||
@@ -14,13 +14,12 @@ jobs:
|
||||
measured_rootfs:
|
||||
- no
|
||||
asset:
|
||||
- cc-qemu
|
||||
- qemu
|
||||
- cc-rootfs-initrd
|
||||
- cc-se-image
|
||||
- cc-virtiofsd
|
||||
- virtiofsd
|
||||
include:
|
||||
- measured_rootfs: yes
|
||||
asset: cc-kernel
|
||||
asset: kernel
|
||||
- measured_rootfs: yes
|
||||
asset: cc-rootfs-image
|
||||
steps:
|
||||
@@ -39,16 +38,9 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0 # This is needed in order to keep the commit ids history
|
||||
|
||||
- name: Place a host key document
|
||||
run: |
|
||||
mkdir -p "host-key-document"
|
||||
cp "${CI_HKD_PATH}" "host-key-document"
|
||||
env:
|
||||
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
|
||||
|
||||
- name: Build ${{ matrix.asset }}
|
||||
run: |
|
||||
make "${KATA_ASSET}-tarball"
|
||||
USE_CACHE="no" make "${KATA_ASSET}-tarball"
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
@@ -58,7 +50,6 @@ jobs:
|
||||
TAR_OUTPUT: ${{ matrix.asset }}.tar.gz
|
||||
PUSH_TO_REGISTRY: yes
|
||||
MEASURED_ROOTFS: ${{ matrix.measured_rootfs }}
|
||||
HKD_PATH: "host-key-document"
|
||||
|
||||
- name: store-artifact ${{ matrix.asset }}
|
||||
uses: actions/upload-artifact@v3
|
||||
@@ -101,7 +92,7 @@ jobs:
|
||||
|
||||
- name: Build cc-shim-v2
|
||||
run: |
|
||||
make cc-shim-v2-tarball
|
||||
USE_CACHE="no" make cc-shim-v2-tarball
|
||||
build_dir=$(readlink -f build)
|
||||
# store-artifact does not work with symlink
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
@@ -117,9 +108,54 @@ jobs:
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
build-asset-cc-se-image:
|
||||
runs-on: s390x
|
||||
needs: build-asset
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
sudo chown -R $USER:$USER $GITHUB_WORKSPACE
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-artifacts-s390x
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Place a host key document
|
||||
run: |
|
||||
mkdir -p "host-key-document"
|
||||
cp "${CI_HKD_PATH}" "host-key-document"
|
||||
env:
|
||||
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
|
||||
|
||||
- name: Build cc-se-image
|
||||
run: |
|
||||
base_dir=tools/packaging/kata-deploy/local-build/
|
||||
cp -r kata-artifacts ${base_dir}/build
|
||||
# Skip building dependant artifacts of cc-se-image-tarball
|
||||
# because we already have them from the previous build
|
||||
sed -i 's/\(^cc-se-image-tarball:\).*/\1/g' ${base_dir}/Makefile
|
||||
USE_CACHE="no" make cc-se-image-tarball
|
||||
build_dir=$(readlink -f build)
|
||||
sudo cp -r "${build_dir}" "kata-build"
|
||||
sudo chown -R $(id -u):$(id -g) "kata-build"
|
||||
env:
|
||||
HKD_PATH: "host-key-document"
|
||||
|
||||
- name: store-artifact cc-se-image
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: kata-artifacts-s390x
|
||||
path: kata-build/kata-static-cc-se-image.tar.xz
|
||||
retention-days: 1
|
||||
if-no-files-found: error
|
||||
|
||||
create-kata-tarball:
|
||||
runs-on: s390x
|
||||
needs: [build-asset, build-asset-cc-shim-v2]
|
||||
needs: [build-asset, build-asset-cc-shim-v2, build-asset-cc-se-image]
|
||||
steps:
|
||||
- name: Adjust a permission for repo
|
||||
run: |
|
||||
|
||||
26
.github/workflows/cc-payload-amd64.yaml
vendored
26
.github/workflows/cc-payload-amd64.yaml
vendored
@@ -14,26 +14,26 @@ jobs:
|
||||
measured_rootfs:
|
||||
- no
|
||||
asset:
|
||||
- cc-cloud-hypervisor
|
||||
- cc-qemu
|
||||
- cc-virtiofsd
|
||||
- cc-sev-kernel
|
||||
- cc-sev-ovmf
|
||||
- cc-x86_64-ovmf
|
||||
- cc-snp-qemu
|
||||
- cc-sev-rootfs-initrd
|
||||
- cc-tdx-qemu
|
||||
- cloud-hypervisor
|
||||
- qemu
|
||||
- virtiofsd
|
||||
- kernel-sev
|
||||
- ovmf-sev
|
||||
- ovmf
|
||||
- qemu-snp-experimental
|
||||
- qemu-tdx-experimental
|
||||
- rootfs-initrd-sev
|
||||
- cc-tdx-td-shim
|
||||
- cc-tdx-tdvf
|
||||
- tdvf
|
||||
include:
|
||||
- measured_rootfs: yes
|
||||
asset: cc-kernel
|
||||
asset: kernel
|
||||
- measured_rootfs: yes
|
||||
asset: cc-tdx-kernel
|
||||
asset: kernel-tdx-experimental
|
||||
- measured_rootfs: yes
|
||||
asset: cc-rootfs-image
|
||||
- measured_rootfs: yes
|
||||
asset: cc-tdx-rootfs-image
|
||||
asset: rootfs-image-tdx
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Build ${{ matrix.asset }}
|
||||
|
||||
6
.github/workflows/cc-payload-s390x.yaml
vendored
6
.github/workflows/cc-payload-s390x.yaml
vendored
@@ -14,11 +14,11 @@ jobs:
|
||||
measured_rootfs:
|
||||
- no
|
||||
asset:
|
||||
- cc-qemu
|
||||
- cc-virtiofsd
|
||||
- qemu
|
||||
- virtiofsd
|
||||
include:
|
||||
- measured_rootfs: yes
|
||||
asset: cc-kernel
|
||||
asset: kernel
|
||||
- measured_rootfs: yes
|
||||
asset: cc-rootfs-image
|
||||
steps:
|
||||
|
||||
4
.github/workflows/ci-nightly.yaml
vendored
4
.github/workflows/ci-nightly.yaml
vendored
@@ -4,6 +4,10 @@ on:
|
||||
- cron: '0 0 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
kata-containers-ci-on-push:
|
||||
uses: ./.github/workflows/ci.yaml
|
||||
|
||||
6
.github/workflows/ci-on-push.yaml
vendored
6
.github/workflows/ci-on-push.yaml
vendored
@@ -3,6 +3,7 @@ on:
|
||||
pull_request_target:
|
||||
branches:
|
||||
- 'main'
|
||||
- 'stable-*'
|
||||
types:
|
||||
# Adding 'labeled' to the list of activity types that trigger this event
|
||||
# (default: opened, synchronize, reopened) so that we can run this
|
||||
@@ -14,6 +15,11 @@ on:
|
||||
- labeled
|
||||
paths-ignore:
|
||||
- 'docs/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
kata-containers-ci-on-push:
|
||||
if: ${{ contains(github.event.pull_request.labels.*.name, 'ok-to-test') }}
|
||||
|
||||
60
.github/workflows/ci.yaml
vendored
60
.github/workflows/ci.yaml
vendored
@@ -30,6 +30,36 @@ jobs:
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
secrets: inherit
|
||||
|
||||
build-and-publish-tee-confidential-unencrypted-image:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Login to Kata Containers ghcr.io
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Docker build and push
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
tags: ghcr.io/kata-containers/test-images:unencrypted-${{ inputs.pr-number }}
|
||||
push: true
|
||||
context: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/
|
||||
platforms: linux/amd64, linux/s390x
|
||||
file: tests/integration/kubernetes/runtimeclass_workloads/confidential/unencrypted/Dockerfile
|
||||
|
||||
run-k8s-tests-on-aks:
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
uses: ./.github/workflows/run-k8s-tests-on-aks.yaml
|
||||
@@ -42,31 +72,34 @@ jobs:
|
||||
secrets: inherit
|
||||
|
||||
run-k8s-tests-on-sev:
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
|
||||
uses: ./.github/workflows/run-k8s-tests-on-sev.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
|
||||
run-k8s-tests-on-snp:
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
|
||||
uses: ./.github/workflows/run-k8s-tests-on-snp.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
|
||||
run-k8s-tests-on-tdx:
|
||||
needs: publish-kata-deploy-payload-amd64
|
||||
needs: [publish-kata-deploy-payload-amd64, build-and-publish-tee-confidential-unencrypted-image]
|
||||
uses: ./.github/workflows/run-k8s-tests-on-tdx.yaml
|
||||
with:
|
||||
registry: ghcr.io
|
||||
repo: ${{ github.repository_owner }}/kata-deploy-ci
|
||||
tag: ${{ inputs.tag }}-amd64
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
pr-number: ${{ inputs.pr-number }}
|
||||
|
||||
run-metrics-tests:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
@@ -74,3 +107,24 @@ jobs:
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
|
||||
run-cri-containerd-tests:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/run-cri-containerd-tests.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
|
||||
run-nydus-tests:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/run-nydus-tests.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
|
||||
run-vfio-tests:
|
||||
needs: build-kata-static-tarball-amd64
|
||||
uses: ./.github/workflows/run-vfio-tests.yaml
|
||||
with:
|
||||
tarball-suffix: -${{ inputs.tag }}
|
||||
commit-hash: ${{ inputs.commit-hash }}
|
||||
|
||||
4
.github/workflows/commit-message-check.yaml
vendored
4
.github/workflows/commit-message-check.yaml
vendored
@@ -6,6 +6,10 @@ on:
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
error_msg: |+
|
||||
See the document below for help on formatting commits for the project.
|
||||
|
||||
5
.github/workflows/darwin-tests.yaml
vendored
5
.github/workflows/darwin-tests.yaml
vendored
@@ -6,6 +6,11 @@ on:
|
||||
- reopened
|
||||
- synchronize
|
||||
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
name: Darwin tests
|
||||
jobs:
|
||||
test:
|
||||
|
||||
36
.github/workflows/kata-runtime-classes-sync.yaml
vendored
Normal file
36
.github/workflows/kata-runtime-classes-sync.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
kata-deploy-runtime-classes-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
- name: Ensure the split out runtime classes match the all-in-one file
|
||||
run: |
|
||||
pushd tools/packaging/kata-deploy/runtimeclasses/
|
||||
echo "::group::Combine runtime classes"
|
||||
for runtimeClass in `find . -type f \( -name "*.yaml" -and -not -name "kata-runtimeClasses.yaml" \) | sort`; do
|
||||
echo "Adding ${runtimeClass} to the resultingRuntimeClasses.yaml"
|
||||
cat ${runtimeClass} >> resultingRuntimeClasses.yaml;
|
||||
done
|
||||
echo "::endgroup::"
|
||||
echo "::group::Displaying the content of resultingRuntimeClasses.yaml"
|
||||
cat resultingRuntimeClasses.yaml
|
||||
echo "::endgroup::"
|
||||
echo ""
|
||||
echo "::group::Displaying the content of kata-runtimeClasses.yaml"
|
||||
cat kata-runtimeClasses.yaml
|
||||
echo "::endgroup::"
|
||||
echo ""
|
||||
diff resultingRuntimeClasses.yaml kata-runtimeClasses.yaml
|
||||
4
.github/workflows/payload-after-push.yaml
vendored
4
.github/workflows/payload-after-push.yaml
vendored
@@ -5,6 +5,10 @@ on:
|
||||
- main
|
||||
- stable-*
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-assets-amd64:
|
||||
uses: ./.github/workflows/build-kata-static-tarball-amd64.yaml
|
||||
|
||||
19
.github/workflows/release.yaml
vendored
19
.github/workflows/release.yaml
vendored
@@ -4,6 +4,10 @@ on:
|
||||
tags:
|
||||
- '[0-9]+.[0-9]+.[0-9]+*'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-and-push-assets-amd64:
|
||||
uses: ./.github/workflows/release-amd64.yaml
|
||||
@@ -117,6 +121,21 @@ jobs:
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "${tarball}" "${tag}"
|
||||
popd
|
||||
|
||||
upload-versions-yaml:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: upload versions.yaml
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GIT_UPLOAD_TOKEN }}
|
||||
run: |
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
pushd $GITHUB_WORKSPACE
|
||||
versions_file="kata-containers-$tag-versions.yaml"
|
||||
cp versions.yaml ${versions_file}
|
||||
hub release edit -m "" -a "${versions_file}" "${tag}"
|
||||
popd
|
||||
|
||||
upload-cargo-vendored-tarball:
|
||||
needs: upload-multi-arch-static-tarball
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -15,6 +15,10 @@ on:
|
||||
branches:
|
||||
- main
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-pr-porting-labels:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
42
.github/workflows/run-cri-containerd-tests.yaml
vendored
Normal file
42
.github/workflows/run-cri-containerd-tests.yaml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: CI | Run cri-containerd tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
run-cri-containerd:
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'qemu']
|
||||
runs-on: garm-ubuntu-2204
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run cri-containerd tests
|
||||
run: bash tests/integration/cri-containerd/gha-run.sh run
|
||||
22
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
22
.github/workflows/run-k8s-tests-on-aks.yaml
vendored
@@ -40,37 +40,43 @@ jobs:
|
||||
GH_PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HOST_OS: ${{ matrix.host_os }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Download Azure CLI
|
||||
run: bash tests/integration/gha-run.sh install-azure-cli
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
|
||||
|
||||
- name: Log into the Azure account
|
||||
run: bash tests/integration/gha-run.sh login-azure
|
||||
run: bash tests/integration/kubernetes/gha-run.sh login-azure
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
|
||||
- name: Create AKS cluster
|
||||
run: bash tests/integration/gha-run.sh create-cluster
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh create-cluster
|
||||
|
||||
- name: Install `bats`
|
||||
run: bash tests/integration/gha-run.sh install-bats
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-bats
|
||||
|
||||
- name: Install `kubectl`
|
||||
run: bash tests/integration/gha-run.sh install-kubectl
|
||||
run: bash tests/integration/kubernetes/gha-run.sh install-kubectl
|
||||
|
||||
- name: Download credentials for the Kubernetes CLI to use them
|
||||
run: bash tests/integration/gha-run.sh get-cluster-credentials
|
||||
run: bash tests/integration/kubernetes/gha-run.sh get-cluster-credentials
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-aks
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 60
|
||||
run: bash tests/integration/gha-run.sh run-tests-aks
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete AKS cluster
|
||||
if: always()
|
||||
run: bash tests/integration/gha-run.sh delete-cluster
|
||||
run: bash tests/integration/kubernetes/gha-run.sh delete-cluster
|
||||
|
||||
13
.github/workflows/run-k8s-tests-on-sev.yaml
vendored
13
.github/workflows/run-k8s-tests-on-sev.yaml
vendored
@@ -11,6 +11,9 @@ on:
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
@@ -27,17 +30,23 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBECONFIG: /home/kata/.kube/config
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-sev
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/gha-run.sh run-tests-sev
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/gha-run.sh cleanup-sev
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-sev
|
||||
|
||||
15
.github/workflows/run-k8s-tests-on-snp.yaml
vendored
15
.github/workflows/run-k8s-tests-on-snp.yaml
vendored
@@ -11,6 +11,9 @@ on:
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
@@ -27,17 +30,23 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBECONFIG: /home/kata/.kube/config
|
||||
USING_NFD: "false"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-snp
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/gha-run.sh run-tests-snp
|
||||
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/gha-run.sh cleanup-snp
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-snp
|
||||
|
||||
16
.github/workflows/run-k8s-tests-on-tdx.yaml
vendored
16
.github/workflows/run-k8s-tests-on-tdx.yaml
vendored
@@ -11,6 +11,9 @@ on:
|
||||
tag:
|
||||
required: true
|
||||
type: string
|
||||
pr-number:
|
||||
required: true
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
@@ -27,17 +30,22 @@ jobs:
|
||||
DOCKER_REGISTRY: ${{ inputs.registry }}
|
||||
DOCKER_REPO: ${{ inputs.repo }}
|
||||
DOCKER_TAG: ${{ inputs.tag }}
|
||||
PR_NUMBER: ${{ inputs.pr-number }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
KUBECONFIG: /etc/rancher/k3s/k3s.yaml
|
||||
USING_NFD: "true"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Deploy Kata
|
||||
timeout-minutes: 10
|
||||
run: bash tests/integration/kubernetes/gha-run.sh deploy-kata-tdx
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 30
|
||||
run: bash tests/integration/gha-run.sh run-tests-tdx
|
||||
|
||||
run: bash tests/integration/kubernetes/gha-run.sh run-tests
|
||||
|
||||
- name: Delete kata-deploy
|
||||
if: always()
|
||||
run: bash tests/integration/gha-run.sh cleanup-tdx
|
||||
run: bash tests/integration/kubernetes/gha-run.sh cleanup-tdx
|
||||
|
||||
35
.github/workflows/run-metrics.yaml
vendored
35
.github/workflows/run-metrics.yaml
vendored
@@ -10,16 +10,11 @@ on:
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
run-metrics:
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
vmm: ['clh', 'qemu']
|
||||
max-parallel: 1
|
||||
setup-kata:
|
||||
name: Kata Setup
|
||||
runs-on: metrics
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@@ -34,6 +29,21 @@ jobs:
|
||||
- name: Install kata
|
||||
run: bash tests/metrics/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
run-metrics:
|
||||
needs: setup-kata
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
vmm: ['clh', 'qemu']
|
||||
max-parallel: 1
|
||||
runs-on: metrics
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- name: enabling the hypervisor
|
||||
run: bash tests/metrics/gha-run.sh enabling-hypervisor
|
||||
|
||||
- name: run launch times test
|
||||
run: bash tests/metrics/gha-run.sh run-test-launchtimes
|
||||
|
||||
@@ -46,9 +56,18 @@ jobs:
|
||||
- name: run blogbench test
|
||||
run: bash tests/metrics/gha-run.sh run-test-blogbench
|
||||
|
||||
- name: run tensorflow test
|
||||
run: bash tests/metrics/gha-run.sh run-test-tensorflow
|
||||
|
||||
- name: run fio test
|
||||
run: bash tests/metrics/gha-run.sh run-test-fio
|
||||
|
||||
- name: run iperf test
|
||||
run: bash tests/metrics/gha-run.sh run-test-iperf
|
||||
|
||||
- name: make metrics tarball ${{ matrix.vmm }}
|
||||
run: bash tests/metrics/gha-run.sh make-tarball-results
|
||||
|
||||
|
||||
- name: archive metrics results ${{ matrix.vmm }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
|
||||
42
.github/workflows/run-nydus-tests.yaml
vendored
Normal file
42
.github/workflows/run-nydus-tests.yaml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: CI | Run nydus tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
run-nydus:
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
containerd_version: ['lts', 'active']
|
||||
vmm: ['clh', 'qemu', 'dragonball']
|
||||
runs-on: garm-ubuntu-2204
|
||||
env:
|
||||
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/integration/nydus/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Install kata
|
||||
run: bash tests/integration/nydus/gha-run.sh install-kata kata-artifacts
|
||||
|
||||
- name: Run nydus tests
|
||||
run: bash tests/integration/nydus/gha-run.sh run
|
||||
37
.github/workflows/run-vfio-tests.yaml
vendored
Normal file
37
.github/workflows/run-vfio-tests.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: CI | Run vfio tests
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tarball-suffix:
|
||||
required: false
|
||||
type: string
|
||||
commit-hash:
|
||||
required: false
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
run-vfio:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
vmm: ['clh', 'qemu']
|
||||
runs-on: garm-ubuntu-2204
|
||||
env:
|
||||
GOPATH: ${{ github.workspace }}
|
||||
KATA_HYPERVISOR: ${{ matrix.vmm }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ inputs.commit-hash }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: bash tests/functional/vfio/gha-run.sh install-dependencies
|
||||
|
||||
- name: get-kata-tarball
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: kata-static-tarball-amd64${{ inputs.tarball-suffix }}
|
||||
path: kata-artifacts
|
||||
|
||||
- name: Run vfio tests
|
||||
run: bash tests/functional/vfio/gha-run.sh run
|
||||
@@ -7,10 +7,14 @@ on:
|
||||
- synchronize
|
||||
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.jpeg', '**.svg', '/docs/**' ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
name: Static checks dragonball
|
||||
jobs:
|
||||
test-dragonball:
|
||||
runs-on: self-hosted
|
||||
runs-on: dragonball
|
||||
env:
|
||||
RUST_BACKTRACE: "1"
|
||||
steps:
|
||||
|
||||
16
.github/workflows/static-checks.yaml
vendored
16
.github/workflows/static-checks.yaml
vendored
@@ -6,10 +6,14 @@ on:
|
||||
- reopened
|
||||
- synchronize
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
name: Static checks
|
||||
jobs:
|
||||
static-checks:
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: garm-ubuntu-2004
|
||||
strategy:
|
||||
matrix:
|
||||
cmd:
|
||||
@@ -32,6 +36,14 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./src/github.com/${{ github.repository }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
haveged \
|
||||
libdevmapper-dev \
|
||||
clang
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
@@ -78,4 +90,6 @@ jobs:
|
||||
- name: Run check
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'force-skip-ci') }}
|
||||
run: |
|
||||
export PATH=$PATH:"$HOME/.cargo/bin"
|
||||
export XDG_RUNTIME_DIR=$(mktemp -d /tmp/kata-tests-$USER.XXX | tee >(xargs chmod 0700))
|
||||
cd ${GOPATH}/src/github.com/${{ github.repository }} && ${{ matrix.cmd }}
|
||||
|
||||
7
Makefile
7
Makefile
@@ -24,6 +24,10 @@ TOOLS += trace-forwarder
|
||||
|
||||
STANDARD_TARGETS = build check clean install static-checks-build test vendor
|
||||
|
||||
# Variables for the build-and-publish-kata-debug target
|
||||
KATA_DEBUG_REGISTRY ?= ""
|
||||
KATA_DEBUG_TAG ?= ""
|
||||
|
||||
default: all
|
||||
|
||||
include utils.mk
|
||||
@@ -44,6 +48,9 @@ static-checks: static-checks-build
|
||||
docs-url-alive-check:
|
||||
bash ci/docs-url-alive-check.sh
|
||||
|
||||
build-and-publish-kata-debug:
|
||||
bash tools/packaging/kata-debug/kata-debug-build-and-upload-payload.sh ${KATA_DEBUG_REGISTRY} ${KATA_DEBUG_TAG}
|
||||
|
||||
.PHONY: \
|
||||
all \
|
||||
kata-tarball \
|
||||
|
||||
@@ -134,6 +134,7 @@ The table below lists the remaining parts of the project:
|
||||
| [packaging](tools/packaging) | infrastructure | Scripts and metadata for producing packaged binaries<br/>(components, hypervisors, kernel and rootfs). |
|
||||
| [kernel](https://www.kernel.org) | kernel | Linux kernel used by the hypervisor to boot the guest image. Patches are stored [here](tools/packaging/kernel). |
|
||||
| [osbuilder](tools/osbuilder) | infrastructure | Tool to create "mini O/S" rootfs and initrd images and kernel for the hypervisor. |
|
||||
| [kata-debug](tools/packaging/kata-debug/README.md) | infrastructure | Utility tool to gather Kata Containers debug information from Kubernetes clusters. |
|
||||
| [`agent-ctl`](src/tools/agent-ctl) | utility | Tool that provides low-level access for testing the agent. |
|
||||
| [`kata-ctl`](src/tools/kata-ctl) | utility | Tool that provides advanced commands and debug facilities. |
|
||||
| [`log-parser-rs`](src/tools/log-parser-rs) | utility | Tool that aid in analyzing logs from the kata runtime. |
|
||||
|
||||
@@ -88,7 +88,8 @@ build_and_install_libseccomp() {
|
||||
curl -sLO "${libseccomp_tarball_url}"
|
||||
tar -xf "${libseccomp_tarball}"
|
||||
pushd "libseccomp-${libseccomp_version}"
|
||||
./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
|
||||
[ "${arch}" == $(uname -m) ] && cc_name="" || cc_name="${arch}-linux-gnu-gcc"
|
||||
CC=${cc_name} ./configure --prefix="${libseccomp_install_dir}" CFLAGS="${cflags}" --enable-static --host="${arch}"
|
||||
make
|
||||
make install
|
||||
popd
|
||||
|
||||
@@ -14,6 +14,7 @@ Kata Containers design documents:
|
||||
- [`Inotify` support](inotify.md)
|
||||
- [`Hooks` support](hooks-handling.md)
|
||||
- [Metrics(Kata 2.0)](kata-2-0-metrics.md)
|
||||
- [Metrics in Rust Runtime(runtime-rs)](kata-metrics-in-runtime-rs.md)
|
||||
- [Design for Kata Containers `Lazyload` ability with `nydus`](kata-nydus-design.md)
|
||||
- [Design for direct-assigned volume](direct-blk-device-assignment.md)
|
||||
- [Design for core-scheduling](core-scheduling.md)
|
||||
|
||||
@@ -3,16 +3,16 @@
|
||||
[Kubernetes](https://github.com/kubernetes/kubernetes/), or K8s, is a popular open source
|
||||
container orchestration engine. In Kubernetes, a set of containers sharing resources
|
||||
such as networking, storage, mount, PID, etc. is called a
|
||||
[pod](https://kubernetes.io/docs/user-guide/pods/).
|
||||
[pod](https://kubernetes.io/docs/concepts/workloads/pods/).
|
||||
|
||||
A node can have multiple pods, but at a minimum, a node within a Kubernetes cluster
|
||||
only needs to run a container runtime and a container agent (called a
|
||||
[Kubelet](https://kubernetes.io/docs/admin/kubelet/)).
|
||||
[Kubelet](https://kubernetes.io/docs/concepts/overview/components/#kubelet)).
|
||||
|
||||
Kata Containers represents a Kubelet pod as a VM.
|
||||
|
||||
A Kubernetes cluster runs a control plane where a scheduler (typically
|
||||
running on a dedicated master node) calls into a compute Kubelet. This
|
||||
running on a dedicated control-plane node) calls into a compute Kubelet. This
|
||||
Kubelet instance is responsible for managing the lifecycle of pods
|
||||
within the nodes and eventually relies on a container runtime to
|
||||
handle execution. The Kubelet architecture decouples lifecycle
|
||||
|
||||
50
docs/design/kata-metrics-in-runtime-rs.md
Normal file
50
docs/design/kata-metrics-in-runtime-rs.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Kata Metrics in Rust Runtime(runtime-rs)
|
||||
|
||||
Rust Runtime(runtime-rs) is responsible for:
|
||||
|
||||
- Gather metrics about `shim`.
|
||||
- Gather metrics from `hypervisor` (through `channel`).
|
||||
- Get metrics from `agent` (through `ttrpc`).
|
||||
|
||||
---
|
||||
|
||||
Here are listed all the metrics gathered by `runtime-rs`.
|
||||
|
||||
> * Current status of each entry is marked as:
|
||||
> * ✅:DONE
|
||||
> * 🚧:TODO
|
||||
|
||||
### Kata Shim
|
||||
|
||||
| STATUS | Metric name | Type | Units | Labels |
|
||||
| ------ | ------------------------------------------------------------ | ----------- | -------------- | ------------------------------------------------------------ |
|
||||
| 🚧 | `kata_shim_agent_rpc_durations_histogram_milliseconds`: <br> RPC latency distributions. | `HISTOGRAM` | `milliseconds` | <ul><li>`action` (RPC actions of Kata agent)<ul><li>`grpc.CheckRequest`</li><li>`grpc.CloseStdinRequest`</li><li>`grpc.CopyFileRequest`</li><li>`grpc.CreateContainerRequest`</li><li>`grpc.CreateSandboxRequest`</li><li>`grpc.DestroySandboxRequest`</li><li>`grpc.ExecProcessRequest`</li><li>`grpc.GetMetricsRequest`</li><li>`grpc.GuestDetailsRequest`</li><li>`grpc.ListInterfacesRequest`</li><li>`grpc.ListProcessesRequest`</li><li>`grpc.ListRoutesRequest`</li><li>`grpc.MemHotplugByProbeRequest`</li><li>`grpc.OnlineCPUMemRequest`</li><li>`grpc.PauseContainerRequest`</li><li>`grpc.RemoveContainerRequest`</li><li>`grpc.ReseedRandomDevRequest`</li><li>`grpc.ResumeContainerRequest`</li><li>`grpc.SetGuestDateTimeRequest`</li><li>`grpc.SignalProcessRequest`</li><li>`grpc.StartContainerRequest`</li><li>`grpc.StatsContainerRequest`</li><li>`grpc.TtyWinResizeRequest`</li><li>`grpc.UpdateContainerRequest`</li><li>`grpc.UpdateInterfaceRequest`</li><li>`grpc.UpdateRoutesRequest`</li><li>`grpc.WaitProcessRequest`</li><li>`grpc.WriteStreamRequest`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_fds`: <br> Kata containerd shim v2 open FDs. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_io_stat`: <br> Kata containerd shim v2 process IO statistics. | `GAUGE` | | <ul><li>`item` (see `/proc/<pid>/io`)<ul><li>`cancelledwritebytes`</li><li>`rchar`</li><li>`readbytes`</li><li>`syscr`</li><li>`syscw`</li><li>`wchar`</li><li>`writebytes`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_netdev`: <br> Kata containerd shim v2 network devices statistics. | `GAUGE` | | <ul><li>`interface` (network device name)</li><li>`item` (see `/proc/net/dev`)<ul><li>`recv_bytes`</li><li>`recv_compressed`</li><li>`recv_drop`</li><li>`recv_errs`</li><li>`recv_fifo`</li><li>`recv_frame`</li><li>`recv_multicast`</li><li>`recv_packets`</li><li>`sent_bytes`</li><li>`sent_carrier`</li><li>`sent_colls`</li><li>`sent_compressed`</li><li>`sent_drop`</li><li>`sent_errs`</li><li>`sent_fifo`</li><li>`sent_packets`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_pod_overhead_cpu`: <br> Kata Pod overhead for CPU resources(percent). | `GAUGE` | percent | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_pod_overhead_memory_in_bytes`: <br> Kata Pod overhead for memory resources(bytes). | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_proc_stat`: <br> Kata containerd shim v2 process statistics. | `GAUGE` | | <ul><li>`item` (see `/proc/<pid>/stat`)<ul><li>`cstime`</li><li>`cutime`</li><li>`stime`</li><li>`utime`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_proc_status`: <br> Kata containerd shim v2 process status. | `GAUGE` | | <ul><li>`item` (see `/proc/<pid>/status`)<ul><li>`hugetlbpages`</li><li>`nonvoluntary_ctxt_switches`</li><li>`rssanon`</li><li>`rssfile`</li><li>`rssshmem`</li><li>`vmdata`</li><li>`vmexe`</li><li>`vmhwm`</li><li>`vmlck`</li><li>`vmlib`</li><li>`vmpeak`</li><li>`vmpin`</li><li>`vmpmd`</li><li>`vmpte`</li><li>`vmrss`</li><li>`vmsize`</li><li>`vmstk`</li><li>`vmswap`</li><li>`voluntary_ctxt_switches`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_cpu_seconds_total`: <br> Total user and system CPU time spent in seconds. | `COUNTER` | `seconds` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_max_fds`: <br> Maximum number of open file descriptors. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_open_fds`: <br> Number of open file descriptors. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_resident_memory_bytes`: <br> Resident memory size in bytes. | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_start_time_seconds`: <br> Start time of the process since `unix` epoch in seconds. | `GAUGE` | `seconds` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_virtual_memory_bytes`: <br> Virtual memory size in bytes. | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_process_virtual_memory_max_bytes`: <br> Maximum amount of virtual memory available in bytes. | `GAUGE` | `bytes` | <ul><li>`sandbox_id`</li></ul> |
|
||||
| 🚧 | `kata_shim_rpc_durations_histogram_milliseconds`: <br> RPC latency distributions. | `HISTOGRAM` | `milliseconds` | <ul><li>`action` (Kata shim v2 actions)<ul><li>`checkpoint`</li><li>`close_io`</li><li>`connect`</li><li>`create`</li><li>`delete`</li><li>`exec`</li><li>`kill`</li><li>`pause`</li><li>`pids`</li><li>`resize_pty`</li><li>`resume`</li><li>`shutdown`</li><li>`start`</li><li>`state`</li><li>`stats`</li><li>`update`</li><li>`wait`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| ✅ | `kata_shim_threads`: <br> Kata containerd shim v2 process threads. | `GAUGE` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
|
||||
### Kata Hypervisor
|
||||
|
||||
Different from golang runtime, hypervisor and shim in runtime-rs belong to the **same process**, so all previous metrics for hypervisor and shim only need to be gathered once. Thus, we currently only collect previous metrics in kata shim.
|
||||
|
||||
At the same time, we added the interface(`VmmAction::GetHypervisorMetrics`) to gather hypervisor metrics, in case we design tailor-made metrics for hypervisor in the future. Here're metrics exposed from [src/dragonball/src/metric.rs](https://github.com/kata-containers/kata-containers/blob/main/src/dragonball/src/metric.rs).
|
||||
|
||||
| Metric name | Type | Units | Labels |
|
||||
| ------------------------------------------------------------ | ---------- | ----- | ------------------------------------------------------------ |
|
||||
| `kata_hypervisor_scrape_count`: <br> Metrics scrape count | `COUNTER` | | <ul><li>`sandbox_id`</li></ul> |
|
||||
| `kata_hypervisor_vcpu`: <br>Hypervisor metrics specific to VCPUs' mode of functioning. | `IntGauge` | | <ul><li>`item`<ul><li>`exit_io_in`</li><li>`exit_io_out`</li><li>`exit_mmio_read`</li><li>`exit_mmio_write`</li><li>`failures`</li><li>`filter_cpuid`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| `kata_hypervisor_seccomp`: <br> Hypervisor metrics for the seccomp filtering. | `IntGauge` | | <ul><li>`item`<ul><li>`num_faults`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
| `kata_hypervisor_seccomp`: <br> Hypervisor metrics for the seccomp filtering. | `IntGauge` | | <ul><li>`item`<ul><li>`sigbus`</li><li>`sigsegv`</li></ul></li><li>`sandbox_id`</li></ul> |
|
||||
@@ -27,6 +27,8 @@ There are several kinds of Kata configurations and they are listed below.
|
||||
| `io.katacontainers.config.runtime.internetworking_model` | string| determines how the VM should be connected to the container network interface. Valid values are `macvtap`, `tcfilter` and `none` |
|
||||
| `io.katacontainers.config.runtime.sandbox_cgroup_only`| `boolean` | determines if Kata processes are managed only in sandbox cgroup |
|
||||
| `io.katacontainers.config.runtime.enable_pprof` | `boolean` | enables Golang `pprof` for `containerd-shim-kata-v2` process |
|
||||
| `io.katacontainers.config.runtime.image_request_timeout` | `uint64` | the timeout for pulling an image within the guest in `seconds`, default is `60` |
|
||||
| `io.katacontainers.config.runtime.sealed_secret_enabled` | `boolean` | enables the sealed secret feature, default is `false` |
|
||||
|
||||
## Agent Options
|
||||
| Key | Value Type | Comments |
|
||||
|
||||
@@ -139,12 +139,12 @@ By default the CNI plugin binaries is installed under `/opt/cni/bin` (in package
|
||||
EOF
|
||||
```
|
||||
|
||||
## Allow pods to run in the master node
|
||||
## Allow pods to run in the control-plane node
|
||||
|
||||
By default, the cluster will not schedule pods in the master node. To enable master node scheduling:
|
||||
By default, the cluster will not schedule pods in the control-plane node. To enable control-plane node scheduling:
|
||||
|
||||
```bash
|
||||
$ sudo -E kubectl taint nodes --all node-role.kubernetes.io/master-
|
||||
$ sudo -E kubectl taint nodes --all node-role.kubernetes.io/control-plane-
|
||||
```
|
||||
|
||||
## Create runtime class for Kata Containers
|
||||
|
||||
@@ -19,12 +19,14 @@ This document requires the presence of Kata Containers on your system. Install u
|
||||
|
||||
## Install AWS Firecracker
|
||||
|
||||
Kata Containers only support AWS Firecracker v0.23.4 ([yet](https://github.com/kata-containers/kata-containers/pull/1519)).
|
||||
For information about the supported version of Firecracker, see the Kata Containers
|
||||
[`versions.yaml`](../../versions.yaml).
|
||||
|
||||
To install Firecracker we need to get the `firecracker` and `jailer` binaries:
|
||||
|
||||
```bash
|
||||
$ release_url="https://github.com/firecracker-microvm/firecracker/releases"
|
||||
$ version="v0.23.1"
|
||||
$ version=$(yq read <kata-repository>/versions.yaml assets.hypervisor.firecracker.version)
|
||||
$ arch=`uname -m`
|
||||
$ curl ${release_url}/download/${version}/firecracker-${version}-${arch} -o firecracker
|
||||
$ curl ${release_url}/download/${version}/jailer-${version}-${arch} -o jailer
|
||||
|
||||
@@ -115,11 +115,11 @@ $ sudo kubeadm init --ignore-preflight-errors=all --config kubeadm-config.yaml
|
||||
$ export KUBECONFIG=/etc/kubernetes/admin.conf
|
||||
```
|
||||
|
||||
### Allow pods to run in the master node
|
||||
### Allow pods to run in the control-plane node
|
||||
|
||||
By default, the cluster will not schedule pods in the master node. To enable master node scheduling:
|
||||
By default, the cluster will not schedule pods in the control-plane node. To enable control-plane node scheduling:
|
||||
```bash
|
||||
$ sudo -E kubectl taint nodes --all node-role.kubernetes.io/master-
|
||||
$ sudo -E kubectl taint nodes --all node-role.kubernetes.io/control-plane-
|
||||
```
|
||||
|
||||
### Create runtime class for Kata Containers
|
||||
|
||||
@@ -91,7 +91,7 @@ Before you install Kata Containers, check that your Minikube is operating. On yo
|
||||
$ kubectl get nodes
|
||||
```
|
||||
|
||||
You should see your `master` node listed as being `Ready`.
|
||||
You should see your `control-plane` node listed as being `Ready`.
|
||||
|
||||
Check you have virtualization enabled inside your Minikube. The following should return
|
||||
a number larger than `0` if you have either of the `vmx` or `svm` nested virtualization features
|
||||
|
||||
628
src/agent/Cargo.lock
generated
628
src/agent/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -23,7 +23,9 @@ regex = "1.5.6"
|
||||
serial_test = "0.5.1"
|
||||
kata-sys-util = { path = "../libs/kata-sys-util" }
|
||||
kata-types = { path = "../libs/kata-types" }
|
||||
const_format = "0.2.30"
|
||||
url = "2.2.2"
|
||||
derivative = "2.2.0"
|
||||
|
||||
# Async helpers
|
||||
async-trait = "0.1.42"
|
||||
@@ -34,7 +36,7 @@ futures = "0.3.28"
|
||||
tokio = { version = "1.28.1", features = ["full"] }
|
||||
tokio-vsock = "0.3.1"
|
||||
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket",]}
|
||||
netlink-sys = { version = "0.7.0", features = ["tokio_socket"] }
|
||||
rtnetlink = "0.8.0"
|
||||
netlink-packet-utils = "0.4.1"
|
||||
ipnetwork = "0.17.0"
|
||||
@@ -44,6 +46,7 @@ ipnetwork = "0.17.0"
|
||||
logging = { path = "../libs/logging" }
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.1.2"
|
||||
slog-term = "2.9.0"
|
||||
|
||||
# Redirect ttrpc log calls
|
||||
slog-stdlog = "4.0.0"
|
||||
@@ -59,7 +62,7 @@ cgroups = { package = "cgroups-rs", version = "0.3.2" }
|
||||
tracing = "0.1.26"
|
||||
tracing-subscriber = "0.2.18"
|
||||
tracing-opentelemetry = "0.13.0"
|
||||
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"]}
|
||||
opentelemetry = { version = "0.14.0", features = ["rt-tokio-current-thread"] }
|
||||
vsock-exporter = { path = "vsock-exporter" }
|
||||
|
||||
# Configuration
|
||||
@@ -71,7 +74,11 @@ clap = { version = "3.0.1", features = ["derive"] }
|
||||
openssl = { version = "0.10.38", features = ["vendored"] }
|
||||
|
||||
# Image pull/decrypt
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", tag = "v0.7.0", default-features = false, features = ["kata-cc-native-tls"] }
|
||||
image-rs = { git = "https://github.com/confidential-containers/guest-components", tag = "v0.8.0", default-features = false, features = [
|
||||
"kata-cc-native-tls",
|
||||
"verity",
|
||||
"signature-simple-xrss",
|
||||
] }
|
||||
|
||||
[patch.crates-io]
|
||||
oci-distribution = { git = "https://github.com/krustlet/oci-distribution.git", rev = "f44124c" }
|
||||
@@ -83,15 +90,15 @@ which = "4.3.0"
|
||||
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"rustjail",
|
||||
]
|
||||
members = ["rustjail"]
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
||||
[features]
|
||||
confidential-data-hub = []
|
||||
seccomp = ["rustjail/seccomp"]
|
||||
sealed-secret = ["protocols/sealed-secret", "confidential-data-hub"]
|
||||
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
|
||||
|
||||
[[bin]]
|
||||
|
||||
@@ -26,13 +26,20 @@ export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
|
||||
EXTRA_RUSTFEATURES :=
|
||||
|
||||
##VAR SECCOMP=yes|no define if agent enables seccomp feature
|
||||
SECCOMP := yes
|
||||
SECCOMP ?= yes
|
||||
|
||||
# Enable seccomp feature of rust build
|
||||
ifeq ($(SECCOMP),yes)
|
||||
override EXTRA_RUSTFEATURES += seccomp
|
||||
endif
|
||||
|
||||
SEALED_SECRET ?= no
|
||||
|
||||
# Enable sealed-secret feature of rust build
|
||||
ifeq ($(SEALED_SECRET),yes)
|
||||
override EXTRA_RUSTFEATURES += sealed-secret
|
||||
endif
|
||||
|
||||
include ../../utils.mk
|
||||
|
||||
ifeq ($(ARCH), ppc64le)
|
||||
|
||||
@@ -34,7 +34,7 @@ futures = "0.3.17"
|
||||
async-trait = "0.1.31"
|
||||
inotify = "0.9.2"
|
||||
libseccomp = { version = "0.3.0", optional = true }
|
||||
zbus = "2.3.0"
|
||||
zbus = "3.12.0"
|
||||
bit-vec= "0.6.3"
|
||||
xattr = "0.2.3"
|
||||
|
||||
|
||||
@@ -6,7 +6,10 @@
|
||||
pub const DEFAULT_SLICE: &str = "system.slice";
|
||||
pub const SLICE_SUFFIX: &str = ".slice";
|
||||
pub const SCOPE_SUFFIX: &str = ".scope";
|
||||
pub const UNIT_MODE: &str = "replace";
|
||||
pub const WHO_ENUM_ALL: &str = "all";
|
||||
pub const SIGNAL_KILL: i32 = nix::sys::signal::SIGKILL as i32;
|
||||
pub const UNIT_MODE_REPLACE: &str = "replace";
|
||||
pub const NO_SUCH_UNIT_ERROR: &str = "org.freedesktop.systemd1.NoSuchUnit";
|
||||
|
||||
pub type Properties<'a> = Vec<(&'a str, zbus::zvariant::Value<'a>)>;
|
||||
|
||||
|
||||
@@ -1,56 +1,50 @@
|
||||
// Copyright 2021-2022 Kata Contributors
|
||||
// Copyright 2021-2023 Kata Contributors
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::vec;
|
||||
|
||||
use super::common::CgroupHierarchy;
|
||||
use super::common::{Properties, SLICE_SUFFIX, UNIT_MODE};
|
||||
use super::common::{
|
||||
CgroupHierarchy, Properties, NO_SUCH_UNIT_ERROR, SIGNAL_KILL, SLICE_SUFFIX, UNIT_MODE_REPLACE,
|
||||
WHO_ENUM_ALL,
|
||||
};
|
||||
use super::interface::system::ManagerProxyBlocking as SystemManager;
|
||||
use anyhow::{Context, Result};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use zbus::zvariant::Value;
|
||||
|
||||
pub trait SystemdInterface {
|
||||
fn start_unit(
|
||||
&self,
|
||||
pid: i32,
|
||||
parent: &str,
|
||||
unit_name: &str,
|
||||
cg_hierarchy: &CgroupHierarchy,
|
||||
) -> Result<()>;
|
||||
|
||||
fn set_properties(&self, unit_name: &str, properties: &Properties) -> Result<()>;
|
||||
|
||||
fn stop_unit(&self, unit_name: &str) -> Result<()>;
|
||||
|
||||
fn start_unit(&self, pid: i32, parent: &str, cg_hierarchy: &CgroupHierarchy) -> Result<()>;
|
||||
fn set_properties(&self, properties: &Properties) -> Result<()>;
|
||||
fn kill_unit(&self) -> Result<()>;
|
||||
fn freeze_unit(&self) -> Result<()>;
|
||||
fn thaw_unit(&self) -> Result<()>;
|
||||
fn add_process(&self, pid: i32) -> Result<()>;
|
||||
fn get_version(&self) -> Result<String>;
|
||||
|
||||
fn unit_exists(&self, unit_name: &str) -> Result<bool>;
|
||||
|
||||
fn add_process(&self, pid: i32, unit_name: &str) -> Result<()>;
|
||||
fn unit_exists(&self) -> Result<bool>;
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct DBusClient {}
|
||||
pub struct DBusClient {
|
||||
unit_name: String,
|
||||
}
|
||||
|
||||
impl DBusClient {
|
||||
pub fn new(unit_name: String) -> Self {
|
||||
Self { unit_name }
|
||||
}
|
||||
|
||||
fn build_proxy(&self) -> Result<SystemManager<'static>> {
|
||||
let connection =
|
||||
zbus::blocking::Connection::system().context("Establishing a D-Bus connection")?;
|
||||
let proxy = SystemManager::new(&connection).context("Building a D-Bus proxy manager")?;
|
||||
|
||||
Ok(proxy)
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemdInterface for DBusClient {
|
||||
fn start_unit(
|
||||
&self,
|
||||
pid: i32,
|
||||
parent: &str,
|
||||
unit_name: &str,
|
||||
cg_hierarchy: &CgroupHierarchy,
|
||||
) -> Result<()> {
|
||||
fn start_unit(&self, pid: i32, parent: &str, cg_hierarchy: &CgroupHierarchy) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
// enable CPUAccounting & MemoryAccounting & (Block)IOAccounting by default
|
||||
@@ -68,7 +62,7 @@ impl SystemdInterface for DBusClient {
|
||||
CgroupHierarchy::Unified => properties.push(("BlockIOAccounting", Value::Bool(true))),
|
||||
}
|
||||
|
||||
if unit_name.ends_with(SLICE_SUFFIX) {
|
||||
if self.unit_name.ends_with(SLICE_SUFFIX) {
|
||||
properties.push(("Wants", Value::Str(parent.into())));
|
||||
} else {
|
||||
properties.push(("Slice", Value::Str(parent.into())));
|
||||
@@ -76,27 +70,57 @@ impl SystemdInterface for DBusClient {
|
||||
}
|
||||
|
||||
proxy
|
||||
.start_transient_unit(unit_name, UNIT_MODE, &properties, &[])
|
||||
.with_context(|| format!("failed to start transient unit {}", unit_name))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_properties(&self, unit_name: &str, properties: &Properties) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.set_unit_properties(unit_name, true, properties)
|
||||
.with_context(|| format!("failed to set unit properties {}", unit_name))?;
|
||||
.start_transient_unit(&self.unit_name, UNIT_MODE_REPLACE, &properties, &[])
|
||||
.context(format!("failed to start transient unit {}", self.unit_name))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn stop_unit(&self, unit_name: &str) -> Result<()> {
|
||||
fn set_properties(&self, properties: &Properties) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.stop_unit(unit_name, UNIT_MODE)
|
||||
.with_context(|| format!("failed to stop unit {}", unit_name))?;
|
||||
.set_unit_properties(&self.unit_name, true, properties)
|
||||
.context(format!("failed to set unit {} properties", self.unit_name))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn kill_unit(&self) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.kill_unit(&self.unit_name, WHO_ENUM_ALL, SIGNAL_KILL)
|
||||
.or_else(|e| match e {
|
||||
zbus::Error::MethodError(error_name, _, _)
|
||||
if error_name.as_str() == NO_SUCH_UNIT_ERROR =>
|
||||
{
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(e),
|
||||
})
|
||||
.context(format!("failed to kill unit {}", self.unit_name))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn freeze_unit(&self) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.freeze_unit(&self.unit_name)
|
||||
.context(format!("failed to freeze unit {}", self.unit_name))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn thaw_unit(&self) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.thaw_unit(&self.unit_name)
|
||||
.context(format!("failed to thaw unit {}", self.unit_name))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -105,24 +129,37 @@ impl SystemdInterface for DBusClient {
|
||||
|
||||
let systemd_version = proxy
|
||||
.version()
|
||||
.with_context(|| "failed to get systemd version".to_string())?;
|
||||
.context("failed to get systemd version".to_string())?;
|
||||
|
||||
Ok(systemd_version)
|
||||
}
|
||||
|
||||
fn unit_exists(&self, unit_name: &str) -> Result<bool> {
|
||||
let proxy = self
|
||||
.build_proxy()
|
||||
.with_context(|| format!("Checking if systemd unit {} exists", unit_name))?;
|
||||
fn unit_exists(&self) -> Result<bool> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
Ok(proxy.get_unit(unit_name).is_ok())
|
||||
match proxy.get_unit(&self.unit_name) {
|
||||
Ok(_) => Ok(true),
|
||||
Err(zbus::Error::MethodError(error_name, _, _))
|
||||
if error_name.as_str() == NO_SUCH_UNIT_ERROR =>
|
||||
{
|
||||
Ok(false)
|
||||
}
|
||||
Err(e) => Err(anyhow!(format!(
|
||||
"failed to check if unit {} exists: {:?}",
|
||||
self.unit_name, e
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_process(&self, pid: i32, unit_name: &str) -> Result<()> {
|
||||
fn add_process(&self, pid: i32) -> Result<()> {
|
||||
let proxy = self.build_proxy()?;
|
||||
|
||||
proxy
|
||||
.attach_processes_to_unit(unit_name, "/", &[pid as u32])
|
||||
.with_context(|| format!("failed to add process {}", unit_name))?;
|
||||
.attach_processes_to_unit(&self.unit_name, "/", &[pid as u32])
|
||||
.context(format!(
|
||||
"failed to add process into unit {}",
|
||||
self.unit_name
|
||||
))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright 2021-2022 Kata Contributors
|
||||
// Copyright 2021-2023 Kata Contributors
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
//! # DBus interface proxy for: `org.freedesktop.systemd1.Manager`
|
||||
//!
|
||||
//! This code was generated by `zbus-xmlgen` `2.0.1` from DBus introspection data.
|
||||
//! This code was generated by `zbus-xmlgen` `3.1.1` from DBus introspection data.
|
||||
//! Source: `Interface '/org/freedesktop/systemd1' from service 'org.freedesktop.systemd1' on system bus`.
|
||||
//!
|
||||
//! You may prefer to adapt it, instead of using it verbatim.
|
||||
@@ -189,12 +189,14 @@ trait Manager {
|
||||
) -> zbus::Result<zbus::zvariant::OwnedObjectPath>;
|
||||
|
||||
/// GetUnitByInvocationID method
|
||||
#[dbus_proxy(name = "GetUnitByInvocationID")]
|
||||
fn get_unit_by_invocation_id(
|
||||
&self,
|
||||
invocation_id: &[u8],
|
||||
) -> zbus::Result<zbus::zvariant::OwnedObjectPath>;
|
||||
|
||||
/// GetUnitByPID method
|
||||
#[dbus_proxy(name = "GetUnitByPID")]
|
||||
fn get_unit_by_pid(&self, pid: u32) -> zbus::Result<zbus::zvariant::OwnedObjectPath>;
|
||||
|
||||
/// GetUnitFileLinks method
|
||||
@@ -210,6 +212,7 @@ trait Manager {
|
||||
fn halt(&self) -> zbus::Result<()>;
|
||||
|
||||
/// KExec method
|
||||
#[dbus_proxy(name = "KExec")]
|
||||
fn kexec(&self) -> zbus::Result<()>;
|
||||
|
||||
/// KillUnit method
|
||||
@@ -330,6 +333,7 @@ trait Manager {
|
||||
fn lookup_dynamic_user_by_name(&self, name: &str) -> zbus::Result<u32>;
|
||||
|
||||
/// LookupDynamicUserByUID method
|
||||
#[dbus_proxy(name = "LookupDynamicUserByUID")]
|
||||
fn lookup_dynamic_user_by_uid(&self, uid: u32) -> zbus::Result<String>;
|
||||
|
||||
/// MaskUnitFiles method
|
||||
@@ -571,139 +575,139 @@ trait Manager {
|
||||
fn ctrl_alt_del_burst_action(&self) -> zbus::Result<String>;
|
||||
|
||||
/// DefaultBlockIOAccounting property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultBlockIOAccounting")]
|
||||
fn default_block_ioaccounting(&self) -> zbus::Result<bool>;
|
||||
|
||||
/// DefaultCPUAccounting property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultCPUAccounting")]
|
||||
fn default_cpuaccounting(&self) -> zbus::Result<bool>;
|
||||
|
||||
/// DefaultLimitAS property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitAS")]
|
||||
fn default_limit_as(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitASSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitASSoft")]
|
||||
fn default_limit_assoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitCORE property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitCORE")]
|
||||
fn default_limit_core(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitCORESoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitCORESoft")]
|
||||
fn default_limit_coresoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitCPU property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitCPU")]
|
||||
fn default_limit_cpu(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitCPUSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitCPUSoft")]
|
||||
fn default_limit_cpusoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitDATA property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitDATA")]
|
||||
fn default_limit_data(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitDATASoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitDATASoft")]
|
||||
fn default_limit_datasoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitFSIZE property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitFSIZE")]
|
||||
fn default_limit_fsize(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitFSIZESoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitFSIZESoft")]
|
||||
fn default_limit_fsizesoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitLOCKS property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitLOCKS")]
|
||||
fn default_limit_locks(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitLOCKSSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitLOCKSSoft")]
|
||||
fn default_limit_lockssoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitMEMLOCK property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitMEMLOCK")]
|
||||
fn default_limit_memlock(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitMEMLOCKSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitMEMLOCKSoft")]
|
||||
fn default_limit_memlocksoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitMSGQUEUE property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitMSGQUEUE")]
|
||||
fn default_limit_msgqueue(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitMSGQUEUESoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitMSGQUEUESoft")]
|
||||
fn default_limit_msgqueuesoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitNICE property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitNICE")]
|
||||
fn default_limit_nice(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitNICESoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitNICESoft")]
|
||||
fn default_limit_nicesoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitNOFILE property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitNOFILE")]
|
||||
fn default_limit_nofile(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitNOFILESoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitNOFILESoft")]
|
||||
fn default_limit_nofilesoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitNPROC property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitNPROC")]
|
||||
fn default_limit_nproc(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitNPROCSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitNPROCSoft")]
|
||||
fn default_limit_nprocsoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitRSS property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitRSS")]
|
||||
fn default_limit_rss(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitRSSSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitRSSSoft")]
|
||||
fn default_limit_rsssoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitRTPRIO property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitRTPRIO")]
|
||||
fn default_limit_rtprio(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitRTPRIOSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitRTPRIOSoft")]
|
||||
fn default_limit_rtpriosoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitRTTIME property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitRTTIME")]
|
||||
fn default_limit_rttime(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitRTTIMESoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitRTTIMESoft")]
|
||||
fn default_limit_rttimesoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitSIGPENDING property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitSIGPENDING")]
|
||||
fn default_limit_sigpending(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitSIGPENDINGSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitSIGPENDINGSoft")]
|
||||
fn default_limit_sigpendingsoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitSTACK property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitSTACK")]
|
||||
fn default_limit_stack(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultLimitSTACKSoft property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultLimitSTACKSoft")]
|
||||
fn default_limit_stacksoft(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultMemoryAccounting property
|
||||
@@ -711,11 +715,11 @@ trait Manager {
|
||||
fn default_memory_accounting(&self) -> zbus::Result<bool>;
|
||||
|
||||
/// DefaultOOMPolicy property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultOOMPolicy")]
|
||||
fn default_oompolicy(&self) -> zbus::Result<String>;
|
||||
|
||||
/// DefaultRestartUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultRestartUSec")]
|
||||
fn default_restart_usec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultStandardError property
|
||||
@@ -731,7 +735,7 @@ trait Manager {
|
||||
fn default_start_limit_burst(&self) -> zbus::Result<u32>;
|
||||
|
||||
/// DefaultStartLimitIntervalUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultStartLimitIntervalUSec")]
|
||||
fn default_start_limit_interval_usec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultTasksAccounting property
|
||||
@@ -743,19 +747,19 @@ trait Manager {
|
||||
fn default_tasks_max(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultTimeoutAbortUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultTimeoutAbortUSec")]
|
||||
fn default_timeout_abort_usec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultTimeoutStartUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultTimeoutStartUSec")]
|
||||
fn default_timeout_start_usec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultTimeoutStopUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultTimeoutStopUSec")]
|
||||
fn default_timeout_stop_usec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// DefaultTimerAccuracyUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "DefaultTimerAccuracyUSec")]
|
||||
fn default_timer_accuracy_usec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// Environment property
|
||||
@@ -803,65 +807,64 @@ trait Manager {
|
||||
fn generators_start_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDGeneratorsFinishTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDGeneratorsFinishTimestamp")]
|
||||
fn init_rdgenerators_finish_timestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDGeneratorsFinishTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDGeneratorsFinishTimestampMonotonic")]
|
||||
fn init_rdgenerators_finish_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDGeneratorsStartTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDGeneratorsStartTimestamp")]
|
||||
fn init_rdgenerators_start_timestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDGeneratorsStartTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDGeneratorsStartTimestampMonotonic")]
|
||||
fn init_rdgenerators_start_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDSecurityFinishTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDSecurityFinishTimestamp")]
|
||||
fn init_rdsecurity_finish_timestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDSecurityFinishTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDSecurityFinishTimestampMonotonic")]
|
||||
fn init_rdsecurity_finish_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDSecurityStartTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDSecurityStartTimestamp")]
|
||||
fn init_rdsecurity_start_timestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDSecurityStartTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDSecurityStartTimestampMonotonic")]
|
||||
fn init_rdsecurity_start_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDTimestamp")]
|
||||
fn init_rdtimestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDTimestampMonotonic")]
|
||||
fn init_rdtimestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDUnitsLoadFinishTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDUnitsLoadFinishTimestamp")]
|
||||
fn init_rdunits_load_finish_timestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDUnitsLoadFinishTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDUnitsLoadFinishTimestampMonotonic")]
|
||||
fn init_rdunits_load_finish_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDUnitsLoadStartTimestamp property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDUnitsLoadStartTimestamp")]
|
||||
fn init_rdunits_load_start_timestamp(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// InitRDUnitsLoadStartTimestampMonotonic property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "InitRDUnitsLoadStartTimestampMonotonic")]
|
||||
fn init_rdunits_load_start_timestamp_monotonic(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// KExecWatchdogUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "KExecWatchdogUSec")]
|
||||
fn kexec_watchdog_usec(&self) -> zbus::Result<u64>;
|
||||
#[dbus_proxy(property)]
|
||||
fn set_kexec_watchdog_usec(&self, value: u64) -> zbus::Result<()>;
|
||||
|
||||
/// KernelTimestamp property
|
||||
@@ -883,33 +886,31 @@ trait Manager {
|
||||
/// LogLevel property
|
||||
#[dbus_proxy(property)]
|
||||
fn log_level(&self) -> zbus::Result<String>;
|
||||
#[dbus_proxy(property)]
|
||||
fn set_log_level(&self, value: &str) -> zbus::Result<()>;
|
||||
|
||||
/// LogTarget property
|
||||
#[dbus_proxy(property)]
|
||||
fn log_target(&self) -> zbus::Result<String>;
|
||||
#[dbus_proxy(property)]
|
||||
fn set_log_target(&self, value: &str) -> zbus::Result<()>;
|
||||
|
||||
/// NFailedJobs property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "NFailedJobs")]
|
||||
fn nfailed_jobs(&self) -> zbus::Result<u32>;
|
||||
|
||||
/// NFailedUnits property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "NFailedUnits")]
|
||||
fn nfailed_units(&self) -> zbus::Result<u32>;
|
||||
|
||||
/// NInstalledJobs property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "NInstalledJobs")]
|
||||
fn ninstalled_jobs(&self) -> zbus::Result<u32>;
|
||||
|
||||
/// NJobs property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "NJobs")]
|
||||
fn njobs(&self) -> zbus::Result<u32>;
|
||||
|
||||
/// NNames property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "NNames")]
|
||||
fn nnames(&self) -> zbus::Result<u32>;
|
||||
|
||||
/// Progress property
|
||||
@@ -917,15 +918,13 @@ trait Manager {
|
||||
fn progress(&self) -> zbus::Result<f64>;
|
||||
|
||||
/// RebootWatchdogUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "RebootWatchdogUSec")]
|
||||
fn reboot_watchdog_usec(&self) -> zbus::Result<u64>;
|
||||
#[dbus_proxy(property)]
|
||||
fn set_reboot_watchdog_usec(&self, value: u64) -> zbus::Result<()>;
|
||||
|
||||
/// RuntimeWatchdogUSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "RuntimeWatchdogUSec")]
|
||||
fn runtime_watchdog_usec(&self) -> zbus::Result<u64>;
|
||||
#[dbus_proxy(property)]
|
||||
fn set_runtime_watchdog_usec(&self, value: u64) -> zbus::Result<()>;
|
||||
|
||||
/// SecurityFinishTimestamp property
|
||||
@@ -947,7 +946,6 @@ trait Manager {
|
||||
/// ServiceWatchdogs property
|
||||
#[dbus_proxy(property)]
|
||||
fn service_watchdogs(&self) -> zbus::Result<bool>;
|
||||
#[dbus_proxy(property)]
|
||||
fn set_service_watchdogs(&self, value: bool) -> zbus::Result<()>;
|
||||
|
||||
/// ShowStatus property
|
||||
@@ -963,7 +961,7 @@ trait Manager {
|
||||
fn tainted(&self) -> zbus::Result<String>;
|
||||
|
||||
/// TimerSlackNSec property
|
||||
#[dbus_proxy(property)]
|
||||
#[dbus_proxy(property, name = "TimerSlackNSec")]
|
||||
fn timer_slack_nsec(&self) -> zbus::Result<u64>;
|
||||
|
||||
/// UnitPath property
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
use crate::cgroups::Manager as CgroupManager;
|
||||
use crate::protocols::agent::CgroupStats;
|
||||
use anyhow::Result;
|
||||
use anyhow::{anyhow, Result};
|
||||
use cgroups::freezer::FreezerState;
|
||||
use libc::{self, pid_t};
|
||||
use oci::LinuxResources;
|
||||
@@ -29,7 +29,6 @@ pub struct Manager {
|
||||
pub mounts: HashMap<String, String>,
|
||||
pub cgroups_path: CgroupsPath,
|
||||
pub cpath: String,
|
||||
pub unit_name: String,
|
||||
// dbus client for set properties
|
||||
dbus_client: DBusClient,
|
||||
// fs manager for get properties
|
||||
@@ -40,14 +39,12 @@ pub struct Manager {
|
||||
|
||||
impl CgroupManager for Manager {
|
||||
fn apply(&self, pid: pid_t) -> Result<()> {
|
||||
let unit_name = self.unit_name.as_str();
|
||||
if self.dbus_client.unit_exists(unit_name)? {
|
||||
self.dbus_client.add_process(pid, self.unit_name.as_str())?;
|
||||
if self.dbus_client.unit_exists()? {
|
||||
self.dbus_client.add_process(pid)?;
|
||||
} else {
|
||||
self.dbus_client.start_unit(
|
||||
(pid as u32).try_into().unwrap(),
|
||||
self.cgroups_path.slice.as_str(),
|
||||
self.unit_name.as_str(),
|
||||
&self.cg_hierarchy,
|
||||
)?;
|
||||
}
|
||||
@@ -66,8 +63,7 @@ impl CgroupManager for Manager {
|
||||
Pids::apply(r, &mut properties, &self.cg_hierarchy, systemd_version_str)?;
|
||||
CpuSet::apply(r, &mut properties, &self.cg_hierarchy, systemd_version_str)?;
|
||||
|
||||
self.dbus_client
|
||||
.set_properties(self.unit_name.as_str(), &properties)?;
|
||||
self.dbus_client.set_properties(&properties)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -77,11 +73,15 @@ impl CgroupManager for Manager {
|
||||
}
|
||||
|
||||
fn freeze(&self, state: FreezerState) -> Result<()> {
|
||||
self.fs_manager.freeze(state)
|
||||
match state {
|
||||
FreezerState::Thawed => self.dbus_client.thaw_unit(),
|
||||
FreezerState::Frozen => self.dbus_client.freeze_unit(),
|
||||
_ => Err(anyhow!("Invalid FreezerState")),
|
||||
}
|
||||
}
|
||||
|
||||
fn destroy(&mut self) -> Result<()> {
|
||||
self.dbus_client.stop_unit(self.unit_name.as_str())?;
|
||||
self.dbus_client.kill_unit()?;
|
||||
self.fs_manager.destroy()
|
||||
}
|
||||
|
||||
@@ -120,8 +120,7 @@ impl Manager {
|
||||
mounts: fs_manager.mounts.clone(),
|
||||
cgroups_path,
|
||||
cpath,
|
||||
unit_name,
|
||||
dbus_client: DBusClient {},
|
||||
dbus_client: DBusClient::new(unit_name),
|
||||
fs_manager,
|
||||
cg_hierarchy: if cgroups::hierarchies::is_cgroup2_unified_mode() {
|
||||
CgroupHierarchy::Unified
|
||||
|
||||
@@ -1118,6 +1118,7 @@ mod tests {
|
||||
use std::fs::create_dir;
|
||||
use std::fs::create_dir_all;
|
||||
use std::fs::remove_dir_all;
|
||||
use std::fs::remove_file;
|
||||
use std::io;
|
||||
use std::os::unix::fs;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
@@ -1333,14 +1334,9 @@ mod tests {
|
||||
fn test_mknod_dev() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let tempdir = tempdir().unwrap();
|
||||
|
||||
let olddir = unistd::getcwd().unwrap();
|
||||
defer!(let _ = unistd::chdir(&olddir););
|
||||
let _ = unistd::chdir(tempdir.path());
|
||||
|
||||
let path = "/dev/fifo-test";
|
||||
let dev = oci::LinuxDevice {
|
||||
path: "/fifo".to_string(),
|
||||
path: path.to_string(),
|
||||
r#type: "c".to_string(),
|
||||
major: 0,
|
||||
minor: 0,
|
||||
@@ -1348,13 +1344,16 @@ mod tests {
|
||||
uid: Some(unistd::getuid().as_raw()),
|
||||
gid: Some(unistd::getgid().as_raw()),
|
||||
};
|
||||
let path = Path::new("fifo");
|
||||
|
||||
let ret = mknod_dev(&dev, path);
|
||||
let ret = mknod_dev(&dev, Path::new(path));
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
|
||||
let ret = stat::stat(path);
|
||||
assert!(ret.is_ok(), "Should pass. Got: {:?}", ret);
|
||||
|
||||
// clear test device node
|
||||
let ret = remove_file(path);
|
||||
assert!(ret.is_ok(), "Should pass, Got: {:?}", ret);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -161,7 +161,7 @@ impl Process {
|
||||
|
||||
pub fn notify_term_close(&mut self) {
|
||||
let notify = self.term_exit_notifier.clone();
|
||||
notify.notify_one();
|
||||
notify.notify_waiters();
|
||||
}
|
||||
|
||||
pub fn close_stdin(&mut self) {
|
||||
|
||||
289
src/agent/src/cdh.rs
Normal file
289
src/agent/src/cdh.rs
Normal file
@@ -0,0 +1,289 @@
|
||||
// Copyright (c) 2023 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// Confidential Data Hub client wrapper.
|
||||
// Confidential Data Hub is a service running inside guest to provide resource related APIs.
|
||||
// https://github.com/confidential-containers/guest-components/tree/main/confidential-data-hub
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use oci::{Mount, Spec};
|
||||
use protocols::{
|
||||
sealed_secret, sealed_secret_ttrpc_async, sealed_secret_ttrpc_async::SealedSecretServiceClient,
|
||||
};
|
||||
use std::fs;
|
||||
use std::os::unix::fs::symlink;
|
||||
use std::path::Path;
|
||||
const CDH_ADDR: &str = "unix:///run/confidential-containers/cdh.sock";
|
||||
const SECRETS_DIR: &str = "/run/secrets/";
|
||||
const SEALED_SECRET_TIMEOUT: i64 = 50 * 1000 * 1000 * 1000;
|
||||
|
||||
// Convenience function to obtain the scope logger.
|
||||
fn sl() -> slog::Logger {
|
||||
slog_scope::logger()
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CDHClient {
|
||||
sealed_secret_client: Option<SealedSecretServiceClient>,
|
||||
}
|
||||
|
||||
impl CDHClient {
|
||||
pub fn new() -> Result<Self> {
|
||||
let c = ttrpc::asynchronous::Client::connect(CDH_ADDR);
|
||||
match c {
|
||||
Ok(v) => {
|
||||
let ssclient = sealed_secret_ttrpc_async::SealedSecretServiceClient::new(v);
|
||||
Ok(CDHClient {
|
||||
sealed_secret_client: Some(ssclient),
|
||||
})
|
||||
}
|
||||
Err(_) => Ok(CDHClient {
|
||||
sealed_secret_client: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn unseal_secret_async(
|
||||
&self,
|
||||
sealed: &str,
|
||||
) -> Result<sealed_secret::UnsealSecretOutput> {
|
||||
let secret = sealed
|
||||
.strip_prefix("sealed.")
|
||||
.ok_or(anyhow!("strip_prefix \"sealed.\" failed"))?;
|
||||
let mut input = sealed_secret::UnsealSecretInput::new();
|
||||
input.set_secret(secret.into());
|
||||
let unseal = self
|
||||
.sealed_secret_client
|
||||
.as_ref()
|
||||
.ok_or(anyhow!("unwrap sealed_secret_client failed"))?
|
||||
.unseal_secret(ttrpc::context::with_timeout(SEALED_SECRET_TIMEOUT), &input)
|
||||
.await?;
|
||||
Ok(unseal)
|
||||
}
|
||||
|
||||
pub async fn unseal_env(&self, env: &str) -> Result<String> {
|
||||
let (key, value) = env.split_once('=').unwrap_or(("", ""));
|
||||
if value.starts_with("sealed.") {
|
||||
let unsealed_value = self.unseal_secret_async(value).await;
|
||||
match unsealed_value {
|
||||
Ok(v) => {
|
||||
let plain_env = format!("{}={}", key, std::str::from_utf8(&v.plaintext)?);
|
||||
return Ok(plain_env);
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
}
|
||||
Ok((*env.to_owned()).to_string())
|
||||
}
|
||||
|
||||
pub async fn unseal_file(&self, sealed_source_path: &String) -> Result<()> {
|
||||
if !Path::new(sealed_source_path).exists() {
|
||||
info!(
|
||||
sl(),
|
||||
"sealed source path {:?} does not exist", sealed_source_path
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
for entry in fs::read_dir(sealed_source_path)? {
|
||||
let entry = entry?;
|
||||
|
||||
if !entry.file_type()?.is_symlink()
|
||||
&& !fs::metadata(entry.path())?.file_type().is_file()
|
||||
{
|
||||
info!(
|
||||
sl(),
|
||||
"skipping sealed source entry {:?} because its file type is {:?}",
|
||||
entry,
|
||||
entry.file_type()?
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
let target_path = fs::canonicalize(&entry.path())?;
|
||||
info!(sl(), "sealed source entry target path: {:?}", target_path);
|
||||
if !target_path.is_file() {
|
||||
info!(sl(), "sealed source is not a file: {:?}", target_path);
|
||||
continue;
|
||||
}
|
||||
|
||||
let secret_name = entry.file_name();
|
||||
let contents = fs::read_to_string(&target_path)?;
|
||||
if contents.starts_with("sealed.") {
|
||||
info!(sl(), "sealed source entry found: {:?}", target_path);
|
||||
let unsealed_filename = SECRETS_DIR.to_string()
|
||||
+ secret_name
|
||||
.as_os_str()
|
||||
.to_str()
|
||||
.ok_or(anyhow!("create unsealed_filename failed"))?;
|
||||
let unsealed_value = self.unseal_secret_async(&contents).await?;
|
||||
fs::write(&unsealed_filename, unsealed_value.plaintext)?;
|
||||
fs::remove_file(&entry.path())?;
|
||||
symlink(unsealed_filename, &entry.path())?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_sealed_secret_mounts(&self, spec: &mut Spec) -> Result<Vec<String>> {
|
||||
let mut sealed_source_path: Vec<String> = vec![];
|
||||
for m in spec.mounts.iter_mut() {
|
||||
if let Some(unsealed_mount_point) = m.destination.strip_prefix("/sealed") {
|
||||
info!(
|
||||
sl(),
|
||||
"sealed mount destination: {:?} source: {:?}", m.destination, m.source
|
||||
);
|
||||
sealed_source_path.push(m.source.clone());
|
||||
m.destination = unsealed_mount_point.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
if !sealed_source_path.is_empty() {
|
||||
let sealed_mounts = Mount {
|
||||
destination: SECRETS_DIR.to_string(),
|
||||
r#type: "bind".to_string(),
|
||||
source: SECRETS_DIR.to_string(),
|
||||
options: vec!["bind".to_string()],
|
||||
};
|
||||
spec.mounts.push(sealed_mounts);
|
||||
}
|
||||
fs::create_dir_all(SECRETS_DIR)?;
|
||||
Ok(sealed_source_path)
|
||||
}
|
||||
} /* end of impl CDHClient */
|
||||
|
||||
#[cfg(test)]
|
||||
#[cfg(feature = "sealed-secret")]
|
||||
mod tests {
|
||||
use crate::cdh::CDHClient;
|
||||
use crate::cdh::CDH_ADDR;
|
||||
use crate::cdh::SECRETS_DIR;
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use protocols::{sealed_secret, sealed_secret_ttrpc_async};
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::{Read, Write};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use tokio::signal::unix::{signal, SignalKind};
|
||||
|
||||
struct TestService;
|
||||
|
||||
#[async_trait]
|
||||
impl sealed_secret_ttrpc_async::SealedSecretService for TestService {
|
||||
async fn unseal_secret(
|
||||
&self,
|
||||
_ctx: &::ttrpc::asynchronous::TtrpcContext,
|
||||
_req: sealed_secret::UnsealSecretInput,
|
||||
) -> ttrpc::error::Result<sealed_secret::UnsealSecretOutput> {
|
||||
let mut output = sealed_secret::UnsealSecretOutput::new();
|
||||
output.set_plaintext("unsealed".into());
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_if_sock_exist(sock_addr: &str) -> std::io::Result<()> {
|
||||
let path = sock_addr
|
||||
.strip_prefix("unix://")
|
||||
.expect("socket address does not have the expected format.");
|
||||
|
||||
if std::path::Path::new(path).exists() {
|
||||
std::fs::remove_file(path)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn start_ttrpc_server() {
|
||||
tokio::spawn(async move {
|
||||
let ss = Box::new(TestService {})
|
||||
as Box<dyn sealed_secret_ttrpc_async::SealedSecretService + Send + Sync>;
|
||||
let ss = Arc::new(ss);
|
||||
let ss_service = sealed_secret_ttrpc_async::create_sealed_secret_service(ss);
|
||||
|
||||
remove_if_sock_exist(CDH_ADDR).unwrap();
|
||||
|
||||
let mut server = ttrpc::asynchronous::Server::new()
|
||||
.bind(CDH_ADDR)
|
||||
.unwrap()
|
||||
.register_service(ss_service);
|
||||
|
||||
server.start().await.unwrap();
|
||||
|
||||
let mut interrupt = signal(SignalKind::interrupt()).unwrap();
|
||||
tokio::select! {
|
||||
_ = interrupt.recv() => {
|
||||
server.shutdown().await.unwrap();
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_unseal_env() {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
let _guard = rt.enter();
|
||||
start_ttrpc_server();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
|
||||
let cc = Some(CDHClient::new().unwrap());
|
||||
let cdh_client = cc
|
||||
.as_ref()
|
||||
.ok_or(anyhow!("get confidential-data-hub client failed"))
|
||||
.unwrap();
|
||||
let sealed_env = String::from("key=sealed.testdata");
|
||||
let unsealed_env = cdh_client.unseal_env(&sealed_env).await.unwrap();
|
||||
assert_eq!(unsealed_env, String::from("key=unsealed"));
|
||||
let normal_env = String::from("key=testdata");
|
||||
let unchanged_env = cdh_client.unseal_env(&normal_env).await.unwrap();
|
||||
assert_eq!(unchanged_env, String::from("key=testdata"));
|
||||
|
||||
rt.shutdown_background();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_unseal_file() {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
let _guard = rt.enter();
|
||||
start_ttrpc_server();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
|
||||
let cc = Some(CDHClient::new().unwrap());
|
||||
let cdh_client = cc
|
||||
.as_ref()
|
||||
.ok_or(anyhow!("get confidential-data-hub client failed"))
|
||||
.unwrap();
|
||||
|
||||
fs::create_dir_all(SECRETS_DIR).unwrap();
|
||||
|
||||
let sealed_filename = "passwd";
|
||||
let mut sealed_file = File::create(sealed_filename).unwrap();
|
||||
let dir = String::from(".");
|
||||
sealed_file.write_all(b"sealed.passwd").unwrap();
|
||||
cdh_client.unseal_file(&dir).await.unwrap();
|
||||
let unsealed_filename = SECRETS_DIR.to_string() + "/passwd";
|
||||
let mut unsealed_file = fs::File::open(unsealed_filename.clone()).unwrap();
|
||||
let mut contents = String::new();
|
||||
unsealed_file.read_to_string(&mut contents).unwrap();
|
||||
assert_eq!(contents, String::from("unsealed"));
|
||||
fs::remove_file(sealed_filename).unwrap();
|
||||
fs::remove_file(unsealed_filename).unwrap();
|
||||
|
||||
let normal_filename = "passwd";
|
||||
let mut normal_file = File::create(normal_filename).unwrap();
|
||||
normal_file.write_all(b"passwd").unwrap();
|
||||
cdh_client.unseal_file(&dir).await.unwrap();
|
||||
let filename = SECRETS_DIR.to_string() + "/passwd";
|
||||
assert!(!Path::new(&filename).exists());
|
||||
fs::remove_file(normal_filename).unwrap();
|
||||
|
||||
rt.shutdown_background();
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
}
|
||||
}
|
||||
@@ -27,6 +27,7 @@ const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
|
||||
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
|
||||
const CONFIG_FILE: &str = "agent.config_file";
|
||||
const AA_KBC_PARAMS: &str = "agent.aa_kbc_params";
|
||||
const REST_API_OPTION: &str = "agent.rest_api";
|
||||
const HTTPS_PROXY: &str = "agent.https_proxy";
|
||||
const NO_PROXY: &str = "agent.no_proxy";
|
||||
const ENABLE_DATA_INTEGRITY: &str = "agent.data_integrity";
|
||||
@@ -88,6 +89,7 @@ pub struct AgentConfig {
|
||||
pub supports_seccomp: bool,
|
||||
pub container_policy_path: String,
|
||||
pub aa_kbc_params: String,
|
||||
pub rest_api: String,
|
||||
pub https_proxy: String,
|
||||
pub no_proxy: String,
|
||||
pub data_integrity: bool,
|
||||
@@ -112,6 +114,7 @@ pub struct AgentConfigBuilder {
|
||||
pub endpoints: Option<EndpointsConfig>,
|
||||
pub container_policy_path: Option<String>,
|
||||
pub aa_kbc_params: Option<String>,
|
||||
pub rest_api: Option<String>,
|
||||
pub https_proxy: Option<String>,
|
||||
pub no_proxy: Option<String>,
|
||||
pub data_integrity: Option<bool>,
|
||||
@@ -182,6 +185,7 @@ impl Default for AgentConfig {
|
||||
supports_seccomp: rpc::have_seccomp(),
|
||||
container_policy_path: String::from(""),
|
||||
aa_kbc_params: String::from(""),
|
||||
rest_api: String::from(""),
|
||||
https_proxy: String::from(""),
|
||||
no_proxy: String::from(""),
|
||||
data_integrity: false,
|
||||
@@ -219,6 +223,7 @@ impl FromStr for AgentConfig {
|
||||
config_override!(agent_config_builder, agent_config, tracing);
|
||||
config_override!(agent_config_builder, agent_config, container_policy_path);
|
||||
config_override!(agent_config_builder, agent_config, aa_kbc_params);
|
||||
config_override!(agent_config_builder, agent_config, rest_api);
|
||||
config_override!(agent_config_builder, agent_config, https_proxy);
|
||||
config_override!(agent_config_builder, agent_config, no_proxy);
|
||||
config_override!(agent_config_builder, agent_config, data_integrity);
|
||||
@@ -248,6 +253,7 @@ impl FromStr for AgentConfig {
|
||||
|
||||
impl AgentConfig {
|
||||
#[instrument]
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
pub fn from_cmdline(file: &str, args: Vec<String>) -> Result<AgentConfig> {
|
||||
// If config file specified in the args, generate our config from it
|
||||
let config_position = args.iter().position(|a| a == "--config" || a == "-c");
|
||||
@@ -343,6 +349,7 @@ impl AgentConfig {
|
||||
);
|
||||
|
||||
parse_cmdline_param!(param, AA_KBC_PARAMS, config.aa_kbc_params, get_string_value);
|
||||
parse_cmdline_param!(param, REST_API_OPTION, config.rest_api, get_string_value);
|
||||
parse_cmdline_param!(param, HTTPS_PROXY, config.https_proxy, get_url_value);
|
||||
parse_cmdline_param!(param, NO_PROXY, config.no_proxy, get_string_value);
|
||||
parse_cmdline_param!(
|
||||
@@ -588,6 +595,7 @@ mod tests {
|
||||
tracing: bool,
|
||||
container_policy_path: &'a str,
|
||||
aa_kbc_params: &'a str,
|
||||
rest_api: &'a str,
|
||||
https_proxy: &'a str,
|
||||
no_proxy: &'a str,
|
||||
data_integrity: bool,
|
||||
@@ -612,6 +620,7 @@ mod tests {
|
||||
tracing: false,
|
||||
container_policy_path: "",
|
||||
aa_kbc_params: "",
|
||||
rest_api: "",
|
||||
https_proxy: "",
|
||||
no_proxy: "",
|
||||
data_integrity: false,
|
||||
@@ -998,6 +1007,21 @@ mod tests {
|
||||
aa_kbc_params: "eaa_kbc::127.0.0.1:50000",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.rest_api=attestation",
|
||||
rest_api: "attestation",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.rest_api=resource",
|
||||
rest_api: "resource",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.rest_api=all",
|
||||
rest_api: "all",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
contents: "agent.https_proxy=http://proxy.url.com:81/",
|
||||
https_proxy: "http://proxy.url.com:81/",
|
||||
@@ -1161,6 +1185,7 @@ mod tests {
|
||||
msg
|
||||
);
|
||||
assert_eq!(d.aa_kbc_params, config.aa_kbc_params, "{}", msg);
|
||||
assert_eq!(d.rest_api, config.rest_api, "{}", msg);
|
||||
assert_eq!(d.https_proxy, config.https_proxy, "{}", msg);
|
||||
assert_eq!(d.no_proxy, config.no_proxy, "{}", msg);
|
||||
assert_eq!(d.data_integrity, config.data_integrity, "{}", msg);
|
||||
@@ -1672,7 +1697,7 @@ Caused by:
|
||||
assert_eq!(config.server_addr, "vsock://8:2048");
|
||||
assert_eq!(
|
||||
config.endpoints.allowed,
|
||||
vec!["CreateContainer".to_string(), "StartContainer".to_string()]
|
||||
["CreateContainer".to_string(), "StartContainer".to_string()]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect()
|
||||
@@ -1720,7 +1745,7 @@ Caused by:
|
||||
// Should be from agent config
|
||||
assert_eq!(
|
||||
config.endpoints.allowed,
|
||||
vec!["CreateContainer".to_string(), "StartContainer".to_string()]
|
||||
["CreateContainer".to_string(), "StartContainer".to_string()]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect()
|
||||
|
||||
@@ -35,9 +35,9 @@ const VM_ROOTFS: &str = "/";
|
||||
const BLOCK: &str = "block";
|
||||
pub const DRIVER_9P_TYPE: &str = "9p";
|
||||
pub const DRIVER_VIRTIOFS_TYPE: &str = "virtio-fs";
|
||||
pub const DRIVER_BLK_TYPE: &str = "blk";
|
||||
pub const DRIVER_BLK_PCI_TYPE: &str = "blk";
|
||||
pub const DRIVER_BLK_CCW_TYPE: &str = "blk-ccw";
|
||||
pub const DRIVER_MMIO_BLK_TYPE: &str = "mmioblk";
|
||||
pub const DRIVER_BLK_MMIO_TYPE: &str = "mmioblk";
|
||||
pub const DRIVER_SCSI_TYPE: &str = "scsi";
|
||||
pub const DRIVER_NVDIMM_TYPE: &str = "nvdimm";
|
||||
pub const DRIVER_EPHEMERAL_TYPE: &str = "ephemeral";
|
||||
@@ -937,9 +937,9 @@ async fn add_device(device: &Device, sandbox: &Arc<Mutex<Sandbox>>) -> Result<Sp
|
||||
}
|
||||
|
||||
match device.type_.as_str() {
|
||||
DRIVER_BLK_TYPE => virtio_blk_device_handler(device, sandbox).await,
|
||||
DRIVER_BLK_PCI_TYPE => virtio_blk_device_handler(device, sandbox).await,
|
||||
DRIVER_BLK_CCW_TYPE => virtio_blk_ccw_device_handler(device, sandbox).await,
|
||||
DRIVER_MMIO_BLK_TYPE => virtiommio_blk_device_handler(device, sandbox).await,
|
||||
DRIVER_BLK_MMIO_TYPE => virtiommio_blk_device_handler(device, sandbox).await,
|
||||
DRIVER_NVDIMM_TYPE => virtio_nvdimm_device_handler(device, sandbox).await,
|
||||
DRIVER_SCSI_TYPE => virtio_scsi_device_handler(device, sandbox).await,
|
||||
DRIVER_VFIO_PCI_GK_TYPE | DRIVER_VFIO_PCI_TYPE => {
|
||||
@@ -1469,6 +1469,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_virtio_blk_matcher() {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
let devname = "vda";
|
||||
@@ -1553,6 +1554,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_scsi_block_matcher() {
|
||||
let root_bus = create_pci_root_bus_path();
|
||||
let devname = "sda";
|
||||
@@ -1583,6 +1585,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_vfio_matcher() {
|
||||
let grpa = IommuGroup(1);
|
||||
let grpb = IommuGroup(22);
|
||||
@@ -1604,6 +1607,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[allow(clippy::redundant_clone)]
|
||||
async fn test_mmio_block_matcher() {
|
||||
let devname_a = "vda";
|
||||
let devname_b = "vdb";
|
||||
|
||||
@@ -5,88 +5,90 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU16, Ordering};
|
||||
use std::sync::atomic::{AtomicU16, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use image_rs::image::ImageClient;
|
||||
use protocols::image;
|
||||
use tokio::sync::Mutex;
|
||||
use ttrpc::{self, error::get_rpc_status as ttrpc_error};
|
||||
|
||||
use crate::rpc::{verify_cid, CONTAINER_BASE};
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::AGENT_CONFIG;
|
||||
|
||||
use image_rs::image::ImageClient;
|
||||
use std::io::Write;
|
||||
// A marker to merge container spec for images pulled inside guest.
|
||||
const ANNO_K8S_IMAGE_NAME: &str = "io.kubernetes.cri.image-name";
|
||||
|
||||
const AA_PATH: &str = "/usr/local/bin/attestation-agent";
|
||||
|
||||
const AA_KEYPROVIDER_URI: &str =
|
||||
"unix:///run/confidential-containers/attestation-agent/keyprovider.sock";
|
||||
const AA_GETRESOURCE_URI: &str =
|
||||
"unix:///run/confidential-containers/attestation-agent/getresource.sock";
|
||||
|
||||
const OCICRYPT_CONFIG_PATH: &str = "/tmp/ocicrypt_config.json";
|
||||
// kata rootfs is readonly, use tmpfs before CC storage is implemented.
|
||||
const KATA_CC_IMAGE_WORK_DIR: &str = "/run/image/";
|
||||
const KATA_CC_PAUSE_BUNDLE: &str = "/pause_bundle";
|
||||
const CONFIG_JSON: &str = "config.json";
|
||||
|
||||
#[rustfmt::skip]
|
||||
lazy_static! {
|
||||
pub static ref IMAGE_SERVICE: Mutex<Option<ImageService>> = Mutex::new(None);
|
||||
}
|
||||
|
||||
// Convenience function to obtain the scope logger.
|
||||
fn sl() -> slog::Logger {
|
||||
slog_scope::logger().new(o!("subsystem" => "cgroups"))
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ImageService {
|
||||
sandbox: Arc<Mutex<Sandbox>>,
|
||||
attestation_agent_started: AtomicBool,
|
||||
image_client: Arc<Mutex<ImageClient>>,
|
||||
images: Arc<Mutex<HashMap<String, String>>>,
|
||||
container_count: Arc<AtomicU16>,
|
||||
}
|
||||
|
||||
impl ImageService {
|
||||
pub async fn new(sandbox: Arc<Mutex<Sandbox>>) -> Self {
|
||||
pub fn new() -> Self {
|
||||
env::set_var("CC_IMAGE_WORK_DIR", KATA_CC_IMAGE_WORK_DIR);
|
||||
|
||||
let mut image_client = ImageClient::default();
|
||||
|
||||
let image_policy_file = &AGENT_CONFIG.image_policy_file;
|
||||
if !image_policy_file.is_empty() {
|
||||
image_client.config.file_paths.sigstore_config = image_policy_file.clone();
|
||||
if !AGENT_CONFIG.image_policy_file.is_empty() {
|
||||
image_client.config.file_paths.policy_path = AGENT_CONFIG.image_policy_file.clone();
|
||||
}
|
||||
|
||||
let simple_signing_sigstore_config = &AGENT_CONFIG.simple_signing_sigstore_config;
|
||||
if !simple_signing_sigstore_config.is_empty() {
|
||||
image_client.config.file_paths.sigstore_config = simple_signing_sigstore_config.clone();
|
||||
if !AGENT_CONFIG.simple_signing_sigstore_config.is_empty() {
|
||||
image_client.config.file_paths.sigstore_config =
|
||||
AGENT_CONFIG.simple_signing_sigstore_config.clone();
|
||||
}
|
||||
|
||||
let image_registry_auth_file = &AGENT_CONFIG.image_registry_auth_file;
|
||||
if !image_registry_auth_file.is_empty() {
|
||||
image_client.config.file_paths.auth_file = image_registry_auth_file.clone();
|
||||
if !AGENT_CONFIG.image_registry_auth_file.is_empty() {
|
||||
image_client.config.file_paths.auth_file =
|
||||
AGENT_CONFIG.image_registry_auth_file.clone();
|
||||
}
|
||||
|
||||
Self {
|
||||
sandbox,
|
||||
attestation_agent_started: AtomicBool::new(false),
|
||||
image_client: Arc::new(Mutex::new(image_client)),
|
||||
images: Arc::new(Mutex::new(HashMap::new())),
|
||||
container_count: Arc::new(AtomicU16::new(0)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the singleton instance of image service.
|
||||
pub async fn singleton() -> Result<ImageService> {
|
||||
IMAGE_SERVICE
|
||||
.lock()
|
||||
.await
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow!("image service is uninitialized"))
|
||||
}
|
||||
|
||||
// pause image is packaged in rootfs for CC
|
||||
fn unpack_pause_image(cid: &str) -> Result<()> {
|
||||
fn unpack_pause_image(cid: &str, target_subpath: &str) -> Result<String> {
|
||||
let cc_pause_bundle = Path::new(KATA_CC_PAUSE_BUNDLE);
|
||||
if !cc_pause_bundle.exists() {
|
||||
return Err(anyhow!("Pause image not present in rootfs"));
|
||||
}
|
||||
|
||||
info!(sl(), "use guest pause image cid {:?}", cid);
|
||||
let pause_bundle = Path::new(CONTAINER_BASE).join(cid);
|
||||
let pause_bundle = Path::new(CONTAINER_BASE).join(cid).join(target_subpath);
|
||||
let pause_rootfs = pause_bundle.join("rootfs");
|
||||
let pause_config = pause_bundle.join(CONFIG_JSON);
|
||||
let pause_binary = pause_rootfs.join("pause");
|
||||
@@ -101,35 +103,7 @@ impl ImageService {
|
||||
fs::copy(cc_pause_bundle.join("rootfs").join("pause"), pause_binary)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// If we fail to start the AA, ocicrypt won't be able to unwrap keys
|
||||
// and container decryption will fail.
|
||||
fn init_attestation_agent() -> Result<()> {
|
||||
let config_path = OCICRYPT_CONFIG_PATH;
|
||||
|
||||
// The image will need to be encrypted using a keyprovider
|
||||
// that has the same name (at least according to the config).
|
||||
let ocicrypt_config = serde_json::json!({
|
||||
"key-providers": {
|
||||
"attestation-agent":{
|
||||
"ttrpc":AA_KEYPROVIDER_URI
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let mut config_file = fs::File::create(config_path)?;
|
||||
config_file.write_all(ocicrypt_config.to_string().as_bytes())?;
|
||||
|
||||
// The Attestation Agent will run for the duration of the guest.
|
||||
Command::new(AA_PATH)
|
||||
.arg("--keyprovider_sock")
|
||||
.arg(AA_KEYPROVIDER_URI)
|
||||
.arg("--getresource_sock")
|
||||
.arg(AA_GETRESOURCE_URI)
|
||||
.spawn()?;
|
||||
Ok(())
|
||||
Ok(pause_rootfs.display().to_string())
|
||||
}
|
||||
|
||||
/// Determines the container id (cid) to use for a given request.
|
||||
@@ -153,41 +127,21 @@ impl ImageService {
|
||||
Ok(cid)
|
||||
}
|
||||
|
||||
async fn pull_image(&self, req: &image::PullImageRequest) -> Result<String> {
|
||||
env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", OCICRYPT_CONFIG_PATH);
|
||||
|
||||
/// Set proxy environment from AGENT_CONFIG
|
||||
fn set_proxy_env_vars() {
|
||||
let https_proxy = &AGENT_CONFIG.https_proxy;
|
||||
if !https_proxy.is_empty() {
|
||||
env::set_var("HTTPS_PROXY", https_proxy);
|
||||
}
|
||||
|
||||
let no_proxy = &AGENT_CONFIG.no_proxy;
|
||||
if !no_proxy.is_empty() {
|
||||
env::set_var("NO_PROXY", no_proxy);
|
||||
}
|
||||
}
|
||||
|
||||
let cid = self.cid_from_request(req)?;
|
||||
let image = req.image();
|
||||
if cid.starts_with("pause") {
|
||||
Self::unpack_pause_image(&cid)?;
|
||||
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
sandbox.images.insert(String::from(image), cid);
|
||||
return Ok(image.to_owned());
|
||||
}
|
||||
|
||||
/// init atestation agent and read config from AGENT_CONFIG
|
||||
async fn get_security_config(&self) -> Result<String> {
|
||||
let aa_kbc_params = &AGENT_CONFIG.aa_kbc_params;
|
||||
if !aa_kbc_params.is_empty() {
|
||||
match self.attestation_agent_started.compare_exchange_weak(
|
||||
false,
|
||||
true,
|
||||
Ordering::SeqCst,
|
||||
Ordering::SeqCst,
|
||||
) {
|
||||
Ok(_) => Self::init_attestation_agent()?,
|
||||
Err(_) => info!(sl(), "Attestation Agent already running"),
|
||||
}
|
||||
}
|
||||
// If the attestation-agent is being used, then enable the authenticated credentials support
|
||||
info!(
|
||||
sl(),
|
||||
@@ -195,6 +149,7 @@ impl ImageService {
|
||||
!aa_kbc_params.is_empty()
|
||||
);
|
||||
self.image_client.lock().await.config.auth = !aa_kbc_params.is_empty();
|
||||
let decrypt_config = format!("provider:attestation-agent:{}", aa_kbc_params);
|
||||
|
||||
// Read enable signature verification from the agent config and set it in the image_client
|
||||
let enable_signature_verification = &AGENT_CONFIG.enable_signature_verification;
|
||||
@@ -203,24 +158,24 @@ impl ImageService {
|
||||
"enable_signature_verification set to: {}", enable_signature_verification
|
||||
);
|
||||
self.image_client.lock().await.config.security_validate = *enable_signature_verification;
|
||||
Ok(decrypt_config)
|
||||
}
|
||||
|
||||
let source_creds = (!req.source_creds().is_empty()).then(|| req.source_creds());
|
||||
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(&cid);
|
||||
fs::create_dir_all(&bundle_path)?;
|
||||
|
||||
let decrypt_config = format!("provider:attestation-agent:{}", aa_kbc_params);
|
||||
|
||||
info!(sl(), "pull image {:?}, bundle path {:?}", cid, bundle_path);
|
||||
// Image layers will store at KATA_CC_IMAGE_WORK_DIR, generated bundles
|
||||
// with rootfs and config.json will store under CONTAINER_BASE/cid.
|
||||
/// Call image-rs to pull and unpack image.
|
||||
async fn common_image_pull(
|
||||
&self,
|
||||
image: &str,
|
||||
bundle_path: &Path,
|
||||
decrypt_config: &str,
|
||||
source_creds: Option<&str>,
|
||||
cid: &str,
|
||||
) -> Result<()> {
|
||||
let res = self
|
||||
.image_client
|
||||
.lock()
|
||||
.await
|
||||
.pull_image(image, &bundle_path, &source_creds, &Some(&decrypt_config))
|
||||
.pull_image(image, bundle_path, &source_creds, &Some(decrypt_config))
|
||||
.await;
|
||||
|
||||
match res {
|
||||
Ok(image) => {
|
||||
info!(
|
||||
@@ -239,11 +194,146 @@ impl ImageService {
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
self.add_image(String::from(image), String::from(cid)).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
let mut sandbox = self.sandbox.lock().await;
|
||||
sandbox.images.insert(String::from(image), cid);
|
||||
/// Pull image when creating container and return the bundle path with rootfs.
|
||||
pub async fn pull_image_for_container(
|
||||
&self,
|
||||
image: &str,
|
||||
cid: &str,
|
||||
image_metadata: &HashMap<String, String>,
|
||||
) -> Result<String> {
|
||||
info!(sl(), "image metadata: {:?}", image_metadata);
|
||||
Self::set_proxy_env_vars();
|
||||
let is_sandbox = if let Some(value) = image_metadata.get("io.kubernetes.cri.container-type")
|
||||
{
|
||||
value == "sandbox"
|
||||
} else if let Some(value) = image_metadata.get("io.kubernetes.cri-o.ContainerType") {
|
||||
value == "sandbox"
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if is_sandbox {
|
||||
let mount_path = Self::unpack_pause_image(cid, "pause")?;
|
||||
self.add_image(String::from(image), String::from(cid)).await;
|
||||
return Ok(mount_path);
|
||||
}
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(cid).join("images");
|
||||
fs::create_dir_all(&bundle_path)?;
|
||||
info!(sl(), "pull image {:?}, bundle path {:?}", cid, bundle_path);
|
||||
|
||||
let decrypt_config = self.get_security_config().await?;
|
||||
|
||||
let source_creds = None; // You need to determine how to obtain this.
|
||||
|
||||
self.common_image_pull(image, &bundle_path, &decrypt_config, source_creds, cid)
|
||||
.await?;
|
||||
Ok(format! {"{}/rootfs",bundle_path.display()})
|
||||
}
|
||||
|
||||
/// Pull image when recieving the PullImageRequest and return the image digest.
|
||||
async fn pull_image(&self, req: &image::PullImageRequest) -> Result<String> {
|
||||
Self::set_proxy_env_vars();
|
||||
let cid = self.cid_from_request(req)?;
|
||||
let image = req.image();
|
||||
if cid.starts_with("pause") {
|
||||
Self::unpack_pause_image(&cid, "")?;
|
||||
self.add_image(String::from(image), cid).await;
|
||||
return Ok(image.to_owned());
|
||||
}
|
||||
|
||||
// Image layers will store at KATA_CC_IMAGE_WORK_DIR, generated bundles
|
||||
// with rootfs and config.json will store under CONTAINER_BASE/cid.
|
||||
let bundle_path = Path::new(CONTAINER_BASE).join(&cid);
|
||||
fs::create_dir_all(&bundle_path)?;
|
||||
|
||||
let decrypt_config = self.get_security_config().await?;
|
||||
let source_creds = (!req.source_creds().is_empty()).then(|| req.source_creds());
|
||||
|
||||
self.common_image_pull(
|
||||
image,
|
||||
&bundle_path,
|
||||
&decrypt_config,
|
||||
source_creds,
|
||||
cid.clone().as_str(),
|
||||
)
|
||||
.await?;
|
||||
Ok(image.to_owned())
|
||||
}
|
||||
|
||||
async fn add_image(&self, image: String, cid: String) {
|
||||
self.images.lock().await.insert(image, cid);
|
||||
}
|
||||
|
||||
// When being passed an image name through a container annotation, merge its
|
||||
// corresponding bundle OCI specification into the passed container creation one.
|
||||
pub async fn merge_bundle_oci(&self, container_oci: &mut oci::Spec) -> Result<()> {
|
||||
if let Some(image_name) = container_oci
|
||||
.annotations
|
||||
.get(&ANNO_K8S_IMAGE_NAME.to_string())
|
||||
{
|
||||
let images = self.images.lock().await;
|
||||
if let Some(container_id) = images.get(image_name) {
|
||||
let image_oci_config_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(CONFIG_JSON);
|
||||
debug!(
|
||||
sl(),
|
||||
"Image bundle config path: {:?}", image_oci_config_path
|
||||
);
|
||||
|
||||
let image_oci =
|
||||
oci::Spec::load(image_oci_config_path.to_str().ok_or_else(|| {
|
||||
anyhow!(
|
||||
"Invalid container image OCI config path {:?}",
|
||||
image_oci_config_path
|
||||
)
|
||||
})?)
|
||||
.context("load image bundle")?;
|
||||
|
||||
if let Some(container_root) = container_oci.root.as_mut() {
|
||||
if let Some(image_root) = image_oci.root.as_ref() {
|
||||
let root_path = Path::new(CONTAINER_BASE)
|
||||
.join(container_id)
|
||||
.join(image_root.path.clone());
|
||||
container_root.path =
|
||||
String::from(root_path.to_str().ok_or_else(|| {
|
||||
anyhow!("Invalid container image root path {:?}", root_path)
|
||||
})?);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(container_process) = container_oci.process.as_mut() {
|
||||
if let Some(image_process) = image_oci.process.as_ref() {
|
||||
self.merge_oci_process(container_process, image_process);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Partially merge an OCI process specification into another one.
|
||||
fn merge_oci_process(&self, target: &mut oci::Process, source: &oci::Process) {
|
||||
if target.args.is_empty() && !source.args.is_empty() {
|
||||
target.args.append(&mut source.args.clone());
|
||||
}
|
||||
|
||||
if target.cwd == "/" && source.cwd != "/" {
|
||||
target.cwd = String::from(&source.cwd);
|
||||
}
|
||||
|
||||
for source_env in &source.env {
|
||||
let variable_name: Vec<&str> = source_env.split('=').collect();
|
||||
if !target.env.iter().any(|i| i.contains(variable_name[0])) {
|
||||
target.env.push(source_env.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
@@ -269,10 +359,7 @@ impl protocols::image_ttrpc_async::Image for ImageService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::ImageService;
|
||||
use crate::sandbox::Sandbox;
|
||||
use protocols::image;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cid_from_request() {
|
||||
@@ -345,9 +432,7 @@ mod tests {
|
||||
},
|
||||
];
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let s = Sandbox::new(&logger).unwrap();
|
||||
let image_service = ImageService::new(Arc::new(Mutex::new(s))).await;
|
||||
let image_service = ImageService::new();
|
||||
for case in &cases {
|
||||
let mut req = image::PullImageRequest::new();
|
||||
req.set_image(case.image.to_string());
|
||||
@@ -363,4 +448,139 @@ mod tests {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_merge_cwd() {
|
||||
#[derive(Debug)]
|
||||
struct TestData<'a> {
|
||||
container_process_cwd: &'a str,
|
||||
image_process_cwd: &'a str,
|
||||
expected: &'a str,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
// Image cwd should override blank container cwd
|
||||
// TODO - how can we tell the user didn't specifically set it to `/` vs not setting at all? Is that scenario valid?
|
||||
TestData {
|
||||
container_process_cwd: "/",
|
||||
image_process_cwd: "/imageDir",
|
||||
expected: "/imageDir",
|
||||
},
|
||||
// Container cwd should override image cwd
|
||||
TestData {
|
||||
container_process_cwd: "/containerDir",
|
||||
image_process_cwd: "/imageDir",
|
||||
expected: "/containerDir",
|
||||
},
|
||||
// Container cwd should override blank image cwd
|
||||
TestData {
|
||||
container_process_cwd: "/containerDir",
|
||||
image_process_cwd: "/",
|
||||
expected: "/containerDir",
|
||||
},
|
||||
];
|
||||
|
||||
let image_service = ImageService::new();
|
||||
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
let mut container_process = oci::Process {
|
||||
cwd: d.container_process_cwd.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let image_process = oci::Process {
|
||||
cwd: d.image_process_cwd.to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
image_service.merge_oci_process(&mut container_process, &image_process);
|
||||
|
||||
assert_eq!(d.expected, container_process.cwd, "{}", msg);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_merge_env() {
|
||||
#[derive(Debug)]
|
||||
struct TestData {
|
||||
container_process_env: Vec<String>,
|
||||
image_process_env: Vec<String>,
|
||||
expected: Vec<String>,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
// Test that the pods environment overrides the images
|
||||
TestData {
|
||||
container_process_env: vec!["ISPRODUCTION=true".to_string()],
|
||||
image_process_env: vec!["ISPRODUCTION=false".to_string()],
|
||||
expected: vec!["ISPRODUCTION=true".to_string()],
|
||||
},
|
||||
// Test that multiple environment variables can be overrided
|
||||
TestData {
|
||||
container_process_env: vec![
|
||||
"ISPRODUCTION=true".to_string(),
|
||||
"ISDEVELOPMENT=false".to_string(),
|
||||
],
|
||||
image_process_env: vec![
|
||||
"ISPRODUCTION=false".to_string(),
|
||||
"ISDEVELOPMENT=true".to_string(),
|
||||
],
|
||||
expected: vec![
|
||||
"ISPRODUCTION=true".to_string(),
|
||||
"ISDEVELOPMENT=false".to_string(),
|
||||
],
|
||||
},
|
||||
// Test that when none of the variables match do not override them
|
||||
TestData {
|
||||
container_process_env: vec!["ANOTHERENV=TEST".to_string()],
|
||||
image_process_env: vec![
|
||||
"ISPRODUCTION=false".to_string(),
|
||||
"ISDEVELOPMENT=true".to_string(),
|
||||
],
|
||||
expected: vec![
|
||||
"ANOTHERENV=TEST".to_string(),
|
||||
"ISPRODUCTION=false".to_string(),
|
||||
"ISDEVELOPMENT=true".to_string(),
|
||||
],
|
||||
},
|
||||
// Test a mix of both overriding and not
|
||||
TestData {
|
||||
container_process_env: vec![
|
||||
"ANOTHERENV=TEST".to_string(),
|
||||
"ISPRODUCTION=true".to_string(),
|
||||
],
|
||||
image_process_env: vec![
|
||||
"ISPRODUCTION=false".to_string(),
|
||||
"ISDEVELOPMENT=true".to_string(),
|
||||
],
|
||||
expected: vec![
|
||||
"ANOTHERENV=TEST".to_string(),
|
||||
"ISPRODUCTION=true".to_string(),
|
||||
"ISDEVELOPMENT=true".to_string(),
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
let image_service = ImageService::new();
|
||||
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
let mut container_process = oci::Process {
|
||||
env: d.container_process_env.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let image_process = oci::Process {
|
||||
env: d.image_process_env.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
image_service.merge_oci_process(&mut container_process, &image_process);
|
||||
|
||||
assert_eq!(d.expected, container_process.env, "{}", msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ pub fn create_pci_root_bus_path() -> String {
|
||||
|
||||
// check if there is pci bus path for acpi
|
||||
acpi_sysfs_dir.push_str(&acpi_root_bus_path);
|
||||
if let Ok(_) = fs::metadata(&acpi_sysfs_dir) {
|
||||
if fs::metadata(&acpi_sysfs_dir).is_ok() {
|
||||
return acpi_root_bus_path;
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ extern crate slog;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use cfg_if::cfg_if;
|
||||
use clap::{AppSettings, Parser};
|
||||
use const_format::concatcp;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType, VsockAddr};
|
||||
use nix::unistd::{self, dup, Pid};
|
||||
@@ -32,9 +33,12 @@ use std::os::unix::fs as unixfs;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
use std::process::exit;
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use tracing::{instrument, span};
|
||||
|
||||
#[cfg(feature = "confidential-data-hub")]
|
||||
mod cdh;
|
||||
mod config;
|
||||
mod console;
|
||||
mod device;
|
||||
@@ -48,6 +52,7 @@ mod pci;
|
||||
pub mod random;
|
||||
mod sandbox;
|
||||
mod signal;
|
||||
mod storage;
|
||||
mod uevent;
|
||||
mod util;
|
||||
mod version;
|
||||
@@ -83,6 +88,27 @@ cfg_if! {
|
||||
|
||||
const NAME: &str = "kata-agent";
|
||||
|
||||
const OCICRYPT_CONFIG_PATH: &str = "/tmp/ocicrypt_config.json";
|
||||
const AA_PATH: &str = "/usr/local/bin/attestation-agent";
|
||||
const AA_UNIX_SOCKET_DIR: &str = "/run/confidential-containers/attestation-agent/";
|
||||
const UNIX_SOCKET_PREFIX: &str = "unix://";
|
||||
const AA_KEYPROVIDER_URI: &str =
|
||||
concatcp!(UNIX_SOCKET_PREFIX, AA_UNIX_SOCKET_DIR, "keyprovider.sock");
|
||||
const AA_GETRESOURCE_URI: &str =
|
||||
concatcp!(UNIX_SOCKET_PREFIX, AA_UNIX_SOCKET_DIR, "getresource.sock");
|
||||
const AA_ATTESTATION_SOCKET: &str = concatcp!(AA_UNIX_SOCKET_DIR, "attestation-agent.sock");
|
||||
const AA_ATTESTATION_URI: &str = concatcp!(UNIX_SOCKET_PREFIX, AA_ATTESTATION_SOCKET);
|
||||
|
||||
const DEFAULT_LAUNCH_PROCESS_TIMEOUT: i32 = 6;
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "confidential-data-hub")] {
|
||||
const CDH_PATH: &str = "/usr/local/bin/confidential-data-hub";
|
||||
const CDH_SOCKET: &str = "/run/confidential-containers/cdh.sock";
|
||||
const API_SERVER_PATH: &str = "/usr/local/bin/api-server-rest";
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref AGENT_CONFIG: AgentConfig =
|
||||
// Note: We can't do AgentOpts.parse() here to send through the processed arguments to AgentConfig
|
||||
@@ -344,6 +370,10 @@ async fn start_sandbox(
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
sandbox.lock().await.sender = Some(tx);
|
||||
|
||||
if !config.aa_kbc_params.is_empty() {
|
||||
init_attestation_agent(logger, config)?;
|
||||
}
|
||||
|
||||
// vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode).await?;
|
||||
server.start().await?;
|
||||
@@ -354,6 +384,110 @@ async fn start_sandbox(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// If we fail to start the AA, ocicrypt won't be able to unwrap keys
|
||||
// and container decryption will fail.
|
||||
fn init_attestation_agent(logger: &Logger, _config: &AgentConfig) -> Result<()> {
|
||||
let config_path = OCICRYPT_CONFIG_PATH;
|
||||
|
||||
// The image will need to be encrypted using a keyprovider
|
||||
// that has the same name (at least according to the config).
|
||||
let ocicrypt_config = serde_json::json!({
|
||||
"key-providers": {
|
||||
"attestation-agent":{
|
||||
"ttrpc":AA_KEYPROVIDER_URI
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
fs::write(config_path, ocicrypt_config.to_string().as_bytes())?;
|
||||
|
||||
env::set_var("OCICRYPT_KEYPROVIDER_CONFIG", config_path);
|
||||
|
||||
// The Attestation Agent will run for the duration of the guest.
|
||||
launch_process(
|
||||
logger,
|
||||
AA_PATH,
|
||||
&vec![
|
||||
"--keyprovider_sock",
|
||||
AA_KEYPROVIDER_URI,
|
||||
"--getresource_sock",
|
||||
AA_GETRESOURCE_URI,
|
||||
"--attestation_sock",
|
||||
AA_ATTESTATION_URI,
|
||||
],
|
||||
AA_ATTESTATION_SOCKET,
|
||||
DEFAULT_LAUNCH_PROCESS_TIMEOUT,
|
||||
)
|
||||
.map_err(|e| anyhow!("launch_process {} failed: {:?}", AA_PATH, e))?;
|
||||
|
||||
#[cfg(feature = "confidential-data-hub")]
|
||||
{
|
||||
if let Err(e) = launch_process(
|
||||
logger,
|
||||
CDH_PATH,
|
||||
&vec![],
|
||||
CDH_SOCKET,
|
||||
DEFAULT_LAUNCH_PROCESS_TIMEOUT,
|
||||
) {
|
||||
error!(logger, "launch_process {} failed: {:?}", CDH_PATH, e);
|
||||
} else if !_config.rest_api.is_empty() {
|
||||
if let Err(e) = launch_process(
|
||||
logger,
|
||||
API_SERVER_PATH,
|
||||
&vec!["--features", &_config.rest_api],
|
||||
"",
|
||||
0,
|
||||
) {
|
||||
error!(logger, "launch_process {} failed: {:?}", API_SERVER_PATH, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn wait_for_path_to_exist(logger: &Logger, path: &str, timeout_secs: i32) -> Result<()> {
|
||||
let p = Path::new(path);
|
||||
let mut attempts = 0;
|
||||
loop {
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
if p.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
if attempts >= timeout_secs {
|
||||
break;
|
||||
}
|
||||
attempts += 1;
|
||||
info!(
|
||||
logger,
|
||||
"waiting for {} to exist (attempts={})", path, attempts
|
||||
);
|
||||
}
|
||||
|
||||
Err(anyhow!("wait for {} to exist timeout.", path))
|
||||
}
|
||||
|
||||
fn launch_process(
|
||||
logger: &Logger,
|
||||
path: &str,
|
||||
args: &Vec<&str>,
|
||||
unix_socket_path: &str,
|
||||
timeout_secs: i32,
|
||||
) -> Result<()> {
|
||||
if !Path::new(path).exists() {
|
||||
return Err(anyhow!("path {} does not exist.", path));
|
||||
}
|
||||
if !unix_socket_path.is_empty() && Path::new(unix_socket_path).exists() {
|
||||
fs::remove_file(unix_socket_path)?;
|
||||
}
|
||||
Command::new(path).args(args).spawn()?;
|
||||
if !unix_socket_path.is_empty() && timeout_secs > 0 {
|
||||
wait_for_path_to_exist(logger, unix_socket_path, timeout_secs)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// init_agent_as_init will do the initializations such as setting up the rootfs
|
||||
// when this agent has been run as the init process.
|
||||
fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<()> {
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -7,14 +7,14 @@ use anyhow::{anyhow, Result};
|
||||
use nix::mount::MsFlags;
|
||||
use nix::sched::{unshare, CloneFlags};
|
||||
use nix::unistd::{getpid, gettid};
|
||||
use slog::Logger;
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::path::{Path, PathBuf};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::mount::{baremount, FLAGS};
|
||||
use slog::Logger;
|
||||
use crate::mount::baremount;
|
||||
|
||||
const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns";
|
||||
pub const NSTYPEIPC: &str = "ipc";
|
||||
@@ -116,15 +116,7 @@ impl Namespace {
|
||||
// Bind mount the new namespace from the current thread onto the mount point to persist it.
|
||||
|
||||
let mut flags = MsFlags::empty();
|
||||
|
||||
if let Some(x) = FLAGS.get("rbind") {
|
||||
let (clear, f) = *x;
|
||||
if clear {
|
||||
flags &= !f;
|
||||
} else {
|
||||
flags |= f;
|
||||
}
|
||||
};
|
||||
flags |= MsFlags::MS_BIND | MsFlags::MS_REC;
|
||||
|
||||
baremount(source, destination, "none", flags, "", &logger).map_err(|e| {
|
||||
anyhow!(
|
||||
|
||||
@@ -29,7 +29,7 @@ impl Network {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
|
||||
pub fn setup_guest_dns(logger: Logger, dns_list: &[String]) -> Result<()> {
|
||||
do_setup_guest_dns(
|
||||
logger,
|
||||
dns_list,
|
||||
@@ -38,7 +38,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
|
||||
)
|
||||
}
|
||||
|
||||
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
|
||||
fn do_setup_guest_dns(logger: Logger, dns_list: &[String], src: &str, dst: &str) -> Result<()> {
|
||||
let logger = logger.new(o!( "subsystem" => "network"));
|
||||
|
||||
if dns_list.is_empty() {
|
||||
@@ -124,7 +124,7 @@ mod tests {
|
||||
.expect("failed to write file contents");
|
||||
|
||||
// call do_setup_guest_dns
|
||||
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
|
||||
let result = do_setup_guest_dns(logger, &dns, src_filename, dst_filename);
|
||||
|
||||
assert!(result.is_ok(), "result should be ok, but {:?}", result);
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,16 +3,20 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::linux_abi::*;
|
||||
use crate::mount::{get_mount_fs_type, remove_mounts, TYPE_ROOTFS};
|
||||
use crate::namespace::Namespace;
|
||||
use crate::netlink::Handle;
|
||||
use crate::network::Network;
|
||||
use crate::pci;
|
||||
use crate::uevent::{Uevent, UeventMatcher};
|
||||
use crate::watcher::BindWatcher;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::fs;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::{thread, time};
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::cpu::CpuSet;
|
||||
use kata_types::mount::StorageDevice;
|
||||
use libc::pid_t;
|
||||
use oci::{Hook, Hooks};
|
||||
use protocols::agent::OnlineCPUMemRequest;
|
||||
@@ -22,22 +26,69 @@ use rustjail::container::BaseContainer;
|
||||
use rustjail::container::LinuxContainer;
|
||||
use rustjail::process::Process;
|
||||
use slog::Logger;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::{thread, time};
|
||||
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::linux_abi::*;
|
||||
use crate::mount::{get_mount_fs_type, TYPE_ROOTFS};
|
||||
use crate::namespace::Namespace;
|
||||
use crate::netlink::Handle;
|
||||
use crate::network::Network;
|
||||
use crate::pci;
|
||||
use crate::storage::StorageDeviceGeneric;
|
||||
use crate::uevent::{Uevent, UeventMatcher};
|
||||
use crate::watcher::BindWatcher;
|
||||
|
||||
pub const ERR_INVALID_CONTAINER_ID: &str = "Invalid container id";
|
||||
|
||||
type UeventWatcher = (Box<dyn UeventMatcher>, oneshot::Sender<Uevent>);
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct StorageState {
|
||||
count: Arc<AtomicU32>,
|
||||
device: Arc<dyn StorageDevice>,
|
||||
}
|
||||
|
||||
impl Debug for StorageState {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("StorageState").finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageState {
|
||||
fn new() -> Self {
|
||||
StorageState {
|
||||
count: Arc::new(AtomicU32::new(1)),
|
||||
device: Arc::new(StorageDeviceGeneric::default()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_device(device: Arc<dyn StorageDevice>) -> Self {
|
||||
Self {
|
||||
count: Arc::new(AtomicU32::new(1)),
|
||||
device,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn path(&self) -> Option<&str> {
|
||||
self.device.path()
|
||||
}
|
||||
|
||||
pub async fn ref_count(&self) -> u32 {
|
||||
self.count.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
async fn inc_ref_count(&self) {
|
||||
self.count.fetch_add(1, Ordering::Acquire);
|
||||
}
|
||||
|
||||
async fn dec_and_test_ref_count(&self) -> bool {
|
||||
self.count.fetch_sub(1, Ordering::AcqRel) == 1
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Sandbox {
|
||||
pub logger: Logger,
|
||||
@@ -52,7 +103,7 @@ pub struct Sandbox {
|
||||
pub shared_utsns: Namespace,
|
||||
pub shared_ipcns: Namespace,
|
||||
pub sandbox_pidns: Option<Namespace>,
|
||||
pub storages: HashMap<String, u32>,
|
||||
pub storages: HashMap<String, StorageState>,
|
||||
pub running: bool,
|
||||
pub no_pivot_root: bool,
|
||||
pub sender: Option<tokio::sync::oneshot::Sender<i32>>,
|
||||
@@ -62,7 +113,6 @@ pub struct Sandbox {
|
||||
pub event_tx: Option<Sender<String>>,
|
||||
pub bind_watcher: BindWatcher,
|
||||
pub pcimap: HashMap<pci::Address, pci::Address>,
|
||||
pub images: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl Sandbox {
|
||||
@@ -96,89 +146,63 @@ impl Sandbox {
|
||||
event_tx: Some(tx),
|
||||
bind_watcher: BindWatcher::new(),
|
||||
pcimap: HashMap::new(),
|
||||
images: HashMap::new(),
|
||||
})
|
||||
}
|
||||
|
||||
// set_sandbox_storage sets the sandbox level reference
|
||||
// counter for the sandbox storage.
|
||||
// This method also returns a boolean to let
|
||||
// callers know if the storage already existed or not.
|
||||
// It will return true if storage is new.
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
/// Add a new storage object or increase reference count of existing one.
|
||||
/// The caller may detect new storage object by checking `StorageState.refcount == 1`.
|
||||
#[instrument]
|
||||
pub fn set_sandbox_storage(&mut self, path: &str) -> bool {
|
||||
match self.storages.get_mut(path) {
|
||||
None => {
|
||||
self.storages.insert(path.to_string(), 1);
|
||||
true
|
||||
pub async fn add_sandbox_storage(&mut self, path: &str) -> StorageState {
|
||||
match self.storages.entry(path.to_string()) {
|
||||
Entry::Occupied(e) => {
|
||||
let state = e.get().clone();
|
||||
state.inc_ref_count().await;
|
||||
state
|
||||
}
|
||||
Some(count) => {
|
||||
*count += 1;
|
||||
false
|
||||
Entry::Vacant(e) => {
|
||||
let state = StorageState::new();
|
||||
e.insert(state.clone());
|
||||
state
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unset_sandbox_storage will decrement the sandbox storage
|
||||
// reference counter. If there aren't any containers using
|
||||
// that sandbox storage, this method will remove the
|
||||
// storage reference from the sandbox and return 'true' to
|
||||
// let the caller know that they can clean up the storage
|
||||
// related directories by calling remove_sandbox_storage
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
/// Update the storage device associated with a path.
|
||||
pub fn update_sandbox_storage(
|
||||
&mut self,
|
||||
path: &str,
|
||||
device: Arc<dyn StorageDevice>,
|
||||
) -> std::result::Result<Arc<dyn StorageDevice>, Arc<dyn StorageDevice>> {
|
||||
if !self.storages.contains_key(path) {
|
||||
return Err(device);
|
||||
}
|
||||
|
||||
let state = StorageState::from_device(device);
|
||||
// Safe to unwrap() because we have just ensured existence of entry.
|
||||
let state = self.storages.insert(path.to_string(), state).unwrap();
|
||||
Ok(state.device)
|
||||
}
|
||||
|
||||
/// Decrease reference count and destroy the storage object if reference count reaches zero.
|
||||
/// Returns `Ok(true)` if the reference count has reached zero and the storage object has been
|
||||
/// removed.
|
||||
#[instrument]
|
||||
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
|
||||
match self.storages.get_mut(path) {
|
||||
pub async fn remove_sandbox_storage(&mut self, path: &str) -> Result<bool> {
|
||||
match self.storages.get(path) {
|
||||
None => Err(anyhow!("Sandbox storage with path {} not found", path)),
|
||||
Some(count) => {
|
||||
*count -= 1;
|
||||
if *count < 1 {
|
||||
self.storages.remove(path);
|
||||
return Ok(true);
|
||||
Some(state) => {
|
||||
if state.dec_and_test_ref_count().await {
|
||||
if let Some(storage) = self.storages.remove(path) {
|
||||
storage.device.cleanup()?;
|
||||
}
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remove_sandbox_storage removes the sandbox storage if no
|
||||
// containers are using that storage.
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
#[instrument]
|
||||
pub fn remove_sandbox_storage(&self, path: &str) -> Result<()> {
|
||||
let mounts = vec![path.to_string()];
|
||||
remove_mounts(&mounts)?;
|
||||
// "remove_dir" will fail if the mount point is backed by a read-only filesystem.
|
||||
// This is the case with the device mapper snapshotter, where we mount the block device directly
|
||||
// at the underlying sandbox path which was provided from the base RO kataShared path from the host.
|
||||
if let Err(err) = fs::remove_dir(path) {
|
||||
warn!(self.logger, "failed to remove dir {}, {:?}", path, err);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// unset_and_remove_sandbox_storage unsets the storage from sandbox
|
||||
// and if there are no containers using this storage it will
|
||||
// remove it from the sandbox.
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
#[instrument]
|
||||
pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
|
||||
if self.unset_sandbox_storage(path)? {
|
||||
return self.remove_sandbox_storage(path);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn setup_shared_namespaces(&mut self) -> Result<bool> {
|
||||
// Set up shared IPC namespace
|
||||
@@ -186,22 +210,18 @@ impl Sandbox {
|
||||
.get_ipc()
|
||||
.setup()
|
||||
.await
|
||||
.context("Failed to setup persistent IPC namespace")?;
|
||||
.context("setup persistent IPC namespace")?;
|
||||
|
||||
// // Set up shared UTS namespace
|
||||
self.shared_utsns = Namespace::new(&self.logger)
|
||||
.get_uts(self.hostname.as_str())
|
||||
.setup()
|
||||
.await
|
||||
.context("Failed to setup persistent UTS namespace")?;
|
||||
.context("setup persistent UTS namespace")?;
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
pub fn add_container(&mut self, c: LinuxContainer) {
|
||||
self.containers.insert(c.id.clone(), c);
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn update_shared_pidns(&mut self, c: &LinuxContainer) -> Result<()> {
|
||||
// Populate the shared pid path only if this is an infra container and
|
||||
@@ -226,14 +246,18 @@ impl Sandbox {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn add_container(&mut self, c: LinuxContainer) {
|
||||
self.containers.insert(c.id.clone(), c);
|
||||
}
|
||||
|
||||
pub fn get_container(&mut self, id: &str) -> Option<&mut LinuxContainer> {
|
||||
self.containers.get_mut(id)
|
||||
}
|
||||
|
||||
pub fn find_process(&mut self, pid: pid_t) -> Option<&mut Process> {
|
||||
for (_, c) in self.containers.iter_mut() {
|
||||
if c.processes.get(&pid).is_some() {
|
||||
return c.processes.get_mut(&pid);
|
||||
if let Some(p) = c.processes.get_mut(&pid) {
|
||||
return Some(p);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -282,25 +306,17 @@ impl Sandbox {
|
||||
let guest_cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
|
||||
|
||||
for (_, ctr) in self.containers.iter() {
|
||||
let cpu = ctr
|
||||
.config
|
||||
.spec
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.linux
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.resources
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.cpu
|
||||
.as_ref();
|
||||
let container_cpust = if let Some(c) = cpu { &c.cpus } else { "" };
|
||||
|
||||
info!(self.logger, "updating {}", ctr.id.as_str());
|
||||
ctr.cgroup_manager
|
||||
.as_ref()
|
||||
.update_cpuset_path(guest_cpuset.as_str(), container_cpust)?;
|
||||
if let Some(spec) = ctr.config.spec.as_ref() {
|
||||
if let Some(linux) = spec.linux.as_ref() {
|
||||
if let Some(resources) = linux.resources.as_ref() {
|
||||
if let Some(cpus) = resources.cpu.as_ref() {
|
||||
info!(self.logger, "updating {}", ctr.id.as_str());
|
||||
ctr.cgroup_manager
|
||||
.update_cpuset_path(guest_cpuset.as_str(), &cpus.cpus)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -362,31 +378,28 @@ impl Sandbox {
|
||||
#[instrument]
|
||||
pub async fn run_oom_event_monitor(&self, mut rx: Receiver<String>, container_id: String) {
|
||||
let logger = self.logger.clone();
|
||||
|
||||
if self.event_tx.is_none() {
|
||||
error!(
|
||||
logger,
|
||||
"sandbox.event_tx not found in run_oom_event_monitor"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let tx = self.event_tx.as_ref().unwrap().clone();
|
||||
let tx = match self.event_tx.as_ref() {
|
||||
Some(v) => v.clone(),
|
||||
None => {
|
||||
error!(
|
||||
logger,
|
||||
"sandbox.event_tx not found in run_oom_event_monitor"
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let event = rx.recv().await;
|
||||
// None means the container has exited,
|
||||
// and sender in OOM notifier is dropped.
|
||||
// None means the container has exited, and sender in OOM notifier is dropped.
|
||||
if event.is_none() {
|
||||
return;
|
||||
}
|
||||
info!(logger, "got an OOM event {:?}", event);
|
||||
|
||||
let _ = tx
|
||||
.send(container_id.clone())
|
||||
.await
|
||||
.map_err(|e| error!(logger, "failed to send message: {:?}", e));
|
||||
if let Err(e) = tx.send(container_id.clone()).await {
|
||||
error!(logger, "failed to send message: {:?}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -399,43 +412,40 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
|
||||
|
||||
for e in fs::read_dir(path)? {
|
||||
let entry = e?;
|
||||
let tmpname = entry.file_name();
|
||||
let name = tmpname.to_str().unwrap();
|
||||
let p = entry.path();
|
||||
|
||||
if re.is_match(name) {
|
||||
let file = format!("{}/{}", p.to_str().unwrap(), SYSFS_ONLINE_FILE);
|
||||
info!(logger, "{}", file.as_str());
|
||||
|
||||
let c = fs::read_to_string(file.as_str());
|
||||
if c.is_err() {
|
||||
continue;
|
||||
}
|
||||
let c = c.unwrap();
|
||||
|
||||
if c.trim().contains('0') {
|
||||
let r = fs::write(file.as_str(), "1");
|
||||
if r.is_err() {
|
||||
// Skip direntry which doesn't match the pattern.
|
||||
match entry.file_name().to_str() {
|
||||
None => continue,
|
||||
Some(v) => {
|
||||
if !re.is_match(v) {
|
||||
continue;
|
||||
}
|
||||
count += 1;
|
||||
}
|
||||
};
|
||||
|
||||
if num > 0 && count == num {
|
||||
let p = entry.path().join(SYSFS_ONLINE_FILE);
|
||||
if let Ok(c) = fs::read_to_string(&p) {
|
||||
// Try to online the object in offline state.
|
||||
if c.trim().contains('0') && fs::write(&p, "1").is_ok() && num > 0 {
|
||||
count += 1;
|
||||
if count == num {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if num > 0 {
|
||||
return Ok(count);
|
||||
}
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
#[instrument]
|
||||
fn online_memory(logger: &Logger) -> Result<()> {
|
||||
online_resources(logger, SYSFS_MEMORY_ONLINE_PATH, r"memory[0-9]+", -1)
|
||||
.context("online memory resource")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// max wait for all CPUs to online will use 50 * 100 = 5 seconds.
|
||||
const ONLINE_CPUMEM_WATI_MILLIS: u64 = 50;
|
||||
const ONLINE_CPUMEM_WAIT_MILLIS: u64 = 50;
|
||||
const ONLINE_CPUMEM_MAX_RETRIES: i32 = 100;
|
||||
|
||||
#[instrument]
|
||||
@@ -465,7 +475,7 @@ fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
|
||||
);
|
||||
return Ok(num);
|
||||
}
|
||||
thread::sleep(time::Duration::from_millis(ONLINE_CPUMEM_WATI_MILLIS));
|
||||
thread::sleep(time::Duration::from_millis(ONLINE_CPUMEM_WAIT_MILLIS));
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
@@ -475,13 +485,6 @@ fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
|
||||
))
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
fn online_memory(logger: &Logger) -> Result<()> {
|
||||
online_resources(logger, SYSFS_MEMORY_ONLINE_PATH, r"memory[0-9]+", -1)
|
||||
.context("online memory resource")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn onlined_cpus() -> Result<i32> {
|
||||
let content =
|
||||
fs::read_to_string(SYSFS_CPU_ONLINE_PATH).context("read sysfs cpu online file")?;
|
||||
@@ -526,24 +529,22 @@ mod tests {
|
||||
let tmpdir_path = tmpdir.path().to_str().unwrap();
|
||||
|
||||
// Add a new sandbox storage
|
||||
let new_storage = s.set_sandbox_storage(tmpdir_path);
|
||||
let new_storage = s.add_sandbox_storage(tmpdir_path).await;
|
||||
|
||||
// Check the reference counter
|
||||
let ref_count = s.storages[tmpdir_path];
|
||||
let ref_count = new_storage.ref_count().await;
|
||||
assert_eq!(
|
||||
ref_count, 1,
|
||||
"Invalid refcount, got {} expected 1.",
|
||||
ref_count
|
||||
);
|
||||
assert!(new_storage);
|
||||
|
||||
// Use the existing sandbox storage
|
||||
let new_storage = s.set_sandbox_storage(tmpdir_path);
|
||||
assert!(!new_storage, "Should be false as already exists.");
|
||||
let new_storage = s.add_sandbox_storage(tmpdir_path).await;
|
||||
|
||||
// Since we are using existing storage, the reference counter
|
||||
// should be 2 by now.
|
||||
let ref_count = s.storages[tmpdir_path];
|
||||
let ref_count = new_storage.ref_count().await;
|
||||
assert_eq!(
|
||||
ref_count, 2,
|
||||
"Invalid refcount, got {} expected 2.",
|
||||
@@ -551,52 +552,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn remove_sandbox_storage() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let s = Sandbox::new(&logger).unwrap();
|
||||
|
||||
let tmpdir = Builder::new().tempdir().unwrap();
|
||||
let tmpdir_path = tmpdir.path().to_str().unwrap();
|
||||
|
||||
let srcdir = Builder::new()
|
||||
.prefix("src")
|
||||
.tempdir_in(tmpdir_path)
|
||||
.unwrap();
|
||||
let srcdir_path = srcdir.path().to_str().unwrap();
|
||||
|
||||
let destdir = Builder::new()
|
||||
.prefix("dest")
|
||||
.tempdir_in(tmpdir_path)
|
||||
.unwrap();
|
||||
let destdir_path = destdir.path().to_str().unwrap();
|
||||
|
||||
let emptydir = Builder::new()
|
||||
.prefix("empty")
|
||||
.tempdir_in(tmpdir_path)
|
||||
.unwrap();
|
||||
|
||||
assert!(
|
||||
s.remove_sandbox_storage(srcdir_path).is_err(),
|
||||
"Expect Err as the directory is not a mountpoint"
|
||||
);
|
||||
|
||||
assert!(s.remove_sandbox_storage("").is_err());
|
||||
|
||||
let invalid_dir = emptydir.path().join("invalid");
|
||||
|
||||
assert!(s
|
||||
.remove_sandbox_storage(invalid_dir.to_str().unwrap())
|
||||
.is_err());
|
||||
|
||||
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
|
||||
|
||||
assert!(s.remove_sandbox_storage(destdir_path).is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn unset_and_remove_sandbox_storage() {
|
||||
@@ -606,8 +561,7 @@ mod tests {
|
||||
let mut s = Sandbox::new(&logger).unwrap();
|
||||
|
||||
assert!(
|
||||
s.unset_and_remove_sandbox_storage("/tmp/testEphePath")
|
||||
.is_err(),
|
||||
s.remove_sandbox_storage("/tmp/testEphePath").await.is_err(),
|
||||
"Should fail because sandbox storage doesn't exist"
|
||||
);
|
||||
|
||||
@@ -628,8 +582,12 @@ mod tests {
|
||||
|
||||
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
|
||||
|
||||
assert!(s.set_sandbox_storage(destdir_path));
|
||||
assert!(s.unset_and_remove_sandbox_storage(destdir_path).is_ok());
|
||||
s.add_sandbox_storage(destdir_path).await;
|
||||
let storage = StorageDeviceGeneric::new(destdir_path.to_string());
|
||||
assert!(s
|
||||
.update_sandbox_storage(destdir_path, Arc::new(storage))
|
||||
.is_ok());
|
||||
assert!(s.remove_sandbox_storage(destdir_path).await.is_ok());
|
||||
|
||||
let other_dir_str;
|
||||
{
|
||||
@@ -642,10 +600,14 @@ mod tests {
|
||||
let other_dir_path = other_dir.path().to_str().unwrap();
|
||||
other_dir_str = other_dir_path.to_string();
|
||||
|
||||
assert!(s.set_sandbox_storage(other_dir_path));
|
||||
s.add_sandbox_storage(other_dir_path).await;
|
||||
let storage = StorageDeviceGeneric::new(other_dir_path.to_string());
|
||||
assert!(s
|
||||
.update_sandbox_storage(other_dir_path, Arc::new(storage))
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
assert!(s.unset_and_remove_sandbox_storage(&other_dir_str).is_err());
|
||||
assert!(s.remove_sandbox_storage(&other_dir_str).await.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -657,28 +619,30 @@ mod tests {
|
||||
let storage_path = "/tmp/testEphe";
|
||||
|
||||
// Add a new sandbox storage
|
||||
assert!(s.set_sandbox_storage(storage_path));
|
||||
s.add_sandbox_storage(storage_path).await;
|
||||
// Use the existing sandbox storage
|
||||
let state = s.add_sandbox_storage(storage_path).await;
|
||||
assert!(
|
||||
!s.set_sandbox_storage(storage_path),
|
||||
state.ref_count().await > 1,
|
||||
"Expects false as the storage is not new."
|
||||
);
|
||||
|
||||
assert!(
|
||||
!s.unset_sandbox_storage(storage_path).unwrap(),
|
||||
!s.remove_sandbox_storage(storage_path).await.unwrap(),
|
||||
"Expects false as there is still a storage."
|
||||
);
|
||||
|
||||
// Reference counter should decrement to 1.
|
||||
let ref_count = s.storages[storage_path];
|
||||
let storage = &s.storages[storage_path];
|
||||
let refcount = storage.ref_count().await;
|
||||
assert_eq!(
|
||||
ref_count, 1,
|
||||
refcount, 1,
|
||||
"Invalid refcount, got {} expected 1.",
|
||||
ref_count
|
||||
refcount
|
||||
);
|
||||
|
||||
assert!(
|
||||
s.unset_sandbox_storage(storage_path).unwrap(),
|
||||
s.remove_sandbox_storage(storage_path).await.unwrap(),
|
||||
"Expects true as there is still a storage."
|
||||
);
|
||||
|
||||
@@ -694,7 +658,7 @@ mod tests {
|
||||
// If no container is using the sandbox storage, the reference
|
||||
// counter for it should not exist.
|
||||
assert!(
|
||||
s.unset_sandbox_storage(storage_path).is_err(),
|
||||
s.remove_sandbox_storage(storage_path).await.is_err(),
|
||||
"Expects false as the reference counter should no exist."
|
||||
);
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ async fn handle_sigchild(logger: Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut p = process.unwrap();
|
||||
let p = process.unwrap();
|
||||
|
||||
let ret: i32 = match wait_status {
|
||||
WaitStatus::Exited(_, c) => c,
|
||||
|
||||
37
src/agent/src/storage/bind_watcher_handler.rs
Normal file
37
src/agent/src/storage/bind_watcher_handler.rs
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::Result;
|
||||
use kata_types::mount::StorageDevice;
|
||||
use protocols::agent::Storage;
|
||||
use std::iter;
|
||||
use std::sync::Arc;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::storage::{new_device, StorageContext, StorageHandler};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BindWatcherHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for BindWatcherHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
if let Some(cid) = ctx.cid {
|
||||
ctx.sandbox
|
||||
.lock()
|
||||
.await
|
||||
.bind_watcher
|
||||
.add_container(cid.to_string(), iter::once(storage.clone()), ctx.logger)
|
||||
.await?;
|
||||
}
|
||||
new_device("".to_string())
|
||||
}
|
||||
}
|
||||
197
src/agent/src/storage/block_handler.rs
Normal file
197
src/agent/src/storage/block_handler.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::fs;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::mount::StorageDevice;
|
||||
use protocols::agent::Storage;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::device::{
|
||||
get_scsi_device_name, get_virtio_blk_pci_device_name, get_virtio_mmio_device_name,
|
||||
wait_for_pmem_device,
|
||||
};
|
||||
use crate::pci;
|
||||
use crate::storage::{common_storage_handler, new_device, StorageContext, StorageHandler};
|
||||
#[cfg(target_arch = "s390x")]
|
||||
use crate::{ccw, device::get_virtio_blk_ccw_device_name};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkMmioHandler {}
|
||||
|
||||
impl VirtioBlkMmioHandler {
|
||||
pub async fn update_device_path(
|
||||
storage: &mut Storage,
|
||||
ctx: &mut StorageContext<'_>,
|
||||
) -> Result<()> {
|
||||
if !Path::new(&storage.source).exists() {
|
||||
get_virtio_mmio_device_name(ctx.sandbox, &storage.source)
|
||||
.await
|
||||
.context("failed to get mmio device name")?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioBlkMmioHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Self::update_device_path(&mut storage, ctx).await?;
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkPciHandler {}
|
||||
|
||||
impl VirtioBlkPciHandler {
|
||||
pub async fn update_device_path(
|
||||
storage: &mut Storage,
|
||||
ctx: &mut StorageContext<'_>,
|
||||
) -> Result<()> {
|
||||
// If hot-plugged, get the device node path based on the PCI path
|
||||
// otherwise use the virt path provided in Storage Source
|
||||
if storage.source.starts_with("/dev") {
|
||||
let metadata = fs::metadata(&storage.source)
|
||||
.context(format!("get metadata on file {:?}", &storage.source))?;
|
||||
let mode = metadata.permissions().mode();
|
||||
if mode & libc::S_IFBLK == 0 {
|
||||
return Err(anyhow!("Invalid device {}", &storage.source));
|
||||
}
|
||||
} else {
|
||||
let pcipath = pci::Path::from_str(&storage.source)?;
|
||||
let dev_path = get_virtio_blk_pci_device_name(ctx.sandbox, &pcipath).await?;
|
||||
storage.source = dev_path;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioBlkPciHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Self::update_device_path(&mut storage, ctx).await?;
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioBlkCcwHandler {}
|
||||
|
||||
impl VirtioBlkCcwHandler {
|
||||
pub async fn update_device_path(
|
||||
_storage: &mut Storage,
|
||||
_ctx: &mut StorageContext<'_>,
|
||||
) -> Result<()> {
|
||||
#[cfg(target_arch = "s390x")]
|
||||
{
|
||||
let ccw_device = ccw::Device::from_str(&_storage.source)?;
|
||||
let dev_path = get_virtio_blk_ccw_device_name(_ctx.sandbox, &ccw_device).await?;
|
||||
_storage.source = dev_path;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioBlkCcwHandler {
|
||||
#[cfg(target_arch = "s390x")]
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Self::update_device_path(&mut storage, ctx).await?;
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "s390x"))]
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
_storage: Storage,
|
||||
_ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Err(anyhow!("CCW is only supported on s390x"))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ScsiHandler {}
|
||||
|
||||
impl ScsiHandler {
|
||||
pub async fn update_device_path(
|
||||
storage: &mut Storage,
|
||||
ctx: &mut StorageContext<'_>,
|
||||
) -> Result<()> {
|
||||
// Retrieve the device path from SCSI address.
|
||||
let dev_path = get_scsi_device_name(ctx.sandbox, &storage.source).await?;
|
||||
storage.source = dev_path;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for ScsiHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Self::update_device_path(&mut storage, ctx).await?;
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PmemHandler {}
|
||||
|
||||
impl PmemHandler {
|
||||
pub async fn update_device_path(
|
||||
storage: &mut Storage,
|
||||
ctx: &mut StorageContext<'_>,
|
||||
) -> Result<()> {
|
||||
// Retrieve the device for pmem storage
|
||||
wait_for_pmem_device(ctx.sandbox, &storage.source).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for PmemHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Self::update_device_path(&mut storage, ctx).await?;
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
165
src/agent/src/storage/dm_verity.rs
Normal file
165
src/agent/src/storage/dm_verity.rs
Normal file
@@ -0,0 +1,165 @@
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use image_rs::verity::{create_dmverity_device, destroy_dmverity_device};
|
||||
use kata_sys_util::mount::create_mount_destination;
|
||||
use kata_types::mount::{DmVerityInfo, StorageDevice};
|
||||
use kata_types::volume::{
|
||||
KATA_VOLUME_DMVERITY_OPTION_SOURCE_TYPE, KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO,
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_PMEM, KATA_VOLUME_DMVERITY_SOURCE_TYPE_SCSI,
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_CCW, KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_MMIO,
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_PCI,
|
||||
};
|
||||
use protocols::agent::Storage;
|
||||
use slog::Logger;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::storage::block_handler::{
|
||||
PmemHandler, ScsiHandler, VirtioBlkCcwHandler, VirtioBlkMmioHandler, VirtioBlkPciHandler,
|
||||
};
|
||||
use crate::storage::{common_storage_handler, StorageContext, StorageHandler};
|
||||
|
||||
use super::StorageDeviceGeneric;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct DmVerityHandler {}
|
||||
|
||||
impl DmVerityHandler {
|
||||
fn get_dm_verity_info(storage: &Storage) -> Result<DmVerityInfo> {
|
||||
for option in storage.driver_options.iter() {
|
||||
if let Some((key, value)) = option.split_once('=') {
|
||||
if key == KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO {
|
||||
let verity_info: DmVerityInfo = serde_json::from_str(value)?;
|
||||
return Ok(verity_info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(anyhow!("missing DmVerity information for DmVerity volume"))
|
||||
}
|
||||
|
||||
async fn update_source_device(
|
||||
storage: &mut Storage,
|
||||
ctx: &mut StorageContext<'_>,
|
||||
) -> Result<()> {
|
||||
for option in storage.driver_options.clone() {
|
||||
if let Some((key, value)) = option.split_once('=') {
|
||||
if key == KATA_VOLUME_DMVERITY_OPTION_SOURCE_TYPE {
|
||||
match value {
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_PCI => {
|
||||
VirtioBlkPciHandler::update_device_path(storage, ctx).await?;
|
||||
}
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_MMIO => {
|
||||
VirtioBlkMmioHandler::update_device_path(storage, ctx).await?;
|
||||
}
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_CCW => {
|
||||
VirtioBlkCcwHandler::update_device_path(storage, ctx).await?;
|
||||
}
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_SCSI => {
|
||||
ScsiHandler::update_device_path(storage, ctx).await?;
|
||||
}
|
||||
KATA_VOLUME_DMVERITY_SOURCE_TYPE_PMEM => {
|
||||
PmemHandler::update_device_path(storage, ctx).await?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for DmVerityHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
Self::update_source_device(&mut storage, ctx).await?;
|
||||
create_mount_destination(&storage.source, &storage.mount_point, "", &storage.fstype)
|
||||
.context("Could not create mountpoint")?;
|
||||
|
||||
let verity_info = Self::get_dm_verity_info(&storage)?;
|
||||
let verity_info = serde_json::to_string(&verity_info)
|
||||
.map_err(|e| anyhow!("failed to serialize dm_verity info, {}", e))?;
|
||||
let verity_device_path = create_dmverity_device(&verity_info, Path::new(storage.source()))
|
||||
.context("create device with dm-verity enabled")?;
|
||||
storage.source = verity_device_path;
|
||||
common_storage_handler(ctx.logger, &storage)?;
|
||||
|
||||
Ok(Arc::new(DmVerityDevice {
|
||||
common: StorageDeviceGeneric::new(storage.mount_point),
|
||||
verity_device_path: storage.source,
|
||||
logger: ctx.logger.clone(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
struct DmVerityDevice {
|
||||
common: StorageDeviceGeneric,
|
||||
verity_device_path: String,
|
||||
logger: Logger,
|
||||
}
|
||||
|
||||
impl StorageDevice for DmVerityDevice {
|
||||
fn path(&self) -> Option<&str> {
|
||||
self.common.path()
|
||||
}
|
||||
|
||||
fn cleanup(&self) -> Result<()> {
|
||||
self.common.cleanup().context("clean up dm-verity volume")?;
|
||||
let device_path = &self.verity_device_path;
|
||||
debug!(
|
||||
self.logger,
|
||||
"destroy verity device path = {:?}", device_path
|
||||
);
|
||||
destroy_dmverity_device(device_path)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use kata_types::{mount::DmVerityInfo, volume::KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO};
|
||||
use protocols::agent::Storage;
|
||||
|
||||
use crate::storage::dm_verity::DmVerityHandler;
|
||||
|
||||
#[test]
|
||||
fn test_get_dm_verity_info() {
|
||||
let verity_info = DmVerityInfo {
|
||||
hashtype: "sha256".to_string(),
|
||||
hash: "d86104eee715a1b59b62148641f4ca73edf1be3006c4d481f03f55ac05640570".to_string(),
|
||||
blocknum: 2361,
|
||||
blocksize: 512,
|
||||
hashsize: 4096,
|
||||
offset: 1212416,
|
||||
};
|
||||
|
||||
let verity_info_str = serde_json::to_string(&verity_info);
|
||||
assert!(verity_info_str.is_ok());
|
||||
|
||||
let storage = Storage {
|
||||
driver: KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO.to_string(),
|
||||
driver_options: vec![format!("verity_info={}", verity_info_str.ok().unwrap())],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
match DmVerityHandler::get_dm_verity_info(&storage) {
|
||||
Ok(result) => {
|
||||
assert_eq!(verity_info, result);
|
||||
}
|
||||
Err(e) => panic!("err = {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
293
src/agent/src/storage/ephemeral_handler.rs
Normal file
293
src/agent/src/storage/ephemeral_handler.rs
Normal file
@@ -0,0 +1,293 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::fs;
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::Write;
|
||||
use std::os::unix::fs::{MetadataExt, PermissionsExt};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_sys_util::mount::parse_mount_options;
|
||||
use kata_types::mount::{StorageDevice, KATA_MOUNT_OPTION_FS_GID};
|
||||
use nix::unistd::Gid;
|
||||
use protocols::agent::Storage;
|
||||
use slog::Logger;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::device::{DRIVER_EPHEMERAL_TYPE, FS_TYPE_HUGETLB};
|
||||
use crate::mount::baremount;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::storage::{
|
||||
common_storage_handler, new_device, parse_options, StorageContext, StorageHandler, MODE_SETGID,
|
||||
};
|
||||
|
||||
const FS_GID_EQ: &str = "fsgid=";
|
||||
const SYS_FS_HUGEPAGES_PREFIX: &str = "/sys/kernel/mm/hugepages";
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EphemeralHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for EphemeralHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
// hugetlbfs
|
||||
if storage.fstype == FS_TYPE_HUGETLB {
|
||||
info!(ctx.logger, "handle hugetlbfs storage");
|
||||
// Allocate hugepages before mount
|
||||
// /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
|
||||
// /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
|
||||
// options eg "pagesize=2097152,size=524288000"(2M, 500M)
|
||||
Self::allocate_hugepages(ctx.logger, &storage.options.to_vec())
|
||||
.context("allocate hugepages")?;
|
||||
common_storage_handler(ctx.logger, &storage)?;
|
||||
} else if !storage.options.is_empty() {
|
||||
// By now we only support one option field: "fsGroup" which
|
||||
// isn't an valid mount option, thus we should remove it when
|
||||
// do mount.
|
||||
let opts = parse_options(&storage.options);
|
||||
storage.options = Default::default();
|
||||
common_storage_handler(ctx.logger, &storage)?;
|
||||
|
||||
// ephemeral_storage didn't support mount options except fsGroup.
|
||||
if let Some(fsgid) = opts.get(KATA_MOUNT_OPTION_FS_GID) {
|
||||
let gid = fsgid.parse::<u32>()?;
|
||||
|
||||
nix::unistd::chown(storage.mount_point.as_str(), None, Some(Gid::from_raw(gid)))?;
|
||||
|
||||
let meta = fs::metadata(&storage.mount_point)?;
|
||||
let mut permission = meta.permissions();
|
||||
|
||||
let o_mode = meta.mode() | MODE_SETGID;
|
||||
permission.set_mode(o_mode);
|
||||
fs::set_permissions(&storage.mount_point, permission)?;
|
||||
}
|
||||
} else {
|
||||
common_storage_handler(ctx.logger, &storage)?;
|
||||
}
|
||||
|
||||
new_device("".to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl EphemeralHandler {
|
||||
// Allocate hugepages by writing to sysfs
|
||||
fn allocate_hugepages(logger: &Logger, options: &[String]) -> Result<()> {
|
||||
info!(logger, "mounting hugePages storage options: {:?}", options);
|
||||
|
||||
let (pagesize, size) = Self::get_pagesize_and_size_from_option(options)
|
||||
.context(format!("parse mount options: {:?}", &options))?;
|
||||
|
||||
info!(
|
||||
logger,
|
||||
"allocate hugepages. pageSize: {}, size: {}", pagesize, size
|
||||
);
|
||||
|
||||
// sysfs entry is always of the form hugepages-${pagesize}kB
|
||||
// Ref: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
|
||||
let path = Path::new(SYS_FS_HUGEPAGES_PREFIX)
|
||||
.join(format!("hugepages-{}kB", pagesize / 1024))
|
||||
.join("nr_hugepages");
|
||||
|
||||
// write numpages to nr_hugepages file.
|
||||
let numpages = format!("{}", size / pagesize);
|
||||
info!(logger, "write {} pages to {:?}", &numpages, &path);
|
||||
|
||||
let mut file = OpenOptions::new()
|
||||
.write(true)
|
||||
.open(&path)
|
||||
.context(format!("open nr_hugepages directory {:?}", &path))?;
|
||||
|
||||
file.write_all(numpages.as_bytes())
|
||||
.context(format!("write nr_hugepages failed: {:?}", &path))?;
|
||||
|
||||
// Even if the write succeeds, the kernel isn't guaranteed to be
|
||||
// able to allocate all the pages we requested. Verify that it
|
||||
// did.
|
||||
let verify = fs::read_to_string(&path).context(format!("reading {:?}", &path))?;
|
||||
let allocated = verify
|
||||
.trim_end()
|
||||
.parse::<u64>()
|
||||
.map_err(|_| anyhow!("Unexpected text {:?} in {:?}", &verify, &path))?;
|
||||
if allocated != size / pagesize {
|
||||
return Err(anyhow!(
|
||||
"Only allocated {} of {} hugepages of size {}",
|
||||
allocated,
|
||||
numpages,
|
||||
pagesize
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Parse filesystem options string to retrieve hugepage details
|
||||
// options eg "pagesize=2048,size=107374182"
|
||||
fn get_pagesize_and_size_from_option(options: &[String]) -> Result<(u64, u64)> {
|
||||
let mut pagesize_str: Option<&str> = None;
|
||||
let mut size_str: Option<&str> = None;
|
||||
|
||||
for option in options {
|
||||
let vars: Vec<&str> = option.trim().split(',').collect();
|
||||
|
||||
for var in vars {
|
||||
if let Some(stripped) = var.strip_prefix("pagesize=") {
|
||||
pagesize_str = Some(stripped);
|
||||
} else if let Some(stripped) = var.strip_prefix("size=") {
|
||||
size_str = Some(stripped);
|
||||
}
|
||||
|
||||
if pagesize_str.is_some() && size_str.is_some() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pagesize_str.is_none() || size_str.is_none() {
|
||||
return Err(anyhow!("no pagesize/size options found"));
|
||||
}
|
||||
|
||||
let pagesize = pagesize_str
|
||||
.unwrap()
|
||||
.parse::<u64>()
|
||||
.context(format!("parse pagesize: {:?}", &pagesize_str))?;
|
||||
let size = size_str
|
||||
.unwrap()
|
||||
.parse::<u64>()
|
||||
.context(format!("parse size: {:?}", &pagesize_str))?;
|
||||
|
||||
Ok((pagesize, size))
|
||||
}
|
||||
}
|
||||
|
||||
// update_ephemeral_mounts takes a list of ephemeral mounts and remounts them
|
||||
// with mount options passed by the caller
|
||||
#[instrument]
|
||||
pub async fn update_ephemeral_mounts(
|
||||
logger: Logger,
|
||||
storages: &[Storage],
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
for storage in storages {
|
||||
let handler_name = &storage.driver;
|
||||
let logger = logger.new(o!(
|
||||
"msg" => "updating tmpfs storage",
|
||||
"subsystem" => "storage",
|
||||
"storage-type" => handler_name.to_owned()));
|
||||
|
||||
match handler_name.as_str() {
|
||||
DRIVER_EPHEMERAL_TYPE => {
|
||||
fs::create_dir_all(&storage.mount_point)?;
|
||||
|
||||
if storage.options.is_empty() {
|
||||
continue;
|
||||
} else {
|
||||
// assume that fsGid has already been set
|
||||
let mount_path = Path::new(&storage.mount_point);
|
||||
let src_path = Path::new(&storage.source);
|
||||
let opts: Vec<&String> = storage
|
||||
.options
|
||||
.iter()
|
||||
.filter(|&opt| !opt.starts_with(FS_GID_EQ))
|
||||
.collect();
|
||||
let (flags, options) = parse_mount_options(&opts)?;
|
||||
|
||||
info!(logger, "mounting storage";
|
||||
"mount-source" => src_path.display(),
|
||||
"mount-destination" => mount_path.display(),
|
||||
"mount-fstype" => storage.fstype.as_str(),
|
||||
"mount-options" => options.as_str(),
|
||||
);
|
||||
|
||||
baremount(
|
||||
src_path,
|
||||
mount_path,
|
||||
storage.fstype.as_str(),
|
||||
flags,
|
||||
options.as_str(),
|
||||
&logger,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!(
|
||||
"Unsupported storage type for syncing mounts {}. Only ephemeral storage update is supported",
|
||||
storage.driver
|
||||
));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_get_pagesize_and_size_from_option() {
|
||||
let expected_pagesize = 2048;
|
||||
let expected_size = 107374182;
|
||||
let expected = (expected_pagesize, expected_size);
|
||||
|
||||
let data = vec![
|
||||
// (input, expected, is_ok)
|
||||
("size-1=107374182,pagesize-1=2048", expected, false),
|
||||
("size-1=107374182,pagesize=2048", expected, false),
|
||||
("size=107374182,pagesize-1=2048", expected, false),
|
||||
("size=107374182,pagesize=abc", expected, false),
|
||||
("size=abc,pagesize=2048", expected, false),
|
||||
("size=,pagesize=2048", expected, false),
|
||||
("size=107374182,pagesize=", expected, false),
|
||||
("size=107374182,pagesize=2048", expected, true),
|
||||
("pagesize=2048,size=107374182", expected, true),
|
||||
("foo=bar,pagesize=2048,size=107374182", expected, true),
|
||||
(
|
||||
"foo=bar,pagesize=2048,foo1=bar1,size=107374182",
|
||||
expected,
|
||||
true,
|
||||
),
|
||||
(
|
||||
"pagesize=2048,foo1=bar1,foo=bar,size=107374182",
|
||||
expected,
|
||||
true,
|
||||
),
|
||||
(
|
||||
"foo=bar,pagesize=2048,foo1=bar1,size=107374182,foo2=bar2",
|
||||
expected,
|
||||
true,
|
||||
),
|
||||
(
|
||||
"foo=bar,size=107374182,foo1=bar1,pagesize=2048",
|
||||
expected,
|
||||
true,
|
||||
),
|
||||
];
|
||||
|
||||
for case in data {
|
||||
let input = case.0;
|
||||
let r = EphemeralHandler::get_pagesize_and_size_from_option(&[input.to_string()]);
|
||||
|
||||
let is_ok = case.2;
|
||||
if is_ok {
|
||||
let expected = case.1;
|
||||
let (pagesize, size) = r.unwrap();
|
||||
assert_eq!(expected.0, pagesize);
|
||||
assert_eq!(expected.1, size);
|
||||
} else {
|
||||
assert!(r.is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
99
src/agent/src/storage/fs_handler.rs
Normal file
99
src/agent/src/storage/fs_handler.rs
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_types::mount::StorageDevice;
|
||||
use kata_types::volume::KATA_VOLUME_OVERLAYFS_CREATE_DIR;
|
||||
use protocols::agent::Storage;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::storage::{common_storage_handler, new_device, StorageContext, StorageHandler};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct OverlayfsHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for OverlayfsHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
if storage
|
||||
.options
|
||||
.iter()
|
||||
.any(|e| e == "io.katacontainers.fs-opt.overlay-rw")
|
||||
{
|
||||
let cid = ctx
|
||||
.cid
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow!("No container id in rw overlay"))?;
|
||||
let cpath = Path::new(crate::rpc::CONTAINER_BASE).join(cid);
|
||||
let work = cpath.join("work");
|
||||
let upper = cpath.join("upper");
|
||||
|
||||
fs::create_dir_all(&work).context("Creating overlay work directory")?;
|
||||
fs::create_dir_all(&upper).context("Creating overlay upper directory")?;
|
||||
|
||||
storage.fstype = "overlay".into();
|
||||
storage
|
||||
.options
|
||||
.push(format!("upperdir={}", upper.to_string_lossy()));
|
||||
storage
|
||||
.options
|
||||
.push(format!("workdir={}", work.to_string_lossy()));
|
||||
}
|
||||
let overlay_create_dir_prefix = &(KATA_VOLUME_OVERLAYFS_CREATE_DIR.to_string() + "=");
|
||||
for driver_option in &storage.driver_options {
|
||||
if let Some(dir) = driver_option
|
||||
.as_str()
|
||||
.strip_prefix(overlay_create_dir_prefix)
|
||||
{
|
||||
fs::create_dir_all(dir).context("Failed to create directory")?;
|
||||
}
|
||||
}
|
||||
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Virtio9pHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for Virtio9pHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VirtioFsHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for VirtioFsHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
let path = common_storage_handler(ctx.logger, &storage)?;
|
||||
new_device(path)
|
||||
}
|
||||
}
|
||||
102
src/agent/src/storage/image_pull_handler.rs
Normal file
102
src/agent/src/storage/image_pull_handler.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
// Copyright (c) 2023 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use kata_types::mount::KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL;
|
||||
use kata_types::mount::{ImagePullVolume, StorageDevice};
|
||||
use protocols::agent::Storage;
|
||||
use std::sync::Arc;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::image_rpc;
|
||||
use crate::storage::{StorageContext, StorageHandler};
|
||||
|
||||
use super::{common_storage_handler, new_device};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ImagePullHandler {}
|
||||
|
||||
impl ImagePullHandler {
|
||||
fn get_image_info(storage: &Storage) -> Result<ImagePullVolume> {
|
||||
for option in storage.driver_options.iter() {
|
||||
if let Some((key, value)) = option.split_once('=') {
|
||||
if key == KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL {
|
||||
let imagepull_volume: ImagePullVolume = serde_json::from_str(value)?;
|
||||
return Ok(imagepull_volume);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(anyhow!("missing Image information for ImagePull volume"))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for ImagePullHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
mut storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
//Currently the image metadata is not used to pulling image in the guest.
|
||||
let image_pull_volume = Self::get_image_info(&storage)?;
|
||||
debug!(ctx.logger, "image_pull_volume = {:?}", image_pull_volume);
|
||||
let image_name = storage.source();
|
||||
debug!(ctx.logger, "image_name = {:?}", image_name);
|
||||
|
||||
let cid = ctx
|
||||
.cid
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow!("failed to get container id"))?;
|
||||
let image_service = image_rpc::ImageService::singleton().await?;
|
||||
let bundle_path = image_service
|
||||
.pull_image_for_container(image_name, &cid, &image_pull_volume.metadata)
|
||||
.await?;
|
||||
|
||||
storage.source = bundle_path;
|
||||
storage.options = vec!["bind".to_string(), "ro".to_string()];
|
||||
|
||||
common_storage_handler(ctx.logger, &storage)?;
|
||||
|
||||
new_device(storage.mount_point)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::collections::HashMap;
|
||||
|
||||
use kata_types::mount::{ImagePullVolume, KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL};
|
||||
use protocols::agent::Storage;
|
||||
|
||||
use crate::storage::image_pull_handler::ImagePullHandler;
|
||||
|
||||
#[test]
|
||||
fn test_get_image_info() {
|
||||
let mut res = HashMap::new();
|
||||
res.insert("key1".to_string(), "value1".to_string());
|
||||
res.insert("key2".to_string(), "value2".to_string());
|
||||
|
||||
let image_pull = ImagePullVolume {
|
||||
metadata: res.clone(),
|
||||
};
|
||||
|
||||
let image_pull_str = serde_json::to_string(&image_pull);
|
||||
assert!(image_pull_str.is_ok());
|
||||
|
||||
let storage = Storage {
|
||||
driver: KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL.to_string(),
|
||||
driver_options: vec![format!("image_guest_pull={}", image_pull_str.ok().unwrap())],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
match ImagePullHandler::get_image_info(&storage) {
|
||||
Ok(image_info) => {
|
||||
assert_eq!(image_info.metadata, res);
|
||||
}
|
||||
Err(e) => panic!("err = {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
61
src/agent/src/storage/local_handler.rs
Normal file
61
src/agent/src/storage/local_handler.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::fs;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use kata_types::mount::{StorageDevice, KATA_MOUNT_OPTION_FS_GID};
|
||||
use nix::unistd::Gid;
|
||||
use protocols::agent::Storage;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::storage::{new_device, parse_options, StorageContext, StorageHandler, MODE_SETGID};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LocalHandler {}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl StorageHandler for LocalHandler {
|
||||
#[instrument]
|
||||
async fn create_device(
|
||||
&self,
|
||||
storage: Storage,
|
||||
_ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>> {
|
||||
fs::create_dir_all(&storage.mount_point).context(format!(
|
||||
"failed to create dir all {:?}",
|
||||
&storage.mount_point
|
||||
))?;
|
||||
|
||||
let opts = parse_options(&storage.options);
|
||||
|
||||
let mut need_set_fsgid = false;
|
||||
if let Some(fsgid) = opts.get(KATA_MOUNT_OPTION_FS_GID) {
|
||||
let gid = fsgid.parse::<u32>()?;
|
||||
|
||||
nix::unistd::chown(storage.mount_point.as_str(), None, Some(Gid::from_raw(gid)))?;
|
||||
need_set_fsgid = true;
|
||||
}
|
||||
|
||||
if let Some(mode) = opts.get("mode") {
|
||||
let mut permission = fs::metadata(&storage.mount_point)?.permissions();
|
||||
|
||||
let mut o_mode = u32::from_str_radix(mode, 8)?;
|
||||
|
||||
if need_set_fsgid {
|
||||
// set SetGid mode mask.
|
||||
o_mode |= MODE_SETGID;
|
||||
}
|
||||
permission.set_mode(o_mode);
|
||||
|
||||
fs::set_permissions(&storage.mount_point, permission)?;
|
||||
}
|
||||
|
||||
new_device("".to_string())
|
||||
}
|
||||
}
|
||||
800
src/agent/src/storage/mod.rs
Normal file
800
src/agent/src/storage/mod.rs
Normal file
@@ -0,0 +1,800 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::os::unix::fs::{MetadataExt, PermissionsExt};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use kata_sys_util::mount::{create_mount_destination, parse_mount_options};
|
||||
use kata_types::mount::{
|
||||
StorageDevice, StorageHandlerManager, KATA_SHAREDFS_GUEST_PREMOUNT_TAG,
|
||||
KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL,
|
||||
};
|
||||
use kata_types::volume::KATA_VOLUME_TYPE_DMVERITY;
|
||||
use nix::unistd::{Gid, Uid};
|
||||
use protocols::agent::Storage;
|
||||
use protocols::types::FSGroupChangePolicy;
|
||||
use slog::Logger;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
use self::bind_watcher_handler::BindWatcherHandler;
|
||||
use self::block_handler::{PmemHandler, ScsiHandler, VirtioBlkMmioHandler, VirtioBlkPciHandler};
|
||||
use self::dm_verity::DmVerityHandler;
|
||||
use self::ephemeral_handler::EphemeralHandler;
|
||||
use self::fs_handler::{OverlayfsHandler, Virtio9pHandler, VirtioFsHandler};
|
||||
use self::image_pull_handler::ImagePullHandler;
|
||||
use self::local_handler::LocalHandler;
|
||||
|
||||
use crate::device::{
|
||||
DRIVER_9P_TYPE, DRIVER_BLK_MMIO_TYPE, DRIVER_BLK_PCI_TYPE, DRIVER_EPHEMERAL_TYPE,
|
||||
DRIVER_LOCAL_TYPE, DRIVER_NVDIMM_TYPE, DRIVER_OVERLAYFS_TYPE, DRIVER_SCSI_TYPE,
|
||||
DRIVER_VIRTIOFS_TYPE, DRIVER_WATCHABLE_BIND_TYPE,
|
||||
};
|
||||
use crate::mount::{baremount, is_mounted, remove_mounts};
|
||||
use crate::sandbox::Sandbox;
|
||||
|
||||
pub use self::ephemeral_handler::update_ephemeral_mounts;
|
||||
|
||||
mod bind_watcher_handler;
|
||||
mod block_handler;
|
||||
mod dm_verity;
|
||||
mod ephemeral_handler;
|
||||
mod fs_handler;
|
||||
mod image_pull_handler;
|
||||
mod local_handler;
|
||||
|
||||
const RW_MASK: u32 = 0o660;
|
||||
const RO_MASK: u32 = 0o440;
|
||||
const EXEC_MASK: u32 = 0o110;
|
||||
const MODE_SETGID: u32 = 0o2000;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StorageContext<'a> {
|
||||
cid: &'a Option<String>,
|
||||
logger: &'a Logger,
|
||||
sandbox: &'a Arc<Mutex<Sandbox>>,
|
||||
}
|
||||
|
||||
/// An implementation of generic storage device.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct StorageDeviceGeneric {
|
||||
path: Option<String>,
|
||||
}
|
||||
|
||||
impl StorageDeviceGeneric {
|
||||
/// Create a new instance of `StorageStateCommon`.
|
||||
pub fn new(path: String) -> Self {
|
||||
StorageDeviceGeneric { path: Some(path) }
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageDevice for StorageDeviceGeneric {
|
||||
fn path(&self) -> Option<&str> {
|
||||
self.path.as_deref()
|
||||
}
|
||||
|
||||
fn cleanup(&self) -> Result<()> {
|
||||
let path = match self.path() {
|
||||
None => return Ok(()),
|
||||
Some(v) => {
|
||||
if v.is_empty() {
|
||||
// TODO: Bind watch, local, ephemeral volume has empty path, which will get leaked.
|
||||
return Ok(());
|
||||
} else {
|
||||
v
|
||||
}
|
||||
}
|
||||
};
|
||||
if !Path::new(path).exists() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if matches!(is_mounted(path), Ok(true)) {
|
||||
let mounts = vec![path.to_string()];
|
||||
remove_mounts(&mounts)?;
|
||||
}
|
||||
if matches!(is_mounted(path), Ok(true)) {
|
||||
return Err(anyhow!("failed to umount mountpoint {}", path));
|
||||
}
|
||||
|
||||
let p = Path::new(path);
|
||||
if p.is_dir() {
|
||||
let is_empty = p.read_dir()?.next().is_none();
|
||||
if !is_empty {
|
||||
return Err(anyhow!("directory is not empty when clean up storage"));
|
||||
}
|
||||
// "remove_dir" will fail if the mount point is backed by a read-only filesystem.
|
||||
// This is the case with the device mapper snapshotter, where we mount the block device
|
||||
// directly at the underlying sandbox path which was provided from the base RO kataShared
|
||||
// path from the host.
|
||||
let _ = fs::remove_dir(p);
|
||||
} else if !p.is_file() {
|
||||
// TODO: should we remove the file for bind mount?
|
||||
return Err(anyhow!(
|
||||
"storage path {} is neither directory nor file",
|
||||
path
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait object to handle storage device.
|
||||
#[async_trait::async_trait]
|
||||
pub trait StorageHandler: Send + Sync {
|
||||
/// Create a new storage device.
|
||||
async fn create_device(
|
||||
&self,
|
||||
storage: Storage,
|
||||
ctx: &mut StorageContext,
|
||||
) -> Result<Arc<dyn StorageDevice>>;
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
lazy_static! {
|
||||
pub static ref STORAGE_HANDLERS: StorageHandlerManager<Arc<dyn StorageHandler>> = {
|
||||
let mut manager: StorageHandlerManager<Arc<dyn StorageHandler>> = StorageHandlerManager::new();
|
||||
manager.add_handler(DRIVER_9P_TYPE, Arc::new(Virtio9pHandler{})).unwrap();
|
||||
#[cfg(target_arch = "s390x")]
|
||||
manager.add_handler(crate::device::DRIVER_BLK_CCW_TYPE, Arc::new(self::block_handler::VirtioBlkCcwHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_BLK_MMIO_TYPE, Arc::new(VirtioBlkMmioHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_BLK_PCI_TYPE, Arc::new(VirtioBlkPciHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_EPHEMERAL_TYPE, Arc::new(EphemeralHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_LOCAL_TYPE, Arc::new(LocalHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_NVDIMM_TYPE, Arc::new(PmemHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_OVERLAYFS_TYPE, Arc::new(OverlayfsHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_SCSI_TYPE, Arc::new(ScsiHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_VIRTIOFS_TYPE, Arc::new(VirtioFsHandler{})).unwrap();
|
||||
manager.add_handler(DRIVER_WATCHABLE_BIND_TYPE, Arc::new(BindWatcherHandler{})).unwrap();
|
||||
manager.add_handler(KATA_VOLUME_TYPE_DMVERITY, Arc::new(DmVerityHandler{})).unwrap();
|
||||
manager.add_handler(KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL, Arc::new(ImagePullHandler{})).unwrap();
|
||||
manager
|
||||
};
|
||||
}
|
||||
|
||||
// add_storages takes a list of storages passed by the caller, and perform the
|
||||
// associated operations such as waiting for the device to show up, and mount
|
||||
// it to a specific location, according to the type of handler chosen, and for
|
||||
// each storage.
|
||||
#[instrument]
|
||||
pub async fn add_storages(
|
||||
logger: Logger,
|
||||
storages: Vec<Storage>,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
cid: Option<String>,
|
||||
) -> Result<Vec<String>> {
|
||||
let mut mount_list = Vec::new();
|
||||
|
||||
for storage in storages {
|
||||
let path = storage.mount_point.clone();
|
||||
let state = sandbox.lock().await.add_sandbox_storage(&path).await;
|
||||
if state.ref_count().await > 1 {
|
||||
if let Some(path) = state.path() {
|
||||
if !path.is_empty() {
|
||||
mount_list.push(path.to_string());
|
||||
}
|
||||
}
|
||||
// The device already exists.
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(handler) = STORAGE_HANDLERS.handler(&storage.driver) {
|
||||
let logger =
|
||||
logger.new(o!( "subsystem" => "storage", "storage-type" => storage.driver.clone()));
|
||||
let mut ctx = StorageContext {
|
||||
cid: &cid,
|
||||
logger: &logger,
|
||||
sandbox,
|
||||
};
|
||||
|
||||
match handler.create_device(storage, &mut ctx).await {
|
||||
Ok(device) => {
|
||||
match sandbox
|
||||
.lock()
|
||||
.await
|
||||
.update_sandbox_storage(&path, device.clone())
|
||||
{
|
||||
Ok(d) => {
|
||||
if let Some(path) = device.path() {
|
||||
if !path.is_empty() {
|
||||
mount_list.push(path.to_string());
|
||||
}
|
||||
}
|
||||
drop(d);
|
||||
}
|
||||
Err(device) => {
|
||||
error!(logger, "failed to update device for storage");
|
||||
if let Err(e) = sandbox.lock().await.remove_sandbox_storage(&path).await
|
||||
{
|
||||
warn!(logger, "failed to remove dummy sandbox storage {:?}", e);
|
||||
}
|
||||
if let Err(e) = device.cleanup() {
|
||||
error!(
|
||||
logger,
|
||||
"failed to clean state for storage device {}, {}", path, e
|
||||
);
|
||||
}
|
||||
return Err(anyhow!("failed to update device for storage"));
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!(logger, "failed to create device for storage, error: {e:?}");
|
||||
if let Err(e) = sandbox.lock().await.remove_sandbox_storage(&path).await {
|
||||
warn!(logger, "failed to remove dummy sandbox storage {e:?}");
|
||||
}
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return Err(anyhow!(
|
||||
"Failed to find the storage handler {}",
|
||||
storage.driver
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(mount_list)
|
||||
}
|
||||
|
||||
pub(crate) fn new_device(path: String) -> Result<Arc<dyn StorageDevice>> {
|
||||
let device = StorageDeviceGeneric::new(path);
|
||||
Ok(Arc::new(device))
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub(crate) fn common_storage_handler(logger: &Logger, storage: &Storage) -> Result<String> {
|
||||
mount_storage(logger, storage)?;
|
||||
set_ownership(logger, storage)?;
|
||||
Ok(storage.mount_point.clone())
|
||||
}
|
||||
|
||||
// mount_storage performs the mount described by the storage structure.
|
||||
#[instrument]
|
||||
fn mount_storage(logger: &Logger, storage: &Storage) -> Result<()> {
|
||||
let logger = logger.new(o!("subsystem" => "mount"));
|
||||
|
||||
// There's a special mechanism to create mountpoint from a `sharedfs` instance before
|
||||
// starting the kata-agent. Check for such cases.
|
||||
if storage.source == KATA_SHAREDFS_GUEST_PREMOUNT_TAG && is_mounted(&storage.mount_point)? {
|
||||
warn!(
|
||||
logger,
|
||||
"{} already mounted on {}, ignoring...",
|
||||
KATA_SHAREDFS_GUEST_PREMOUNT_TAG,
|
||||
&storage.mount_point
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let (flags, options) = parse_mount_options(&storage.options)?;
|
||||
let mount_path = Path::new(&storage.mount_point);
|
||||
let src_path = Path::new(&storage.source);
|
||||
create_mount_destination(src_path, mount_path, "", &storage.fstype)
|
||||
.context("Could not create mountpoint")?;
|
||||
|
||||
info!(logger, "mounting storage";
|
||||
"mount-source" => src_path.display(),
|
||||
"mount-destination" => mount_path.display(),
|
||||
"mount-fstype" => storage.fstype.as_str(),
|
||||
"mount-options" => options.as_str(),
|
||||
);
|
||||
|
||||
baremount(
|
||||
src_path,
|
||||
mount_path,
|
||||
storage.fstype.as_str(),
|
||||
flags,
|
||||
options.as_str(),
|
||||
&logger,
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub(crate) fn parse_options(option_list: &[String]) -> HashMap<String, String> {
|
||||
let mut options = HashMap::new();
|
||||
for opt in option_list {
|
||||
let fields: Vec<&str> = opt.split('=').collect();
|
||||
if fields.len() == 2 {
|
||||
options.insert(fields[0].to_string(), fields[1].to_string());
|
||||
}
|
||||
}
|
||||
options
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn set_ownership(logger: &Logger, storage: &Storage) -> Result<()> {
|
||||
let logger = logger.new(o!("subsystem" => "mount", "fn" => "set_ownership"));
|
||||
|
||||
// If fsGroup is not set, skip performing ownership change
|
||||
if storage.fs_group.is_none() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let fs_group = storage.fs_group();
|
||||
let read_only = storage.options.contains(&String::from("ro"));
|
||||
let mount_path = Path::new(&storage.mount_point);
|
||||
let metadata = mount_path.metadata().map_err(|err| {
|
||||
error!(logger, "failed to obtain metadata for mount path";
|
||||
"mount-path" => mount_path.to_str(),
|
||||
"error" => err.to_string(),
|
||||
);
|
||||
err
|
||||
})?;
|
||||
|
||||
if fs_group.group_change_policy == FSGroupChangePolicy::OnRootMismatch.into()
|
||||
&& metadata.gid() == fs_group.group_id
|
||||
{
|
||||
let mut mask = if read_only { RO_MASK } else { RW_MASK };
|
||||
mask |= EXEC_MASK;
|
||||
|
||||
// With fsGroup change policy to OnRootMismatch, if the current
|
||||
// gid of the mount path root directory matches the desired gid
|
||||
// and the current permission of mount path root directory is correct,
|
||||
// then ownership change will be skipped.
|
||||
let current_mode = metadata.permissions().mode();
|
||||
if (mask & current_mode == mask) && (current_mode & MODE_SETGID != 0) {
|
||||
info!(logger, "skipping ownership change for volume";
|
||||
"mount-path" => mount_path.to_str(),
|
||||
"fs-group" => fs_group.group_id.to_string(),
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
info!(logger, "performing recursive ownership change";
|
||||
"mount-path" => mount_path.to_str(),
|
||||
"fs-group" => fs_group.group_id.to_string(),
|
||||
);
|
||||
recursive_ownership_change(
|
||||
mount_path,
|
||||
None,
|
||||
Some(Gid::from_raw(fs_group.group_id)),
|
||||
read_only,
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub fn recursive_ownership_change(
|
||||
path: &Path,
|
||||
uid: Option<Uid>,
|
||||
gid: Option<Gid>,
|
||||
read_only: bool,
|
||||
) -> Result<()> {
|
||||
let mut mask = if read_only { RO_MASK } else { RW_MASK };
|
||||
if path.is_dir() {
|
||||
for entry in fs::read_dir(path)? {
|
||||
recursive_ownership_change(entry?.path().as_path(), uid, gid, read_only)?;
|
||||
}
|
||||
mask |= EXEC_MASK;
|
||||
mask |= MODE_SETGID;
|
||||
}
|
||||
|
||||
// We do not want to change the permission of the underlying file
|
||||
// using symlink. Hence we skip symlinks from recursive ownership
|
||||
// and permission changes.
|
||||
if path.is_symlink() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
nix::unistd::chown(path, uid, gid)?;
|
||||
|
||||
if gid.is_some() {
|
||||
let metadata = path.metadata()?;
|
||||
let mut permission = metadata.permissions();
|
||||
let target_mode = metadata.mode() | mask;
|
||||
permission.set_mode(target_mode);
|
||||
fs::set_permissions(path, permission)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use anyhow::Error;
|
||||
use nix::mount::MsFlags;
|
||||
use protocols::agent::FSGroup;
|
||||
use std::fs::File;
|
||||
use tempfile::{tempdir, Builder};
|
||||
use test_utils::{
|
||||
skip_if_not_root, skip_loop_by_user, skip_loop_if_not_root, skip_loop_if_root, TestUserType,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_mount_storage() {
|
||||
#[derive(Debug)]
|
||||
struct TestData<'a> {
|
||||
test_user: TestUserType,
|
||||
storage: Storage,
|
||||
error_contains: &'a str,
|
||||
|
||||
make_source_dir: bool,
|
||||
make_mount_dir: bool,
|
||||
deny_mount_permission: bool,
|
||||
}
|
||||
|
||||
impl Default for TestData<'_> {
|
||||
fn default() -> Self {
|
||||
TestData {
|
||||
test_user: TestUserType::Any,
|
||||
storage: Storage {
|
||||
mount_point: "mnt".to_string(),
|
||||
source: "src".to_string(),
|
||||
fstype: "tmpfs".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
make_source_dir: true,
|
||||
make_mount_dir: false,
|
||||
deny_mount_permission: false,
|
||||
error_contains: "",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
TestData {
|
||||
test_user: TestUserType::NonRootOnly,
|
||||
error_contains: "EPERM: Operation not permitted",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
test_user: TestUserType::RootOnly,
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
storage: Storage {
|
||||
mount_point: "mnt".to_string(),
|
||||
source: "src".to_string(),
|
||||
fstype: "bind".to_string(),
|
||||
..Default::default()
|
||||
},
|
||||
make_source_dir: false,
|
||||
make_mount_dir: true,
|
||||
error_contains: "Could not create mountpoint",
|
||||
..Default::default()
|
||||
},
|
||||
TestData {
|
||||
test_user: TestUserType::NonRootOnly,
|
||||
deny_mount_permission: true,
|
||||
error_contains: "Could not create mountpoint",
|
||||
..Default::default()
|
||||
},
|
||||
];
|
||||
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
skip_loop_by_user!(msg, d.test_user);
|
||||
|
||||
let drain = slog::Discard;
|
||||
let logger = slog::Logger::root(drain, o!());
|
||||
|
||||
let tempdir = tempdir().unwrap();
|
||||
|
||||
let source = tempdir.path().join(&d.storage.source);
|
||||
let mount_point = tempdir.path().join(&d.storage.mount_point);
|
||||
|
||||
let storage = Storage {
|
||||
source: source.to_str().unwrap().to_string(),
|
||||
mount_point: mount_point.to_str().unwrap().to_string(),
|
||||
..d.storage.clone()
|
||||
};
|
||||
|
||||
if d.make_source_dir {
|
||||
fs::create_dir_all(&storage.source).unwrap();
|
||||
}
|
||||
if d.make_mount_dir {
|
||||
fs::create_dir_all(&storage.mount_point).unwrap();
|
||||
}
|
||||
|
||||
if d.deny_mount_permission {
|
||||
fs::set_permissions(
|
||||
mount_point.parent().unwrap(),
|
||||
fs::Permissions::from_mode(0o000),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let result = mount_storage(&logger, &storage);
|
||||
|
||||
// restore permissions so tempdir can be cleaned up
|
||||
if d.deny_mount_permission {
|
||||
fs::set_permissions(
|
||||
mount_point.parent().unwrap(),
|
||||
fs::Permissions::from_mode(0o755),
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
if result.is_ok() {
|
||||
nix::mount::umount(&mount_point).unwrap();
|
||||
}
|
||||
|
||||
let msg = format!("{}: result: {:?}", msg, result);
|
||||
if d.error_contains.is_empty() {
|
||||
assert!(result.is_ok(), "{}", msg);
|
||||
} else {
|
||||
assert!(result.is_err(), "{}", msg);
|
||||
let error_msg = format!("{}", result.unwrap_err());
|
||||
assert!(error_msg.contains(d.error_contains), "{}", msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_ownership() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestData<'a> {
|
||||
mount_path: &'a str,
|
||||
fs_group: Option<FSGroup>,
|
||||
read_only: bool,
|
||||
expected_group_id: u32,
|
||||
expected_permission: u32,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
TestData {
|
||||
mount_path: "foo",
|
||||
fs_group: None,
|
||||
read_only: false,
|
||||
expected_group_id: 0,
|
||||
expected_permission: 0,
|
||||
},
|
||||
TestData {
|
||||
mount_path: "rw_mount",
|
||||
fs_group: Some(FSGroup {
|
||||
group_id: 3000,
|
||||
group_change_policy: FSGroupChangePolicy::Always.into(),
|
||||
..Default::default()
|
||||
}),
|
||||
read_only: false,
|
||||
expected_group_id: 3000,
|
||||
expected_permission: RW_MASK | EXEC_MASK | MODE_SETGID,
|
||||
},
|
||||
TestData {
|
||||
mount_path: "ro_mount",
|
||||
fs_group: Some(FSGroup {
|
||||
group_id: 3000,
|
||||
group_change_policy: FSGroupChangePolicy::OnRootMismatch.into(),
|
||||
..Default::default()
|
||||
}),
|
||||
read_only: true,
|
||||
expected_group_id: 3000,
|
||||
expected_permission: RO_MASK | EXEC_MASK | MODE_SETGID,
|
||||
},
|
||||
];
|
||||
|
||||
let tempdir = tempdir().expect("failed to create tmpdir");
|
||||
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
let mount_dir = tempdir.path().join(d.mount_path);
|
||||
fs::create_dir(&mount_dir)
|
||||
.unwrap_or_else(|_| panic!("{}: failed to create root directory", msg));
|
||||
|
||||
let directory_mode = mount_dir.as_path().metadata().unwrap().permissions().mode();
|
||||
let mut storage_data = Storage::new();
|
||||
if d.read_only {
|
||||
storage_data.set_options(vec!["foo".to_string(), "ro".to_string()]);
|
||||
}
|
||||
if let Some(fs_group) = d.fs_group.clone() {
|
||||
storage_data.set_fs_group(fs_group);
|
||||
}
|
||||
storage_data.mount_point = mount_dir.clone().into_os_string().into_string().unwrap();
|
||||
|
||||
let result = set_ownership(&logger, &storage_data);
|
||||
assert!(result.is_ok());
|
||||
|
||||
assert_eq!(
|
||||
mount_dir.as_path().metadata().unwrap().gid(),
|
||||
d.expected_group_id
|
||||
);
|
||||
assert_eq!(
|
||||
mount_dir.as_path().metadata().unwrap().permissions().mode(),
|
||||
(directory_mode | d.expected_permission)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recursive_ownership_change() {
|
||||
skip_if_not_root!();
|
||||
|
||||
const COUNT: usize = 5;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestData<'a> {
|
||||
// Directory where the recursive ownership change should be performed on
|
||||
path: &'a str,
|
||||
|
||||
// User ID for ownership change
|
||||
uid: u32,
|
||||
|
||||
// Group ID for ownership change
|
||||
gid: u32,
|
||||
|
||||
// Set when the permission should be read-only
|
||||
read_only: bool,
|
||||
|
||||
// The expected permission of all directories after ownership change
|
||||
expected_permission_directory: u32,
|
||||
|
||||
// The expected permission of all files after ownership change
|
||||
expected_permission_file: u32,
|
||||
}
|
||||
|
||||
let tests = &[
|
||||
TestData {
|
||||
path: "no_gid_change",
|
||||
uid: 0,
|
||||
gid: 0,
|
||||
read_only: false,
|
||||
expected_permission_directory: 0,
|
||||
expected_permission_file: 0,
|
||||
},
|
||||
TestData {
|
||||
path: "rw_gid_change",
|
||||
uid: 0,
|
||||
gid: 3000,
|
||||
read_only: false,
|
||||
expected_permission_directory: RW_MASK | EXEC_MASK | MODE_SETGID,
|
||||
expected_permission_file: RW_MASK,
|
||||
},
|
||||
TestData {
|
||||
path: "ro_gid_change",
|
||||
uid: 0,
|
||||
gid: 3000,
|
||||
read_only: true,
|
||||
expected_permission_directory: RO_MASK | EXEC_MASK | MODE_SETGID,
|
||||
expected_permission_file: RO_MASK,
|
||||
},
|
||||
];
|
||||
|
||||
let tempdir = tempdir().expect("failed to create tmpdir");
|
||||
|
||||
for (i, d) in tests.iter().enumerate() {
|
||||
let msg = format!("test[{}]: {:?}", i, d);
|
||||
|
||||
let mount_dir = tempdir.path().join(d.path);
|
||||
fs::create_dir(&mount_dir)
|
||||
.unwrap_or_else(|_| panic!("{}: failed to create root directory", msg));
|
||||
|
||||
let directory_mode = mount_dir.as_path().metadata().unwrap().permissions().mode();
|
||||
let mut file_mode: u32 = 0;
|
||||
|
||||
// create testing directories and files
|
||||
for n in 1..COUNT {
|
||||
let nest_dir = mount_dir.join(format!("nested{}", n));
|
||||
fs::create_dir(&nest_dir)
|
||||
.unwrap_or_else(|_| panic!("{}: failed to create nest directory", msg));
|
||||
|
||||
for f in 1..COUNT {
|
||||
let filename = nest_dir.join(format!("file{}", f));
|
||||
File::create(&filename)
|
||||
.unwrap_or_else(|_| panic!("{}: failed to create file", msg));
|
||||
file_mode = filename.as_path().metadata().unwrap().permissions().mode();
|
||||
}
|
||||
}
|
||||
|
||||
let uid = if d.uid > 0 {
|
||||
Some(Uid::from_raw(d.uid))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let gid = if d.gid > 0 {
|
||||
Some(Gid::from_raw(d.gid))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let result = recursive_ownership_change(&mount_dir, uid, gid, d.read_only);
|
||||
|
||||
assert!(result.is_ok());
|
||||
|
||||
assert_eq!(mount_dir.as_path().metadata().unwrap().gid(), d.gid);
|
||||
assert_eq!(
|
||||
mount_dir.as_path().metadata().unwrap().permissions().mode(),
|
||||
(directory_mode | d.expected_permission_directory)
|
||||
);
|
||||
|
||||
for n in 1..COUNT {
|
||||
let nest_dir = mount_dir.join(format!("nested{}", n));
|
||||
for f in 1..COUNT {
|
||||
let filename = nest_dir.join(format!("file{}", f));
|
||||
let file = Path::new(&filename);
|
||||
|
||||
assert_eq!(file.metadata().unwrap().gid(), d.gid);
|
||||
assert_eq!(
|
||||
file.metadata().unwrap().permissions().mode(),
|
||||
(file_mode | d.expected_permission_file)
|
||||
);
|
||||
}
|
||||
|
||||
let dir = Path::new(&nest_dir);
|
||||
assert_eq!(dir.metadata().unwrap().gid(), d.gid);
|
||||
assert_eq!(
|
||||
dir.metadata().unwrap().permissions().mode(),
|
||||
(directory_mode | d.expected_permission_directory)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial_test::serial]
|
||||
async fn cleanup_storage() {
|
||||
skip_if_not_root!();
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
|
||||
let tmpdir = Builder::new().tempdir().unwrap();
|
||||
let tmpdir_path = tmpdir.path().to_str().unwrap();
|
||||
|
||||
let srcdir = Builder::new()
|
||||
.prefix("src")
|
||||
.tempdir_in(tmpdir_path)
|
||||
.unwrap();
|
||||
let srcdir_path = srcdir.path().to_str().unwrap();
|
||||
let empty_file = Path::new(srcdir_path).join("emptyfile");
|
||||
fs::write(&empty_file, "test").unwrap();
|
||||
|
||||
let destdir = Builder::new()
|
||||
.prefix("dest")
|
||||
.tempdir_in(tmpdir_path)
|
||||
.unwrap();
|
||||
let destdir_path = destdir.path().to_str().unwrap();
|
||||
|
||||
let emptydir = Builder::new()
|
||||
.prefix("empty")
|
||||
.tempdir_in(tmpdir_path)
|
||||
.unwrap();
|
||||
|
||||
let s = StorageDeviceGeneric::default();
|
||||
assert!(s.cleanup().is_ok());
|
||||
|
||||
let s = StorageDeviceGeneric::new("".to_string());
|
||||
assert!(s.cleanup().is_ok());
|
||||
|
||||
let invalid_dir = emptydir
|
||||
.path()
|
||||
.join("invalid")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string();
|
||||
let s = StorageDeviceGeneric::new(invalid_dir);
|
||||
assert!(s.cleanup().is_ok());
|
||||
|
||||
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
|
||||
|
||||
let s = StorageDeviceGeneric::new(destdir_path.to_string());
|
||||
assert!(s.cleanup().is_ok());
|
||||
|
||||
// fail to remove non-empty directory
|
||||
let s = StorageDeviceGeneric::new(srcdir_path.to_string());
|
||||
s.cleanup().unwrap_err();
|
||||
|
||||
// remove a directory without umount
|
||||
fs::remove_file(&empty_file).unwrap();
|
||||
s.cleanup().unwrap();
|
||||
}
|
||||
|
||||
fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), Error> {
|
||||
let src_path = Path::new(src);
|
||||
let dst_path = Path::new(dst);
|
||||
|
||||
baremount(src_path, dst_path, "bind", MsFlags::MS_BIND, "", logger)
|
||||
}
|
||||
}
|
||||
1777
src/dragonball/Cargo.lock
generated
1777
src/dragonball/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -10,25 +10,28 @@ license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.32"
|
||||
arc-swap = "1.5.0"
|
||||
bytes = "1.1.0"
|
||||
dbs-address-space = "0.3.0"
|
||||
dbs-allocator = "0.1.0"
|
||||
dbs-arch = "0.2.0"
|
||||
dbs-boot = "0.4.0"
|
||||
dbs-device = "0.2.0"
|
||||
dbs-interrupt = { version = "0.2.0", features = ["kvm-irq"] }
|
||||
dbs-legacy-devices = "0.1.0"
|
||||
dbs-upcall = { version = "0.3.0", optional = true }
|
||||
dbs-utils = "0.2.0"
|
||||
dbs-virtio-devices = { version = "0.3.1", optional = true, features = ["virtio-mmio"] }
|
||||
dbs-address-space = { path = "./src/dbs_address_space" }
|
||||
dbs-allocator = { path = "./src/dbs_allocator" }
|
||||
dbs-arch = { path = "./src/dbs_arch" }
|
||||
dbs-boot = { path = "./src/dbs_boot" }
|
||||
dbs-device = { path = "./src/dbs_device" }
|
||||
dbs-interrupt = { path = "./src/dbs_interrupt", features = ["kvm-irq"] }
|
||||
dbs-legacy-devices = { path = "./src/dbs_legacy_devices" }
|
||||
dbs-upcall = { path = "./src/dbs_upcall" , optional = true }
|
||||
dbs-utils = { path = "./src/dbs_utils" }
|
||||
dbs-virtio-devices = { path = "./src/dbs_virtio_devices", optional = true, features = ["virtio-mmio"] }
|
||||
kvm-bindings = "0.6.0"
|
||||
kvm-ioctls = "0.12.0"
|
||||
lazy_static = "1.2"
|
||||
libc = "0.2.39"
|
||||
linux-loader = "0.6.0"
|
||||
linux-loader = "0.8.0"
|
||||
log = "0.4.14"
|
||||
nix = "0.24.2"
|
||||
procfs = "0.12.0"
|
||||
prometheus = { version = "0.13.0", features = ["process"] }
|
||||
seccompiler = "0.2.0"
|
||||
serde = "1.0.27"
|
||||
serde_derive = "1.0.27"
|
||||
@@ -37,13 +40,14 @@ slog = "2.5.2"
|
||||
slog-scope = "4.4.0"
|
||||
thiserror = "1"
|
||||
vmm-sys-util = "0.11.0"
|
||||
virtio-queue = { version = "0.6.0", optional = true }
|
||||
vm-memory = { version = "0.9.0", features = ["backend-mmap"] }
|
||||
virtio-queue = { version = "0.7.0", optional = true }
|
||||
vm-memory = { version = "0.10.0", features = ["backend-mmap"] }
|
||||
crossbeam-channel = "0.5.6"
|
||||
fuse-backend-rs = "0.10.5"
|
||||
|
||||
[dev-dependencies]
|
||||
slog-term = "2.9.0"
|
||||
slog-async = "2.7.0"
|
||||
slog-term = "2.9.0"
|
||||
test-utils = { path = "../libs/test-utils" }
|
||||
|
||||
[features]
|
||||
|
||||
@@ -39,12 +39,15 @@ clean:
|
||||
|
||||
test:
|
||||
ifdef SUPPORT_VIRTUALIZATION
|
||||
cargo test --all-features --target $(TRIPLE) -- --nocapture
|
||||
RUST_BACKTRACE=1 cargo test --all-features --target $(TRIPLE) -- --nocapture --test-threads=1
|
||||
else
|
||||
@echo "INFO: skip testing dragonball, it need virtualization support."
|
||||
exit 0
|
||||
endif
|
||||
|
||||
coverage:
|
||||
RUST_BACKTRACE=1 cargo llvm-cov --all-features --target $(TRIPLE) -- --nocapture --test-threads=1
|
||||
|
||||
endif # ifeq ($(ARCH), s390x)
|
||||
|
||||
.DEFAULT_GOAL := default
|
||||
|
||||
@@ -16,10 +16,22 @@ and configuration process.
|
||||
|
||||
# Documentation
|
||||
|
||||
Device: [Device Document](docs/device.md)
|
||||
vCPU: [vCPU Document](docs/vcpu.md)
|
||||
API: [API Document](docs/api.md)
|
||||
`Upcall`: [`Upcall` Document](docs/upcall.md)
|
||||
- Device: [Device Document](docs/device.md)
|
||||
- vCPU: [vCPU Document](docs/vcpu.md)
|
||||
- API: [API Document](docs/api.md)
|
||||
- `Upcall`: [`Upcall` Document](docs/upcall.md)
|
||||
- `dbs_acpi`: [`dbs_acpi` Document](src/dbs_acpi/README.md)
|
||||
- `dbs_address_space`: [`dbs_address_space` Document](src/dbs_address_space/README.md)
|
||||
- `dbs_allocator`: [`dbs_allocator` Document](src/dbs_allocator/README.md)
|
||||
- `dbs_arch`: [`dbs_arch` Document](src/dbs_arch/README.md)
|
||||
- `dbs_boot`: [`dbs_boot` Document](src/dbs_boot/README.md)
|
||||
- `dbs_device`: [`dbs_device` Document](src/dbs_device/README.md)
|
||||
- `dbs_interrupt`: [`dbs_interrput` Document](src/dbs_interrupt/README.md)
|
||||
- `dbs_legacy_devices`: [`dbs_legacy_devices` Document](src/dbs_legacy_devices/README.md)
|
||||
- `dbs_tdx`: [`dbs_tdx` Document](src/dbs_tdx/README.md)
|
||||
- `dbs_upcall`: [`dbs_upcall` Document](src/dbs_upcall/README.md)
|
||||
- `dbs_utils`: [`dbs_utils` Document](src/dbs_utils/README.md)
|
||||
- `dbs_virtio_devices`: [`dbs_virtio_devices` Document](src/dbs_virtio_devices/README.md)
|
||||
|
||||
Currently, the documents are still actively adding.
|
||||
You could see the [official documentation](docs/) page for more details.
|
||||
|
||||
@@ -16,6 +16,8 @@ use crate::event_manager::EventManager;
|
||||
use crate::vm::{CpuTopology, KernelConfigInfo, VmConfigInfo};
|
||||
use crate::vmm::Vmm;
|
||||
|
||||
use crate::hypervisor_metrics::get_hypervisor_metrics;
|
||||
|
||||
use self::VmConfigError::*;
|
||||
use self::VmmActionError::MachineConfig;
|
||||
|
||||
@@ -58,6 +60,11 @@ pub enum VmmActionError {
|
||||
#[error("Upcall not ready, can't hotplug device.")]
|
||||
UpcallServerNotReady,
|
||||
|
||||
/// Error when get prometheus metrics.
|
||||
/// Currently does not distinguish between error types for metrics.
|
||||
#[error("failed to get hypervisor metrics")]
|
||||
GetHypervisorMetrics,
|
||||
|
||||
/// The action `ConfigureBootSource` failed either because of bad user input or an internal
|
||||
/// error.
|
||||
#[error("failed to configure boot source for VM: {0}")]
|
||||
@@ -135,6 +142,9 @@ pub enum VmmAction {
|
||||
/// Get the configuration of the microVM.
|
||||
GetVmConfiguration,
|
||||
|
||||
/// Get Prometheus Metrics.
|
||||
GetHypervisorMetrics,
|
||||
|
||||
/// Set the microVM configuration (memory & vcpu) using `VmConfig` as input. This
|
||||
/// action can only be called before the microVM has booted.
|
||||
SetVmConfiguration(VmConfigInfo),
|
||||
@@ -208,6 +218,8 @@ pub enum VmmData {
|
||||
Empty,
|
||||
/// The microVM configuration represented by `VmConfigInfo`.
|
||||
MachineConfiguration(Box<VmConfigInfo>),
|
||||
/// Prometheus Metrics represented by String.
|
||||
HypervisorMetrics(String),
|
||||
}
|
||||
|
||||
/// Request data type used to communicate between the API and the VMM.
|
||||
@@ -262,6 +274,7 @@ impl VmmService {
|
||||
VmmAction::GetVmConfiguration => Ok(VmmData::MachineConfiguration(Box::new(
|
||||
self.machine_config.clone(),
|
||||
))),
|
||||
VmmAction::GetHypervisorMetrics => self.get_hypervisor_metrics(),
|
||||
VmmAction::SetVmConfiguration(machine_config) => {
|
||||
self.set_vm_configuration(vmm, machine_config)
|
||||
}
|
||||
@@ -345,7 +358,8 @@ impl VmmService {
|
||||
Some(ref path) => Some(File::open(path).map_err(|e| BootSource(InvalidInitrdPath(e)))?),
|
||||
};
|
||||
|
||||
let mut cmdline = linux_loader::cmdline::Cmdline::new(dbs_boot::layout::CMDLINE_MAX_SIZE);
|
||||
let mut cmdline = linux_loader::cmdline::Cmdline::new(dbs_boot::layout::CMDLINE_MAX_SIZE)
|
||||
.map_err(|err| BootSource(InvalidKernelCommandLine(err)))?;
|
||||
let boot_args = boot_source_config
|
||||
.boot_args
|
||||
.unwrap_or_else(|| String::from(DEFAULT_KERNEL_CMDLINE));
|
||||
@@ -381,6 +395,13 @@ impl VmmService {
|
||||
Ok(VmmData::Empty)
|
||||
}
|
||||
|
||||
/// Get prometheus metrics.
|
||||
fn get_hypervisor_metrics(&self) -> VmmRequestResult {
|
||||
get_hypervisor_metrics()
|
||||
.map_err(|_| VmmActionError::GetHypervisorMetrics)
|
||||
.map(VmmData::HypervisorMetrics)
|
||||
}
|
||||
|
||||
/// Set virtual machine configuration.
|
||||
pub fn set_vm_configuration(
|
||||
&mut self,
|
||||
|
||||
14
src/dragonball/src/dbs_acpi/Cargo.toml
Normal file
14
src/dragonball/src/dbs_acpi/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "dbs-acpi"
|
||||
version = "0.1.0"
|
||||
authors = ["Alibaba Dragonball Team"]
|
||||
description = "acpi definitions for virtual machines."
|
||||
license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
homepage = "https://github.com/openanolis/dragonball-sandbox"
|
||||
repository = "https://github.com/openanolis/dragonball-sandbox"
|
||||
keywords = ["dragonball", "acpi", "vmm", "secure-sandbox"]
|
||||
readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
vm-memory = "0.9.0"
|
||||
11
src/dragonball/src/dbs_acpi/README.md
Normal file
11
src/dragonball/src/dbs_acpi/README.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# dbs-acpi
|
||||
|
||||
`dbs-acpi` provides ACPI data structures for VMM to emulate ACPI behavior.
|
||||
|
||||
## Acknowledgement
|
||||
|
||||
Part of the code is derived from the [Cloud Hypervisor](https://github.com/cloud-hypervisor/cloud-hypervisor) project.
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
|
||||
29
src/dragonball/src/dbs_acpi/src/lib.rs
Normal file
29
src/dragonball/src/dbs_acpi/src/lib.rs
Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright (c) 2019 Intel Corporation
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
pub mod rsdp;
|
||||
pub mod sdt;
|
||||
|
||||
fn generate_checksum(data: &[u8]) -> u8 {
|
||||
(255 - data.iter().fold(0u8, |acc, x| acc.wrapping_add(*x))).wrapping_add(1)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
#[test]
|
||||
fn test_generate_checksum() {
|
||||
let mut buf = [0x00; 8];
|
||||
let sum = generate_checksum(&buf);
|
||||
assert_eq!(sum, 0);
|
||||
buf[0] = 0xff;
|
||||
let sum = generate_checksum(&buf);
|
||||
assert_eq!(sum, 1);
|
||||
buf[0] = 0xaa;
|
||||
buf[1] = 0xcc;
|
||||
buf[4] = generate_checksum(&buf);
|
||||
let sum = buf.iter().fold(0u8, |s, v| s.wrapping_add(*v));
|
||||
assert_eq!(sum, 0);
|
||||
}
|
||||
}
|
||||
60
src/dragonball/src/dbs_acpi/src/rsdp.rs
Normal file
60
src/dragonball/src/dbs_acpi/src/rsdp.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright (c) 2019 Intel Corporation
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
// RSDP (Root System Description Pointer) is a data structure used in the ACPI programming interface.
|
||||
use vm_memory::ByteValued;
|
||||
|
||||
#[repr(packed)]
|
||||
#[derive(Clone, Copy, Default)]
|
||||
pub struct Rsdp {
|
||||
pub signature: [u8; 8],
|
||||
pub checksum: u8,
|
||||
pub oem_id: [u8; 6],
|
||||
pub revision: u8,
|
||||
_rsdt_addr: u32,
|
||||
pub length: u32,
|
||||
pub xsdt_addr: u64,
|
||||
pub extended_checksum: u8,
|
||||
_reserved: [u8; 3],
|
||||
}
|
||||
|
||||
// SAFETY: Rsdp only contains a series of integers
|
||||
unsafe impl ByteValued for Rsdp {}
|
||||
|
||||
impl Rsdp {
|
||||
pub fn new(xsdt_addr: u64) -> Self {
|
||||
let mut rsdp = Rsdp {
|
||||
signature: *b"RSD PTR ",
|
||||
checksum: 0,
|
||||
oem_id: *b"ALICLD",
|
||||
revision: 1,
|
||||
_rsdt_addr: 0,
|
||||
length: std::mem::size_of::<Rsdp>() as u32,
|
||||
xsdt_addr,
|
||||
extended_checksum: 0,
|
||||
_reserved: [0; 3],
|
||||
};
|
||||
rsdp.checksum = super::generate_checksum(&rsdp.as_slice()[0..19]);
|
||||
rsdp.extended_checksum = super::generate_checksum(rsdp.as_slice());
|
||||
rsdp
|
||||
}
|
||||
|
||||
pub fn len() -> usize {
|
||||
std::mem::size_of::<Rsdp>()
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Rsdp;
|
||||
use vm_memory::bytes::ByteValued;
|
||||
#[test]
|
||||
fn test_rsdp() {
|
||||
let rsdp = Rsdp::new(0xa0000);
|
||||
let sum = rsdp
|
||||
.as_slice()
|
||||
.iter()
|
||||
.fold(0u8, |acc, x| acc.wrapping_add(*x));
|
||||
assert_eq!(sum, 0);
|
||||
}
|
||||
}
|
||||
137
src/dragonball/src/dbs_acpi/src/sdt.rs
Normal file
137
src/dragonball/src/dbs_acpi/src/sdt.rs
Normal file
@@ -0,0 +1,137 @@
|
||||
// Copyright (c) 2019 Intel Corporation
|
||||
// Copyright (c) 2023 Alibaba Cloud
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
#[repr(packed)]
|
||||
pub struct GenericAddress {
|
||||
pub address_space_id: u8,
|
||||
pub register_bit_width: u8,
|
||||
pub register_bit_offset: u8,
|
||||
pub access_size: u8,
|
||||
pub address: u64,
|
||||
}
|
||||
|
||||
impl GenericAddress {
|
||||
pub fn io_port_address<T>(address: u16) -> Self {
|
||||
GenericAddress {
|
||||
address_space_id: 1,
|
||||
register_bit_width: 8 * std::mem::size_of::<T>() as u8,
|
||||
register_bit_offset: 0,
|
||||
access_size: std::mem::size_of::<T>() as u8,
|
||||
address: u64::from(address),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mmio_address<T>(address: u64) -> Self {
|
||||
GenericAddress {
|
||||
address_space_id: 0,
|
||||
register_bit_width: 8 * std::mem::size_of::<T>() as u8,
|
||||
register_bit_offset: 0,
|
||||
access_size: std::mem::size_of::<T>() as u8,
|
||||
address,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Sdt {
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
#[allow(clippy::len_without_is_empty)]
|
||||
impl Sdt {
|
||||
pub fn new(signature: [u8; 4], length: u32, revision: u8) -> Self {
|
||||
assert!(length >= 36);
|
||||
const OEM_ID: [u8; 6] = *b"ALICLD";
|
||||
const OEM_TABLE: [u8; 8] = *b"RUND ";
|
||||
const CREATOR_ID: [u8; 4] = *b"ALIC";
|
||||
let mut data = Vec::with_capacity(length as usize);
|
||||
data.extend_from_slice(&signature);
|
||||
data.extend_from_slice(&length.to_le_bytes());
|
||||
data.push(revision);
|
||||
data.push(0); // checksum
|
||||
data.extend_from_slice(&OEM_ID); // oem id u32
|
||||
data.extend_from_slice(&OEM_TABLE); // oem table
|
||||
data.extend_from_slice(&1u32.to_le_bytes()); // oem revision u32
|
||||
data.extend_from_slice(&CREATOR_ID); // creator id u32
|
||||
data.extend_from_slice(&1u32.to_le_bytes()); // creator revison u32
|
||||
assert_eq!(data.len(), 36);
|
||||
data.resize(length as usize, 0);
|
||||
let mut sdt = Sdt { data };
|
||||
sdt.update_checksum();
|
||||
sdt
|
||||
}
|
||||
|
||||
pub fn update_checksum(&mut self) {
|
||||
self.data[9] = 0;
|
||||
let checksum = super::generate_checksum(self.data.as_slice());
|
||||
self.data[9] = checksum
|
||||
}
|
||||
|
||||
pub fn as_slice(&self) -> &[u8] {
|
||||
self.data.as_slice()
|
||||
}
|
||||
|
||||
pub fn append<T>(&mut self, value: T) {
|
||||
let orig_length = self.data.len();
|
||||
let new_length = orig_length + std::mem::size_of::<T>();
|
||||
self.data.resize(new_length, 0);
|
||||
self.write_u32(4, new_length as u32);
|
||||
self.write(orig_length, value);
|
||||
}
|
||||
|
||||
pub fn append_slice(&mut self, data: &[u8]) {
|
||||
let orig_length = self.data.len();
|
||||
let new_length = orig_length + data.len();
|
||||
self.write_u32(4, new_length as u32);
|
||||
self.data.extend_from_slice(data);
|
||||
self.update_checksum();
|
||||
}
|
||||
|
||||
/// Write a value at the given offset
|
||||
pub fn write<T>(&mut self, offset: usize, value: T) {
|
||||
assert!((offset + (std::mem::size_of::<T>() - 1)) < self.data.len());
|
||||
unsafe {
|
||||
*(((self.data.as_mut_ptr() as usize) + offset) as *mut T) = value;
|
||||
}
|
||||
self.update_checksum();
|
||||
}
|
||||
|
||||
pub fn write_u8(&mut self, offset: usize, val: u8) {
|
||||
self.write(offset, val);
|
||||
}
|
||||
|
||||
pub fn write_u16(&mut self, offset: usize, val: u16) {
|
||||
self.write(offset, val);
|
||||
}
|
||||
|
||||
pub fn write_u32(&mut self, offset: usize, val: u32) {
|
||||
self.write(offset, val);
|
||||
}
|
||||
|
||||
pub fn write_u64(&mut self, offset: usize, val: u64) {
|
||||
self.write(offset, val);
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Sdt;
|
||||
#[test]
|
||||
fn test_sdt() {
|
||||
let mut sdt = Sdt::new(*b"TEST", 40, 1);
|
||||
let sum: u8 = sdt
|
||||
.as_slice()
|
||||
.iter()
|
||||
.fold(0u8, |acc, x| acc.wrapping_add(*x));
|
||||
assert_eq!(sum, 0);
|
||||
sdt.write_u32(36, 0x12345678);
|
||||
let sum: u8 = sdt
|
||||
.as_slice()
|
||||
.iter()
|
||||
.fold(0u8, |acc, x| acc.wrapping_add(*x));
|
||||
assert_eq!(sum, 0);
|
||||
}
|
||||
}
|
||||
20
src/dragonball/src/dbs_address_space/Cargo.toml
Normal file
20
src/dragonball/src/dbs_address_space/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "dbs-address-space"
|
||||
version = "0.3.0"
|
||||
authors = ["Alibaba Dragonball Team"]
|
||||
description = "address space manager for virtual machines."
|
||||
license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
homepage = "https://github.com/openanolis/dragonball-sandbox"
|
||||
repository = "https://github.com/openanolis/dragonball-sandbox"
|
||||
keywords = ["dragonball", "address", "vmm", "secure-sandbox"]
|
||||
readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
arc-swap = ">=0.4.8"
|
||||
libc = "0.2.39"
|
||||
nix = "0.23.1"
|
||||
lazy_static = "1"
|
||||
thiserror = "1"
|
||||
vmm-sys-util = "0.11.0"
|
||||
vm-memory = { version = "0.10", features = ["backend-mmap", "backend-atomic"] }
|
||||
1
src/dragonball/src/dbs_address_space/LICENSE
Symbolic link
1
src/dragonball/src/dbs_address_space/LICENSE
Symbolic link
@@ -0,0 +1 @@
|
||||
../../LICENSE
|
||||
80
src/dragonball/src/dbs_address_space/README.md
Normal file
80
src/dragonball/src/dbs_address_space/README.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# dbs-address-space
|
||||
|
||||
## Design
|
||||
|
||||
The `dbs-address-space` crate is an address space manager for virtual machines, which manages memory and MMIO resources resident in the guest physical address space.
|
||||
|
||||
Main components are:
|
||||
- `AddressSpaceRegion`: Struct to maintain configuration information about a guest address region.
|
||||
```rust
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AddressSpaceRegion {
|
||||
/// Type of address space regions.
|
||||
pub ty: AddressSpaceRegionType,
|
||||
/// Base address of the region in virtual machine's physical address space.
|
||||
pub base: GuestAddress,
|
||||
/// Size of the address space region.
|
||||
pub size: GuestUsize,
|
||||
/// Host NUMA node ids assigned to this region.
|
||||
pub host_numa_node_id: Option<u32>,
|
||||
|
||||
/// File/offset tuple to back the memory allocation.
|
||||
file_offset: Option<FileOffset>,
|
||||
/// Mmap permission flags.
|
||||
perm_flags: i32,
|
||||
/// Hugepage madvise hint.
|
||||
///
|
||||
/// It needs 'advise' or 'always' policy in host shmem config.
|
||||
is_hugepage: bool,
|
||||
/// Hotplug hint.
|
||||
is_hotplug: bool,
|
||||
/// Anonymous memory hint.
|
||||
///
|
||||
/// It should be true for regions with the MADV_DONTFORK flag enabled.
|
||||
is_anon: bool,
|
||||
}
|
||||
```
|
||||
- `AddressSpaceBase`: Base implementation to manage guest physical address space, without support of region hotplug.
|
||||
```rust
|
||||
#[derive(Clone)]
|
||||
pub struct AddressSpaceBase {
|
||||
regions: Vec<Arc<AddressSpaceRegion>>,
|
||||
layout: AddressSpaceLayout,
|
||||
}
|
||||
```
|
||||
- `AddressSpaceBase`: An address space implementation with region hotplug capability.
|
||||
```rust
|
||||
/// The `AddressSpace` is a wrapper over [AddressSpaceBase] to support hotplug of
|
||||
/// address space regions.
|
||||
#[derive(Clone)]
|
||||
pub struct AddressSpace {
|
||||
state: Arc<ArcSwap<AddressSpaceBase>>,
|
||||
}
|
||||
```
|
||||
|
||||
## Usage
|
||||
```rust
|
||||
// 1. create several memory regions
|
||||
let reg = Arc::new(
|
||||
AddressSpaceRegion::create_default_memory_region(
|
||||
GuestAddress(0x100000),
|
||||
0x100000,
|
||||
None,
|
||||
"shmem",
|
||||
"",
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
let regions = vec![reg];
|
||||
// 2. create layout (depending on archs)
|
||||
let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
|
||||
// 3. create address space from regions and layout
|
||||
let address_space = AddressSpace::from_regions(regions, layout.clone());
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0.
|
||||
830
src/dragonball/src/dbs_address_space/src/address_space.rs
Normal file
830
src/dragonball/src/dbs_address_space/src/address_space.rs
Normal file
@@ -0,0 +1,830 @@
|
||||
// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//! Physical address space manager for virtual machines.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
use vm_memory::{GuestAddress, GuestMemoryMmap};
|
||||
|
||||
use crate::{AddressSpaceError, AddressSpaceLayout, AddressSpaceRegion, AddressSpaceRegionType};
|
||||
|
||||
/// Base implementation to manage guest physical address space, without support of region hotplug.
|
||||
#[derive(Clone)]
|
||||
pub struct AddressSpaceBase {
|
||||
regions: Vec<Arc<AddressSpaceRegion>>,
|
||||
layout: AddressSpaceLayout,
|
||||
}
|
||||
|
||||
impl AddressSpaceBase {
|
||||
/// Create an instance of `AddressSpaceBase` from an `AddressSpaceRegion` array.
|
||||
///
|
||||
/// To achieve better performance by using binary search algorithm, the `regions` vector
|
||||
/// will gotten sorted by guest physical address.
|
||||
///
|
||||
/// Note, panicking if some regions intersects with each other.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `regions` - prepared regions to managed by the address space instance.
|
||||
/// * `layout` - prepared address space layout configuration.
|
||||
pub fn from_regions(
|
||||
mut regions: Vec<Arc<AddressSpaceRegion>>,
|
||||
layout: AddressSpaceLayout,
|
||||
) -> Self {
|
||||
regions.sort_unstable_by_key(|v| v.base);
|
||||
for region in regions.iter() {
|
||||
if !layout.is_region_valid(region) {
|
||||
panic!(
|
||||
"Invalid region {:?} for address space layout {:?}",
|
||||
region, layout
|
||||
);
|
||||
}
|
||||
}
|
||||
for idx in 1..regions.len() {
|
||||
if regions[idx].intersect_with(®ions[idx - 1]) {
|
||||
panic!("address space regions intersect with each other");
|
||||
}
|
||||
}
|
||||
AddressSpaceBase { regions, layout }
|
||||
}
|
||||
|
||||
/// Insert a new address space region into the address space.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - the new region to be inserted.
|
||||
pub fn insert_region(
|
||||
&mut self,
|
||||
region: Arc<AddressSpaceRegion>,
|
||||
) -> Result<(), AddressSpaceError> {
|
||||
if !self.layout.is_region_valid(®ion) {
|
||||
return Err(AddressSpaceError::InvalidAddressRange(
|
||||
region.start_addr().0,
|
||||
region.len(),
|
||||
));
|
||||
}
|
||||
for idx in 0..self.regions.len() {
|
||||
if self.regions[idx].intersect_with(®ion) {
|
||||
return Err(AddressSpaceError::InvalidAddressRange(
|
||||
region.start_addr().0,
|
||||
region.len(),
|
||||
));
|
||||
}
|
||||
}
|
||||
self.regions.push(region);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Enumerate all regions in the address space.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cb` - the callback function to apply to each region.
|
||||
pub fn walk_regions<F>(&self, mut cb: F) -> Result<(), AddressSpaceError>
|
||||
where
|
||||
F: FnMut(&Arc<AddressSpaceRegion>) -> Result<(), AddressSpaceError>,
|
||||
{
|
||||
for reg in self.regions.iter() {
|
||||
cb(reg)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get address space layout associated with the address space.
|
||||
pub fn layout(&self) -> AddressSpaceLayout {
|
||||
self.layout.clone()
|
||||
}
|
||||
|
||||
/// Get maximum of guest physical address in the address space.
|
||||
pub fn last_addr(&self) -> GuestAddress {
|
||||
let mut last_addr = GuestAddress(self.layout.mem_start);
|
||||
for reg in self.regions.iter() {
|
||||
if reg.ty != AddressSpaceRegionType::DAXMemory && reg.last_addr() > last_addr {
|
||||
last_addr = reg.last_addr();
|
||||
}
|
||||
}
|
||||
last_addr
|
||||
}
|
||||
|
||||
/// Check whether the guest physical address `guest_addr` belongs to a DAX memory region.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `guest_addr` - the guest physical address to inquire
|
||||
pub fn is_dax_region(&self, guest_addr: GuestAddress) -> bool {
|
||||
for reg in self.regions.iter() {
|
||||
// Safe because we have validate the region when creating the address space object.
|
||||
if reg.region_type() == AddressSpaceRegionType::DAXMemory
|
||||
&& reg.start_addr() <= guest_addr
|
||||
&& reg.start_addr().0 + reg.len() > guest_addr.0
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Get protection flags of memory region that guest physical address `guest_addr` belongs to.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `guest_addr` - the guest physical address to inquire
|
||||
pub fn prot_flags(&self, guest_addr: GuestAddress) -> Result<i32, AddressSpaceError> {
|
||||
for reg in self.regions.iter() {
|
||||
if reg.start_addr() <= guest_addr && reg.start_addr().0 + reg.len() > guest_addr.0 {
|
||||
return Ok(reg.prot_flags());
|
||||
}
|
||||
}
|
||||
|
||||
Err(AddressSpaceError::InvalidRegionType)
|
||||
}
|
||||
|
||||
/// Get optional NUMA node id associated with guest physical address `gpa`.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `gpa` - guest physical address to query.
|
||||
pub fn numa_node_id(&self, gpa: u64) -> Option<u32> {
|
||||
for reg in self.regions.iter() {
|
||||
if gpa >= reg.base.0 && gpa < (reg.base.0 + reg.size) {
|
||||
return reg.host_numa_node_id;
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// An address space implementation with region hotplug capability.
|
||||
///
|
||||
/// The `AddressSpace` is a wrapper over [AddressSpaceBase] to support hotplug of
|
||||
/// address space regions.
|
||||
#[derive(Clone)]
|
||||
pub struct AddressSpace {
|
||||
state: Arc<ArcSwap<AddressSpaceBase>>,
|
||||
}
|
||||
|
||||
impl AddressSpace {
|
||||
/// Convert a [GuestMemoryMmap] object into `GuestMemoryAtomic<GuestMemoryMmap>`.
|
||||
pub fn convert_into_vm_as(
|
||||
gm: GuestMemoryMmap,
|
||||
) -> vm_memory::atomic::GuestMemoryAtomic<GuestMemoryMmap> {
|
||||
vm_memory::atomic::GuestMemoryAtomic::from(Arc::new(gm))
|
||||
}
|
||||
|
||||
/// Create an instance of `AddressSpace` from an `AddressSpaceRegion` array.
|
||||
///
|
||||
/// To achieve better performance by using binary search algorithm, the `regions` vector
|
||||
/// will gotten sorted by guest physical address.
|
||||
///
|
||||
/// Note, panicking if some regions intersects with each other.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `regions` - prepared regions to managed by the address space instance.
|
||||
/// * `layout` - prepared address space layout configuration.
|
||||
pub fn from_regions(regions: Vec<Arc<AddressSpaceRegion>>, layout: AddressSpaceLayout) -> Self {
|
||||
let base = AddressSpaceBase::from_regions(regions, layout);
|
||||
|
||||
AddressSpace {
|
||||
state: Arc::new(ArcSwap::new(Arc::new(base))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a new address space region into the address space.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `region` - the new region to be inserted.
|
||||
pub fn insert_region(
|
||||
&mut self,
|
||||
region: Arc<AddressSpaceRegion>,
|
||||
) -> Result<(), AddressSpaceError> {
|
||||
let curr = self.state.load().regions.clone();
|
||||
let layout = self.state.load().layout.clone();
|
||||
let mut base = AddressSpaceBase::from_regions(curr, layout);
|
||||
base.insert_region(region)?;
|
||||
let _old = self.state.swap(Arc::new(base));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Enumerate all regions in the address space.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `cb` - the callback function to apply to each region.
|
||||
pub fn walk_regions<F>(&self, cb: F) -> Result<(), AddressSpaceError>
|
||||
where
|
||||
F: FnMut(&Arc<AddressSpaceRegion>) -> Result<(), AddressSpaceError>,
|
||||
{
|
||||
self.state.load().walk_regions(cb)
|
||||
}
|
||||
|
||||
/// Get address space layout associated with the address space.
|
||||
pub fn layout(&self) -> AddressSpaceLayout {
|
||||
self.state.load().layout()
|
||||
}
|
||||
|
||||
/// Get maximum of guest physical address in the address space.
|
||||
pub fn last_addr(&self) -> GuestAddress {
|
||||
self.state.load().last_addr()
|
||||
}
|
||||
|
||||
/// Check whether the guest physical address `guest_addr` belongs to a DAX memory region.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `guest_addr` - the guest physical address to inquire
|
||||
pub fn is_dax_region(&self, guest_addr: GuestAddress) -> bool {
|
||||
self.state.load().is_dax_region(guest_addr)
|
||||
}
|
||||
|
||||
/// Get protection flags of memory region that guest physical address `guest_addr` belongs to.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `guest_addr` - the guest physical address to inquire
|
||||
pub fn prot_flags(&self, guest_addr: GuestAddress) -> Result<i32, AddressSpaceError> {
|
||||
self.state.load().prot_flags(guest_addr)
|
||||
}
|
||||
|
||||
/// Get optional NUMA node id associated with guest physical address `gpa`.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `gpa` - guest physical address to query.
|
||||
pub fn numa_node_id(&self, gpa: u64) -> Option<u32> {
|
||||
self.state.load().numa_node_id(gpa)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::io::Write;
|
||||
use vm_memory::GuestUsize;
|
||||
use vmm_sys_util::tempfile::TempFile;
|
||||
|
||||
// define macros for unit test
|
||||
const GUEST_PHYS_END: u64 = (1 << 46) - 1;
|
||||
const GUEST_MEM_START: u64 = 0;
|
||||
const GUEST_MEM_END: u64 = GUEST_PHYS_END >> 1;
|
||||
const GUEST_DEVICE_START: u64 = GUEST_MEM_END + 1;
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_from_regions() {
|
||||
let mut file = TempFile::new().unwrap().into_file();
|
||||
let sample_buf = &[1, 2, 3, 4, 5];
|
||||
assert!(file.write_all(sample_buf).is_ok());
|
||||
file.set_len(0x10000).unwrap();
|
||||
|
||||
let reg = Arc::new(
|
||||
AddressSpaceRegion::create_device_region(GuestAddress(GUEST_DEVICE_START), 0x1000)
|
||||
.unwrap(),
|
||||
);
|
||||
let regions = vec![reg];
|
||||
let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
|
||||
let address_space = AddressSpaceBase::from_regions(regions, layout.clone());
|
||||
assert_eq!(address_space.layout(), layout);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Invalid region")]
|
||||
fn test_address_space_base_from_regions_when_region_invalid() {
|
||||
let reg = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x1000,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x200, 0x1800);
|
||||
let _address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "address space regions intersect with each other")]
|
||||
fn test_address_space_base_from_regions_when_region_intersected() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x200),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let _address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_insert_region() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x100, 0x1800);
|
||||
let mut address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
|
||||
// Normal case.
|
||||
address_space.insert_region(reg2).unwrap();
|
||||
assert!(!address_space.regions[1].intersect_with(&address_space.regions[0]));
|
||||
|
||||
// Error invalid address range case when region invaled.
|
||||
let invalid_reg = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x0),
|
||||
0x100,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
assert_eq!(
|
||||
format!(
|
||||
"{:?}",
|
||||
address_space.insert_region(invalid_reg).err().unwrap()
|
||||
),
|
||||
format!("InvalidAddressRange({:?}, {:?})", 0x0, 0x100)
|
||||
);
|
||||
|
||||
// Error Error invalid address range case when region to be inserted will intersect
|
||||
// exsisting regions.
|
||||
let intersected_reg = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x400),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
assert_eq!(
|
||||
format!(
|
||||
"{:?}",
|
||||
address_space.insert_region(intersected_reg).err().unwrap()
|
||||
),
|
||||
format!("InvalidAddressRange({:?}, {:?})", 0x400, 0x200)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_walk_regions() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
|
||||
// The argument of walk_regions is a function which takes a &Arc<AddressSpaceRegion>
|
||||
// and returns result. This function will be applied to all regions.
|
||||
fn do_not_have_hotplug(region: &Arc<AddressSpaceRegion>) -> Result<(), AddressSpaceError> {
|
||||
if region.is_hotplug() {
|
||||
Err(AddressSpaceError::InvalidRegionType) // The Error type is dictated to AddressSpaceError.
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
assert!(matches!(
|
||||
address_space.walk_regions(do_not_have_hotplug).unwrap(),
|
||||
()
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_last_addr() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
|
||||
assert_eq!(address_space.last_addr(), GuestAddress(0x500 - 1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_is_dax_region() {
|
||||
let page_size = 4096;
|
||||
let address_space_region = vec![
|
||||
Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(page_size),
|
||||
page_size as GuestUsize,
|
||||
)),
|
||||
Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(page_size * 2),
|
||||
page_size as GuestUsize,
|
||||
)),
|
||||
Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DAXMemory,
|
||||
GuestAddress(GUEST_DEVICE_START),
|
||||
page_size as GuestUsize,
|
||||
)),
|
||||
];
|
||||
let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
|
||||
let address_space = AddressSpaceBase::from_regions(address_space_region, layout);
|
||||
|
||||
assert!(!address_space.is_dax_region(GuestAddress(page_size)));
|
||||
assert!(!address_space.is_dax_region(GuestAddress(page_size * 2)));
|
||||
assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START)));
|
||||
assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + 1)));
|
||||
assert!(!address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size)));
|
||||
assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size - 1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_prot_flags() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
Some(0),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x300,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
|
||||
// Normal case, reg1.
|
||||
assert_eq!(address_space.prot_flags(GuestAddress(0x200)).unwrap(), 0);
|
||||
// Normal case, reg2.
|
||||
assert_eq!(
|
||||
address_space.prot_flags(GuestAddress(0x500)).unwrap(),
|
||||
libc::PROT_READ | libc::PROT_WRITE
|
||||
);
|
||||
// Inquire gpa where no region is set.
|
||||
assert!(matches!(
|
||||
address_space.prot_flags(GuestAddress(0x600)),
|
||||
Err(AddressSpaceError::InvalidRegionType)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_base_numa_node_id() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
Some(0),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x300,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpaceBase::from_regions(regions, layout);
|
||||
|
||||
// Normal case.
|
||||
assert_eq!(address_space.numa_node_id(0x200).unwrap(), 0);
|
||||
// Inquire region with None as its numa node id.
|
||||
assert_eq!(address_space.numa_node_id(0x400), None);
|
||||
// Inquire gpa where no region is set.
|
||||
assert_eq!(address_space.numa_node_id(0x600), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_convert_into_vm_as() {
|
||||
// ! Further and detailed test is needed here.
|
||||
let gmm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x400)]).unwrap();
|
||||
let _vm = AddressSpace::convert_into_vm_as(gmm);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_insert_region() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x100, 0x1800);
|
||||
let mut address_space = AddressSpace::from_regions(regions, layout);
|
||||
|
||||
// Normal case.
|
||||
assert!(matches!(address_space.insert_region(reg2).unwrap(), ()));
|
||||
|
||||
// Error invalid address range case when region invaled.
|
||||
let invalid_reg = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x0),
|
||||
0x100,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
assert_eq!(
|
||||
format!(
|
||||
"{:?}",
|
||||
address_space.insert_region(invalid_reg).err().unwrap()
|
||||
),
|
||||
format!("InvalidAddressRange({:?}, {:?})", 0x0, 0x100)
|
||||
);
|
||||
|
||||
// Error Error invalid address range case when region to be inserted will intersect
|
||||
// exsisting regions.
|
||||
let intersected_reg = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x400),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
assert_eq!(
|
||||
format!(
|
||||
"{:?}",
|
||||
address_space.insert_region(intersected_reg).err().unwrap()
|
||||
),
|
||||
format!("InvalidAddressRange({:?}, {:?})", 0x400, 0x200)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_walk_regions() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpace::from_regions(regions, layout);
|
||||
|
||||
fn access_all_hotplug_flag(
|
||||
region: &Arc<AddressSpaceRegion>,
|
||||
) -> Result<(), AddressSpaceError> {
|
||||
region.is_hotplug();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
assert!(matches!(
|
||||
address_space.walk_regions(access_all_hotplug_flag).unwrap(),
|
||||
()
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_layout() {
|
||||
let reg = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x1000,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpace::from_regions(regions, layout.clone());
|
||||
|
||||
assert_eq!(layout, address_space.layout());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_last_addr() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x200,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpace::from_regions(regions, layout);
|
||||
|
||||
assert_eq!(address_space.last_addr(), GuestAddress(0x500 - 1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_is_dax_region() {
|
||||
let page_size = 4096;
|
||||
let address_space_region = vec![
|
||||
Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(page_size),
|
||||
page_size as GuestUsize,
|
||||
)),
|
||||
Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(page_size * 2),
|
||||
page_size as GuestUsize,
|
||||
)),
|
||||
Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DAXMemory,
|
||||
GuestAddress(GUEST_DEVICE_START),
|
||||
page_size as GuestUsize,
|
||||
)),
|
||||
];
|
||||
let layout = AddressSpaceLayout::new(GUEST_PHYS_END, GUEST_MEM_START, GUEST_MEM_END);
|
||||
let address_space = AddressSpace::from_regions(address_space_region, layout);
|
||||
|
||||
assert!(!address_space.is_dax_region(GuestAddress(page_size)));
|
||||
assert!(!address_space.is_dax_region(GuestAddress(page_size * 2)));
|
||||
assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START)));
|
||||
assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + 1)));
|
||||
assert!(!address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size)));
|
||||
assert!(address_space.is_dax_region(GuestAddress(GUEST_DEVICE_START + page_size - 1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_prot_flags() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
Some(0),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x300,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpace::from_regions(regions, layout);
|
||||
|
||||
// Normal case, reg1.
|
||||
assert_eq!(address_space.prot_flags(GuestAddress(0x200)).unwrap(), 0);
|
||||
// Normal case, reg2.
|
||||
assert_eq!(
|
||||
address_space.prot_flags(GuestAddress(0x500)).unwrap(),
|
||||
libc::PROT_READ | libc::PROT_WRITE
|
||||
);
|
||||
// Inquire gpa where no region is set.
|
||||
assert!(matches!(
|
||||
address_space.prot_flags(GuestAddress(0x600)),
|
||||
Err(AddressSpaceError::InvalidRegionType)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_address_space_numa_node_id() {
|
||||
let reg1 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x100),
|
||||
0x200,
|
||||
Some(0),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let reg2 = Arc::new(AddressSpaceRegion::build(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x300),
|
||||
0x300,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
false,
|
||||
));
|
||||
let regions = vec![reg1, reg2];
|
||||
let layout = AddressSpaceLayout::new(0x2000, 0x0, 0x1800);
|
||||
let address_space = AddressSpace::from_regions(regions, layout);
|
||||
|
||||
// Normal case.
|
||||
assert_eq!(address_space.numa_node_id(0x200).unwrap(), 0);
|
||||
// Inquire region with None as its numa node id.
|
||||
assert_eq!(address_space.numa_node_id(0x400), None);
|
||||
// Inquire gpa where no region is set.
|
||||
assert_eq!(address_space.numa_node_id(0x600), None);
|
||||
}
|
||||
}
|
||||
154
src/dragonball/src/dbs_address_space/src/layout.rs
Normal file
154
src/dragonball/src/dbs_address_space/src/layout.rs
Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use crate::{AddressSpaceRegion, AddressSpaceRegionType};
|
||||
|
||||
// Max retry times for reading /proc
|
||||
const PROC_READ_RETRY: u64 = 5;
|
||||
|
||||
lazy_static! {
|
||||
/// Upper bound of host memory.
|
||||
pub static ref USABLE_END: u64 = {
|
||||
for _ in 0..PROC_READ_RETRY {
|
||||
if let Ok(buf) = std::fs::read("/proc/meminfo") {
|
||||
let content = String::from_utf8_lossy(&buf);
|
||||
for line in content.lines() {
|
||||
if line.starts_with("MemTotal:") {
|
||||
if let Some(end) = line.find(" kB") {
|
||||
if let Ok(size) = line[9..end].trim().parse::<u64>() {
|
||||
return (size << 10) - 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
panic!("Exceed max retry times. Cannot get total mem size from /proc/meminfo");
|
||||
};
|
||||
}
|
||||
|
||||
/// Address space layout configuration.
|
||||
///
|
||||
/// The layout configuration must guarantee that `mem_start` <= `mem_end` <= `phys_end`.
|
||||
/// Non-memory region should be arranged into the range [mem_end, phys_end).
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct AddressSpaceLayout {
|
||||
/// end of guest physical address
|
||||
pub phys_end: u64,
|
||||
/// start of guest memory address
|
||||
pub mem_start: u64,
|
||||
/// end of guest memory address
|
||||
pub mem_end: u64,
|
||||
/// end of usable memory address
|
||||
pub usable_end: u64,
|
||||
}
|
||||
|
||||
impl AddressSpaceLayout {
|
||||
/// Create a new instance of `AddressSpaceLayout`.
|
||||
pub fn new(phys_end: u64, mem_start: u64, mem_end: u64) -> Self {
|
||||
AddressSpaceLayout {
|
||||
phys_end,
|
||||
mem_start,
|
||||
mem_end,
|
||||
usable_end: *USABLE_END,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether an region is valid with the constraints of the layout.
|
||||
pub fn is_region_valid(&self, region: &AddressSpaceRegion) -> bool {
|
||||
let region_end = match region.base.0.checked_add(region.size) {
|
||||
None => return false,
|
||||
Some(v) => v,
|
||||
};
|
||||
|
||||
match region.ty {
|
||||
AddressSpaceRegionType::DefaultMemory => {
|
||||
if region.base.0 < self.mem_start || region_end > self.mem_end {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
AddressSpaceRegionType::DeviceMemory | AddressSpaceRegionType::DAXMemory => {
|
||||
if region.base.0 < self.mem_end || region_end > self.phys_end {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use vm_memory::GuestAddress;
|
||||
|
||||
#[test]
|
||||
fn test_is_region_valid() {
|
||||
let layout = AddressSpaceLayout::new(0x1_0000_0000, 0x1000_0000, 0x2000_0000);
|
||||
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x0),
|
||||
0x1_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x2000_0000),
|
||||
0x1_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x1_0000),
|
||||
0x2000_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(u64::MAX),
|
||||
0x1_0000_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DefaultMemory,
|
||||
GuestAddress(0x1000_0000),
|
||||
0x1_0000,
|
||||
);
|
||||
assert!(layout.is_region_valid(®ion));
|
||||
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DeviceMemory,
|
||||
GuestAddress(0x1000_0000),
|
||||
0x1_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DeviceMemory,
|
||||
GuestAddress(0x1_0000_0000),
|
||||
0x1_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DeviceMemory,
|
||||
GuestAddress(0x1_0000),
|
||||
0x1_0000_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DeviceMemory,
|
||||
GuestAddress(u64::MAX),
|
||||
0x1_0000_0000,
|
||||
);
|
||||
assert!(!layout.is_region_valid(®ion));
|
||||
let region = AddressSpaceRegion::new(
|
||||
AddressSpaceRegionType::DeviceMemory,
|
||||
GuestAddress(0x8000_0000),
|
||||
0x1_0000,
|
||||
);
|
||||
assert!(layout.is_region_valid(®ion));
|
||||
}
|
||||
}
|
||||
87
src/dragonball/src/dbs_address_space/src/lib.rs
Normal file
87
src/dragonball/src/dbs_address_space/src/lib.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
//! Traits and Structs to manage guest physical address space for virtual machines.
|
||||
//!
|
||||
//! The [vm-memory](https://crates.io/crates/vm-memory) implements mechanisms to manage and access
|
||||
//! guest memory resident in guest physical address space. In addition to guest memory, there may
|
||||
//! be other type of devices resident in the same guest physical address space.
|
||||
//!
|
||||
//! The `dbs-address-space` crate provides traits and structs to manage the guest physical address
|
||||
//! space for virtual machines, and mechanisms to coordinate all the devices resident in the
|
||||
//! guest physical address space.
|
||||
|
||||
use vm_memory::GuestUsize;
|
||||
|
||||
mod address_space;
|
||||
pub use self::address_space::{AddressSpace, AddressSpaceBase};
|
||||
|
||||
mod layout;
|
||||
pub use layout::{AddressSpaceLayout, USABLE_END};
|
||||
|
||||
mod memory;
|
||||
pub use memory::{GuestMemoryHybrid, GuestMemoryManager, GuestRegionHybrid, GuestRegionRaw};
|
||||
|
||||
mod numa;
|
||||
pub use self::numa::{NumaIdTable, NumaNode, NumaNodeInfo, MPOL_MF_MOVE, MPOL_PREFERRED};
|
||||
|
||||
mod region;
|
||||
pub use region::{AddressSpaceRegion, AddressSpaceRegionType};
|
||||
|
||||
/// Errors associated with virtual machine address space management.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AddressSpaceError {
|
||||
/// Invalid address space region type.
|
||||
#[error("invalid address space region type")]
|
||||
InvalidRegionType,
|
||||
|
||||
/// Invalid address range.
|
||||
#[error("invalid address space region (0x{0:x}, 0x{1:x})")]
|
||||
InvalidAddressRange(u64, GuestUsize),
|
||||
|
||||
/// Invalid guest memory source type.
|
||||
#[error("invalid memory source type {0}")]
|
||||
InvalidMemorySourceType(String),
|
||||
|
||||
/// Failed to create memfd to map anonymous memory.
|
||||
#[error("can not create memfd to map anonymous memory")]
|
||||
CreateMemFd(#[source] nix::Error),
|
||||
|
||||
/// Failed to open memory file.
|
||||
#[error("can not open memory file")]
|
||||
OpenFile(#[source] std::io::Error),
|
||||
|
||||
/// Failed to create directory.
|
||||
#[error("can not create directory")]
|
||||
CreateDir(#[source] std::io::Error),
|
||||
|
||||
/// Failed to set size for memory file.
|
||||
#[error("can not set size for memory file")]
|
||||
SetFileSize(#[source] std::io::Error),
|
||||
|
||||
/// Failed to unlink memory file.
|
||||
#[error("can not unlink memory file")]
|
||||
UnlinkFile(#[source] nix::Error),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_error_code() {
|
||||
let e = AddressSpaceError::InvalidRegionType;
|
||||
|
||||
assert_eq!(format!("{e}"), "invalid address space region type");
|
||||
assert_eq!(format!("{e:?}"), "InvalidRegionType");
|
||||
assert_eq!(
|
||||
format!(
|
||||
"{}",
|
||||
AddressSpaceError::InvalidMemorySourceType("test".to_string())
|
||||
),
|
||||
"invalid memory source type test"
|
||||
);
|
||||
}
|
||||
}
|
||||
1105
src/dragonball/src/dbs_address_space/src/memory/hybrid.rs
Normal file
1105
src/dragonball/src/dbs_address_space/src/memory/hybrid.rs
Normal file
File diff suppressed because it is too large
Load Diff
193
src/dragonball/src/dbs_address_space/src/memory/mod.rs
Normal file
193
src/dragonball/src/dbs_address_space/src/memory/mod.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//! Structs to manage guest memory for virtual machines.
|
||||
//!
|
||||
//! The `vm-memory` crate only provides traits and structs to access normal guest memory,
|
||||
//! it doesn't support special guest memory like virtio-fs/virtio-pmem DAX window etc.
|
||||
//! So this crate provides `GuestMemoryManager` over `vm-memory` to provide uniform abstraction
|
||||
//! for all guest memory.
|
||||
//!
|
||||
//! It also provides interfaces to coordinate guest memory hotplug events.
|
||||
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use vm_memory::{GuestAddressSpace, GuestMemoryAtomic, GuestMemoryLoadGuard, GuestMemoryMmap};
|
||||
|
||||
mod raw_region;
|
||||
pub use raw_region::GuestRegionRaw;
|
||||
|
||||
mod hybrid;
|
||||
pub use hybrid::{GuestMemoryHybrid, GuestRegionHybrid};
|
||||
|
||||
/// Type of source to allocate memory for virtual machines.
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub enum MemorySourceType {
|
||||
/// File on HugeTlbFs.
|
||||
FileOnHugeTlbFs,
|
||||
/// mmap() without flag `MAP_HUGETLB`.
|
||||
MmapAnonymous,
|
||||
/// mmap() with flag `MAP_HUGETLB`.
|
||||
MmapAnonymousHugeTlbFs,
|
||||
/// memfd() without flag `MFD_HUGETLB`.
|
||||
MemFdShared,
|
||||
/// memfd() with flag `MFD_HUGETLB`.
|
||||
MemFdOnHugeTlbFs,
|
||||
}
|
||||
|
||||
impl MemorySourceType {
|
||||
/// Check whether the memory source is huge page.
|
||||
pub fn is_hugepage(&self) -> bool {
|
||||
*self == Self::FileOnHugeTlbFs
|
||||
|| *self == Self::MmapAnonymousHugeTlbFs
|
||||
|| *self == Self::MemFdOnHugeTlbFs
|
||||
}
|
||||
|
||||
/// Check whether the memory source is anonymous memory.
|
||||
pub fn is_mmap_anonymous(&self) -> bool {
|
||||
*self == Self::MmapAnonymous || *self == Self::MmapAnonymousHugeTlbFs
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for MemorySourceType {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s {
|
||||
"hugetlbfs" => Ok(MemorySourceType::FileOnHugeTlbFs),
|
||||
"memfd" => Ok(MemorySourceType::MemFdShared),
|
||||
"shmem" => Ok(MemorySourceType::MemFdShared),
|
||||
"hugememfd" => Ok(MemorySourceType::MemFdOnHugeTlbFs),
|
||||
"hugeshmem" => Ok(MemorySourceType::MemFdOnHugeTlbFs),
|
||||
"anon" => Ok(MemorySourceType::MmapAnonymous),
|
||||
"mmap" => Ok(MemorySourceType::MmapAnonymous),
|
||||
"hugeanon" => Ok(MemorySourceType::MmapAnonymousHugeTlbFs),
|
||||
"hugemmap" => Ok(MemorySourceType::MmapAnonymousHugeTlbFs),
|
||||
_ => Err(format!("unknown memory source type {s}")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct GuestMemoryHotplugManager {}
|
||||
|
||||
/// The `GuestMemoryManager` manages all guest memory for virtual machines.
|
||||
///
|
||||
/// The `GuestMemoryManager` fulfills several different responsibilities.
|
||||
/// - First, it manages different types of guest memory, such as normal guest memory, virtio-fs
|
||||
/// DAX window and virtio-pmem DAX window etc. Different clients may want to access different
|
||||
/// types of memory. So the manager maintains two GuestMemory objects, one contains all guest
|
||||
/// memory, the other contains only normal guest memory.
|
||||
/// - Second, it coordinates memory/DAX window hotplug events, so clients may register hooks
|
||||
/// to receive hotplug notifications.
|
||||
#[allow(unused)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct GuestMemoryManager {
|
||||
default: GuestMemoryAtomic<GuestMemoryHybrid>,
|
||||
/// GuestMemory object hosts all guest memory.
|
||||
hybrid: GuestMemoryAtomic<GuestMemoryHybrid>,
|
||||
/// GuestMemory object for vIOMMU.
|
||||
iommu: GuestMemoryAtomic<GuestMemoryHybrid>,
|
||||
/// GuestMemory object hosts normal guest memory.
|
||||
normal: GuestMemoryAtomic<GuestMemoryMmap>,
|
||||
hotplug: Arc<GuestMemoryHotplugManager>,
|
||||
}
|
||||
|
||||
impl GuestMemoryManager {
|
||||
/// Create a new instance of `GuestMemoryManager`.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Get a reference to the normal `GuestMemory` object.
|
||||
pub fn get_normal_guest_memory(&self) -> &GuestMemoryAtomic<GuestMemoryMmap> {
|
||||
&self.normal
|
||||
}
|
||||
|
||||
/// Try to downcast the `GuestAddressSpace` object to a `GuestMemoryManager` object.
|
||||
pub fn to_manager<AS: GuestAddressSpace>(_m: &AS) -> Option<&Self> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GuestMemoryManager {
|
||||
fn default() -> Self {
|
||||
let hybrid = GuestMemoryAtomic::new(GuestMemoryHybrid::new());
|
||||
let iommu = GuestMemoryAtomic::new(GuestMemoryHybrid::new());
|
||||
let normal = GuestMemoryAtomic::new(GuestMemoryMmap::new());
|
||||
// By default, it provides to the `GuestMemoryHybrid` object containing all guest memory.
|
||||
let default = hybrid.clone();
|
||||
|
||||
GuestMemoryManager {
|
||||
default,
|
||||
hybrid,
|
||||
iommu,
|
||||
normal,
|
||||
hotplug: Arc::new(GuestMemoryHotplugManager::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GuestAddressSpace for GuestMemoryManager {
|
||||
type M = GuestMemoryHybrid;
|
||||
type T = GuestMemoryLoadGuard<GuestMemoryHybrid>;
|
||||
|
||||
fn memory(&self) -> Self::T {
|
||||
self.default.memory()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_memory_source_type() {
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("hugetlbfs").unwrap(),
|
||||
MemorySourceType::FileOnHugeTlbFs
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("memfd").unwrap(),
|
||||
MemorySourceType::MemFdShared
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("shmem").unwrap(),
|
||||
MemorySourceType::MemFdShared
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("hugememfd").unwrap(),
|
||||
MemorySourceType::MemFdOnHugeTlbFs
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("hugeshmem").unwrap(),
|
||||
MemorySourceType::MemFdOnHugeTlbFs
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("anon").unwrap(),
|
||||
MemorySourceType::MmapAnonymous
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("mmap").unwrap(),
|
||||
MemorySourceType::MmapAnonymous
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("hugeanon").unwrap(),
|
||||
MemorySourceType::MmapAnonymousHugeTlbFs
|
||||
);
|
||||
assert_eq!(
|
||||
MemorySourceType::from_str("hugemmap").unwrap(),
|
||||
MemorySourceType::MmapAnonymousHugeTlbFs
|
||||
);
|
||||
assert!(MemorySourceType::from_str("test").is_err());
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[test]
|
||||
fn test_to_manager() {
|
||||
let manager = GuestMemoryManager::new();
|
||||
let mgr = GuestMemoryManager::to_manager(&manager).unwrap();
|
||||
|
||||
assert_eq!(&manager as *const _, mgr as *const _);
|
||||
}
|
||||
}
|
||||
990
src/dragonball/src/dbs_address_space/src/memory/raw_region.rs
Normal file
990
src/dragonball/src/dbs_address_space/src/memory/raw_region.rs
Normal file
@@ -0,0 +1,990 @@
|
||||
// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use std::io::{Read, Write};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use vm_memory::bitmap::{Bitmap, BS};
|
||||
use vm_memory::mmap::NewBitmap;
|
||||
use vm_memory::volatile_memory::compute_offset;
|
||||
use vm_memory::{
|
||||
guest_memory, volatile_memory, Address, AtomicAccess, Bytes, FileOffset, GuestAddress,
|
||||
GuestMemoryRegion, GuestUsize, MemoryRegionAddress, VolatileSlice,
|
||||
};
|
||||
|
||||
/// Guest memory region for virtio-fs DAX window.
|
||||
#[derive(Debug)]
|
||||
pub struct GuestRegionRaw<B = ()> {
|
||||
guest_base: GuestAddress,
|
||||
addr: *mut u8,
|
||||
size: usize,
|
||||
bitmap: B,
|
||||
}
|
||||
|
||||
impl<B: NewBitmap> GuestRegionRaw<B> {
|
||||
/// Create a `GuestRegionRaw` object from raw pointer.
|
||||
///
|
||||
/// # Safety
|
||||
/// Caller needs to ensure `addr` and `size` are valid with static lifetime.
|
||||
pub unsafe fn new(guest_base: GuestAddress, addr: *mut u8, size: usize) -> Self {
|
||||
let bitmap = B::with_len(size);
|
||||
|
||||
GuestRegionRaw {
|
||||
guest_base,
|
||||
addr,
|
||||
size,
|
||||
bitmap,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionRaw<B> {
|
||||
type E = guest_memory::Error;
|
||||
|
||||
fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.write(buf, maddr)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.read(buf, maddr)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.write_slice(buf, maddr)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.read_slice(buf, maddr)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn read_from<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
src: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<usize>
|
||||
where
|
||||
F: Read,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.read_from::<F>(maddr, src, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn read_exact_from<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
src: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<()>
|
||||
where
|
||||
F: Read,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.read_exact_from::<F>(maddr, src, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn write_to<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
dst: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<usize>
|
||||
where
|
||||
F: Write,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.write_to::<F>(maddr, dst, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn write_all_to<F>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
dst: &mut F,
|
||||
count: usize,
|
||||
) -> guest_memory::Result<()>
|
||||
where
|
||||
F: Write,
|
||||
{
|
||||
let maddr = addr.raw_value() as usize;
|
||||
self.as_volatile_slice()
|
||||
.unwrap()
|
||||
.write_all_to::<F>(maddr, dst, count)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn store<T: AtomicAccess>(
|
||||
&self,
|
||||
val: T,
|
||||
addr: MemoryRegionAddress,
|
||||
order: Ordering,
|
||||
) -> guest_memory::Result<()> {
|
||||
self.as_volatile_slice().and_then(|s| {
|
||||
s.store(val, addr.raw_value() as usize, order)
|
||||
.map_err(Into::into)
|
||||
})
|
||||
}
|
||||
|
||||
fn load<T: AtomicAccess>(
|
||||
&self,
|
||||
addr: MemoryRegionAddress,
|
||||
order: Ordering,
|
||||
) -> guest_memory::Result<T> {
|
||||
self.as_volatile_slice()
|
||||
.and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
|
||||
}
|
||||
}
|
||||
|
||||
/// `GuestMemoryRegion` implementation for a region backed by a raw host
/// pointer (`self.addr`) rather than an owned mmap.
impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
    type B = B;

    /// Size of the region in bytes.
    fn len(&self) -> GuestUsize {
        self.size as GuestUsize
    }

    /// First guest physical address covered by this region.
    fn start_addr(&self) -> GuestAddress {
        self.guest_base
    }

    /// Bitmap used to track accesses to this region.
    fn bitmap(&self) -> &Self::B {
        &self.bitmap
    }

    /// Translates a region-relative address into a host pointer, or returns
    /// `InvalidBackendAddress` when `addr` is outside the region.
    fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
        // Not sure why wrapping_offset is not unsafe. Anyway this
        // is safe because we've just range-checked addr using check_address.
        self.check_address(addr)
            .ok_or(guest_memory::Error::InvalidBackendAddress)
            .map(|addr| self.addr.wrapping_offset(addr.raw_value() as isize))
    }

    /// Raw regions carry no file backing, so there is never a file offset.
    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    /// Returns the whole region as a byte slice.
    ///
    /// NOTE(review): soundness relies on the caller guaranteeing that the
    /// `self.addr`/`self.size` range stays valid and is not mutated through
    /// another alias for the slice's lifetime — confirm at call sites.
    unsafe fn as_slice(&self) -> Option<&[u8]> {
        // This is safe because we mapped the area at addr ourselves, so this slice will not
        // overflow. However, it is possible to alias.
        Some(std::slice::from_raw_parts(self.addr, self.size))
    }

    /// Returns the whole region as a mutable byte slice.
    ///
    /// NOTE(review): this hands out `&mut [u8]` from `&self`; as the comment
    /// below says, aliasing is possible, so callers must ensure exclusive
    /// access for the slice's lifetime.
    unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
        // This is safe because we mapped the area at addr ourselves, so this slice will not
        // overflow. However, it is possible to alias.
        Some(std::slice::from_raw_parts_mut(self.addr, self.size))
    }

    /// Returns a `VolatileSlice` covering `count` bytes starting at `offset`,
    /// or an `OutOfBounds` error when the range exceeds the region size.
    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> guest_memory::Result<VolatileSlice<BS<B>>> {
        let offset = offset.raw_value() as usize;
        // `compute_offset` also guards against `offset + count` overflow.
        let end = compute_offset(offset, count)?;
        if end > self.size {
            return Err(volatile_memory::Error::OutOfBounds { addr: end }.into());
        }

        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe {
            VolatileSlice::with_bitmap(
                (self.addr as usize + offset) as *mut _,
                count,
                self.bitmap.slice_at(offset),
            )
        })
    }

    /// A raw region has no way to tell whether its backing is hugetlbfs.
    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
extern crate vmm_sys_util;
|
||||
|
||||
use super::*;
|
||||
use crate::{GuestMemoryHybrid, GuestRegionHybrid};
|
||||
use std::sync::Arc;
|
||||
use vm_memory::{GuestAddressSpace, GuestMemory, VolatileMemory};
|
||||
|
||||
/*
|
||||
use crate::bitmap::tests::test_guest_memory_and_region;
|
||||
use crate::bitmap::AtomicBitmap;
|
||||
use crate::GuestAddressSpace;
|
||||
|
||||
use std::fs::File;
|
||||
use std::mem;
|
||||
use std::path::Path;
|
||||
use vmm_sys_util::tempfile::TempFile;
|
||||
|
||||
type GuestMemoryMmap = super::GuestMemoryMmap<()>;
|
||||
type GuestRegionMmap = super::GuestRegionMmap<()>;
|
||||
type MmapRegion = super::MmapRegion<()>;
|
||||
*/
|
||||
|
||||
// A freshly constructed raw region must report exactly the start address and
// length it was built with.
#[test]
fn test_region_raw_new() {
    let mut backing = [0u8; 1024];
    let base = GuestAddress(0x10_0000);
    // SAFETY: `backing` outlives `region` and is exactly 1024 bytes long.
    let region = unsafe { GuestRegionRaw::<()>::new(base, backing.as_mut_ptr(), 1024) };

    assert_eq!(region.start_addr(), base);
    assert_eq!(region.len(), 1024);
}
|
||||
|
||||
/*
|
||||
fn check_guest_memory_mmap(
|
||||
maybe_guest_mem: Result<GuestMemoryMmap, Error>,
|
||||
expected_regions_summary: &[(GuestAddress, usize)],
|
||||
) {
|
||||
assert!(maybe_guest_mem.is_ok());
|
||||
|
||||
let guest_mem = maybe_guest_mem.unwrap();
|
||||
assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
|
||||
let maybe_last_mem_reg = expected_regions_summary.last();
|
||||
if let Some((region_addr, region_size)) = maybe_last_mem_reg {
|
||||
let mut last_addr = region_addr.unchecked_add(*region_size as u64);
|
||||
if last_addr.raw_value() != 0 {
|
||||
last_addr = last_addr.unchecked_sub(1);
|
||||
}
|
||||
assert_eq!(guest_mem.last_addr(), last_addr);
|
||||
}
|
||||
for ((region_addr, region_size), mmap) in expected_regions_summary
|
||||
.iter()
|
||||
.zip(guest_mem.regions.iter())
|
||||
{
|
||||
assert_eq!(region_addr, &mmap.guest_base);
|
||||
assert_eq!(region_size, &mmap.mapping.size());
|
||||
|
||||
assert!(guest_mem.find_region(*region_addr).is_some());
|
||||
}
|
||||
}
|
||||
|
||||
fn new_guest_memory_mmap(
|
||||
regions_summary: &[(GuestAddress, usize)],
|
||||
) -> Result<GuestMemoryMmap, Error> {
|
||||
GuestMemoryMmap::from_ranges(regions_summary)
|
||||
}
|
||||
|
||||
fn new_guest_memory_mmap_from_regions(
|
||||
regions_summary: &[(GuestAddress, usize)],
|
||||
) -> Result<GuestMemoryMmap, Error> {
|
||||
GuestMemoryMmap::from_regions(
|
||||
regions_summary
|
||||
.iter()
|
||||
.map(|(region_addr, region_size)| {
|
||||
GuestRegionMmap::new(MmapRegion::new(*region_size).unwrap(), *region_addr)
|
||||
.unwrap()
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
fn new_guest_memory_mmap_from_arc_regions(
|
||||
regions_summary: &[(GuestAddress, usize)],
|
||||
) -> Result<GuestMemoryMmap, Error> {
|
||||
GuestMemoryMmap::from_arc_regions(
|
||||
regions_summary
|
||||
.iter()
|
||||
.map(|(region_addr, region_size)| {
|
||||
Arc::new(
|
||||
GuestRegionMmap::new(MmapRegion::new(*region_size).unwrap(), *region_addr)
|
||||
.unwrap(),
|
||||
)
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
fn new_guest_memory_mmap_with_files(
|
||||
regions_summary: &[(GuestAddress, usize)],
|
||||
) -> Result<GuestMemoryMmap, Error> {
|
||||
let regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = regions_summary
|
||||
.iter()
|
||||
.map(|(region_addr, region_size)| {
|
||||
let f = TempFile::new().unwrap().into_file();
|
||||
f.set_len(*region_size as u64).unwrap();
|
||||
|
||||
(*region_addr, *region_size, Some(FileOffset::new(f, 0)))
|
||||
})
|
||||
.collect();
|
||||
|
||||
GuestMemoryMmap::from_ranges_with_files(®ions)
|
||||
}
|
||||
*/
|
||||
|
||||
// `get_slice()` must return a slice whose pointer lands at the requested
// offset inside the backing buffer.
#[test]
fn slice_addr() {
    let mut backing = [0u8; 1024];
    // SAFETY: `backing` outlives `region` and matches the declared size.
    let region = unsafe {
        GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), backing.as_mut_ptr(), 1024)
    };

    let slice = region.get_slice(MemoryRegionAddress(2), 3).unwrap();
    assert_eq!(slice.as_ptr(), &mut backing[2] as *mut _);
}
|
||||
|
||||
/*
|
||||
#[test]
|
||||
fn test_address_in_range() {
|
||||
let f1 = TempFile::new().unwrap().into_file();
|
||||
f1.set_len(0x400).unwrap();
|
||||
let f2 = TempFile::new().unwrap().into_file();
|
||||
f2.set_len(0x400).unwrap();
|
||||
|
||||
let start_addr1 = GuestAddress(0x0);
|
||||
let start_addr2 = GuestAddress(0x800);
|
||||
let guest_mem =
|
||||
GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
|
||||
let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
|
||||
(start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
|
||||
(start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
|
||||
])
|
||||
.unwrap();
|
||||
|
||||
let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
|
||||
for guest_mem in guest_mem_list.iter() {
|
||||
assert!(guest_mem.address_in_range(GuestAddress(0x200)));
|
||||
assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
|
||||
assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
|
||||
assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_address() {
|
||||
let f1 = TempFile::new().unwrap().into_file();
|
||||
f1.set_len(0x400).unwrap();
|
||||
let f2 = TempFile::new().unwrap().into_file();
|
||||
f2.set_len(0x400).unwrap();
|
||||
|
||||
let start_addr1 = GuestAddress(0x0);
|
||||
let start_addr2 = GuestAddress(0x800);
|
||||
let guest_mem =
|
||||
GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
|
||||
let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
|
||||
(start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
|
||||
(start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
|
||||
])
|
||||
.unwrap();
|
||||
|
||||
let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
|
||||
for guest_mem in guest_mem_list.iter() {
|
||||
assert_eq!(
|
||||
guest_mem.check_address(GuestAddress(0x200)),
|
||||
Some(GuestAddress(0x200))
|
||||
);
|
||||
assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
|
||||
assert_eq!(
|
||||
guest_mem.check_address(GuestAddress(0xa00)),
|
||||
Some(GuestAddress(0xa00))
|
||||
);
|
||||
assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_region_addr() {
|
||||
let f1 = TempFile::new().unwrap().into_file();
|
||||
f1.set_len(0x400).unwrap();
|
||||
let f2 = TempFile::new().unwrap().into_file();
|
||||
f2.set_len(0x400).unwrap();
|
||||
|
||||
let start_addr1 = GuestAddress(0x0);
|
||||
let start_addr2 = GuestAddress(0x800);
|
||||
let guest_mem =
|
||||
GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
|
||||
let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
|
||||
(start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
|
||||
(start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
|
||||
])
|
||||
.unwrap();
|
||||
|
||||
let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
|
||||
for guest_mem in guest_mem_list.iter() {
|
||||
assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
|
||||
let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
|
||||
let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
|
||||
assert!(r0.as_ptr() == r1.as_ptr());
|
||||
assert_eq!(addr0, MemoryRegionAddress(0));
|
||||
assert_eq!(addr1, MemoryRegionAddress(0x200));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_host_address() {
|
||||
let f1 = TempFile::new().unwrap().into_file();
|
||||
f1.set_len(0x400).unwrap();
|
||||
let f2 = TempFile::new().unwrap().into_file();
|
||||
f2.set_len(0x400).unwrap();
|
||||
|
||||
let start_addr1 = GuestAddress(0x0);
|
||||
let start_addr2 = GuestAddress(0x800);
|
||||
let guest_mem =
|
||||
GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
|
||||
let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
|
||||
(start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
|
||||
(start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
|
||||
])
|
||||
.unwrap();
|
||||
|
||||
let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
|
||||
for guest_mem in guest_mem_list.iter() {
|
||||
assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
|
||||
let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
|
||||
let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
|
||||
assert_eq!(
|
||||
ptr0,
|
||||
guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
|
||||
);
|
||||
assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deref() {
|
||||
let f = TempFile::new().unwrap().into_file();
|
||||
f.set_len(0x400).unwrap();
|
||||
|
||||
let start_addr = GuestAddress(0x0);
|
||||
let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
|
||||
let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
|
||||
start_addr,
|
||||
0x400,
|
||||
Some(FileOffset::new(f, 0)),
|
||||
)])
|
||||
.unwrap();
|
||||
|
||||
let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
|
||||
for guest_mem in guest_mem_list.iter() {
|
||||
let sample_buf = &[1, 2, 3, 4, 5];
|
||||
|
||||
assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
|
||||
let slice = guest_mem
|
||||
.find_region(GuestAddress(0))
|
||||
.unwrap()
|
||||
.as_volatile_slice()
|
||||
.unwrap();
|
||||
|
||||
let buf = &mut [0, 0, 0, 0, 0];
|
||||
assert_eq!(slice.read(buf, 0).unwrap(), 5);
|
||||
assert_eq!(buf, sample_buf);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_u64() {
|
||||
let f1 = TempFile::new().unwrap().into_file();
|
||||
f1.set_len(0x1000).unwrap();
|
||||
let f2 = TempFile::new().unwrap().into_file();
|
||||
f2.set_len(0x1000).unwrap();
|
||||
|
||||
let start_addr1 = GuestAddress(0x0);
|
||||
let start_addr2 = GuestAddress(0x1000);
|
||||
let bad_addr = GuestAddress(0x2001);
|
||||
let bad_addr2 = GuestAddress(0x1ffc);
|
||||
let max_addr = GuestAddress(0x2000);
|
||||
|
||||
let gm =
|
||||
GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
|
||||
let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
|
||||
(start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
|
||||
(start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
|
||||
])
|
||||
.unwrap();
|
||||
|
||||
let gm_list = vec![gm, gm_backed_by_file];
|
||||
for gm in gm_list.iter() {
|
||||
let val1: u64 = 0xaa55_aa55_aa55_aa55;
|
||||
let val2: u64 = 0x55aa_55aa_55aa_55aa;
|
||||
assert_eq!(
|
||||
format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
|
||||
format!("InvalidGuestAddress({:?})", bad_addr,)
|
||||
);
|
||||
assert_eq!(
|
||||
format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
|
||||
format!(
|
||||
"PartialBuffer {{ expected: {:?}, completed: {:?} }}",
|
||||
mem::size_of::<u64>(),
|
||||
max_addr.checked_offset_from(bad_addr2).unwrap()
|
||||
)
|
||||
);
|
||||
|
||||
gm.write_obj(val1, GuestAddress(0x500)).unwrap();
|
||||
gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
|
||||
let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
|
||||
let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
|
||||
assert_eq!(val1, num1);
|
||||
assert_eq!(val2, num2);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_and_read() {
|
||||
let f = TempFile::new().unwrap().into_file();
|
||||
f.set_len(0x400).unwrap();
|
||||
|
||||
let mut start_addr = GuestAddress(0x1000);
|
||||
let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
|
||||
let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
|
||||
start_addr,
|
||||
0x400,
|
||||
Some(FileOffset::new(f, 0)),
|
||||
)])
|
||||
.unwrap();
|
||||
|
||||
let gm_list = vec![gm, gm_backed_by_file];
|
||||
for gm in gm_list.iter() {
|
||||
let sample_buf = &[1, 2, 3, 4, 5];
|
||||
|
||||
assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);
|
||||
|
||||
let buf = &mut [0u8; 5];
|
||||
assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
|
||||
assert_eq!(buf, sample_buf);
|
||||
|
||||
start_addr = GuestAddress(0x13ff);
|
||||
assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
|
||||
assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
|
||||
assert_eq!(buf[0], sample_buf[0]);
|
||||
start_addr = GuestAddress(0x1000);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_to_and_write_from_mem() {
|
||||
let f = TempFile::new().unwrap().into_file();
|
||||
f.set_len(0x400).unwrap();
|
||||
|
||||
let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
|
||||
let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
|
||||
GuestAddress(0x1000),
|
||||
0x400,
|
||||
Some(FileOffset::new(f, 0)),
|
||||
)])
|
||||
.unwrap();
|
||||
|
||||
let gm_list = vec![gm, gm_backed_by_file];
|
||||
for gm in gm_list.iter() {
|
||||
let addr = GuestAddress(0x1010);
|
||||
let mut file = if cfg!(unix) {
|
||||
File::open(Path::new("/dev/zero")).unwrap()
|
||||
} else {
|
||||
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
|
||||
};
|
||||
gm.write_obj(!0u32, addr).unwrap();
|
||||
gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
|
||||
.unwrap();
|
||||
let value: u32 = gm.read_obj(addr).unwrap();
|
||||
if cfg!(unix) {
|
||||
assert_eq!(value, 0);
|
||||
} else {
|
||||
assert_eq!(value, 0x0090_5a4d);
|
||||
}
|
||||
|
||||
let mut sink = Vec::new();
|
||||
gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
|
||||
.unwrap();
|
||||
if cfg!(unix) {
|
||||
assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
|
||||
} else {
|
||||
assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn create_vec_with_regions() {
|
||||
let region_size = 0x400;
|
||||
let regions = vec![
|
||||
(GuestAddress(0x0), region_size),
|
||||
(GuestAddress(0x1000), region_size),
|
||||
];
|
||||
let mut iterated_regions = Vec::new();
|
||||
let gm = GuestMemoryMmap::from_ranges(®ions).unwrap();
|
||||
|
||||
for region in gm.iter() {
|
||||
assert_eq!(region.len(), region_size as GuestUsize);
|
||||
}
|
||||
|
||||
for region in gm.iter() {
|
||||
iterated_regions.push((region.start_addr(), region.len() as usize));
|
||||
}
|
||||
assert_eq!(regions, iterated_regions);
|
||||
|
||||
assert!(regions
|
||||
.iter()
|
||||
.map(|x| (x.0, x.1))
|
||||
.eq(iterated_regions.iter().copied()));
|
||||
|
||||
assert_eq!(gm.regions[0].guest_base, regions[0].0);
|
||||
assert_eq!(gm.regions[1].guest_base, regions[1].0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory() {
|
||||
let region_size = 0x400;
|
||||
let regions = vec![
|
||||
(GuestAddress(0x0), region_size),
|
||||
(GuestAddress(0x1000), region_size),
|
||||
];
|
||||
let mut iterated_regions = Vec::new();
|
||||
let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap());
|
||||
let mem = gm.memory();
|
||||
|
||||
for region in mem.iter() {
|
||||
assert_eq!(region.len(), region_size as GuestUsize);
|
||||
}
|
||||
|
||||
for region in mem.iter() {
|
||||
iterated_regions.push((region.start_addr(), region.len() as usize));
|
||||
}
|
||||
assert_eq!(regions, iterated_regions);
|
||||
|
||||
assert!(regions
|
||||
.iter()
|
||||
.map(|x| (x.0, x.1))
|
||||
.eq(iterated_regions.iter().copied()));
|
||||
|
||||
assert_eq!(gm.regions[0].guest_base, regions[0].0);
|
||||
assert_eq!(gm.regions[1].guest_base, regions[1].0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_access_cross_boundary() {
|
||||
let f1 = TempFile::new().unwrap().into_file();
|
||||
f1.set_len(0x1000).unwrap();
|
||||
let f2 = TempFile::new().unwrap().into_file();
|
||||
f2.set_len(0x1000).unwrap();
|
||||
|
||||
let start_addr1 = GuestAddress(0x0);
|
||||
let start_addr2 = GuestAddress(0x1000);
|
||||
let gm =
|
||||
GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
|
||||
let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
|
||||
(start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
|
||||
(start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
|
||||
])
|
||||
.unwrap();
|
||||
|
||||
let gm_list = vec![gm, gm_backed_by_file];
|
||||
for gm in gm_list.iter() {
|
||||
let sample_buf = &[1, 2, 3, 4, 5];
|
||||
assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
|
||||
let buf = &mut [0u8; 5];
|
||||
assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
|
||||
assert_eq!(buf, sample_buf);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_retrieve_fd_backing_memory_region() {
|
||||
let f = TempFile::new().unwrap().into_file();
|
||||
f.set_len(0x400).unwrap();
|
||||
|
||||
let start_addr = GuestAddress(0x0);
|
||||
let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
|
||||
assert!(gm.find_region(start_addr).is_some());
|
||||
let region = gm.find_region(start_addr).unwrap();
|
||||
assert!(region.file_offset().is_none());
|
||||
|
||||
let gm = GuestMemoryMmap::from_ranges_with_files(&[(
|
||||
start_addr,
|
||||
0x400,
|
||||
Some(FileOffset::new(f, 0)),
|
||||
)])
|
||||
.unwrap();
|
||||
assert!(gm.find_region(start_addr).is_some());
|
||||
let region = gm.find_region(start_addr).unwrap();
|
||||
assert!(region.file_offset().is_some());
|
||||
}
|
||||
|
||||
// Windows needs a dedicated test where it will retrieve the allocation
|
||||
// granularity to determine a proper offset (other than 0) that can be
|
||||
// used for the backing file. Refer to Microsoft docs here:
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn test_retrieve_offset_from_fd_backing_memory_region() {
|
||||
let f = TempFile::new().unwrap().into_file();
|
||||
f.set_len(0x1400).unwrap();
|
||||
// Needs to be aligned on 4k, otherwise mmap will fail.
|
||||
let offset = 0x1000;
|
||||
|
||||
let start_addr = GuestAddress(0x0);
|
||||
let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
|
||||
assert!(gm.find_region(start_addr).is_some());
|
||||
let region = gm.find_region(start_addr).unwrap();
|
||||
assert!(region.file_offset().is_none());
|
||||
|
||||
let gm = GuestMemoryMmap::from_ranges_with_files(&[(
|
||||
start_addr,
|
||||
0x400,
|
||||
Some(FileOffset::new(f, offset)),
|
||||
)])
|
||||
.unwrap();
|
||||
assert!(gm.find_region(start_addr).is_some());
|
||||
let region = gm.find_region(start_addr).unwrap();
|
||||
assert!(region.file_offset().is_some());
|
||||
assert_eq!(region.file_offset().unwrap().start(), offset);
|
||||
}
|
||||
*/
|
||||
|
||||
// Inserting regions into a `GuestMemoryHybrid` must keep the region list
// sorted by start address, reject overlapping inserts, and leave previously
// taken `memory()` snapshots untouched.
#[test]
fn test_mmap_insert_region() {
    let start_addr1 = GuestAddress(0);
    let start_addr2 = GuestAddress(0x10_0000);

    let guest_mem = GuestMemoryHybrid::<()>::new();
    let mut raw_buf = [0u8; 0x1000];
    let raw_ptr = &mut raw_buf as *mut u8;
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, raw_ptr, 0x1000) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, raw_ptr, 0x1000) };
    let gm = &guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    // Snapshot taken now; later inserts must not change it.
    let mem_orig = gm.memory();
    assert_eq!(mem_orig.num_regions(), 2);

    // Insert three more regions out of address order.
    let reg = unsafe { GuestRegionRaw::new(GuestAddress(0x8000), raw_ptr, 0x1000) };
    let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
    let gm = gm.insert_region(mmap).unwrap();
    let reg = unsafe { GuestRegionRaw::new(GuestAddress(0x4000), raw_ptr, 0x1000) };
    let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
    let gm = gm.insert_region(mmap).unwrap();
    let reg = unsafe { GuestRegionRaw::new(GuestAddress(0xc000), raw_ptr, 0x1000) };
    let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
    let gm = gm.insert_region(mmap).unwrap();
    // Overlaps the region just inserted at 0xc000, so it must be rejected.
    let reg = unsafe { GuestRegionRaw::new(GuestAddress(0xc000), raw_ptr, 0x1000) };
    let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
    gm.insert_region(mmap).unwrap_err();

    // The old snapshot still sees two regions; the new memory sees five.
    assert_eq!(mem_orig.num_regions(), 2);
    assert_eq!(gm.num_regions(), 5);

    // Regions must be kept sorted by start address.
    assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
    assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000));
    assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000));
    assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000));
    assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000));
}
|
||||
|
||||
// Removing a region must require an exact (address, size) match, return the
// removed region, and leave previously taken `memory()` snapshots untouched.
#[test]
fn test_mmap_remove_region() {
    let start_addr1 = GuestAddress(0);
    let start_addr2 = GuestAddress(0x10_0000);

    let guest_mem = GuestMemoryHybrid::<()>::new();
    let mut raw_buf = [0u8; 0x1000];
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x1000) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x1000) };
    let gm = &guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    // Snapshot taken now; the removal below must not change it.
    let mem_orig = gm.memory();
    assert_eq!(mem_orig.num_regions(), 2);

    // Wrong size for an existing region, and an address with no region:
    // both must fail.
    gm.remove_region(GuestAddress(0), 128).unwrap_err();
    gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
    let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();

    assert_eq!(mem_orig.num_regions(), 2);
    assert_eq!(gm.num_regions(), 1);

    assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
    assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
}
|
||||
|
||||
// Exercise `GuestRegionRaw::get_slice()` with an in-bounds request, an empty
// request, and a request running past the end of the region.
#[test]
fn test_guest_memory_mmap_get_slice() {
    let start_addr1 = GuestAddress(0);
    let mut raw_buf = [0u8; 0x400];
    let region =
        unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };

    // Normal case.
    let slice_addr = MemoryRegionAddress(0x100);
    let slice_size = 0x200;
    let slice = region.get_slice(slice_addr, slice_size).unwrap();
    assert_eq!(slice.len(), slice_size);

    // Empty slice.
    let slice_addr = MemoryRegionAddress(0x200);
    let slice_size = 0x0;
    let slice = region.get_slice(slice_addr, slice_size).unwrap();
    assert!(slice.is_empty());

    // Error case when slice_size is beyond the boundary.
    let slice_addr = MemoryRegionAddress(0x300);
    let slice_size = 0x200;
    assert!(region.get_slice(slice_addr, slice_size).is_err());
}
|
||||
|
||||
// `as_volatile_slice()` must span the whole region and support typed
// load/store access through `get_ref`.
#[test]
fn test_guest_memory_mmap_as_volatile_slice() {
    let start_addr1 = GuestAddress(0);
    let mut raw_buf = [0u8; 0x400];
    let region =
        unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
    let region_size = 0x400;

    // Test slice length.
    let slice = region.as_volatile_slice().unwrap();
    assert_eq!(slice.len(), region_size);

    // Test slice data: a stored u32 must read back unchanged.
    let v = 0x1234_5678u32;
    let r = slice.get_ref::<u32>(0x200).unwrap();
    r.store(v);
    assert_eq!(r.load(), v);
}
|
||||
|
||||
// `GuestMemoryHybrid::get_slice()` must resolve in-region ranges, allow empty
// slices, and reject ranges that are unmapped or cross a region boundary.
#[test]
fn test_guest_memory_get_slice() {
    let start_addr1 = GuestAddress(0);
    let start_addr2 = GuestAddress(0x800);

    // Two 0x400-byte regions at 0x0 and 0x800, with a hole in between.
    let guest_mem = GuestMemoryHybrid::<()>::new();
    let mut raw_buf = [0u8; 0x400];
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();

    // Normal cases.
    let slice_size = 0x200;
    let slice = guest_mem
        .get_slice(GuestAddress(0x100), slice_size)
        .unwrap();
    assert_eq!(slice.len(), slice_size);

    let slice_size = 0x400;
    let slice = guest_mem
        .get_slice(GuestAddress(0x800), slice_size)
        .unwrap();
    assert_eq!(slice.len(), slice_size);

    // Empty slice.
    assert!(guest_mem
        .get_slice(GuestAddress(0x900), 0)
        .unwrap()
        .is_empty());

    // Error cases, wrong size or base address.
    assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err());
    assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err());
    assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
}
|
||||
|
||||
// `checked_offset()` must produce an address inside some mapped region
// (possibly a later one) or `None` on overflow / unmapped targets.
#[test]
fn test_checked_offset() {
    let start_addr1 = GuestAddress(0);
    let start_addr2 = GuestAddress(0x800);
    let start_addr3 = GuestAddress(0xc00);

    // Three 0x400-byte regions at 0x0, 0x800 and 0xc00; [0x400, 0x800) is a hole.
    let guest_mem = GuestMemoryHybrid::<()>::new();
    let mut raw_buf = [0u8; 0x400];
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr3, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();

    assert_eq!(
        guest_mem.checked_offset(start_addr1, 0x200),
        Some(GuestAddress(0x200))
    );
    // The target may land in a different (later) region than the base.
    assert_eq!(
        guest_mem.checked_offset(start_addr1, 0xa00),
        Some(GuestAddress(0xa00))
    );
    assert_eq!(
        guest_mem.checked_offset(start_addr2, 0x7ff),
        Some(GuestAddress(0xfff))
    );
    assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
    // Use the `usize::MAX` associated constant (consistent with
    // `test_check_range()`); the `std::usize` module path is deprecated.
    assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);

    // 0x400 is the first unmapped byte after region 1, 0x3ff the last mapped one.
    assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
    assert_eq!(
        guest_mem.checked_offset(start_addr1, 0x400 - 1),
        Some(GuestAddress(0x400 - 1))
    );
}
|
||||
|
||||
// `check_range()` must accept only byte ranges fully covered by contiguous
// mapped regions, and reject ranges spanning holes or overflowing.
#[test]
fn test_check_range() {
    let start_addr1 = GuestAddress(0);
    let start_addr2 = GuestAddress(0x800);
    let start_addr3 = GuestAddress(0xc00);

    // Three 0x400-byte regions at 0x0, 0x800 and 0xc00; regions 2 and 3 are
    // contiguous, [0x400, 0x800) is a hole.
    let guest_mem = GuestMemoryHybrid::<()>::new();
    let mut raw_buf = [0u8; 0x400];
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();
    let reg = unsafe { GuestRegionRaw::<()>::new(start_addr3, &mut raw_buf as *mut _, 0x400) };
    let guest_mem = guest_mem
        .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
        .unwrap();

    assert!(guest_mem.check_range(start_addr1, 0x0));
    assert!(guest_mem.check_range(start_addr1, 0x200));
    assert!(guest_mem.check_range(start_addr1, 0x400));
    // Crosses the hole between region 1 and region 2.
    assert!(!guest_mem.check_range(start_addr1, 0xa00));
    // Regions 2 and 3 are contiguous, so up to 0x800 bytes from 0x800 is fine.
    assert!(guest_mem.check_range(start_addr2, 0x7ff));
    assert!(guest_mem.check_range(start_addr2, 0x800));
    assert!(!guest_mem.check_range(start_addr2, 0x801));
    assert!(!guest_mem.check_range(start_addr2, 0xc00));
    assert!(!guest_mem.check_range(start_addr1, usize::MAX));
}
|
||||
}
|
||||
85
src/dragonball/src/dbs_address_space/src/numa.rs
Normal file
85
src/dragonball/src/dbs_address_space/src/numa.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//! Types for NUMA information.
|
||||
|
||||
use vm_memory::{GuestAddress, GuestUsize};
|
||||
|
||||
/// `mbind()` memory policy mode: prefer the given node but fall back to other
/// nodes rather than failing, so it doesn't lead to OOM.
// NOTE(review): value appears to match MPOL_PREFERRED from
// <linux/mempolicy.h> — confirm against the kernel uapi headers.
pub const MPOL_PREFERRED: u32 = 1;

/// `mbind()` flag: attempt to move existing pages onto the requested node.
// NOTE(review): value appears to match MPOL_MF_MOVE (1 << 1) from
// <linux/mempolicy.h> — confirm against the kernel uapi headers.
pub const MPOL_MF_MOVE: u32 = 2;
|
||||
|
||||
/// Table recording the NUMA node ids assigned to different devices.
pub struct NumaIdTable {
    /// NUMA node id of each memory region, one entry per region.
    pub memory: Vec<u32>,
    /// NUMA node id of each CPU, one entry per CPU.
    pub cpu: Vec<u32>,
}
|
||||
|
||||
/// Memory information for one address range belonging to a NUMA node.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub struct NumaNodeInfo {
    /// Base address of the region in guest physical address space.
    pub base: GuestAddress,
    /// Size of the address region in bytes.
    pub size: GuestUsize,
}
|
||||
|
||||
/// Record all region's info of a numa node.
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
pub struct NumaNode {
|
||||
region_infos: Vec<NumaNodeInfo>,
|
||||
vcpu_ids: Vec<u32>,
|
||||
}
|
||||
|
||||
impl NumaNode {
|
||||
/// get reference of region_infos in numa node.
|
||||
pub fn region_infos(&self) -> &Vec<NumaNodeInfo> {
|
||||
&self.region_infos
|
||||
}
|
||||
|
||||
/// get vcpu ids belonging to a numa node.
|
||||
pub fn vcpu_ids(&self) -> &Vec<u32> {
|
||||
&self.vcpu_ids
|
||||
}
|
||||
|
||||
/// add a new numa region info into this numa node.
|
||||
pub fn add_info(&mut self, info: &NumaNodeInfo) {
|
||||
self.region_infos.push(*info);
|
||||
}
|
||||
|
||||
/// add a group of vcpu ids belong to this numa node
|
||||
pub fn add_vcpu_ids(&mut self, vcpu_ids: &[u32]) {
|
||||
self.vcpu_ids.extend(vcpu_ids)
|
||||
}
|
||||
|
||||
/// create a new numa node struct
|
||||
pub fn new() -> NumaNode {
|
||||
NumaNode {
|
||||
region_infos: Vec::new(),
|
||||
vcpu_ids: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_create_numa_node() {
        let mut node = NumaNode::new();

        // A freshly-added region must show up in `region_infos()`.
        let region = NumaNodeInfo {
            base: GuestAddress(0),
            size: 1024,
        };
        node.add_info(&region);
        assert_eq!(node.region_infos().as_slice(), &[region]);

        // Added vCPU ids must be appended in order.
        let cpus = [0u32, 1, 2, 3];
        node.add_vcpu_ids(&cpus);
        assert_eq!(node.vcpu_ids().as_slice(), &cpus);
    }
}
|
||||
564
src/dragonball/src/dbs_address_space/src/region.rs
Normal file
564
src/dragonball/src/dbs_address_space/src/region.rs
Normal file
@@ -0,0 +1,564 @@
|
||||
// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
use std::ffi::CString;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::os::unix::io::FromRawFd;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
use nix::sys::memfd;
|
||||
use vm_memory::{Address, FileOffset, GuestAddress, GuestUsize};
|
||||
|
||||
use crate::memory::MemorySourceType;
|
||||
use crate::memory::MemorySourceType::MemFdShared;
|
||||
use crate::AddressSpaceError;
|
||||
|
||||
/// Type of address space regions.
///
/// On physical machines, physical memory may have different properties, such as
/// volatile vs non-volatile, read-only vs read-write, non-executable vs executable etc.
/// On virtual machines, the concept of memory property may be extended to support better
/// cooperation between the hypervisor and the guest kernel. Here address space region type means
/// what the region will be used for by the guest OS, and different permissions and policies may
/// be applied to different address space regions.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AddressSpaceRegionType {
    /// Normal memory accessible by CPUs and IO devices.
    DefaultMemory,
    /// MMIO address region for Devices.
    DeviceMemory,
    /// DAX address region for virtio-fs/virtio-pmem.
    DAXMemory,
}
|
||||
|
||||
/// Struct to maintain configuration information about a guest address region.
///
/// Public fields describe the region's placement in the guest physical address
/// space; private fields carry the `mmap(2)` parameters and madvise hints used
/// when the region is actually mapped.
#[derive(Debug, Clone)]
pub struct AddressSpaceRegion {
    /// Type of address space regions.
    pub ty: AddressSpaceRegionType,
    /// Base address of the region in virtual machine's physical address space.
    pub base: GuestAddress,
    /// Size of the address space region.
    pub size: GuestUsize,
    /// Host NUMA node ids assigned to this region.
    pub host_numa_node_id: Option<u32>,

    /// File/offset tuple to back the memory allocation.
    file_offset: Option<FileOffset>,
    /// Mmap permission flags (`MAP_*` values, e.g. `MAP_SHARED`).
    perm_flags: i32,
    /// Mmap protection flags (`PROT_*` values, e.g. `PROT_READ`).
    prot_flags: i32,
    /// Hugepage madvise hint.
    ///
    /// It needs 'advise' or 'always' policy in host shmem config.
    is_hugepage: bool,
    /// Hotplug hint.
    is_hotplug: bool,
    /// Anonymous memory hint.
    ///
    /// It should be true for regions with the MADV_DONTFORK flag enabled.
    is_anon: bool,
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
impl AddressSpaceRegion {
    /// Create an address space region with default configuration.
    ///
    /// Defaults: no NUMA affinity, no backing file, `MAP_SHARED`,
    /// `PROT_READ | PROT_WRITE`, and no hugepage/hotplug/anonymous hints.
    pub fn new(ty: AddressSpaceRegionType, base: GuestAddress, size: GuestUsize) -> Self {
        AddressSpaceRegion {
            ty,
            base,
            size,
            host_numa_node_id: None,
            file_offset: None,
            perm_flags: libc::MAP_SHARED,
            prot_flags: libc::PROT_READ | libc::PROT_WRITE,
            is_hugepage: false,
            is_hotplug: false,
            is_anon: false,
        }
    }

    /// Create an address space region with all configurable information.
    ///
    /// # Arguments
    /// * `ty` - Type of the address region
    /// * `base` - Base address in VM to map content
    /// * `size` - Length of content to map
    /// * `host_numa_node_id` - Optional NUMA node id to allocate memory from
    /// * `file_offset` - Optional file descriptor and offset to map content from
    /// * `perm_flags` - mmap permission flags
    /// * `prot_flags` - mmap protection flags
    /// * `is_hotplug` - Whether it's a region for hotplug.
    pub fn build(
        ty: AddressSpaceRegionType,
        base: GuestAddress,
        size: GuestUsize,
        host_numa_node_id: Option<u32>,
        file_offset: Option<FileOffset>,
        perm_flags: i32,
        prot_flags: i32,
        is_hotplug: bool,
    ) -> Self {
        let mut region = Self::new(ty, base, size);

        region.set_host_numa_node_id(host_numa_node_id);
        region.set_file_offset(file_offset);
        region.set_perm_flags(perm_flags);
        region.set_prot_flags(prot_flags);
        if is_hotplug {
            region.set_hotplug();
        }

        region
    }

    /// Create an address space region to map memory into the virtual machine.
    ///
    /// Convenience wrapper around [`Self::create_memory_region`] with
    /// read/write protection flags.
    ///
    /// # Arguments
    /// * `base` - Base address in VM to map content
    /// * `size` - Length of content to map
    /// * `numa_node_id` - Optional NUMA node id to allocate memory from
    /// * `mem_type` - Memory mapping from, 'shmem' or 'hugetlbfs'
    /// * `mem_file_path` - Memory file path
    /// * `mem_prealloc` - Whether to enable pre-allocation of guest memory
    /// * `is_hotplug` - Whether it's a region for hotplug.
    pub fn create_default_memory_region(
        base: GuestAddress,
        size: GuestUsize,
        numa_node_id: Option<u32>,
        mem_type: &str,
        mem_file_path: &str,
        mem_prealloc: bool,
        is_hotplug: bool,
    ) -> Result<AddressSpaceRegion, AddressSpaceError> {
        Self::create_memory_region(
            base,
            size,
            numa_node_id,
            mem_type,
            mem_file_path,
            mem_prealloc,
            libc::PROT_READ | libc::PROT_WRITE,
            is_hotplug,
        )
    }

    /// Create an address space region to map memory from memfd/hugetlbfs into the virtual machine.
    ///
    /// # Arguments
    /// * `base` - Base address in VM to map content
    /// * `size` - Length of content to map
    /// * `numa_node_id` - Optional NUMA node id to allocate memory from
    /// * `mem_type` - Memory mapping from, 'shmem' or 'hugetlbfs'
    /// * `mem_file_path` - Memory file path (only used by the hugetlbfs-file source type)
    /// * `mem_prealloc` - Whether to enable pre-allocation of guest memory
    /// * `prot_flags` - mmap protection flags
    /// * `is_hotplug` - Whether it's a region for hotplug.
    ///
    /// # Errors
    /// Returns an [`AddressSpaceError`] when `mem_type` is unknown, or when
    /// creating/sizing/unlinking the backing file fails.
    pub fn create_memory_region(
        base: GuestAddress,
        size: GuestUsize,
        numa_node_id: Option<u32>,
        mem_type: &str,
        mem_file_path: &str,
        mem_prealloc: bool,
        prot_flags: i32,
        is_hotplug: bool,
    ) -> Result<AddressSpaceRegion, AddressSpaceError> {
        // Shared-mapping flags used by the file-backed branches below; the
        // anonymous-mmap branch computes its own private flags instead.
        let perm_flags = if mem_prealloc {
            libc::MAP_SHARED | libc::MAP_POPULATE
        } else {
            libc::MAP_SHARED
        };
        let source_type = MemorySourceType::from_str(mem_type)
            .map_err(|_e| AddressSpaceError::InvalidMemorySourceType(mem_type.to_string()))?;
        let mut reg = match source_type {
            MemorySourceType::MemFdShared | MemorySourceType::MemFdOnHugeTlbFs => {
                // The name passed to memfd_create() is only a debugging label
                // (visible in /proc); it does not need to be unique.
                let fn_str = if source_type == MemFdShared {
                    CString::new("shmem").expect("CString::new('shmem') failed")
                } else {
                    CString::new("hugeshmem").expect("CString::new('hugeshmem') failed")
                };
                let filename = fn_str.as_c_str();
                let fd = memfd::memfd_create(filename, memfd::MemFdCreateFlag::empty())
                    .map_err(AddressSpaceError::CreateMemFd)?;
                // Safe because we have just created the fd.
                let file: File = unsafe { File::from_raw_fd(fd) };
                file.set_len(size).map_err(AddressSpaceError::SetFileSize)?;
                Self::build(
                    AddressSpaceRegionType::DefaultMemory,
                    base,
                    size,
                    numa_node_id,
                    Some(FileOffset::new(file, 0)),
                    perm_flags,
                    prot_flags,
                    is_hotplug,
                )
            }
            MemorySourceType::MmapAnonymous | MemorySourceType::MmapAnonymousHugeTlbFs => {
                // Anonymous mappings are private to the process and have no
                // backing file, so the shared `perm_flags` above do not apply.
                let mut perm_flags = libc::MAP_PRIVATE | libc::MAP_ANONYMOUS;
                if mem_prealloc {
                    perm_flags |= libc::MAP_POPULATE
                }
                Self::build(
                    AddressSpaceRegionType::DefaultMemory,
                    base,
                    size,
                    numa_node_id,
                    None,
                    perm_flags,
                    prot_flags,
                    is_hotplug,
                )
            }
            MemorySourceType::FileOnHugeTlbFs => {
                let path = Path::new(mem_file_path);
                if let Some(parent_dir) = path.parent() {
                    // Ensure that the parent directory is existed for the mem file path.
                    std::fs::create_dir_all(parent_dir).map_err(AddressSpaceError::CreateDir)?;
                }
                let file = OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .open(mem_file_path)
                    .map_err(AddressSpaceError::OpenFile)?;
                // Unlink right after open: the file stays alive through the
                // open fd but disappears from the filesystem namespace, so it
                // is reclaimed automatically when the fd is dropped.
                nix::unistd::unlink(mem_file_path).map_err(AddressSpaceError::UnlinkFile)?;
                file.set_len(size).map_err(AddressSpaceError::SetFileSize)?;
                let file_offset = FileOffset::new(file, 0);
                Self::build(
                    AddressSpaceRegionType::DefaultMemory,
                    base,
                    size,
                    numa_node_id,
                    Some(file_offset),
                    perm_flags,
                    prot_flags,
                    is_hotplug,
                )
            }
        };

        if source_type.is_hugepage() {
            reg.set_hugepage();
        }
        if source_type.is_mmap_anonymous() {
            reg.set_anonpage();
        }

        Ok(reg)
    }

    /// Create an address region for device MMIO.
    ///
    /// The returned region has no backing file and zeroed mmap flags.
    ///
    /// # Arguments
    /// * `base` - Base address in VM to map content
    /// * `size` - Length of content to map
    pub fn create_device_region(
        base: GuestAddress,
        size: GuestUsize,
    ) -> Result<AddressSpaceRegion, AddressSpaceError> {
        Ok(Self::build(
            AddressSpaceRegionType::DeviceMemory,
            base,
            size,
            None,
            None,
            0,
            0,
            false,
        ))
    }

    /// Get type of the address space region.
    pub fn region_type(&self) -> AddressSpaceRegionType {
        self.ty
    }

    /// Get size of region.
    pub fn len(&self) -> GuestUsize {
        self.size
    }

    /// Get the inclusive start physical address of the region.
    pub fn start_addr(&self) -> GuestAddress {
        self.base
    }

    /// Get the inclusive end physical address of the region.
    ///
    /// The caller must ensure the region is valid (see [`Self::is_valid`]):
    /// only debug builds assert this, and in release builds an invalid region
    /// would wrap around silently here.
    pub fn last_addr(&self) -> GuestAddress {
        debug_assert!(self.size > 0 && self.base.checked_add(self.size).is_some());
        GuestAddress(self.base.raw_value() + self.size - 1)
    }

    /// Get mmap permission flags of the address space region.
    pub fn perm_flags(&self) -> i32 {
        self.perm_flags
    }

    /// Set mmap permission flags for the address space region.
    pub fn set_perm_flags(&mut self, perm_flags: i32) {
        self.perm_flags = perm_flags;
    }

    /// Get mmap protection flags of the address space region.
    pub fn prot_flags(&self) -> i32 {
        self.prot_flags
    }

    /// Set mmap protection flags for the address space region.
    pub fn set_prot_flags(&mut self, prot_flags: i32) {
        self.prot_flags = prot_flags;
    }

    /// Get host_numa_node_id flags
    pub fn host_numa_node_id(&self) -> Option<u32> {
        self.host_numa_node_id
    }

    /// Set associated NUMA node ID to allocate memory from for this region.
    pub fn set_host_numa_node_id(&mut self, host_numa_node_id: Option<u32>) {
        self.host_numa_node_id = host_numa_node_id;
    }

    /// Check whether the address space region is backed by a memory file.
    pub fn has_file(&self) -> bool {
        self.file_offset.is_some()
    }

    /// Get optional file associated with the region.
    pub fn file_offset(&self) -> Option<&FileOffset> {
        self.file_offset.as_ref()
    }

    /// Set associated file/offset pair for the region.
    pub fn set_file_offset(&mut self, file_offset: Option<FileOffset>) {
        self.file_offset = file_offset;
    }

    /// Set the hotplug hint.
    pub fn set_hotplug(&mut self) {
        self.is_hotplug = true
    }

    /// Get the hotplug hint.
    pub fn is_hotplug(&self) -> bool {
        self.is_hotplug
    }

    /// Set hugepage hint for `madvise()`, only takes effect when the memory type is `shmem`.
    pub fn set_hugepage(&mut self) {
        self.is_hugepage = true
    }

    /// Get the hugepage hint.
    pub fn is_hugepage(&self) -> bool {
        self.is_hugepage
    }

    /// Set the anonymous memory hint.
    pub fn set_anonpage(&mut self) {
        self.is_anon = true
    }

    /// Get the anonymous memory hint.
    pub fn is_anonpage(&self) -> bool {
        self.is_anon
    }

    /// Check whether the address space region is valid.
    ///
    /// A region is valid when it is non-empty and its end does not wrap
    /// around the 64-bit guest address space.
    pub fn is_valid(&self) -> bool {
        self.size > 0 && self.base.checked_add(self.size).is_some()
    }

    /// Check whether the address space region intersects with another one.
    pub fn intersect_with(&self, other: &AddressSpaceRegion) -> bool {
        // Treat invalid address region as intersecting always
        let end1 = match self.base.checked_add(self.size) {
            Some(addr) => addr,
            None => return true,
        };
        let end2 = match other.base.checked_add(other.size) {
            Some(addr) => addr,
            None => return true,
        };

        // Half-open interval overlap test: [base1, end1) vs [base2, end2).
        !(end1 <= other.base || self.base >= end2)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;
    use vmm_sys_util::tempfile::TempFile;

    #[test]
    fn test_address_space_region_valid() {
        // Region whose end wraps past the top of the 64-bit address space.
        let reg1 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0xFFFFFFFFFFFFF000),
            0x2000,
        );
        assert!(!reg1.is_valid());
        // base + size == 2^64 overflows checked_add, so still invalid.
        let reg1 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0xFFFFFFFFFFFFF000),
            0x1000,
        );
        assert!(!reg1.is_valid());
        // Ends exactly at the last representable address: valid.
        let reg1 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DeviceMemory,
            GuestAddress(0xFFFFFFFFFFFFE000),
            0x1000,
        );
        assert!(reg1.is_valid());
        assert_eq!(reg1.start_addr(), GuestAddress(0xFFFFFFFFFFFFE000));
        assert_eq!(reg1.len(), 0x1000);
        assert!(!reg1.has_file());
        assert!(reg1.file_offset().is_none());
        assert_eq!(reg1.perm_flags(), libc::MAP_SHARED);
        assert_eq!(reg1.prot_flags(), libc::PROT_READ | libc::PROT_WRITE);
        assert_eq!(reg1.region_type(), AddressSpaceRegionType::DeviceMemory);

        // `build()` must preserve the file/offset and raw flag values as-is.
        let tmp_file = TempFile::new().unwrap();
        let mut f = tmp_file.into_file();
        let sample_buf = &[1, 2, 3, 4, 5];
        assert!(f.write_all(sample_buf).is_ok());
        let reg2 = AddressSpaceRegion::build(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0x1000),
            0x1000,
            None,
            Some(FileOffset::new(f, 0x0)),
            0x5a,
            0x5a,
            false,
        );
        assert_eq!(reg2.region_type(), AddressSpaceRegionType::DefaultMemory);
        assert!(reg2.is_valid());
        assert_eq!(reg2.start_addr(), GuestAddress(0x1000));
        assert_eq!(reg2.len(), 0x1000);
        assert!(reg2.has_file());
        assert!(reg2.file_offset().is_some());
        assert_eq!(reg2.perm_flags(), 0x5a);
        assert_eq!(reg2.prot_flags(), 0x5a);
    }

    #[test]
    fn test_address_space_region_intersect() {
        let reg1 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0x1000),
            0x1000,
        );
        let reg2 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0x2000),
            0x1000,
        );
        let reg3 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0x1000),
            0x1001,
        );
        let reg4 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0x1100),
            0x100,
        );
        // reg5 is invalid (wraps), so it must intersect with everything.
        let reg5 = AddressSpaceRegion::new(
            AddressSpaceRegionType::DefaultMemory,
            GuestAddress(0xFFFFFFFFFFFFF000),
            0x2000,
        );

        // Adjacent regions do not intersect (half-open intervals).
        assert!(!reg1.intersect_with(&reg2));
        assert!(!reg2.intersect_with(&reg1));

        // intersect with self
        assert!(reg1.intersect_with(&reg1));

        // intersect with others
        assert!(reg3.intersect_with(&reg2));
        assert!(reg2.intersect_with(&reg3));
        assert!(reg1.intersect_with(&reg4));
        assert!(reg4.intersect_with(&reg1));
        assert!(reg1.intersect_with(&reg5));
        assert!(reg5.intersect_with(&reg1));
    }

    #[test]
    fn test_create_device_region() {
        let reg = AddressSpaceRegion::create_device_region(GuestAddress(0x10000), 0x1000).unwrap();
        assert_eq!(reg.region_type(), AddressSpaceRegionType::DeviceMemory);
        assert_eq!(reg.start_addr(), GuestAddress(0x10000));
        assert_eq!(reg.len(), 0x1000);
    }

    #[test]
    fn test_create_default_memory_region() {
        // Unknown memory source type must be rejected.
        AddressSpaceRegion::create_default_memory_region(
            GuestAddress(0x100000),
            0x100000,
            None,
            "invalid",
            "invalid",
            false,
            false,
        )
        .unwrap_err();

        // memfd-backed shmem region.
        let reg = AddressSpaceRegion::create_default_memory_region(
            GuestAddress(0x100000),
            0x100000,
            None,
            "shmem",
            "",
            false,
            false,
        )
        .unwrap();
        assert_eq!(reg.region_type(), AddressSpaceRegionType::DefaultMemory);
        assert_eq!(reg.start_addr(), GuestAddress(0x100000));
        assert_eq!(reg.last_addr(), GuestAddress(0x1fffff));
        assert_eq!(reg.len(), 0x100000);
        assert!(reg.file_offset().is_some());

        // memfd-backed huge shmem region with preallocation.
        let reg = AddressSpaceRegion::create_default_memory_region(
            GuestAddress(0x100000),
            0x100000,
            None,
            "hugeshmem",
            "",
            true,
            false,
        )
        .unwrap();
        assert_eq!(reg.region_type(), AddressSpaceRegionType::DefaultMemory);
        assert_eq!(reg.start_addr(), GuestAddress(0x100000));
        assert_eq!(reg.last_addr(), GuestAddress(0x1fffff));
        assert_eq!(reg.len(), 0x100000);
        assert!(reg.file_offset().is_some());

        // Anonymous mmap region has no backing file.
        let reg = AddressSpaceRegion::create_default_memory_region(
            GuestAddress(0x100000),
            0x100000,
            None,
            "mmap",
            "",
            true,
            false,
        )
        .unwrap();
        assert_eq!(reg.region_type(), AddressSpaceRegionType::DefaultMemory);
        assert_eq!(reg.start_addr(), GuestAddress(0x100000));
        assert_eq!(reg.last_addr(), GuestAddress(0x1fffff));
        assert_eq!(reg.len(), 0x100000);
        assert!(reg.file_offset().is_none());

        // TODO: test hugetlbfs
    }
}
|
||||
14
src/dragonball/src/dbs_allocator/Cargo.toml
Normal file
14
src/dragonball/src/dbs_allocator/Cargo.toml
Normal file
@@ -0,0 +1,14 @@
|
||||
[package]
|
||||
name = "dbs-allocator"
|
||||
version = "0.1.1"
|
||||
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
|
||||
description = "a resource allocator for virtual machine manager"
|
||||
license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
homepage = "https://github.com/openanolis/dragonball-sandbox"
|
||||
repository = "https://github.com/openanolis/dragonball-sandbox"
|
||||
keywords = ["dragonball"]
|
||||
readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
thiserror = "1.0"
|
||||
1
src/dragonball/src/dbs_allocator/LICENSE
Symbolic link
1
src/dragonball/src/dbs_allocator/LICENSE
Symbolic link
@@ -0,0 +1 @@
|
||||
../../LICENSE
|
||||
106
src/dragonball/src/dbs_allocator/README.md
Normal file
106
src/dragonball/src/dbs_allocator/README.md
Normal file
@@ -0,0 +1,106 @@
|
||||
# dbs-allocator
|
||||
|
||||
## Design
|
||||
|
||||
The resource manager in the `Dragonball Sandbox` needs to manage and allocate different kinds of resource for the
|
||||
sandbox (virtual machine), such as memory-mapped I/O address space, port I/O address space, legacy IRQ numbers,
|
||||
MSI/MSI-X vectors, device instance id, etc. The `dbs-allocator` crate is designed to help the resource manager
|
||||
to track and allocate these types of resources.
|
||||
|
||||
Main components are:
|
||||
- *Constraints*: struct to declare constraints for resource allocation.
|
||||
```rust
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct Constraint {
|
||||
/// Size of resource to allocate.
|
||||
pub size: u64,
|
||||
/// Lower boundary for resource allocation.
|
||||
pub min: u64,
|
||||
/// Upper boundary for resource allocation.
|
||||
pub max: u64,
|
||||
/// Alignment for allocated resource.
|
||||
pub align: u64,
|
||||
/// Policy for resource allocation.
|
||||
pub policy: AllocPolicy,
|
||||
}
|
||||
```
|
||||
- `IntervalTree`: An interval tree implementation specialized for VMM resource management.
|
||||
```rust
|
||||
pub struct IntervalTree<T> {
|
||||
pub(crate) root: Option<Node<T>>,
|
||||
}
|
||||
|
||||
pub fn allocate(&mut self, constraint: &Constraint) -> Option<Range>
|
||||
pub fn free(&mut self, key: &Range) -> Option<T>
|
||||
pub fn insert(&mut self, key: Range, data: Option<T>) -> Self
|
||||
pub fn update(&mut self, key: &Range, data: T) -> Option<T>
|
||||
pub fn delete(&mut self, key: &Range) -> Option<T>
|
||||
pub fn get(&self, key: &Range) -> Option<NodeState<&T>>
|
||||
```
|
||||
|
||||
## Usage
|
||||
The concept of Interval Tree may seem complicated, but using dbs-allocator to do resource allocation and release is simple and straightforward.
|
||||
You can follow these steps to allocate your VMM resources.
|
||||
```rust
|
||||
// 1. To start with, we should create an interval tree for some specific resources and give the maximum address/id range as the root node. The range here could be an address range, an id range, etc.
|
||||
|
||||
let mut resources_pool = IntervalTree::new();
|
||||
resources_pool.insert(Range::new(MIN_RANGE, MAX_RANGE), None);
|
||||
|
||||
// 2. Next, create a constraint with the size for your resource, you could also assign the maximum, minimum and alignment for the constraint. Then we could use the constraint to allocate the resource in the range we previously decided. Interval Tree will give you the appropriate range.
|
||||
let mut constraint = Constraint::new(SIZE);
|
||||
let mut resources_range = self.resources_pool.allocate(&constraint);
|
||||
|
||||
// 3. Then we could use the resource range to let other crates like vm-pci / vm-device to create and maintain the device
|
||||
let mut device = Device::create(resources_range, ..)
|
||||
```
|
||||
|
||||
## Example
|
||||
We will show examples for allocating an unused PCI device ID from the PCI device ID pool and allocating memory address using dbs-allocator
|
||||
```rust
|
||||
use dbs_allocator::{Constraint, IntervalTree, Range};
|
||||
|
||||
// Init a dbs-allocator IntervalTree
|
||||
let mut pci_device_pool = IntervalTree::new();
|
||||
|
||||
// Init PCI device id pool with the range 0 to 255
|
||||
pci_device_pool.insert(Range::new(0x0u8, 0xffu8), None);
|
||||
|
||||
// Construct a constraint with size 1 and alignment 1 to ask for an ID.
|
||||
let mut constraint = Constraint::new(1u64).align(1u64);
|
||||
|
||||
// Get an ID from the pci_device_pool
|
||||
let mut id = pci_device_pool.allocate(&constraint).map(|e| e.min as u8);
|
||||
|
||||
// Pass the ID generated from dbs-allocator to vm-pci specified functions to create pci devices
|
||||
let mut pci_device = PciDevice::new(id as u8, ..);
|
||||
|
||||
```
|
||||
|
||||
```rust
|
||||
use dbs_allocator::{Constraint, IntervalTree, Range};
|
||||
|
||||
// Init a dbs-allocator IntervalTree
|
||||
let mut mem_pool = IntervalTree::new();
|
||||
|
||||
// Init memory address from GUEST_MEM_START to GUEST_MEM_END
|
||||
mem_pool.insert(Range::new(GUEST_MEM_START, GUEST_MEM_END), None);
|
||||
|
||||
// Construct a constraint with size, maximum addr and minimum address of memory region to ask for an memory allocation range.
|
||||
let constraint = Constraint::new(region.len())
|
||||
.min(region.start_addr().raw_value())
|
||||
.max(region.last_addr().raw_value());
|
||||
|
||||
// Get the memory allocation range from the mem_pool
|
||||
let mem_range = mem_pool.allocate(&constraint).unwrap();
|
||||
|
||||
// Update the mem_range in IntervalTree with memory region info
|
||||
mem_pool.update(&mem_range, region);
|
||||
|
||||
// After allocation, we can use the memory range to do mapping and other memory related work.
|
||||
...
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0.
|
||||
1297
src/dragonball/src/dbs_allocator/src/interval_tree.rs
Normal file
1297
src/dragonball/src/dbs_allocator/src/interval_tree.rs
Normal file
File diff suppressed because it is too large
Load Diff
164
src/dragonball/src/dbs_allocator/src/lib.rs
Normal file
164
src/dragonball/src/dbs_allocator/src/lib.rs
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright (C) 2019, 2022 Alibaba Cloud. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//! Data structures and algorithms to support resource allocation and management.
|
||||
//!
|
||||
//! The `dbs-allocator` crate provides data structures and algorithms to manage and allocate
|
||||
//! integer identifiable resources. The resource manager in virtual machine monitor (VMM) may
|
||||
//! manage and allocate resources for virtual machines by using:
|
||||
//! - [Constraint]: Struct to declare constraints for resource allocation.
|
||||
//! - [IntervalTree]: An interval tree implementation specialized for VMM resource management.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
pub mod interval_tree;
|
||||
pub use interval_tree::{IntervalTree, NodeState, Range};
|
||||
|
||||
/// Error codes for resource allocation operations.
///
/// Display formatting is generated by `thiserror`.
#[derive(thiserror::Error, Debug, Eq, PartialEq)]
pub enum Error {
    /// Invalid boundary for resource allocation: the recorded `min` exceeds `max`.
    #[error("invalid boundary constraint: min ({0}), max ({1})")]
    InvalidBoundary(u64, u64),
}
|
||||
|
||||
/// Specialized version of [`std::result::Result`] for resource allocation operations.
pub type Result<T> = std::result::Result<T, Error>;

/// Resource allocation policies.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AllocPolicy {
    /// Default resource allocation policy.
    Default,
    /// Return the first available resource matching the allocation constraints.
    FirstMatch,
}
|
||||
|
||||
/// Struct to declare resource allocation constraints.
///
/// Build it with [`Constraint::new`] and the chainable setters
/// (`min`/`max`/`align`/`policy`), then validate with `validate()`.
#[derive(Copy, Clone, Debug)]
pub struct Constraint {
    /// Size of resource to allocate.
    pub size: u64,
    /// Lower boundary for resource allocation.
    pub min: u64,
    /// Upper boundary for resource allocation.
    pub max: u64,
    /// Alignment for allocated resource.
    pub align: u64,
    /// Policy for resource allocation.
    pub policy: AllocPolicy,
}
|
||||
|
||||
impl Constraint {
|
||||
/// Create a new instance of [`Constraint`] with default settings.
|
||||
pub fn new<T>(size: T) -> Self
|
||||
where
|
||||
u64: From<T>,
|
||||
{
|
||||
Constraint {
|
||||
size: u64::from(size),
|
||||
min: 0,
|
||||
max: u64::MAX,
|
||||
align: 1,
|
||||
policy: AllocPolicy::Default,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the lower boundary constraint for resource allocation.
|
||||
pub fn min<T>(mut self, min: T) -> Self
|
||||
where
|
||||
u64: From<T>,
|
||||
{
|
||||
self.min = u64::from(min);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the upper boundary constraint for resource allocation.
|
||||
pub fn max<T>(mut self, max: T) -> Self
|
||||
where
|
||||
u64: From<T>,
|
||||
{
|
||||
self.max = u64::from(max);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the alignment constraint for allocated resource.
|
||||
pub fn align<T>(mut self, align: T) -> Self
|
||||
where
|
||||
u64: From<T>,
|
||||
{
|
||||
self.align = u64::from(align);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the resource allocation policy.
|
||||
pub fn policy(mut self, policy: AllocPolicy) -> Self {
|
||||
self.policy = policy;
|
||||
self
|
||||
}
|
||||
|
||||
/// Validate the resource allocation constraints.
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
if self.max < self.min {
|
||||
return Err(Error::InvalidBoundary(self.min, self.max));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_set_min() {
        let constraint = Constraint::new(2_u64).min(1_u64);
        assert_eq!(constraint.min, 1_u64);
    }

    #[test]
    fn test_set_max() {
        let constraint = Constraint::new(2_u64).max(100_u64);
        assert_eq!(constraint.max, 100_u64);
    }

    #[test]
    fn test_set_align() {
        let constraint = Constraint::new(2_u64).align(8_u64);
        assert_eq!(constraint.align, 8_u64);
    }

    #[test]
    fn test_set_policy() {
        // The policy setter must be re-applicable on an existing constraint.
        let mut constraint = Constraint::new(2_u64).policy(AllocPolicy::FirstMatch);
        assert_eq!(constraint.policy, AllocPolicy::FirstMatch);
        constraint = constraint.policy(AllocPolicy::Default);
        assert_eq!(constraint.policy, AllocPolicy::Default);
    }

    #[test]
    fn test_consistently_change_constraint() {
        // All chainable setters must compose without clobbering each other.
        let constraint = Constraint::new(2_u64)
            .min(1_u64)
            .max(100_u64)
            .align(8_u64)
            .policy(AllocPolicy::FirstMatch);
        assert_eq!(constraint.min, 1_u64);
        assert_eq!(constraint.max, 100_u64);
        assert_eq!(constraint.align, 8_u64);
        assert_eq!(constraint.policy, AllocPolicy::FirstMatch);
    }

    #[test]
    fn test_set_invalid_boundary() {
        // Normal case.
        let constraint = Constraint::new(2_u64).max(1000_u64).min(999_u64);
        assert!(constraint.validate().is_ok());

        // Error case.
        let constraint = Constraint::new(2_u64).max(999_u64).min(1000_u64);
        assert_eq!(
            constraint.validate(),
            Err(Error::InvalidBoundary(1000u64, 999u64))
        );
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user