mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-03-16 01:32:26 +00:00
Compare commits
2996 Commits
stable-1.8
...
2.0.0-alpha
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
43db1284e9 | ||
|
|
635c7a785a | ||
|
|
1d2f611a80 | ||
|
|
cb58075a26 | ||
|
|
506ba67106 | ||
|
|
81e11c9f7c | ||
|
|
a449786544 | ||
|
|
85256e0494 | ||
|
|
38f196b06e | ||
|
|
d36ad1e730 | ||
|
|
a3eb0c970e | ||
|
|
9b9f2b2ff0 | ||
|
|
0e0f30b152 | ||
|
|
202a877d36 | ||
|
|
48b20e75f8 | ||
|
|
4887ce059c | ||
|
|
54f12461db | ||
|
|
83891592c1 | ||
|
|
c1959f3df3 | ||
|
|
5f9d141159 | ||
|
|
e6aac8390e | ||
|
|
8f5a69373b | ||
|
|
9cdc899c76 | ||
|
|
44ed777c0f | ||
|
|
045c7ae9a3 | ||
|
|
387d3d34dc | ||
|
|
83f116b483 | ||
|
|
97909ea2b9 | ||
|
|
f53901ad43 | ||
|
|
4fdb4cffd5 | ||
|
|
6bc69760c0 | ||
|
|
f7b941b6bf | ||
|
|
555f620653 | ||
|
|
08e3d259ca | ||
|
|
478dfa4b52 | ||
|
|
1d6e7ac405 | ||
|
|
010b7a9fba | ||
|
|
663c7e7921 | ||
|
|
84e389d9d1 | ||
|
|
6d129546e3 | ||
|
|
cb6b0e1370 | ||
|
|
629cc0ae8d | ||
|
|
586d26480c | ||
|
|
14a01d8eb5 | ||
|
|
e90c5d45b3 | ||
|
|
cd9e309f63 | ||
|
|
06b3f313a2 | ||
|
|
6de95bf36c | ||
|
|
11c3d81f68 | ||
|
|
21c830c7da | ||
|
|
715d342519 | ||
|
|
66fe1d2a69 | ||
|
|
241d52d4fa | ||
|
|
359b78c1c9 | ||
|
|
a5589196ce | ||
|
|
3b5768e6e0 | ||
|
|
4fceb03d96 | ||
|
|
93b72558ad | ||
|
|
426a9cab6e | ||
|
|
e540648950 | ||
|
|
98e3e99843 | ||
|
|
a02a8bda66 | ||
|
|
7468750442 | ||
|
|
3ea23a9a5d | ||
|
|
a7041c27dc | ||
|
|
3b98b259b4 | ||
|
|
c95d09a34d | ||
|
|
eb128f8558 | ||
|
|
21711eea08 | ||
|
|
b7d1e30c9f | ||
|
|
63d9a8696f | ||
|
|
33e5ab1bf6 | ||
|
|
d78ffd653d | ||
|
|
018348064e | ||
|
|
9c8b7c3197 | ||
|
|
39a039cde5 | ||
|
|
7aa3168500 | ||
|
|
e8fc25a7f4 | ||
|
|
a1378594d1 | ||
|
|
65865ab74d | ||
|
|
bf9758bf86 | ||
|
|
c29dbae5b2 | ||
|
|
8c850d9e3a | ||
|
|
07d0a4f0aa | ||
|
|
c369692924 | ||
|
|
84d2bacc74 | ||
|
|
0fe23c85c2 | ||
|
|
ab8050c5e0 | ||
|
|
6218b2a558 | ||
|
|
95ccc0f759 | ||
|
|
4c1cacd31d | ||
|
|
8e0f891ebc | ||
|
|
af24829c2a | ||
|
|
afbd03cf01 | ||
|
|
432f9bea6e | ||
|
|
02d8ec0bf8 | ||
|
|
0294fcb992 | ||
|
|
c0dc7676e0 | ||
|
|
6e398f7c71 | ||
|
|
2b92007a5c | ||
|
|
2f07ec9100 | ||
|
|
fd625b3fc5 | ||
|
|
5eec8bdf9d | ||
|
|
e4eb553d12 | ||
|
|
ba3c732f86 | ||
|
|
32431d701c | ||
|
|
6d61ab439c | ||
|
|
986e666b0b | ||
|
|
7d9bdf7b01 | ||
|
|
705713b4f9 | ||
|
|
9fd7189388 | ||
|
|
e2d346c61d | ||
|
|
c948d8a802 | ||
|
|
891b61c993 | ||
|
|
0312a60287 | ||
|
|
7cb647a78b | ||
|
|
e525003e96 | ||
|
|
9220fb8e0c | ||
|
|
2ac3090c20 | ||
|
|
39e354f609 | ||
|
|
0a1ffc1d97 | ||
|
|
669b6e32a5 | ||
|
|
2a19de8aa9 | ||
|
|
7997218ced | ||
|
|
4fe62ade7f | ||
|
|
11c998b6c7 | ||
|
|
92b2ff723c | ||
|
|
aab82f6745 | ||
|
|
e62a8aa98e | ||
|
|
2f948738e4 | ||
|
|
b6a7d8d63a | ||
|
|
5e7d253859 | ||
|
|
ebb8fd576b | ||
|
|
213f5dbaf5 | ||
|
|
1e15465012 | ||
|
|
39f6cb6862 | ||
|
|
4d2574a723 | ||
|
|
9665563145 | ||
|
|
3b53114ad1 | ||
|
|
7aff546655 | ||
|
|
078da1a6de | ||
|
|
c028329755 | ||
|
|
c1b6838e25 | ||
|
|
d0a730c6e8 | ||
|
|
d60902a95e | ||
|
|
aadf8c4a01 | ||
|
|
44e23493a2 | ||
|
|
c3bafd5793 | ||
|
|
2945bcd796 | ||
|
|
d2cae59ec7 | ||
|
|
37b91b3378 | ||
|
|
2d89766d3a | ||
|
|
2c310fecd4 | ||
|
|
434b30255e | ||
|
|
84e0ee13c8 | ||
|
|
abbdf078cd | ||
|
|
ee941e5c56 | ||
|
|
9ff44dba87 | ||
|
|
0a4e2edcf4 | ||
|
|
2c7f27ec4f | ||
|
|
e56b10f835 | ||
|
|
ded27f48d5 | ||
|
|
7df8edef1b | ||
|
|
f61eca8920 | ||
|
|
6a4e667f9c | ||
|
|
3251beaa23 | ||
|
|
c5184641dc | ||
|
|
aa0d4ee0e8 | ||
|
|
4d034b1e21 | ||
|
|
e433719f47 | ||
|
|
0329dbb5aa | ||
|
|
8280208443 | ||
|
|
7087b5f43c | ||
|
|
4f49b160c0 | ||
|
|
fe0a3a0c7c | ||
|
|
8d7817805a | ||
|
|
7c92854e5d | ||
|
|
00da1270be | ||
|
|
13390df005 | ||
|
|
9d3022a85a | ||
|
|
aaa4e5c661 | ||
|
|
73eb5c1a04 | ||
|
|
17a92aed6f | ||
|
|
65717ba6d0 | ||
|
|
fbf1d015e7 | ||
|
|
245183cb28 | ||
|
|
94298dd15c | ||
|
|
9e0807a7f5 | ||
|
|
1c063afc5f | ||
|
|
ee01fa82b5 | ||
|
|
126fa157a3 | ||
|
|
cf066b75ac | ||
|
|
3696318436 | ||
|
|
54b24dad4d | ||
|
|
511dc4a0e4 | ||
|
|
aa40eeffe6 | ||
|
|
4ce44ab067 | ||
|
|
4c28717335 | ||
|
|
397ce26948 | ||
|
|
5717da9262 | ||
|
|
400fed8a0f | ||
|
|
fc6db0b8f2 | ||
|
|
eee4d7012d | ||
|
|
913d1530fb | ||
|
|
2cd0c88574 | ||
|
|
a5436627f5 | ||
|
|
67343a178c | ||
|
|
a390a360db | ||
|
|
41aaa36e6f | ||
|
|
9cba8c4c27 | ||
|
|
ed43117554 | ||
|
|
be273aa43d | ||
|
|
2656d1da22 | ||
|
|
17b9de140f | ||
|
|
87a5d5c8d7 | ||
|
|
6cae294e83 | ||
|
|
4004bd8fbe | ||
|
|
8cffbde514 | ||
|
|
22afde1850 | ||
|
|
63c7ac5bbe | ||
|
|
cd233c047a | ||
|
|
83eef430bd | ||
|
|
204edf0e51 | ||
|
|
35c33bba47 | ||
|
|
e94cf0f135 | ||
|
|
e0a4515609 | ||
|
|
e9a46580b1 | ||
|
|
f0eaeac3be | ||
|
|
3136712d8e | ||
|
|
134175bb9b | ||
|
|
0f4eac434b | ||
|
|
762ec28a6b | ||
|
|
6f17b9cb48 | ||
|
|
17a8fb13a1 | ||
|
|
e787bb0da5 | ||
|
|
cef25917a4 | ||
|
|
f3ab6d2666 | ||
|
|
7a8e816ded | ||
|
|
7dd99c022b | ||
|
|
1ae392285e | ||
|
|
555ddf331a | ||
|
|
0e6a12ce3c | ||
|
|
e8624d89d9 | ||
|
|
cd46d09e0c | ||
|
|
c574ec0528 | ||
|
|
44f29318dd | ||
|
|
a1dcaac9ed | ||
|
|
376c42523a | ||
|
|
6bed2a724d | ||
|
|
fde6447c16 | ||
|
|
171eb70564 | ||
|
|
c6cc8b93f9 | ||
|
|
46392945c6 | ||
|
|
63fdf5328f | ||
|
|
7427fea864 | ||
|
|
18b21eb6ca | ||
|
|
7965445adf | ||
|
|
d9d4820684 | ||
|
|
e66dce1bc7 | ||
|
|
ea82922a54 | ||
|
|
03cdf6c4a9 | ||
|
|
d54723a5c4 | ||
|
|
33459779c4 | ||
|
|
20332298ab | ||
|
|
12d10eb2dc | ||
|
|
de8fe25dd5 | ||
|
|
feac6648fa | ||
|
|
0da101055e | ||
|
|
5f9a77cccc | ||
|
|
23625681d4 | ||
|
|
8a9aa41247 | ||
|
|
9d7bbdc5a6 | ||
|
|
32196ff750 | ||
|
|
9f240b241a | ||
|
|
db5cfebd09 | ||
|
|
2c0e8ff499 | ||
|
|
b74cda0243 | ||
|
|
5bf3231213 | ||
|
|
44b09670b2 | ||
|
|
c2462e7e43 | ||
|
|
c54e5caf37 | ||
|
|
83b1712fa9 | ||
|
|
1efcd038ee | ||
|
|
0f720e6f37 | ||
|
|
78bb6c0f66 | ||
|
|
ab260e4706 | ||
|
|
01bc98de57 | ||
|
|
1c1e7cc137 | ||
|
|
1ad927d4e8 | ||
|
|
a4b3c65c16 | ||
|
|
0c3b2c0972 | ||
|
|
efb975e4d0 | ||
|
|
2c3b4657f5 | ||
|
|
f8e52544bf | ||
|
|
a45cf62e75 | ||
|
|
c36c667b10 | ||
|
|
11bd456a89 | ||
|
|
9585bc929a | ||
|
|
00307a70ee | ||
|
|
4b9ab557c8 | ||
|
|
71f48a3364 | ||
|
|
dd2762fdad | ||
|
|
ea8fb96c3e | ||
|
|
768db1bdc4 | ||
|
|
6be74811dc | ||
|
|
658f77979c | ||
|
|
645dfc81f6 | ||
|
|
83561c4ce3 | ||
|
|
4d443056bf | ||
|
|
22c486aa62 | ||
|
|
005c62a871 | ||
|
|
a8dcff5b4e | ||
|
|
289d61730c | ||
|
|
de7383b2d1 | ||
|
|
d142bf73e6 | ||
|
|
5c3bcd884c | ||
|
|
e2c9426ebf | ||
|
|
11a1cf53c2 | ||
|
|
31a97031f8 | ||
|
|
40b5a56688 | ||
|
|
269daa94ef | ||
|
|
1fa10fea78 | ||
|
|
12996ca1db | ||
|
|
836e3c216d | ||
|
|
055f31716c | ||
|
|
b444393c31 | ||
|
|
1a7b735c3c | ||
|
|
afc7b4d523 | ||
|
|
7498978ca7 | ||
|
|
27d9e4334d | ||
|
|
a3e46a369f | ||
|
|
356222fbba | ||
|
|
bd7d3102c8 | ||
|
|
bb41b7248a | ||
|
|
39e2357024 | ||
|
|
4ee2f8c5b1 | ||
|
|
fa7d00ec25 | ||
|
|
a90dde04c4 | ||
|
|
b2fb86f3ff | ||
|
|
7d667a92ee | ||
|
|
3c1252ea79 | ||
|
|
96a49a894d | ||
|
|
9bf4b859a1 | ||
|
|
c373f846f5 | ||
|
|
a91cb13be8 | ||
|
|
2560e65e75 | ||
|
|
01beb2fda9 | ||
|
|
db679fb869 | ||
|
|
693ad23846 | ||
|
|
61d826e1b7 | ||
|
|
c5d79eb2c1 | ||
|
|
b169476be9 | ||
|
|
4a77b0f8ec | ||
|
|
b602e62a0e | ||
|
|
f1f9414a59 | ||
|
|
c26ce18672 | ||
|
|
09dfd79322 | ||
|
|
e416a0ec61 | ||
|
|
01a12b003b | ||
|
|
c3cf98aca6 | ||
|
|
ec13b28567 | ||
|
|
54482f18df | ||
|
|
b337428947 | ||
|
|
316b5f2b2c | ||
|
|
7526f4957d | ||
|
|
c407421a26 | ||
|
|
73a63baab5 | ||
|
|
1f957e1b87 | ||
|
|
df802cc359 | ||
|
|
b5e741ba8b | ||
|
|
174f9abee8 | ||
|
|
2be8661ffa | ||
|
|
6c7453db78 | ||
|
|
0e4c497c68 | ||
|
|
98ac62dec9 | ||
|
|
7186c01d6e | ||
|
|
0244d95edd | ||
|
|
aa62781aa7 | ||
|
|
d11696de9a | ||
|
|
d042d5c0da | ||
|
|
776da0878e | ||
|
|
f372b85848 | ||
|
|
9949daf4dc | ||
|
|
ce2795e949 | ||
|
|
8c63c18098 | ||
|
|
8057cd72c3 | ||
|
|
4126968bf9 | ||
|
|
a170d00b4c | ||
|
|
112f90b7a0 | ||
|
|
4a1dc1ee25 | ||
|
|
908a42a4af | ||
|
|
1b1e066083 | ||
|
|
7ce9c40c76 | ||
|
|
4edf5379ca | ||
|
|
c3629d37c4 | ||
|
|
3660bb426f | ||
|
|
0af481979b | ||
|
|
d34d66099f | ||
|
|
3594855094 | ||
|
|
8fbc673e68 | ||
|
|
290339da6b | ||
|
|
c4f15f1280 | ||
|
|
d2225334d9 | ||
|
|
7dfc4e0219 | ||
|
|
44b2caa2e5 | ||
|
|
9621a7f3f5 | ||
|
|
4a298cb9b7 | ||
|
|
d33b154dd7 | ||
|
|
3b6a837664 | ||
|
|
8d60612052 | ||
|
|
e0df9739bf | ||
|
|
bf50d1811c | ||
|
|
a5192a16e8 | ||
|
|
3881c06578 | ||
|
|
ed4a1954e4 | ||
|
|
8e88859ee4 | ||
|
|
01b4a64be2 | ||
|
|
b63e517f6d | ||
|
|
508101bc0f | ||
|
|
29b55ab88b | ||
|
|
633748aa76 | ||
|
|
687f2dbe84 | ||
|
|
3ea3d3201b | ||
|
|
3ed472dc8d | ||
|
|
5617120649 | ||
|
|
e5b04a5bf2 | ||
|
|
9bf0d67fdd | ||
|
|
4c35d0911a | ||
|
|
3deb24e5de | ||
|
|
f56d70ccd6 | ||
|
|
b9120b2bb1 | ||
|
|
7c7a4a3b11 | ||
|
|
652bb76dde | ||
|
|
a8717286ca | ||
|
|
dd5b446997 | ||
|
|
1296f6f1ad | ||
|
|
a2d3f9f32d | ||
|
|
2a085ee67b | ||
|
|
af5c9c2320 | ||
|
|
6a10cd960d | ||
|
|
8a439eab9d | ||
|
|
09198eed84 | ||
|
|
a198efcf1d | ||
|
|
661956f5bc | ||
|
|
3ea682d6e0 | ||
|
|
b96c7e5abe | ||
|
|
3de4bdd508 | ||
|
|
a215f87e23 | ||
|
|
39d7a144a9 | ||
|
|
68fc9abc5e | ||
|
|
af3a710921 | ||
|
|
1c11fe20ba | ||
|
|
6cd9b3b0b6 | ||
|
|
449a3a9a14 | ||
|
|
9c3151e5ed | ||
|
|
e9a852dd79 | ||
|
|
1a7539c1f5 | ||
|
|
553237884a | ||
|
|
76f9b34b59 | ||
|
|
cfda17d529 | ||
|
|
1c576659de | ||
|
|
cbd5fa008a | ||
|
|
6eae033f48 | ||
|
|
d10adfdc03 | ||
|
|
743309cdc9 | ||
|
|
e3115e344e | ||
|
|
810a528096 | ||
|
|
9ddf91d1f2 | ||
|
|
efb611aa65 | ||
|
|
d0e30ef11f | ||
|
|
9049395de8 | ||
|
|
dbbf16082d | ||
|
|
abaa45068f | ||
|
|
640fee2e2a | ||
|
|
5baacae0ff | ||
|
|
810c2c93d6 | ||
|
|
a660d802ad | ||
|
|
0afeb527ff | ||
|
|
42061f6c39 | ||
|
|
c688a1504c | ||
|
|
9f15dd2da3 | ||
|
|
f73723a23f | ||
|
|
92301a6382 | ||
|
|
ab2088f7d5 | ||
|
|
9a15457064 | ||
|
|
dc05d7dbbf | ||
|
|
0babd38de0 | ||
|
|
60102188cd | ||
|
|
a10da3efbf | ||
|
|
183622652a | ||
|
|
dcac021637 | ||
|
|
00ff99bcb6 | ||
|
|
e025ba7d08 | ||
|
|
84e4d68b13 | ||
|
|
e3ba17123a | ||
|
|
df7982b95f | ||
|
|
abbb536cc4 | ||
|
|
8f6d0ab165 | ||
|
|
f2d8d715d3 | ||
|
|
9ce2113535 | ||
|
|
38d0be3824 | ||
|
|
62cd08044d | ||
|
|
9c0872dc7e | ||
|
|
cc25216b11 | ||
|
|
d50eea66eb | ||
|
|
9b2fc09982 | ||
|
|
1c27897ba2 | ||
|
|
c384359209 | ||
|
|
154c68eb93 | ||
|
|
000bb8592d | ||
|
|
94311e4997 | ||
|
|
b1748323f0 | ||
|
|
67f203f1b8 | ||
|
|
fc9114dbdc | ||
|
|
0a5315b1c6 | ||
|
|
450a646afd | ||
|
|
e49569a286 | ||
|
|
a4adacaa10 | ||
|
|
bec46bb59b | ||
|
|
628799a42f | ||
|
|
8868eaeb4c | ||
|
|
eb6258b751 | ||
|
|
9dd3f13a92 | ||
|
|
7bcce3da63 | ||
|
|
38224e8b7b | ||
|
|
544730b4b1 | ||
|
|
d4be097b71 | ||
|
|
b6c8b9d9f4 | ||
|
|
4c051ed717 | ||
|
|
e8cc87b378 | ||
|
|
06971246ea | ||
|
|
c5a3fa76be | ||
|
|
e005c37274 | ||
|
|
29c2ff8476 | ||
|
|
383e70344f | ||
|
|
d054556f60 | ||
|
|
cc3506403b | ||
|
|
837a0ee0ae | ||
|
|
3d8ffe4120 | ||
|
|
3ef8f6cf1b | ||
|
|
6af127f7f9 | ||
|
|
c58e6f973c | ||
|
|
fd5549aa5f | ||
|
|
552e9407b8 | ||
|
|
545d61cbe8 | ||
|
|
d937c067f2 | ||
|
|
b7731e97dd | ||
|
|
43f051313e | ||
|
|
613fd0fb60 | ||
|
|
330cc72ef3 | ||
|
|
dd15db3250 | ||
|
|
35d4cac999 | ||
|
|
6727c68005 | ||
|
|
191ee63750 | ||
|
|
7b8e15f3a7 | ||
|
|
562d9fd5c2 | ||
|
|
b9158efe3a | ||
|
|
aeeb6fce73 | ||
|
|
5f29f3e293 | ||
|
|
f2e22eec4f | ||
|
|
336edf75ea | ||
|
|
7f67b9f084 | ||
|
|
fea166d8eb | ||
|
|
7938cd8965 | ||
|
|
0ac43558c8 | ||
|
|
0ff0e54769 | ||
|
|
a47a94218f | ||
|
|
c34bdd06db | ||
|
|
b0edfc75ff | ||
|
|
1abe52abd7 | ||
|
|
eae8449231 | ||
|
|
db696da98b | ||
|
|
c833ac2c53 | ||
|
|
b8b6733f62 | ||
|
|
a17ca14c7a | ||
|
|
3a4025fbf4 | ||
|
|
03478d4540 | ||
|
|
51d7c23e41 | ||
|
|
93a03369ae | ||
|
|
88205cff6d | ||
|
|
660728fb65 | ||
|
|
2331e879af | ||
|
|
1f71114291 | ||
|
|
fab759db73 | ||
|
|
36a9dc5c72 | ||
|
|
48c8d669fe | ||
|
|
df889c37e0 | ||
|
|
75392a744f | ||
|
|
1afad1c0ad | ||
|
|
66f0ec526e | ||
|
|
289b6bc587 | ||
|
|
9a4ee4f205 | ||
|
|
1e046791a3 | ||
|
|
618666ed8c | ||
|
|
164fa18858 | ||
|
|
459e732ead | ||
|
|
7943dd95b4 | ||
|
|
c0d2867a0e | ||
|
|
6b611030db | ||
|
|
45faacfe49 | ||
|
|
510f0a6687 | ||
|
|
be6110d234 | ||
|
|
ee9a53ca4b | ||
|
|
547d580ad5 | ||
|
|
784066a49d | ||
|
|
01713d59cb | ||
|
|
3886dcf646 | ||
|
|
70297c2184 | ||
|
|
a2b6afcd9a | ||
|
|
13a00a2cf2 | ||
|
|
35f54fdb8a | ||
|
|
f2bbcf4eb6 | ||
|
|
60d713e84d | ||
|
|
0c482b2557 | ||
|
|
75d149c2a6 | ||
|
|
519eff7236 | ||
|
|
7be308befe | ||
|
|
8aa2c78dd2 | ||
|
|
daae1db893 | ||
|
|
9112257c23 | ||
|
|
d011b39e96 | ||
|
|
af73bb364e | ||
|
|
615421081a | ||
|
|
78ca966e8d | ||
|
|
77b0dfb05f | ||
|
|
0def9b01de | ||
|
|
744ccd4ed2 | ||
|
|
27433d9178 | ||
|
|
dffc988d92 | ||
|
|
8c7a83b936 | ||
|
|
3fe04a2ddc | ||
|
|
4ec9dd3593 | ||
|
|
cb1849cd2c | ||
|
|
60609cacd0 | ||
|
|
31b5f96f64 | ||
|
|
f6ffb791e7 | ||
|
|
8834e3a759 | ||
|
|
d0615f8220 | ||
|
|
94b3cf2968 | ||
|
|
bcb38548f9 | ||
|
|
0bd41b9dbe | ||
|
|
50c9378659 | ||
|
|
3f1a39c442 | ||
|
|
9dce527793 | ||
|
|
089f3b4651 | ||
|
|
3e3e923aa2 | ||
|
|
69ab09273d | ||
|
|
e06a230c30 | ||
|
|
5b31282558 | ||
|
|
777cee5436 | ||
|
|
0e70b38d06 | ||
|
|
b14f5a1f89 | ||
|
|
d33c2f84a8 | ||
|
|
f55667df38 | ||
|
|
2082a9f2a2 | ||
|
|
764c26063e | ||
|
|
d045169476 | ||
|
|
783cb13f8d | ||
|
|
ae211e5bba | ||
|
|
0de5c42276 | ||
|
|
a1e359d331 | ||
|
|
9df1d0e002 | ||
|
|
254b85aec1 | ||
|
|
5f6e642bb7 | ||
|
|
0a35f504ab | ||
|
|
f2e4edc068 | ||
|
|
764ba9f83d | ||
|
|
5c96a920bd | ||
|
|
2787c545ac | ||
|
|
a19f07b017 | ||
|
|
1b05482680 | ||
|
|
ad70bc3499 | ||
|
|
e4c816bebd | ||
|
|
f8ced638d2 | ||
|
|
691a6a7ac4 | ||
|
|
7fe0100444 | ||
|
|
1bbc1d58bd | ||
|
|
569bd780f1 | ||
|
|
62f64bb009 | ||
|
|
9d50cc1ff9 | ||
|
|
da98191940 | ||
|
|
a1a2da6fcf | ||
|
|
559327c021 | ||
|
|
c51d49277e | ||
|
|
5b226d0d39 | ||
|
|
f8b84d7eba | ||
|
|
7d484dfe4c | ||
|
|
a1cd0f8f76 | ||
|
|
24d7aff60c | ||
|
|
abec17f8f2 | ||
|
|
eca7bd2705 | ||
|
|
c7b4c5eab9 | ||
|
|
729b5faf52 | ||
|
|
4f8cc73e82 | ||
|
|
04489fec2d | ||
|
|
91bd095ee4 | ||
|
|
a4b5a565ff | ||
|
|
c1060a3b9e | ||
|
|
bf8793adad | ||
|
|
f6a10bcae7 | ||
|
|
fa4acad4aa | ||
|
|
929c4e7e3d | ||
|
|
c0995c6201 | ||
|
|
86d8346d0c | ||
|
|
b84cb5e0f1 | ||
|
|
6fa3063e68 | ||
|
|
c26788c329 | ||
|
|
dd21046072 | ||
|
|
6ab89e4549 | ||
|
|
4863aa998e | ||
|
|
f62bceba99 | ||
|
|
ad1563196e | ||
|
|
238f3cec56 | ||
|
|
2b40b6b094 | ||
|
|
aa43e2a9ac | ||
|
|
23a5dc7ff8 | ||
|
|
d5a3d0a61c | ||
|
|
6ce6a262a8 | ||
|
|
93197ddcc3 | ||
|
|
80855a8ed4 | ||
|
|
c3abd51a5b | ||
|
|
b3aa770d95 | ||
|
|
2591a1fb65 | ||
|
|
ba3d4f77fe | ||
|
|
94906c40c3 | ||
|
|
8f6b0a6a41 | ||
|
|
8f70643d57 | ||
|
|
e7b9c36b90 | ||
|
|
09129c1c13 | ||
|
|
8405b56e6f | ||
|
|
5b78a8a0f8 | ||
|
|
afb91c2e02 | ||
|
|
845bf73726 | ||
|
|
30d0b7add7 | ||
|
|
46b68157d1 | ||
|
|
494272b0ac | ||
|
|
312f3e7234 | ||
|
|
7e9cc5690d | ||
|
|
07932d59ab | ||
|
|
ed7240b40f | ||
|
|
15996014db | ||
|
|
d1751a35e1 | ||
|
|
46d1957e0f | ||
|
|
d1fdf4083e | ||
|
|
9a9a8978f5 | ||
|
|
d2d029ce47 | ||
|
|
3e4b381248 | ||
|
|
9e10b341d6 | ||
|
|
e93bf967d2 | ||
|
|
c8dd92d5aa | ||
|
|
41407cfbed | ||
|
|
5f0799f1b7 | ||
|
|
cdd6f7e4d5 | ||
|
|
2d8b278c09 | ||
|
|
801a9a8fd0 | ||
|
|
36626c13c8 | ||
|
|
b4bc00951a | ||
|
|
8b843c5229 | ||
|
|
c152ebf356 | ||
|
|
bc3c07b7d5 | ||
|
|
5982e48774 | ||
|
|
05428a6424 | ||
|
|
aa6a16c597 | ||
|
|
d3f480dc4c | ||
|
|
22a3ca1c36 | ||
|
|
b1909e8ea2 | ||
|
|
84ead984d2 | ||
|
|
443e657750 | ||
|
|
3d0949d60d | ||
|
|
c33068de10 | ||
|
|
948dd3303a | ||
|
|
95c9880bca | ||
|
|
1eec032c63 | ||
|
|
1f93cffd5a | ||
|
|
8680db6071 | ||
|
|
f776e8f217 | ||
|
|
123ba13928 | ||
|
|
5ac6e9a897 | ||
|
|
74d54b0deb | ||
|
|
f2f09230ee | ||
|
|
f42dd7d115 | ||
|
|
2c4cf392f7 | ||
|
|
bb87b44b30 | ||
|
|
880bb2b7b8 | ||
|
|
2a8af23de6 | ||
|
|
67ce7283bc | ||
|
|
cdb1b5c31c | ||
|
|
46e1880c8d | ||
|
|
4134571e86 | ||
|
|
97fe749624 | ||
|
|
c81db9c3da | ||
|
|
7fa0a72f2e | ||
|
|
7965baab62 | ||
|
|
74e7d3dba7 | ||
|
|
b86ab21ce7 | ||
|
|
030211e22a | ||
|
|
82c277384b | ||
|
|
46785d86cc | ||
|
|
1fc5fa9f3c | ||
|
|
324952ce98 | ||
|
|
2ed94cbd9d | ||
|
|
7a3e7efcf2 | ||
|
|
4287ba639b | ||
|
|
fef938f81a | ||
|
|
bca37c3686 | ||
|
|
4d74fa1416 | ||
|
|
da4d89bd9a | ||
|
|
39864c37ff | ||
|
|
83b89d8773 | ||
|
|
f558073ccb | ||
|
|
2faece3583 | ||
|
|
571ede536b | ||
|
|
c8e6054843 | ||
|
|
90184f19f7 | ||
|
|
dec8013748 | ||
|
|
6daec98aaf | ||
|
|
2950b37028 | ||
|
|
0cc1a6f6ed | ||
|
|
5ff0ef9377 | ||
|
|
282d85899e | ||
|
|
d989667c1e | ||
|
|
f71a1eff62 | ||
|
|
42ef119992 | ||
|
|
48540146b6 | ||
|
|
2b785044a2 | ||
|
|
e21dc8babe | ||
|
|
b62814a6f0 | ||
|
|
e3f92fe59b | ||
|
|
a0e09df1df | ||
|
|
6b2a90a9e5 | ||
|
|
94c47dcecd | ||
|
|
9507f45a0f | ||
|
|
d51215878d | ||
|
|
07630b570a | ||
|
|
87eca1fff1 | ||
|
|
f128195249 | ||
|
|
f9cfa172ab | ||
|
|
9e16400897 | ||
|
|
611a86035b | ||
|
|
4176a7c947 | ||
|
|
284927d334 | ||
|
|
d44b9f3356 | ||
|
|
52cff50e1c | ||
|
|
fe8506740b | ||
|
|
b075b5c24e | ||
|
|
d627585dc1 | ||
|
|
7eec67044f | ||
|
|
e99739f9bd | ||
|
|
23e607314e | ||
|
|
9188774c93 | ||
|
|
ba3d3dad7e | ||
|
|
c8e5659c07 | ||
|
|
a5f1744132 | ||
|
|
8cf0f0602f | ||
|
|
9fc7246e8a | ||
|
|
3fc6f4bc55 | ||
|
|
074418f56b | ||
|
|
2fcb8bb4d8 | ||
|
|
b65063248f | ||
|
|
5a17d671a4 | ||
|
|
f45b2d9cc6 | ||
|
|
6fdbef4ff5 | ||
|
|
caac68c09f | ||
|
|
529ec25fb7 | ||
|
|
d804c3979c | ||
|
|
a1e0a4c3f5 | ||
|
|
ff8d23d75f | ||
|
|
af574851be | ||
|
|
e7c785ed19 | ||
|
|
3fb872911e | ||
|
|
712e06ae84 | ||
|
|
c91556aa41 | ||
|
|
7c4e479956 | ||
|
|
4cf0703c58 | ||
|
|
eb0a3d23d9 | ||
|
|
1b2ec4e39e | ||
|
|
5bfca6e38e | ||
|
|
346d96ce4e | ||
|
|
c54f00a7ca | ||
|
|
24fcd1b37d | ||
|
|
52e68f5fce | ||
|
|
64caa3f4d3 | ||
|
|
9a6e299827 | ||
|
|
5b749a56d8 | ||
|
|
0db6974ace | ||
|
|
a5b127b1c4 | ||
|
|
50d4188524 | ||
|
|
0926c8d9b4 | ||
|
|
0075bf85ba | ||
|
|
88e281cb14 | ||
|
|
db5097835a | ||
|
|
1935bf193c | ||
|
|
4deeb058db | ||
|
|
4a28b52553 | ||
|
|
dc38ba77bd | ||
|
|
6534357925 | ||
|
|
21698aadc1 | ||
|
|
7019ce5c9b | ||
|
|
e7457e6248 | ||
|
|
aebc49692b | ||
|
|
6c77d76f24 | ||
|
|
5b50b34df4 | ||
|
|
49184ee562 | ||
|
|
b3987e4786 | ||
|
|
de4582eda3 | ||
|
|
0bf48dca65 | ||
|
|
d90eba8593 | ||
|
|
d26ff71201 | ||
|
|
a5c7e6b934 | ||
|
|
99e04ac8cd | ||
|
|
263f64829d | ||
|
|
5e631391bf | ||
|
|
debc7d93ad | ||
|
|
9d4050e0b1 | ||
|
|
b58ab66f05 | ||
|
|
794e08e243 | ||
|
|
31ddb4d452 | ||
|
|
9ea469bcfa | ||
|
|
3fc17e96fc | ||
|
|
4cf1fa687d | ||
|
|
cfedb06a19 | ||
|
|
d9a7780514 | ||
|
|
862b077598 | ||
|
|
565f14f685 | ||
|
|
2c99b95c53 | ||
|
|
88f8216978 | ||
|
|
987fe3067e | ||
|
|
e467293a3e | ||
|
|
7412b98774 | ||
|
|
ce20d72593 | ||
|
|
495a92d2c3 | ||
|
|
604e1ab24f | ||
|
|
87af599dd0 | ||
|
|
dacd2d34b9 | ||
|
|
14474a49a2 | ||
|
|
a3eb19ca9b | ||
|
|
b9cde5bbaa | ||
|
|
df7cf77a08 | ||
|
|
48e004367c | ||
|
|
e052e57b3e | ||
|
|
355b9c003d | ||
|
|
979f064df3 | ||
|
|
0832294ba1 | ||
|
|
20587519cd | ||
|
|
a9168a3fc9 | ||
|
|
00e0aaa6e4 | ||
|
|
269d31a9ac | ||
|
|
4de74b4d67 | ||
|
|
263fb64ec6 | ||
|
|
6e1e6a2297 | ||
|
|
dd40af2557 | ||
|
|
9afe9310b1 | ||
|
|
74157c8b2b | ||
|
|
7a523c3782 | ||
|
|
b1b8ce427a | ||
|
|
50c3e56aeb | ||
|
|
0d0a84e903 | ||
|
|
73cee17420 | ||
|
|
934422fc15 | ||
|
|
317bfba4b1 | ||
|
|
a5a33436a6 | ||
|
|
1e5746a4f2 | ||
|
|
6d5aef531a | ||
|
|
2f55017fea | ||
|
|
3255640d54 | ||
|
|
7668aeb526 | ||
|
|
104c04d28f | ||
|
|
4bd3ea848d | ||
|
|
792504eee2 | ||
|
|
943136e18b | ||
|
|
e41a6b94f9 | ||
|
|
95e8a7a15c | ||
|
|
28b0ed9209 | ||
|
|
6ce5f30d6c | ||
|
|
9bfc083ef5 | ||
|
|
f3d0978c3f | ||
|
|
3bfbbd666d | ||
|
|
99cf3f80d7 | ||
|
|
7d5e48f1b5 | ||
|
|
688732adee | ||
|
|
d5d7d82eeb | ||
|
|
e02f6dc067 | ||
|
|
262484de68 | ||
|
|
67c401c059 | ||
|
|
835b6e9e1b | ||
|
|
bc4460e12f | ||
|
|
4130913ed7 | ||
|
|
c472a01006 | ||
|
|
f886c0bf35 | ||
|
|
ff5f1b4273 | ||
|
|
c7af16d363 | ||
|
|
f2e6a31dfc | ||
|
|
8ebaac02d1 | ||
|
|
37c2872f29 | ||
|
|
3063391334 | ||
|
|
c4583f4486 | ||
|
|
f2423e7d7c | ||
|
|
50e263d943 | ||
|
|
2cf4189244 | ||
|
|
cd9ab72636 | ||
|
|
f28de59210 | ||
|
|
bdbc806770 | ||
|
|
fb454f87f7 | ||
|
|
d987a30367 | ||
|
|
edb770ee63 | ||
|
|
39370c2aea | ||
|
|
e89195e70e | ||
|
|
d14968b66a | ||
|
|
7d38b84203 | ||
|
|
bc15e44245 | ||
|
|
d392b22ee4 | ||
|
|
3bd4bb66fb | ||
|
|
050f8e9715 | ||
|
|
3e4989db42 | ||
|
|
4fed346d53 | ||
|
|
78ea50c36c | ||
|
|
5e67e04666 | ||
|
|
98a69736c5 | ||
|
|
f246a799aa | ||
|
|
d9a4157841 | ||
|
|
828e0a2205 | ||
|
|
adcac9368f | ||
|
|
4d26ceee79 | ||
|
|
a4e6af0316 | ||
|
|
bdd89473fd | ||
|
|
e9bbe0b343 | ||
|
|
098501ac14 | ||
|
|
220ae83820 | ||
|
|
8b89a868e9 | ||
|
|
4968438992 | ||
|
|
fcf9f9f6dd | ||
|
|
5182a2551d | ||
|
|
cd4cc02568 | ||
|
|
da7f5f3796 | ||
|
|
33434894ba | ||
|
|
c74f9a2bfb | ||
|
|
43f2680e4c | ||
|
|
4ade7e5853 | ||
|
|
27dddf0a25 | ||
|
|
b94dafa980 | ||
|
|
f1a43ac4c6 | ||
|
|
09e316a376 | ||
|
|
db3363fbdf | ||
|
|
c92d77bf99 | ||
|
|
8f33d736e8 | ||
|
|
024a87419e | ||
|
|
963db61cb3 | ||
|
|
3a454814e3 | ||
|
|
71ce7577fb | ||
|
|
b199ae01b4 | ||
|
|
57136faa38 | ||
|
|
0c48630395 | ||
|
|
a84fcf296d | ||
|
|
5318edb8f2 | ||
|
|
829ac720cd | ||
|
|
5a5ffa4493 | ||
|
|
7dc15c28f8 | ||
|
|
a118a60efc | ||
|
|
ed693fe3df | ||
|
|
0e0e74b8bb | ||
|
|
21c8cf4f9f | ||
|
|
4d071fd406 | ||
|
|
749dd0491f | ||
|
|
d2e80f54b1 | ||
|
|
434418c27b | ||
|
|
4d526dbb6e | ||
|
|
1e9e00a529 | ||
|
|
a5b8ff42f7 | ||
|
|
6969c7fc18 | ||
|
|
b2295dbf94 | ||
|
|
add0d445e8 | ||
|
|
48fef40fd9 | ||
|
|
b3ab9cafc1 | ||
|
|
d9782606bb | ||
|
|
289df4da13 | ||
|
|
62a715a330 | ||
|
|
da2a52a3f2 | ||
|
|
dca1f529a2 | ||
|
|
b489bbd919 | ||
|
|
cc5df055bc | ||
|
|
25d75e5b1c | ||
|
|
acc9c7fe0d | ||
|
|
68f2090bab | ||
|
|
cbb8c01412 | ||
|
|
1858c4da2c | ||
|
|
af6bba1095 | ||
|
|
a1fa8d0de8 | ||
|
|
840778788c | ||
|
|
b2ead99ecc | ||
|
|
d4c88b2d78 | ||
|
|
3718af5f9f | ||
|
|
7437ce8442 | ||
|
|
1b2b6b8e02 | ||
|
|
99f6625e1a | ||
|
|
0fb4396f91 | ||
|
|
c191abb770 | ||
|
|
847914ceff | ||
|
|
450402beb5 | ||
|
|
9c48536174 | ||
|
|
07146a0934 | ||
|
|
e08f13ea31 | ||
|
|
61fff8959c | ||
|
|
7aaf61d44d | ||
|
|
9b8fca51eb | ||
|
|
3f45d5e17e | ||
|
|
e4e51673d5 | ||
|
|
7acdaa2d53 | ||
|
|
64e5e2129b | ||
|
|
ded9e71a4d | ||
|
|
a75db86027 | ||
|
|
b57d74c31f | ||
|
|
7885e753a7 | ||
|
|
6c03e2a265 | ||
|
|
8cdd5ed3c0 | ||
|
|
726720dde1 | ||
|
|
a1581e3c67 | ||
|
|
5d9c1a8c13 | ||
|
|
648825cd60 | ||
|
|
8c51e4d916 | ||
|
|
94c2c12d55 | ||
|
|
e770e2ad1b | ||
|
|
92b42c7f6d | ||
|
|
c433580160 | ||
|
|
bbe5584deb | ||
|
|
48a1caeac8 | ||
|
|
a7daa2b935 | ||
|
|
2f22a5681d | ||
|
|
b780c160da | ||
|
|
efc754f6b1 | ||
|
|
adee8b0e35 | ||
|
|
030cd4d41f | ||
|
|
6f294f43d4 | ||
|
|
3d8803bb86 | ||
|
|
7e6fcddefa | ||
|
|
919615fef7 | ||
|
|
148b6d7485 | ||
|
|
43c2796cc5 | ||
|
|
bdae2954c0 | ||
|
|
3fea550a4d | ||
|
|
1af68aae10 | ||
|
|
14534717c7 | ||
|
|
360ffdca37 | ||
|
|
bec009df8d | ||
|
|
7fdda280cb | ||
|
|
eabfd99734 | ||
|
|
a41894da18 | ||
|
|
890a3d5960 | ||
|
|
9a65c18eca | ||
|
|
b22d0a7161 | ||
|
|
722ac5aa97 | ||
|
|
a438d086b2 | ||
|
|
590ed09bfa | ||
|
|
7bf6c6754d | ||
|
|
19115ef5f1 | ||
|
|
587b59ebfc | ||
|
|
67b66c4d95 | ||
|
|
618ae4d03b | ||
|
|
7b9a9ffb79 | ||
|
|
82e51d42ab | ||
|
|
f301c957f6 | ||
|
|
d6b3bffad8 | ||
|
|
0d535f56e5 | ||
|
|
e2d894d52a | ||
|
|
19288aab46 | ||
|
|
456be67094 | ||
|
|
0d98e248ff | ||
|
|
1afb7e5685 | ||
|
|
d8c5706cff | ||
|
|
f4da3f585a | ||
|
|
5e1f5ca735 | ||
|
|
100db8abdc | ||
|
|
f7cc028891 | ||
|
|
0fce78ccf6 | ||
|
|
3d4729d6b2 | ||
|
|
7381cd5b3f | ||
|
|
2744c94eb3 | ||
|
|
b203fdb4a0 | ||
|
|
e0dda36b5f | ||
|
|
56ff870558 | ||
|
|
6be5e5f182 | ||
|
|
47d255a350 | ||
|
|
1789b65c93 | ||
|
|
d66d855e08 | ||
|
|
10c14bcb38 | ||
|
|
95d433c70b | ||
|
|
a800a5dee6 | ||
|
|
7e0a3cc401 | ||
|
|
89e0dfae11 | ||
|
|
d0aae80f55 | ||
|
|
90539ac31b | ||
|
|
02b3b3b977 | ||
|
|
c22b15d122 | ||
|
|
d4f4644312 | ||
|
|
f89834a276 | ||
|
|
a27a3e7049 | ||
|
|
5d527d719e | ||
|
|
17079532bf | ||
|
|
f382cec9e7 | ||
|
|
071030b784 | ||
|
|
99acea93a9 | ||
|
|
a5e3550d4c | ||
|
|
da2749c44d | ||
|
|
1563263c0d | ||
|
|
bdb1047a67 | ||
|
|
00d03c1022 | ||
|
|
86d51f59d4 | ||
|
|
8c04600684 | ||
|
|
576b8a510c | ||
|
|
bce0d604e1 | ||
|
|
9b23d4f143 | ||
|
|
f21d5a37fe | ||
|
|
09a7d15176 | ||
|
|
a6b3368469 | ||
|
|
66b93c7ca0 | ||
|
|
4a2fa6875d | ||
|
|
c8e20e2a18 | ||
|
|
bce1167c33 | ||
|
|
d745bcb034 | ||
|
|
5ba09817d8 | ||
|
|
fb93774d28 | ||
|
|
59948faf32 | ||
|
|
9d71d4e783 | ||
|
|
9ab09f9c2d | ||
|
|
838661044f | ||
|
|
44a5953e45 | ||
|
|
b1d8e9f064 | ||
|
|
a5bc98c150 | ||
|
|
0c207c16ef | ||
|
|
2bc03f23e0 | ||
|
|
0162e41d78 | ||
|
|
1bec735cb6 | ||
|
|
20b558656d | ||
|
|
4b6370160a | ||
|
|
8eb09dec67 | ||
|
|
bb44f65a68 | ||
|
|
1b33fe4022 | ||
|
|
297097779e | ||
|
|
3eaec42c86 | ||
|
|
677c7083b5 | ||
|
|
b9d9009cd9 | ||
|
|
00b3c8b384 | ||
|
|
731dcc0d29 | ||
|
|
a7935d419e | ||
|
|
7327f8a77b | ||
|
|
5e9cb48b8f | ||
|
|
93e6ad1f4f | ||
|
|
9a27ac29bc | ||
|
|
4c5527f8a8 | ||
|
|
9a8f1688d5 | ||
|
|
4c192139cf | ||
|
|
0a69eb8fff | ||
|
|
75f75862c2 | ||
|
|
6767c1a358 | ||
|
|
82d1a9d6f4 | ||
|
|
9480978364 | ||
|
|
a7e2bbd31c | ||
|
|
954d9cea80 | ||
|
|
c15577565e | ||
|
|
ea71133d1a | ||
|
|
b5b1c38bc4 | ||
|
|
709feac057 | ||
|
|
fada1e94b0 | ||
|
|
2051dac527 | ||
|
|
31b9a23da0 | ||
|
|
b496f3f71d | ||
|
|
f75b7fed7e | ||
|
|
23f7cfa9f4 | ||
|
|
8e144e08e6 | ||
|
|
570eff653f | ||
|
|
d690dff164 | ||
|
|
9e87fa21cf | ||
|
|
c5a17f7f4f | ||
|
|
ac6d19aba1 | ||
|
|
0217077a36 | ||
|
|
2f020f5a52 | ||
|
|
98d60532ec | ||
|
|
9f87e7870c | ||
|
|
4cc8e1a2d0 | ||
|
|
341a988e06 | ||
|
|
fa5de87d84 | ||
|
|
b7f51be8ce | ||
|
|
24dbcbe88a | ||
|
|
854cc86e8d | ||
|
|
582f20f489 | ||
|
|
7cfe5b4dd2 | ||
|
|
0013352d9b | ||
|
|
437b3cb2f7 | ||
|
|
b5aa8d4f67 | ||
|
|
f4fe31e74d | ||
|
|
da0ae03222 | ||
|
|
ed64240df2 | ||
|
|
b573d9bcb9 | ||
|
|
b309dc5480 | ||
|
|
87d91710b9 | ||
|
|
ed248cef3b | ||
|
|
97beb2b2d4 | ||
|
|
e803a7f870 | ||
|
|
63e1c440a1 | ||
|
|
bc9b9e2af6 | ||
|
|
0c5cfcd302 | ||
|
|
dd0808ae54 | ||
|
|
74fb9ff570 | ||
|
|
77309f4982 | ||
|
|
b850ab85e2 | ||
|
|
196661bc0d | ||
|
|
b218229589 | ||
|
|
989b3737c7 | ||
|
|
fe6ff5c042 | ||
|
|
3262da0207 | ||
|
|
9bd4e5008c | ||
|
|
0f52c8b56d | ||
|
|
02f21228dd | ||
|
|
e40dcb9376 | ||
|
|
504c706bea | ||
|
|
6e4149d86c | ||
|
|
039ed4eeb8 | ||
|
|
b42fde69c0 | ||
|
|
e14ffb40cf | ||
|
|
925193fb3e | ||
|
|
0549a70d93 | ||
|
|
3bdc40bfd0 | ||
|
|
f6b8387814 | ||
|
|
82d105f759 | ||
|
|
1a0a4bc049 | ||
|
|
92edeb11c6 | ||
|
|
203728676a | ||
|
|
76a5076e56 | ||
|
|
8097c54e79 | ||
|
|
717a30bfe0 | ||
|
|
d5fbd1c25f | ||
|
|
0ec4d799f8 | ||
|
|
7465fde308 | ||
|
|
57b1ce9328 | ||
|
|
a013f9b27b | ||
|
|
53ebe51f1c | ||
|
|
7949cd6ebc | ||
|
|
0f7bb25cf7 | ||
|
|
1eb5d6c900 | ||
|
|
bbf92533f4 | ||
|
|
b20fd9d10e | ||
|
|
5d875be274 | ||
|
|
168665b9a7 | ||
|
|
edc77a0263 | ||
|
|
385268226f | ||
|
|
33bae7053f | ||
|
|
d75e7fc8ca | ||
|
|
2b45f0b2fd | ||
|
|
343a0d35fe | ||
|
|
fae022dc64 | ||
|
|
6e5957830e | ||
|
|
b08ab6ae1f | ||
|
|
c42507903d | ||
|
|
c658770891 | ||
|
|
d4ef9c05d7 | ||
|
|
d5a759e1cf | ||
|
|
8abd2ec53f | ||
|
|
9b622b7e77 | ||
|
|
59e3956397 | ||
|
|
d99693a564 | ||
|
|
e15f3e4938 | ||
|
|
f5125421d0 | ||
|
|
8215a3ce9a | ||
|
|
cf90751638 | ||
|
|
9040f6a8cd | ||
|
|
a0f49a91e4 | ||
|
|
1a1f93bc78 | ||
|
|
e26f342e00 | ||
|
|
da08b3afc9 | ||
|
|
c08976e1a2 | ||
|
|
dd5c6aa757 | ||
|
|
f7223c6f00 | ||
|
|
6d81e44670 | ||
|
|
76c4639ada | ||
|
|
6ab15ab890 | ||
|
|
baa30b4fd0 | ||
|
|
40f2a03c85 | ||
|
|
98687a3463 | ||
|
|
d63b7c92a8 | ||
|
|
16fe8553af | ||
|
|
ace81155a4 | ||
|
|
f639787e02 | ||
|
|
3343e9f7b3 | ||
|
|
9ac68310f7 | ||
|
|
a63013de5c | ||
|
|
c414599635 | ||
|
|
03ee25d4ef | ||
|
|
616f26cfe5 | ||
|
|
4265509e9c | ||
|
|
11a9005aca | ||
|
|
8041fc9314 | ||
|
|
bb347acc56 | ||
|
|
0b430dc71f | ||
|
|
23c00ffa03 | ||
|
|
e31b040085 | ||
|
|
ea1df84727 | ||
|
|
34e2064b39 | ||
|
|
c884f65a26 | ||
|
|
303f126e5b | ||
|
|
26ca2a3429 | ||
|
|
fef124921c | ||
|
|
f59e367706 | ||
|
|
d362102c6a | ||
|
|
f61cbed15a | ||
|
|
b50292a215 | ||
|
|
3bfcdf755a | ||
|
|
47670fcf73 | ||
|
|
30a6a7de39 | ||
|
|
722aa3f2d3 | ||
|
|
755cbcde3c | ||
|
|
5a41e5f240 | ||
|
|
c00849b0bc | ||
|
|
57b103a81b | ||
|
|
fb64a3ec8b | ||
|
|
c9a3b933f8 | ||
|
|
cece49764c | ||
|
|
b6f382ef6f | ||
|
|
4993dfffe6 | ||
|
|
7fa03902b4 | ||
|
|
19458ec473 | ||
|
|
432eda0f83 | ||
|
|
d76eddf41e | ||
|
|
dd6d1e435b | ||
|
|
6fda03ec92 | ||
|
|
ad697cc763 | ||
|
|
0e2be42514 | ||
|
|
25d21060e3 | ||
|
|
12437c2ded | ||
|
|
52c66d20dc | ||
|
|
628ea46c58 | ||
|
|
7d0de42d98 | ||
|
|
c0aedeb7ee | ||
|
|
edd7d9ccd3 | ||
|
|
22d1bc50db | ||
|
|
e8bb3bcf23 | ||
|
|
49be8ee21c | ||
|
|
88b85231ca | ||
|
|
a17d2bbb40 | ||
|
|
77fb8085f4 | ||
|
|
8debe95b61 | ||
|
|
c72c95496e | ||
|
|
c1d9510cb3 | ||
|
|
20b087e3d2 | ||
|
|
096fa046f8 | ||
|
|
1b6affe498 | ||
|
|
2d422a845b | ||
|
|
2e5194e279 | ||
|
|
c89eb81dec | ||
|
|
d8cdd88ace | ||
|
|
f32ae14883 | ||
|
|
f355c026c0 | ||
|
|
dca7a6f98b | ||
|
|
dd6e8eb82c | ||
|
|
c6804c8e2b | ||
|
|
ad7d9b7bab | ||
|
|
228d1512d9 | ||
|
|
70c193132d | ||
|
|
f4428761cb | ||
|
|
6c7f3077c9 | ||
|
|
ad6e1a9d59 | ||
|
|
e888ef5a60 | ||
|
|
dcadf2cbfb | ||
|
|
0b7e456d47 | ||
|
|
0d2ba4766e | ||
|
|
814e5de224 | ||
|
|
de9c42e80f | ||
|
|
1f52f5e7bd | ||
|
|
0d146738de | ||
|
|
36fce98517 | ||
|
|
4f712b0657 | ||
|
|
a06c82a120 | ||
|
|
75f4338350 | ||
|
|
e1eb28836a | ||
|
|
a7ccc24c80 | ||
|
|
5d761cec76 | ||
|
|
bdf6b2d49d | ||
|
|
ad228e3c3b | ||
|
|
e6a7091981 | ||
|
|
206ffc66aa | ||
|
|
c70ba4844f | ||
|
|
639e8271de | ||
|
|
aec0d263fa | ||
|
|
e16ff37f86 | ||
|
|
3df19ff984 | ||
|
|
f1ef63e5c6 | ||
|
|
9b73900ba6 | ||
|
|
64984667ad | ||
|
|
26a9b72c34 | ||
|
|
ad5d879f8c | ||
|
|
8fe64058aa | ||
|
|
0b33519709 | ||
|
|
510ddd28c8 | ||
|
|
523405e62d | ||
|
|
d67aad893f | ||
|
|
8e72cf15e6 | ||
|
|
dbc5a32b74 | ||
|
|
6217689cc0 | ||
|
|
b39d0ced69 | ||
|
|
5a271f06ce | ||
|
|
4f34a54777 | ||
|
|
6570944b67 | ||
|
|
70d8b167e7 | ||
|
|
6a95ad2ca9 | ||
|
|
925122a411 | ||
|
|
02fa22bbdd | ||
|
|
8e2a5eaa36 | ||
|
|
47a6023382 | ||
|
|
eadf97765d | ||
|
|
3aaa77db22 | ||
|
|
ecd072430f | ||
|
|
8058fb0791 | ||
|
|
9f96da2014 | ||
|
|
726f798ff7 | ||
|
|
cbe5642b9d | ||
|
|
7620066c8a | ||
|
|
dc5bc07825 | ||
|
|
71ccc0a6ea | ||
|
|
8065bb615a | ||
|
|
da8101a2aa | ||
|
|
71fc406381 | ||
|
|
35588dd303 | ||
|
|
32220a5beb | ||
|
|
4e81522571 | ||
|
|
ffbae64a2d | ||
|
|
502fdab75e | ||
|
|
f9f2d925ba | ||
|
|
519bbe8f66 | ||
|
|
b18a62c63e | ||
|
|
e73beab5ea | ||
|
|
0061e166d4 | ||
|
|
7504d9e50c | ||
|
|
f009a534c2 | ||
|
|
b4de168cf4 | ||
|
|
ff7019999f | ||
|
|
cad58e8a2d | ||
|
|
f2a506affa | ||
|
|
7266d31813 | ||
|
|
30f9776e60 | ||
|
|
ae08ea3211 | ||
|
|
ae4d8b453e | ||
|
|
7aa48565d0 | ||
|
|
5b58e6a715 | ||
|
|
c7ace4b4bc | ||
|
|
81404baf1d | ||
|
|
da80c70c0c | ||
|
|
111774c859 | ||
|
|
050f03bb36 | ||
|
|
7ff18192a4 | ||
|
|
2456ac52eb | ||
|
|
df9a4015a5 | ||
|
|
80cdf895c5 | ||
|
|
76d9db3e0b | ||
|
|
45fe8700b8 | ||
|
|
0f8b2ad007 | ||
|
|
90704c8bb6 | ||
|
|
d8bcddb3d1 | ||
|
|
986e4dc7b8 | ||
|
|
8ba27e14a1 | ||
|
|
6242af34e4 | ||
|
|
ec6a1cc823 | ||
|
|
613edd5195 | ||
|
|
c986a08ad9 | ||
|
|
27a92f94c8 | ||
|
|
c964a26476 | ||
|
|
fcee080a2d | ||
|
|
e2c17661b0 | ||
|
|
866da10f27 | ||
|
|
8aedafad60 | ||
|
|
c4957ddd8d | ||
|
|
5dda0b7eea | ||
|
|
1e30673adc | ||
|
|
886d859fbe | ||
|
|
bdb34e7617 | ||
|
|
56a5accad0 | ||
|
|
c759cf5f37 | ||
|
|
31232b4416 | ||
|
|
2af09d1d58 | ||
|
|
03dd780ddd | ||
|
|
9f8d4e1291 | ||
|
|
105dc2bd35 | ||
|
|
a1ddf53df4 | ||
|
|
d37061bf46 | ||
|
|
35672b5896 | ||
|
|
6f2597ed11 | ||
|
|
9b624d5b9b | ||
|
|
dc2650889c | ||
|
|
dbfd96583a | ||
|
|
da9f541deb | ||
|
|
f540a80354 | ||
|
|
4357e851bf | ||
|
|
975157d75b | ||
|
|
e8a8e0db79 | ||
|
|
36141d27fc | ||
|
|
16bd983387 | ||
|
|
b96ca2237f | ||
|
|
454775fb97 | ||
|
|
1d79338a1a | ||
|
|
768658f61b | ||
|
|
60f7c4f401 | ||
|
|
46e2f885af | ||
|
|
6daefdb177 | ||
|
|
58d278560e | ||
|
|
62c393c119 | ||
|
|
5201860bb0 | ||
|
|
9758cdba7c | ||
|
|
22cee2d0cd | ||
|
|
c78d6b057e | ||
|
|
731ff7b13f | ||
|
|
f38c67da0c | ||
|
|
a136999258 | ||
|
|
816ea42840 | ||
|
|
7f2b2da7f7 | ||
|
|
409a8a5fbb | ||
|
|
268ddc595f | ||
|
|
23c554ee96 | ||
|
|
1b967a4a6a | ||
|
|
3ec56eaf9f | ||
|
|
e93fb0b3a0 | ||
|
|
0679f6fa59 | ||
|
|
44e2b9aa0a | ||
|
|
a1c85902f6 | ||
|
|
c17dd11e01 | ||
|
|
136b188fd4 | ||
|
|
a614273af5 | ||
|
|
d07297c197 | ||
|
|
84b7165e90 | ||
|
|
d7b6b25059 | ||
|
|
1c27ab79d9 | ||
|
|
6431f1f288 | ||
|
|
319a98fdf8 | ||
|
|
a3a3d1a53e | ||
|
|
81c7a968ed | ||
|
|
96e524d2a0 | ||
|
|
56f1044e12 | ||
|
|
bb99e4152b | ||
|
|
7b0376f3d3 | ||
|
|
f8e7e308c3 | ||
|
|
fad23ea54e | ||
|
|
8963b8e3c9 | ||
|
|
962e1e6566 | ||
|
|
2ecffda170 | ||
|
|
6e9256f483 | ||
|
|
c25c60898b | ||
|
|
ef11bf52a6 | ||
|
|
f2ab58d841 | ||
|
|
d22cdf2dd9 | ||
|
|
6b87ecfc1b | ||
|
|
efd50ecac9 | ||
|
|
4be76e9969 | ||
|
|
d3c63e66e3 | ||
|
|
f92ca1d98d | ||
|
|
2affa1fe26 | ||
|
|
a3eff87e80 | ||
|
|
f0312f607b | ||
|
|
e402601cf8 | ||
|
|
799ac6edf6 | ||
|
|
560902c8f1 | ||
|
|
2093fe6bfd | ||
|
|
79ed0886c6 | ||
|
|
be0726ce50 | ||
|
|
7222f533b8 | ||
|
|
5f1ca16402 | ||
|
|
530360d515 | ||
|
|
4f51687550 | ||
|
|
29dae85ad5 | ||
|
|
91c454da09 | ||
|
|
eb92f49811 | ||
|
|
a48e614ce2 | ||
|
|
7cb2f31c8d | ||
|
|
4092eba600 | ||
|
|
b1c666730e | ||
|
|
b0986a5f7f | ||
|
|
68043b5ca6 | ||
|
|
726df9dad8 | ||
|
|
f1a12ce04c | ||
|
|
18dcd2c2f7 | ||
|
|
b39cb1d13a | ||
|
|
a02c39efd0 | ||
|
|
6f2c036601 | ||
|
|
1353499e74 | ||
|
|
d78a62da87 | ||
|
|
718488b7b5 | ||
|
|
5f7fcd7730 | ||
|
|
c93aa53187 | ||
|
|
c271d1c055 | ||
|
|
3b0b0147bd | ||
|
|
6875d3f6da | ||
|
|
d1cd82d0c1 | ||
|
|
e8788bebd5 | ||
|
|
d75f26d719 | ||
|
|
d314e2d0b7 | ||
|
|
36762c7cad | ||
|
|
0c09d2bf9a | ||
|
|
150a44fa6c | ||
|
|
744354db56 | ||
|
|
45f72219f8 | ||
|
|
5329a71b3d | ||
|
|
d7b02c502e | ||
|
|
50650e9342 | ||
|
|
ea2086b45f | ||
|
|
6c3277e013 | ||
|
|
a07b4cd57f | ||
|
|
02261f9b40 | ||
|
|
f542233cbd | ||
|
|
1e89f7ed68 | ||
|
|
18c7aa44a8 | ||
|
|
5ee838d412 | ||
|
|
7228bab79b | ||
|
|
9b9ff2b7e6 | ||
|
|
e03caf6234 | ||
|
|
2e1ddbc725 | ||
|
|
5ba30fd628 | ||
|
|
e4d5c1b751 | ||
|
|
b25f43e865 | ||
|
|
67e696bf62 | ||
|
|
6c3e0a9c06 | ||
|
|
6fcb76cb37 | ||
|
|
933b16fc61 | ||
|
|
4fda493384 | ||
|
|
a5a74f6d20 | ||
|
|
d4dd5f1508 | ||
|
|
42a89d0dcd | ||
|
|
3a2c0a6506 | ||
|
|
abcc2d5867 | ||
|
|
5be3458f84 | ||
|
|
b7ebc276aa | ||
|
|
db33d71102 | ||
|
|
11e24aa42d | ||
|
|
8a0793234b | ||
|
|
8f6c7c4c7f | ||
|
|
2c1b15def9 | ||
|
|
b029e442b2 | ||
|
|
ca528285cf | ||
|
|
a02fd5982c | ||
|
|
cf22f402d8 | ||
|
|
763bf18daa | ||
|
|
142d3b45e1 | ||
|
|
fb149ce34e | ||
|
|
b05dbe3886 | ||
|
|
701afe9e60 | ||
|
|
3ab7d077d1 | ||
|
|
36c267a1d2 | ||
|
|
38c9cd2b85 | ||
|
|
c2c9c844e2 | ||
|
|
87570b78eb | ||
|
|
bf2813fee8 | ||
|
|
09168ccda7 | ||
|
|
acf833cb4a | ||
|
|
ebf8547c38 | ||
|
|
a8e158c9e3 | ||
|
|
0b28ab9e57 | ||
|
|
8161b4c1c1 | ||
|
|
5c6d94d756 | ||
|
|
ec5cf18bd1 | ||
|
|
174e0c98bc | ||
|
|
2b75f440f0 | ||
|
|
dd28ff5986 | ||
|
|
ef75c3d19e | ||
|
|
b954eecad1 | ||
|
|
53215acc8e | ||
|
|
0f2c5bcf3d | ||
|
|
4014ab0212 | ||
|
|
1e8f84854c | ||
|
|
83e38c959a | ||
|
|
d890478fa2 | ||
|
|
bf1a5ce000 | ||
|
|
0f6fb5439a | ||
|
|
e14071f2bd | ||
|
|
c099be56da | ||
|
|
0d84d799ea | ||
|
|
353564abe0 | ||
|
|
d6c1f531a9 | ||
|
|
8764fc1467 | ||
|
|
b4c3a2ffbd | ||
|
|
fba23796d6 | ||
|
|
22ebc09f00 | ||
|
|
e65bafa793 | ||
|
|
c1d3f1a98b | ||
|
|
10559f0c68 | ||
|
|
c34b0455e4 | ||
|
|
f22370cca0 | ||
|
|
a21d1e693f | ||
|
|
b51c57e6fe | ||
|
|
378d8157a6 | ||
|
|
bc31844106 | ||
|
|
62917621c2 | ||
|
|
dcd48a9ca1 | ||
|
|
e776380ff8 | ||
|
|
0d6a035f95 | ||
|
|
07a0b163f9 | ||
|
|
e4e7c3ae54 | ||
|
|
b446179fa4 | ||
|
|
ade738f7b7 | ||
|
|
0f1fde498d | ||
|
|
f63a18deea | ||
|
|
5d91edd695 | ||
|
|
802bfa26c9 | ||
|
|
05be5bf1f9 | ||
|
|
006d375358 | ||
|
|
e98dee6a22 | ||
|
|
658bd82490 | ||
|
|
bb06a0498a | ||
|
|
1892102dc3 | ||
|
|
c3d77aea6a | ||
|
|
573b73eb10 | ||
|
|
31489976ae | ||
|
|
c8c564bdd6 | ||
|
|
a1af1cb099 | ||
|
|
a227ab852a | ||
|
|
0bcd221fad | ||
|
|
2cb4bb9db7 | ||
|
|
4fc6e33ccc | ||
|
|
be97380a3b | ||
|
|
121de2ccf3 | ||
|
|
695702b61b | ||
|
|
bcf995bfe1 | ||
|
|
08f1c05144 | ||
|
|
d4586d4bcc | ||
|
|
0d80202573 | ||
|
|
618cfbf1db | ||
|
|
de7fe193ab | ||
|
|
25358444ad | ||
|
|
3366a32077 | ||
|
|
804286f90d | ||
|
|
97fce623d5 | ||
|
|
8444a7a99e | ||
|
|
f81370876c | ||
|
|
971fb677b2 | ||
|
|
976f5b2a6e | ||
|
|
32738ba59c | ||
|
|
6f83061139 | ||
|
|
8f22d672db | ||
|
|
df8b7db3ae | ||
|
|
e4a3fd5565 | ||
|
|
4cc94b6063 | ||
|
|
20f2d30ab8 | ||
|
|
6024088ca0 | ||
|
|
a323a87b59 | ||
|
|
063d04f913 | ||
|
|
d4104bf5ab | ||
|
|
617e5e4e25 | ||
|
|
408428edf4 | ||
|
|
4d1a92adea | ||
|
|
31b0db0892 | ||
|
|
ed6f7eb56a | ||
|
|
deb6f16d82 | ||
|
|
f6511471d4 | ||
|
|
9dee04a314 | ||
|
|
018c8c1468 | ||
|
|
ea74b981d9 | ||
|
|
7d0be360bd | ||
|
|
2af240bd18 | ||
|
|
72c5f6a223 | ||
|
|
0a7a4379dc | ||
|
|
7093eec9c4 | ||
|
|
96ed6c59ee | ||
|
|
434fff890a | ||
|
|
1bbf1e4106 | ||
|
|
08d52c53a4 | ||
|
|
6edb3618f6 | ||
|
|
58575231bd | ||
|
|
04ce4c05df | ||
|
|
fa19fd0c15 | ||
|
|
0bf29c8207 | ||
|
|
fe784c1e36 | ||
|
|
70e4dc550a | ||
|
|
fa9b15dafe | ||
|
|
e06c8aafdc | ||
|
|
c2eea35b18 | ||
|
|
02f8b29837 | ||
|
|
8199d10742 | ||
|
|
7951041eb0 | ||
|
|
5cc016c8a2 | ||
|
|
9ee53be986 | ||
|
|
8df33d34e8 | ||
|
|
cd321a3e6e | ||
|
|
47326f566c | ||
|
|
642231ba25 | ||
|
|
87f591a697 | ||
|
|
ec4f27b4c6 | ||
|
|
8c95b7569d | ||
|
|
709bc9aa12 | ||
|
|
a0e6456455 | ||
|
|
fd18b2289d | ||
|
|
fbaefc9af1 | ||
|
|
269c940edc | ||
|
|
4c5b29647b | ||
|
|
72fd6e0c7d | ||
|
|
f994560859 | ||
|
|
ca58bb4ca1 | ||
|
|
d6c4ca5fe5 | ||
|
|
5e6cd0090f | ||
|
|
f0cb0c7ef7 | ||
|
|
2931f8db08 | ||
|
|
c55e73da24 | ||
|
|
06c8d88eb6 | ||
|
|
ea8004087a | ||
|
|
9984636f5a | ||
|
|
21f0059487 | ||
|
|
7b63f210aa | ||
|
|
766f9ed54e | ||
|
|
39e6aa4094 | ||
|
|
a2799bba09 | ||
|
|
176aefff54 | ||
|
|
b7abc27776 | ||
|
|
8ffb0cbe90 | ||
|
|
9c0773a399 | ||
|
|
eb92306c48 | ||
|
|
780cd5f7b9 | ||
|
|
cba7a882aa | ||
|
|
ccc41d7363 | ||
|
|
aafc2a823d | ||
|
|
187d0139aa | ||
|
|
ca26283a1e | ||
|
|
0796f2e5a0 | ||
|
|
d73f27c612 | ||
|
|
bf5685860a | ||
|
|
9d74134bef | ||
|
|
4b9a471f29 | ||
|
|
58c1db54a8 | ||
|
|
ab43e2accb | ||
|
|
c0551de093 | ||
|
|
23e75f0f03 | ||
|
|
b185f31c9e | ||
|
|
6f505389d9 | ||
|
|
0911331974 | ||
|
|
982381bff0 | ||
|
|
57773816b3 | ||
|
|
b298ec4228 | ||
|
|
3add296f78 | ||
|
|
76537265cb | ||
|
|
109e12aa56 | ||
|
|
6c3e266eb9 | ||
|
|
4cddfc63d8 | ||
|
|
4af6a40f39 | ||
|
|
f5048b725b | ||
|
|
14e5bc02ed | ||
|
|
2f98b3ee90 | ||
|
|
ebd86d604e | ||
|
|
7f2371858c | ||
|
|
a588140bc6 | ||
|
|
a935f8a1f4 | ||
|
|
842a00a5b6 | ||
|
|
8599143069 | ||
|
|
193b324242 | ||
|
|
0123f8f2a9 | ||
|
|
97f38c7706 | ||
|
|
ee7f2e1175 | ||
|
|
57d0a8300b | ||
|
|
d895cd0f2d | ||
|
|
f81db93ef0 | ||
|
|
919b009b50 | ||
|
|
8ddc0ceefb | ||
|
|
abfc61b50d | ||
|
|
51997775bd | ||
|
|
45b219107c | ||
|
|
7bf84d05ad | ||
|
|
39b95cc365 | ||
|
|
658bdb1ecb | ||
|
|
e90dc35e51 | ||
|
|
e9aa870255 | ||
|
|
2cae9c3ef4 | ||
|
|
95f4fdb603 | ||
|
|
eaa5c7a442 | ||
|
|
cbf7fb2f75 | ||
|
|
c8ae9c077c | ||
|
|
11c6753bd5 | ||
|
|
21a671eabe | ||
|
|
58ce1b88c4 | ||
|
|
26cef3ce0f | ||
|
|
381ea37d86 | ||
|
|
33abb3ecf8 | ||
|
|
6d17e27de0 | ||
|
|
95386fb1dd | ||
|
|
34fe3b9d6d | ||
|
|
523d49c076 | ||
|
|
2c2a60faf1 | ||
|
|
110f7a8a29 | ||
|
|
38d56c994e | ||
|
|
309dcf9977 | ||
|
|
0acbbf0651 | ||
|
|
bdec513ca4 | ||
|
|
c7a9e454ac | ||
|
|
c38792ef9e | ||
|
|
17be8e37f5 | ||
|
|
06437bb51e | ||
|
|
5da973d465 | ||
|
|
e20dbd04e0 | ||
|
|
5a8b738818 | ||
|
|
59f29270ee | ||
|
|
f3ef220dce | ||
|
|
526d55b4af | ||
|
|
62992f5485 | ||
|
|
31cf6fbe00 | ||
|
|
7a5a57d50f | ||
|
|
14e5437cae | ||
|
|
6935279beb | ||
|
|
f8f29622a4 | ||
|
|
40ee885e8e | ||
|
|
21d38e9948 | ||
|
|
32ef29bd18 | ||
|
|
4c82d526ee | ||
|
|
962b7ee3d2 | ||
|
|
b81131f4c0 | ||
|
|
633f4567f3 | ||
|
|
0a652a1ab8 | ||
|
|
be3bea4325 | ||
|
|
708007e42c | ||
|
|
36306e283c | ||
|
|
3f39d6e807 | ||
|
|
daf52dec57 | ||
|
|
b72a3cdcce | ||
|
|
ee9275fedb | ||
|
|
8831245e30 | ||
|
|
0ae5b142a6 | ||
|
|
6f0873a2c3 | ||
|
|
8cfb06f1a9 | ||
|
|
acbcde3fee | ||
|
|
ea9ecd7386 | ||
|
|
d00742f43f | ||
|
|
14f480af8f | ||
|
|
8e2ee686bd | ||
|
|
b04691e229 | ||
|
|
ab15498bdf | ||
|
|
3c590b0e2c | ||
|
|
df8f21d9fe | ||
|
|
adcd9106f9 | ||
|
|
7fb2706667 | ||
|
|
8f1b28da34 | ||
|
|
37d182463a | ||
|
|
eb77a41535 | ||
|
|
647e535c5b | ||
|
|
24dd8562c9 | ||
|
|
3b3f044463 | ||
|
|
fc4c23fdbb | ||
|
|
c3cfe8204a | ||
|
|
a0968ce1ce | ||
|
|
83d883826b | ||
|
|
dffb4f96ae | ||
|
|
3c7cf589ad | ||
|
|
378191a52c | ||
|
|
def070d651 | ||
|
|
417c1f07f9 | ||
|
|
0de7572f7b | ||
|
|
581ff17857 | ||
|
|
8847af8343 | ||
|
|
c061fe1ff5 | ||
|
|
1f5792ecbb | ||
|
|
4697cf3c79 | ||
|
|
532e0bbf75 | ||
|
|
caf485d3da | ||
|
|
7c443ed218 | ||
|
|
562be90907 | ||
|
|
0cab1924c2 | ||
|
|
9bc582857b | ||
|
|
f4cf2137be | ||
|
|
a786643d0b | ||
|
|
bb193e9dd0 | ||
|
|
0d99a4f49f | ||
|
|
7078d3b530 | ||
|
|
504e5836f0 | ||
|
|
5bbbd2abf9 | ||
|
|
304ec7e231 | ||
|
|
41619e4f83 | ||
|
|
e50915017e | ||
|
|
9e606b3da8 | ||
|
|
eba23b1ae4 | ||
|
|
e39a734184 | ||
|
|
d865515c29 | ||
|
|
58cf7fc5d6 | ||
|
|
b59ea21e4f | ||
|
|
38734bd7c6 | ||
|
|
ec87dca2d8 | ||
|
|
19801bf784 | ||
|
|
bb513a7066 | ||
|
|
bd2623bd86 | ||
|
|
7f65751f62 | ||
|
|
0bd79918d9 | ||
|
|
0ddf99d13c | ||
|
|
561269480c | ||
|
|
35d3c0bf5a | ||
|
|
4a2fdee972 | ||
|
|
d5087c07ca | ||
|
|
dd687223e3 | ||
|
|
67ffa2fd0c | ||
|
|
b193f0f3d0 | ||
|
|
37b83c8923 | ||
|
|
2216d528f6 | ||
|
|
76b0c3c7d3 | ||
|
|
d3340f828d | ||
|
|
611158ac54 | ||
|
|
048616fb8d | ||
|
|
1675410256 | ||
|
|
723fbacb00 | ||
|
|
00751754a9 | ||
|
|
c3230c2561 | ||
|
|
a8284f875b | ||
|
|
f099e2997f | ||
|
|
7d8ce4ec92 | ||
|
|
6fc13e9548 | ||
|
|
12a0354084 | ||
|
|
13bf7d1bbc | ||
|
|
0928519132 | ||
|
|
a5e82c1d4d | ||
|
|
40bf14989d | ||
|
|
e620470fe1 | ||
|
|
0ffe81cb71 | ||
|
|
1406d99aba | ||
|
|
29e2fa0fed | ||
|
|
f6ce46541e | ||
|
|
55af1083ec | ||
|
|
f1315908c7 | ||
|
|
fca74356da | ||
|
|
bbf2a47866 | ||
|
|
b708a4a05c | ||
|
|
d6e4a98387 | ||
|
|
886bef2e3d | ||
|
|
d75841ef23 | ||
|
|
07c1f18e51 | ||
|
|
4738d4e87a | ||
|
|
8f77c33d68 | ||
|
|
f39fa5d489 | ||
|
|
c41c9de839 | ||
|
|
22aedc4fb6 | ||
|
|
225e10cfc4 | ||
|
|
5ebb7cf6f5 | ||
|
|
516b6ff1d0 | ||
|
|
bce6859af0 | ||
|
|
5404aaba6f | ||
|
|
23a35c84c9 | ||
|
|
63785caf04 | ||
|
|
2f552fbf43 | ||
|
|
a5f05bf3e1 | ||
|
|
92037abbf3 | ||
|
|
830b1a90c8 | ||
|
|
439b4eb6e8 | ||
|
|
2b187c30d0 | ||
|
|
8a0b1ca1b5 | ||
|
|
cd243fa935 | ||
|
|
e21cdf13a4 | ||
|
|
e02695bf7b | ||
|
|
ec80a55b74 | ||
|
|
daa80c244d | ||
|
|
13254440c6 | ||
|
|
52394c3c18 | ||
|
|
44e7631970 | ||
|
|
bf7fd2bcd7 | ||
|
|
f1829d078a | ||
|
|
6a307ed322 | ||
|
|
9f2461061f | ||
|
|
93ad0491ef | ||
|
|
8f5fec8064 | ||
|
|
ec3cab5fea | ||
|
|
3cbd21c0b9 | ||
|
|
2f7a60abfb | ||
|
|
a1537a5271 | ||
|
|
344a37c03c | ||
|
|
ed1e343b93 | ||
|
|
7af8e6c641 | ||
|
|
ce288652d5 | ||
|
|
56ba8adc3a | ||
|
|
3594a79c49 | ||
|
|
2cd2dd04ed | ||
|
|
62aab2f872 | ||
|
|
2d13c4653d | ||
|
|
c4ded6ee5e | ||
|
|
39ad9702de | ||
|
|
67dba890cd | ||
|
|
f19da6360d | ||
|
|
a5478b93e0 | ||
|
|
a39a3f15a3 | ||
|
|
4307982fdf | ||
|
|
c518b1ef00 | ||
|
|
7d5a5a7416 | ||
|
|
26f912ef86 | ||
|
|
fcbe6e1f90 | ||
|
|
5e849f143b | ||
|
|
b982373813 | ||
|
|
7dc1a3246c | ||
|
|
4c5fa2d641 | ||
|
|
e599ef430a | ||
|
|
affd6e3216 | ||
|
|
7f4b221bc3 | ||
|
|
af0c137ec3 | ||
|
|
9a8b45fc53 | ||
|
|
3075de446f | ||
|
|
e906095a7b | ||
|
|
7d14aea067 | ||
|
|
ede60044fd | ||
|
|
35d7672d73 | ||
|
|
d814bc50fb | ||
|
|
f14660f82c | ||
|
|
414ddd17e8 | ||
|
|
137769a694 | ||
|
|
b5ea753ff4 | ||
|
|
1ba4841865 | ||
|
|
a3038924fb | ||
|
|
dcf3229b37 | ||
|
|
12f52cdfb8 | ||
|
|
2677a5fa91 | ||
|
|
3940189be0 | ||
|
|
f841e89dc7 | ||
|
|
97d280ee0c | ||
|
|
9c6ed93f80 | ||
|
|
ec0fd1b67a | ||
|
|
cb351dca10 | ||
|
|
44d2ec757c | ||
|
|
cc29b8d4b6 | ||
|
|
8b5fa32d9b | ||
|
|
500d017c31 | ||
|
|
7785352021 | ||
|
|
ed6f715ae6 | ||
|
|
695244afad | ||
|
|
8b20c3f26d | ||
|
|
26f0430a7c | ||
|
|
f0e09c887a | ||
|
|
a1787da97c | ||
|
|
3f458187a4 | ||
|
|
b4385901da | ||
|
|
d3c99be063 | ||
|
|
d0679a6fd1 | ||
|
|
6ddc9b4e8e | ||
|
|
90970d94c0 | ||
|
|
c200b28dc7 | ||
|
|
f0073bec2f | ||
|
|
a193366b3d | ||
|
|
89bca975a3 | ||
|
|
d6a773c90c | ||
|
|
0c0e17f924 | ||
|
|
8bbd9b5878 | ||
|
|
31e2925a9a | ||
|
|
49690b04ed | ||
|
|
d748b26f40 | ||
|
|
ee1ea36d0a | ||
|
|
70edc56fc1 | ||
|
|
b9e0ca340d | ||
|
|
161e3a771b | ||
|
|
576c0bef88 | ||
|
|
f70d6d2acb | ||
|
|
5a1d946046 | ||
|
|
88d79f3517 | ||
|
|
26f31071ca | ||
|
|
b473dc4ac9 | ||
|
|
7beb309faf | ||
|
|
72df219306 | ||
|
|
1a17200cc8 | ||
|
|
6666426739 | ||
|
|
3424444be3 | ||
|
|
cd514b69fd | ||
|
|
e0c179d954 | ||
|
|
4850579da3 | ||
|
|
6e6be98b15 | ||
|
|
474111c1af | ||
|
|
0d7b476923 | ||
|
|
99fa758423 | ||
|
|
83008d4959 | ||
|
|
f6bfb857c0 | ||
|
|
dd2acd26eb | ||
|
|
8b69c75144 | ||
|
|
3a1bbd0271 | ||
|
|
4220a7a9be | ||
|
|
0ede467256 | ||
|
|
41d1c14c68 | ||
|
|
800369d58a | ||
|
|
bee8d66ead | ||
|
|
d9fa73c054 | ||
|
|
66e05e7d13 | ||
|
|
2f3f375b88 | ||
|
|
aedd749168 | ||
|
|
e863e55cdb | ||
|
|
ef3a7e80c6 | ||
|
|
ef74bc533d | ||
|
|
ae14163ca2 | ||
|
|
0bcb32f704 | ||
|
|
bc9f09a3c0 | ||
|
|
9f0c167bf9 | ||
|
|
416b80cefc | ||
|
|
e35caa60ad | ||
|
|
4fc3041a6e | ||
|
|
1ac9c07e22 | ||
|
|
dec385abd4 | ||
|
|
bd5076101c | ||
|
|
568b65c275 | ||
|
|
244917c99d | ||
|
|
024a38bcb1 | ||
|
|
03f2459388 | ||
|
|
16600efc1d | ||
|
|
04fa125e95 | ||
|
|
c871a90b4d | ||
|
|
15860185d9 | ||
|
|
6156341904 | ||
|
|
d43a4350a6 | ||
|
|
fc0142ec8e | ||
|
|
02ef1a64ff | ||
|
|
a8f5e2becf | ||
|
|
487f9efa57 | ||
|
|
1dae056012 | ||
|
|
b200163de9 | ||
|
|
e69ebe5add | ||
|
|
0541a5fc77 | ||
|
|
476f21f22c | ||
|
|
fc45d2e3d1 | ||
|
|
33643797ad | ||
|
|
3c15bc50d0 | ||
|
|
052769196d | ||
|
|
1515bd07a1 | ||
|
|
3adc8626e8 | ||
|
|
4680e58e08 | ||
|
|
f389b94d8a | ||
|
|
9b283254c3 | ||
|
|
2339ac3f93 | ||
|
|
8ae28888e0 | ||
|
|
44a3a441aa | ||
|
|
17a2fb886f | ||
|
|
c8b4fabc37 | ||
|
|
44b9214141 | ||
|
|
f474af1660 | ||
|
|
b7464899ec | ||
|
|
44c37bf774 | ||
|
|
7d9d66d782 | ||
|
|
a93e2d0421 | ||
|
|
f905c16f21 | ||
|
|
eec7fa394f | ||
|
|
5db5f42b71 | ||
|
|
1194154309 | ||
|
|
c5e2c0fe88 | ||
|
|
9f2da28bb9 | ||
|
|
0cd64fb5c9 | ||
|
|
0ea8ef76f0 | ||
|
|
c679cae92c | ||
|
|
80e1997721 | ||
|
|
f077e6efdc | ||
|
|
763a1b6265 | ||
|
|
3323c087c5 | ||
|
|
3d5ed6669c | ||
|
|
58448bbcb8 | ||
|
|
029e7ca680 | ||
|
|
dfb758a82d | ||
|
|
acdd0b8e68 | ||
|
|
c59394d3ed | ||
|
|
a0be57f64f | ||
|
|
f4a7712795 | ||
|
|
927487c142 | ||
|
|
cfbc974fec | ||
|
|
2c3215c018 | ||
|
|
b3015dda26 | ||
|
|
2a670ce022 | ||
|
|
da77124898 | ||
|
|
c5075d08ed | ||
|
|
9a497fedf5 | ||
|
|
daa65a5526 | ||
|
|
198a0695ab | ||
|
|
8391b20805 | ||
|
|
04f4f528f7 | ||
|
|
daf5abce2d | ||
|
|
1b062b3db4 | ||
|
|
7f5989f06c | ||
|
|
c08a26397e | ||
|
|
b54df7e127 | ||
|
|
2885eb0532 | ||
|
|
5fc7219315 | ||
|
|
12e1911aab | ||
|
|
4326ea874a | ||
|
|
67b5841153 | ||
|
|
0e5f6b27e9 | ||
|
|
cd133dc9cb | ||
|
|
20066270b9 | ||
|
|
8939fd802f | ||
|
|
2993cb3dd4 | ||
|
|
7103c4f14a | ||
|
|
c2651a85a8 | ||
|
|
daebbd1e93 | ||
|
|
99954d5025 | ||
|
|
f9d50723b9 | ||
|
|
c6fda444b7 | ||
|
|
b244410443 | ||
|
|
8bdceb92be | ||
|
|
50b445cf35 | ||
|
|
73c8286c7e | ||
|
|
d6d38dae13 | ||
|
|
d69fbcf17f | ||
|
|
4656a72b92 | ||
|
|
7a6f205970 | ||
|
|
c9bd12aa19 | ||
|
|
14d25b82a4 | ||
|
|
0a11230bfb | ||
|
|
7cdc0fe912 | ||
|
|
0309e59cf8 | ||
|
|
a7d888febc | ||
|
|
bdd5c66fc5 | ||
|
|
8dda2dd7a5 | ||
|
|
28b6104710 | ||
|
|
057214f0fe | ||
|
|
7f20dd89a3 | ||
|
|
18e6a6effc | ||
|
|
4ac675453f | ||
|
|
e0010619fc | ||
|
|
81c073f67d | ||
|
|
5d6da3517a | ||
|
|
b821a5df4c | ||
|
|
545fe0c873 | ||
|
|
c72a720b97 | ||
|
|
77cbea5a42 | ||
|
|
02056b613e | ||
|
|
bb0bab2683 | ||
|
|
bd6db3031a | ||
|
|
e45f591219 | ||
|
|
1f8f7629a0 | ||
|
|
8af3dd4511 | ||
|
|
031632d5b0 | ||
|
|
63c06bee70 | ||
|
|
62495d45be | ||
|
|
6268ba4aa3 | ||
|
|
1c453a372e | ||
|
|
ff7b4f6ed7 | ||
|
|
cd842afca4 | ||
|
|
5e17044c23 | ||
|
|
a8952fb79b | ||
|
|
f084384501 | ||
|
|
66a3e812f2 | ||
|
|
67de5d4347 | ||
|
|
114482ed99 | ||
|
|
a38b251120 | ||
|
|
23eb13f23a | ||
|
|
0646a39ff0 | ||
|
|
0f20b6b81b | ||
|
|
b2bec3362b | ||
|
|
830d50e9c5 | ||
|
|
47dfb7d6da | ||
|
|
2865dc21aa | ||
|
|
dfed5a5a6d | ||
|
|
db567a00f3 | ||
|
|
ae1db5650c | ||
|
|
adb3f57a6b | ||
|
|
86d2ab1938 | ||
|
|
64e2ca35dd | ||
|
|
03d4d4937c | ||
|
|
79e8da0675 | ||
|
|
793a22083c | ||
|
|
49ac33ec5c | ||
|
|
1e5e915f5a | ||
|
|
67e7ba3584 | ||
|
|
25e9f01fb2 | ||
|
|
b9b9410b83 | ||
|
|
abb559d52c | ||
|
|
7a88a7c887 | ||
|
|
bf1cf684f5 | ||
|
|
36cc400656 | ||
|
|
e31149077f | ||
|
|
91cb94eff8 | ||
|
|
9f649c4979 | ||
|
|
150bcafc20 | ||
|
|
aeea3f6fa4 | ||
|
|
b7e3997bc1 | ||
|
|
22f04db80d | ||
|
|
a7bafc53c9 | ||
|
|
4064a4ccb7 | ||
|
|
7b1bbac600 | ||
|
|
a3ce12179f | ||
|
|
93775487c8 | ||
|
|
4f1519ec64 | ||
|
|
510b3338d4 | ||
|
|
2b27ddc738 | ||
|
|
44c890176c | ||
|
|
ac0c29012f | ||
|
|
52d015e283 | ||
|
|
5a6b541caf | ||
|
|
2f1bf58b08 | ||
|
|
9155412b24 | ||
|
|
ae5b40ab88 | ||
|
|
f890ffdaf7 | ||
|
|
43a2ea4155 | ||
|
|
7b581c25d8 | ||
|
|
5b9b69a4b7 | ||
|
|
8e577c19d6 | ||
|
|
547c477f4e | ||
|
|
47caba8370 | ||
|
|
9d709cd726 | ||
|
|
62d819c907 | ||
|
|
ee33245d95 | ||
|
|
8e581f4d72 | ||
|
|
f2096f1f4e | ||
|
|
fca7eb822d | ||
|
|
d02babdf81 | ||
|
|
39bc5ab3ee | ||
|
|
ad587e5783 | ||
|
|
2b942524a2 | ||
|
|
8f329dbf48 | ||
|
|
2d65499d03 | ||
|
|
c324b55255 | ||
|
|
6b496e4bf0 | ||
|
|
59adb6e0aa | ||
|
|
4f102f8e8f | ||
|
|
d0bccabbe1 | ||
|
|
8a6d383715 | ||
|
|
6f409b9528 | ||
|
|
deaf1e3780 | ||
|
|
ea6eed47d7 | ||
|
|
50579d043f | ||
|
|
593bd44f20 | ||
|
|
a31dd496eb | ||
|
|
0806dcc19c | ||
|
|
f2d9632bc0 | ||
|
|
72dca93263 | ||
|
|
42821b7c0a | ||
|
|
3f31643452 | ||
|
|
45d0816623 | ||
|
|
0451db9f4e | ||
|
|
3375e736c9 | ||
|
|
e86380aab7 | ||
|
|
706904524a | ||
|
|
ca9f7abba9 | ||
|
|
1aff702a38 | ||
|
|
2ac74ae8b5 | ||
|
|
c2397a0981 | ||
|
|
ec16627eb5 | ||
|
|
d6b956a1aa | ||
|
|
4824669a8d | ||
|
|
bb0488e70a | ||
|
|
4d470e513b | ||
|
|
92470fbf31 | ||
|
|
bdb776d073 | ||
|
|
11d1d07c04 | ||
|
|
6e3ed1b649 | ||
|
|
15a5b3c3f7 | ||
|
|
70155353a1 | ||
|
|
9f84cc8f1c | ||
|
|
6c8c60db8a | ||
|
|
a81e771573 | ||
|
|
343c0b4299 | ||
|
|
dd2bf15ebc | ||
|
|
5775b735cb | ||
|
|
b285989b20 | ||
|
|
44b995078e | ||
|
|
4ae6d31d3e | ||
|
|
7757dceab3 | ||
|
|
813c8c3b56 | ||
|
|
fdb6d957c8 | ||
|
|
3b20aebd5b | ||
|
|
329f70463a | ||
|
|
2796b19668 | ||
|
|
6e161a248e | ||
|
|
2400978f6a | ||
|
|
42765bf64a | ||
|
|
9a0434d6bf | ||
|
|
c10db01599 | ||
|
|
5f5f619dfc | ||
|
|
ca9f571c87 | ||
|
|
b3b0612fbe | ||
|
|
df05b2c5bd | ||
|
|
b99cadb553 | ||
|
|
e14eab084e | ||
|
|
12e4dbe4ca | ||
|
|
baa553da07 | ||
|
|
4276c0c38e | ||
|
|
44b65e1d52 | ||
|
|
3ba9a2472f | ||
|
|
7d435b84f0 | ||
|
|
704d713571 | ||
|
|
d885782df1 | ||
|
|
340d7b2ad5 | ||
|
|
3a8da5f108 | ||
|
|
c8e7f4253e | ||
|
|
1c251bdf05 | ||
|
|
c69eb00f9b | ||
|
|
aeb59479cb | ||
|
|
60e1e7bc31 | ||
|
|
6d391c4355 | ||
|
|
2569cfa34c | ||
|
|
c1d22f98f6 | ||
|
|
9fb0b337ef | ||
|
|
054a8ce3b7 | ||
|
|
086d197f2c | ||
|
|
5a51563a98 | ||
|
|
92ec15d774 | ||
|
|
8347f4d5ca | ||
|
|
41492d5bd9 | ||
|
|
216aa53974 | ||
|
|
7abb8fe326 | ||
|
|
2245e67f93 | ||
|
|
6ebda37d88 | ||
|
|
a12618811d | ||
|
|
aca45c5820 | ||
|
|
ce5dda8249 | ||
|
|
171eceb426 | ||
|
|
5b194b6144 | ||
|
|
8b56573cc7 | ||
|
|
be82c7fc6f | ||
|
|
1205e347f2 | ||
|
|
207ceaba8b | ||
|
|
b9c9ad94cf | ||
|
|
d0c17a8206 | ||
|
|
552b442059 | ||
|
|
5b4ab7dfaa | ||
|
|
4bfa8f8c22 | ||
|
|
1382662c1b | ||
|
|
646e752c38 | ||
|
|
d25a591def | ||
|
|
fd8d9bdc2c | ||
|
|
8de92c03c9 | ||
|
|
be77118243 | ||
|
|
7732e0407a | ||
|
|
f7f267213c | ||
|
|
32c734e10b | ||
|
|
dfdad13e45 | ||
|
|
80bedb005e | ||
|
|
90fc7e6d85 | ||
|
|
4527a8066a | ||
|
|
07db945b09 | ||
|
|
a0b10838ee | ||
|
|
3151f35c86 | ||
|
|
90e3ba6027 | ||
|
|
8ce6cd53b0 | ||
|
|
f1f534c6ae | ||
|
|
6227fa96c7 | ||
|
|
91e9ed0898 | ||
|
|
0c489d322c | ||
|
|
48e949476e | ||
|
|
5b9cebd25a | ||
|
|
6a47808580 | ||
|
|
bce9edd277 | ||
|
|
b7674de3cf | ||
|
|
f6544a3524 | ||
|
|
2b2aeabd89 | ||
|
|
81f376920e | ||
|
|
0cea861f93 | ||
|
|
fa848ba436 | ||
|
|
f4a453b86c | ||
|
|
28de16a450 | ||
|
|
9acbcba967 | ||
|
|
366558ad5b | ||
|
|
81503d7c69 | ||
|
|
410e5e6abb | ||
|
|
1bb6ab9e22 | ||
|
|
bf4ef4324e | ||
|
|
55dc0b2995 | ||
|
|
45970ba796 | ||
|
|
992c895eaa | ||
|
|
717bc4cd26 | ||
|
|
da08a65de3 | ||
|
|
85865f1a2c | ||
|
|
718dbd2a71 | ||
|
|
dd927921c1 | ||
|
|
7bd63d59e5 | ||
|
|
3d1b4a1595 | ||
|
|
c032061bf7 | ||
|
|
87aa1d77ed | ||
|
|
9d1311d0ee | ||
|
|
35ebadcedc | ||
|
|
70b3c774f8 | ||
|
|
5fb4768f83 | ||
|
|
4d73637829 | ||
|
|
8d897f407f | ||
|
|
ff9b2bd04e | ||
|
|
e6f066b828 | ||
|
|
f92d7dd1c1 | ||
|
|
e78941e3e5 | ||
|
|
789dbca6d6 | ||
|
|
a301a9e641 | ||
|
|
d4225ede2f | ||
|
|
49e3f814bc | ||
|
|
c3ac7180f8 | ||
|
|
20432dd99f | ||
|
|
ff3518e3ec | ||
|
|
9830810684 | ||
|
|
644489b6e7 | ||
|
|
31eb51ee7d | ||
|
|
6191ddffb3 | ||
|
|
35269d2db4 | ||
|
|
32aee00673 | ||
|
|
cc33986bef | ||
|
|
3c19ea413e | ||
|
|
07af4edea9 | ||
|
|
163a081776 | ||
|
|
fc8d913713 | ||
|
|
7c6856f2a9 | ||
|
|
45e3f858f0 | ||
|
|
cf7491665b | ||
|
|
7bb4e0470c | ||
|
|
d931d2902d | ||
|
|
29ce01fd11 | ||
|
|
488c3ee353 | ||
|
|
b3d9683743 | ||
|
|
4b30446217 | ||
|
|
d9144c8514 | ||
|
|
f6aa8a23fc | ||
|
|
ef89131b85 | ||
|
|
5165de0d76 | ||
|
|
eb23771d5a | ||
|
|
d189be8579 | ||
|
|
336aa93e6c | ||
|
|
76af465724 | ||
|
|
53d73e56e0 | ||
|
|
3e0e112e2b | ||
|
|
de32be7eed | ||
|
|
a099eafc60 | ||
|
|
9dceb3eed1 | ||
|
|
72056eb89b | ||
|
|
4281bc3543 | ||
|
|
fbd28085d3 | ||
|
|
f90f65247e | ||
|
|
93b632c328 | ||
|
|
b14d117a89 | ||
|
|
ddb71e8ef5 | ||
|
|
dec01c1ec0 | ||
|
|
f17b5c29f3 | ||
|
|
5b6ced536b | ||
|
|
a2a65621a1 | ||
|
|
b8f1a68834 | ||
|
|
f09d4c4626 | ||
|
|
2751de0768 | ||
|
|
48b1ddabed | ||
|
|
019a80f304 | ||
|
|
397decb051 | ||
|
|
e9404cc9e0 | ||
|
|
71c7a9c13e | ||
|
|
70c3fe9dcd | ||
|
|
10c596a4ff | ||
|
|
05c4ea39d0 | ||
|
|
ee2e15c724 | ||
|
|
1c7a02e73d | ||
|
|
a18753b2ff | ||
|
|
f3e89d38a9 | ||
|
|
9dea2f835b | ||
|
|
e415efb0dd | ||
|
|
ea789dbab9 | ||
|
|
01f3f712ac | ||
|
|
11a0718b78 | ||
|
|
4fc974055d | ||
|
|
a85656b355 | ||
|
|
30663685ea | ||
|
|
d6c3ec864b | ||
|
|
fdadb041aa | ||
|
|
a050c155b6 | ||
|
|
5070fcf74d | ||
|
|
c06af5f5cd | ||
|
|
cc61ccf9e9 | ||
|
|
3187a98188 | ||
|
|
a4b7e20457 | ||
|
|
e757a592c1 | ||
|
|
b528ef292d | ||
|
|
427b97c6f5 | ||
|
|
be6ef03407 | ||
|
|
df74729c8c | ||
|
|
a608ff0e7d | ||
|
|
8088a62805 | ||
|
|
92577c635f | ||
|
|
2c3cfed608 | ||
|
|
feade98473 | ||
|
|
ad5669f781 | ||
|
|
0f9defd6e2 | ||
|
|
f88086fcfa | ||
|
|
0ae16176a4 | ||
|
|
da6d284fbb | ||
|
|
8c9c7ddef8 | ||
|
|
9db9b286e3 | ||
|
|
6107694930 | ||
|
|
a4c0827bea | ||
|
|
ca25177ac1 | ||
|
|
ed1078c800 | ||
|
|
f74f61e8d1 | ||
|
|
e96d3ef0d3 | ||
|
|
be151cb589 | ||
|
|
b353e36373 | ||
|
|
81bb561467 | ||
|
|
f483d6f8da | ||
|
|
1d291fc105 | ||
|
|
8ead054e25 | ||
|
|
5a9ff3c235 | ||
|
|
5932803088 | ||
|
|
dacc175536 | ||
|
|
50fd76eb9a | ||
|
|
cde11c36db | ||
|
|
2b2b68d6fa | ||
|
|
1c45f4f666 | ||
|
|
6930c24d14 | ||
|
|
bc83bf052d | ||
|
|
7b6d80adf7 | ||
|
|
0e8b6dc049 | ||
|
|
dda4a4494a | ||
|
|
1404928c05 | ||
|
|
e75713fffb | ||
|
|
e98f9305ad | ||
|
|
788664809f | ||
|
|
75235a8ae4 | ||
|
|
204e40297c | ||
|
|
f0690e4c70 | ||
|
|
82e42b5dc5 | ||
|
|
09c5bbd2dc | ||
|
|
6ac1958c90 | ||
|
|
aa469f4573 | ||
|
|
9a7813e9ea | ||
|
|
a69c49398a | ||
|
|
d2835557a1 | ||
|
|
5a57b52c64 | ||
|
|
10c38aeebf | ||
|
|
c4f922dc2c | ||
|
|
80996b3b40 | ||
|
|
08909b2213 | ||
|
|
53698c166b | ||
|
|
01f7e46984 | ||
|
|
4ac5a6ad4d | ||
|
|
85865d4c4f | ||
|
|
4d6e9c9f68 | ||
|
|
a7c7f92103 | ||
|
|
423e86405e | ||
|
|
8c5fb45f99 | ||
|
|
38af66ca39 | ||
|
|
463e6dee0b | ||
|
|
21343d7d71 | ||
|
|
19c3555412 | ||
|
|
4f57b65147 | ||
|
|
991fe81dff | ||
|
|
7b0c2d1670 | ||
|
|
52022701db | ||
|
|
249945f749 | ||
|
|
47edcb3fec | ||
|
|
184662416d | ||
|
|
ce755056f3 | ||
|
|
ae8a849b32 | ||
|
|
d469cffc7a | ||
|
|
93146d91dc | ||
|
|
057bbf0dc4 | ||
|
|
003313c0f1 | ||
|
|
ee4a91a60a | ||
|
|
16b30c2838 | ||
|
|
4bca929ea7 | ||
|
|
a679fe9540 | ||
|
|
2972a33535 | ||
|
|
7043e19671 | ||
|
|
53441e967a | ||
|
|
d0b3cf6d36 | ||
|
|
a938045d1c | ||
|
|
a20b8d2175 | ||
|
|
a363ed8b6e | ||
|
|
adf7b536cb | ||
|
|
2d53b792b6 | ||
|
|
4d5dde9b6a | ||
|
|
6e1f2e063d | ||
|
|
56ab8c99fe | ||
|
|
4a47a1ba50 | ||
|
|
f93b6d2dd1 | ||
|
|
a521c33537 | ||
|
|
1961d724cf | ||
|
|
ea4050ee2d | ||
|
|
3c7318dbbd | ||
|
|
1923361bcd | ||
|
|
1a9ceb1278 | ||
|
|
6d05197625 | ||
|
|
397905f4ca | ||
|
|
0619b0e110 | ||
|
|
5f1bc49263 | ||
|
|
c8e0c2cc85 | ||
|
|
4e86f5886b | ||
|
|
0916bb6fe3 | ||
|
|
0736e06193 | ||
|
|
2a564c4f4b | ||
|
|
661a6ae85a | ||
|
|
81795a9879 | ||
|
|
092940ee8b | ||
|
|
734c387f4e | ||
|
|
bfd497fdf9 | ||
|
|
ce37ac146d | ||
|
|
b1628639e8 | ||
|
|
d51a5e303d | ||
|
|
97d2ef2712 | ||
|
|
9b98c26a80 | ||
|
|
cc09b01ab2 | ||
|
|
c4e85905db | ||
|
|
bd8b585c79 | ||
|
|
3aad4f40c6 | ||
|
|
7464e97a57 | ||
|
|
cd146dfeed | ||
|
|
d5b066152b | ||
|
|
65012d08b4 | ||
|
|
d1bdf80f56 | ||
|
|
96c49775c0 | ||
|
|
b6e6924a75 | ||
|
|
9f44729bd3 | ||
|
|
de8506d8e5 | ||
|
|
21f8911960 | ||
|
|
7e85032aa7 | ||
|
|
8152e15a61 | ||
|
|
c2e9801a97 | ||
|
|
b111403c00 | ||
|
|
4e370a5c72 | ||
|
|
6dde7b8548 | ||
|
|
3e4fdbbe6f | ||
|
|
e84a9a70b0 | ||
|
|
65b9936798 | ||
|
|
dad59fe092 | ||
|
|
167d54a5ce | ||
|
|
c446b0ef46 | ||
|
|
be72b6bd31 | ||
|
|
24eff72d82 | ||
|
|
ad9cef49c6 | ||
|
|
a31c416529 | ||
|
|
05b6979592 | ||
|
|
90d7b4dad6 | ||
|
|
6899dec884 | ||
|
|
56eb88b8dd | ||
|
|
d9222448bc | ||
|
|
b21db8fc79 | ||
|
|
c2a36f0b20 | ||
|
|
74732c6443 | ||
|
|
b97f8ad710 | ||
|
|
f0503ba54a | ||
|
|
55f0324878 | ||
|
|
ec738d46c1 | ||
|
|
eb3712a075 | ||
|
|
ea4063095d | ||
|
|
64bb90cb92 | ||
|
|
3adb1793eb | ||
|
|
e2e0cfb5bb | ||
|
|
9699fe43a7 | ||
|
|
cd3fbcf864 | ||
|
|
132e812fe4 | ||
|
|
916fe973e2 | ||
|
|
58d8f0b2b5 | ||
|
|
c8403836c0 | ||
|
|
de6e4dc93f | ||
|
|
17e01fd32f | ||
|
|
4ccd771de7 | ||
|
|
bf1201bdfd | ||
|
|
6b5620d1cb | ||
|
|
5fe3f4ae0b | ||
|
|
f503e66dd7 | ||
|
|
9680f08ebf | ||
|
|
7245b21206 | ||
|
|
85a9a4a7be | ||
|
|
2a2a79aa87 | ||
|
|
82759dac14 | ||
|
|
97c7e4b7bf | ||
|
|
0532e91d4b | ||
|
|
33c48db758 | ||
|
|
1c488951e0 | ||
|
|
1c504c7746 | ||
|
|
94e7b1da4f | ||
|
|
3e48c306c4 | ||
|
|
71f5b11b9a | ||
|
|
e0f4c52836 | ||
|
|
058cf5bfca | ||
|
|
895aad108e | ||
|
|
1295bd50f7 | ||
|
|
45d4e5fb62 | ||
|
|
275c683ce5 | ||
|
|
086ac7428d | ||
|
|
40e1cdc5d8 | ||
|
|
6faabd290c | ||
|
|
00d26de0d5 | ||
|
|
612754fd20 | ||
|
|
b7dc0f148c | ||
|
|
ff8f8ab512 | ||
|
|
2c4e950a50 | ||
|
|
30b47ddea1 | ||
|
|
bec23a9388 | ||
|
|
2f6e538e79 | ||
|
|
1ca1b71f9c | ||
|
|
6dcaef7cf8 | ||
|
|
a617dd892a | ||
|
|
b946770c74 | ||
|
|
18d25ca005 | ||
|
|
525ae6aee7 | ||
|
|
052b8af497 | ||
|
|
a9fef80feb | ||
|
|
853ef22eef | ||
|
|
be3266fb00 | ||
|
|
25d6ed98a7 | ||
|
|
57617ea4af | ||
|
|
5484928ac6 | ||
|
|
3644d49950 | ||
|
|
d46348a348 | ||
|
|
8086e5b7a6 | ||
|
|
0e2b6c741a | ||
|
|
a0e6eccbaf | ||
|
|
98227974e7 | ||
|
|
2c002c8a92 | ||
|
|
eb19154a60 | ||
|
|
c0d3ef8c1b | ||
|
|
5c7a685f96 | ||
|
|
8f0d7f6acb | ||
|
|
d2f31dfbf7 | ||
|
|
48820425eb | ||
|
|
0e1b500fec | ||
|
|
d9fe322a5f | ||
|
|
52d6b044b7 | ||
|
|
b284e42aef | ||
|
|
48992d3fdf | ||
|
|
206db3d585 | ||
|
|
75a9d5eab7 | ||
|
|
5b8478c4cf | ||
|
|
91bf410118 | ||
|
|
f1b8da340e | ||
|
|
5a3115e965 | ||
|
|
6a0d61363a | ||
|
|
53901abe10 | ||
|
|
6d07e2cddb |
14
.github/kata-artifacts-action/Dockerfile
vendored
14
.github/kata-artifacts-action/Dockerfile
vendored
@@ -1,14 +0,0 @@
|
||||
FROM ubuntu:latest
|
||||
|
||||
LABEL version="0.0.0"
|
||||
LABEL maintainer="Kata folks"
|
||||
LABEL com.github.actions.name="Prepare artifacts for Kata release page"
|
||||
LABEL com.github.actions.description="Create and upload static binaries and Kata images to release page for a given release"
|
||||
|
||||
ENV GITHUB_ACTION_NAME="Prepare artifacts for Kata release"
|
||||
ENV NEW_VERSION="1.8.2"
|
||||
ENV BRANCH="master"
|
||||
|
||||
RUN git clone https://github.com/kata-containers/packaging.git && cd packaging
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
22
.github/kata-artifacts-action/entrypoint.sh
vendored
22
.github/kata-artifacts-action/entrypoint.sh
vendored
@@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
die() {
|
||||
msg="$*"
|
||||
echo "ERROR: $msg" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Entrypoint for the container image, we know that the AKS and Kata setup/testing
|
||||
# scripts are located at root.
|
||||
|
||||
cd obs-packaging
|
||||
bash -x ./gen_versions_txt.sh ${BRANCH}
|
||||
cd ../release
|
||||
bash -x ./publish-kata-image.sh -p ${NEW_VERSION}
|
||||
bash -x ./kata-deploy-binaries.sh -p ${NEW_VERSION}
|
||||
|
||||
echo "maybe it worked"
|
||||
24
.github/kata-deploy-action/Dockerfile
vendored
24
.github/kata-deploy-action/Dockerfile
vendored
@@ -1,24 +0,0 @@
|
||||
FROM microsoft/azure-cli:2.0.47
|
||||
|
||||
LABEL version="0.0.0"
|
||||
LABEL maintainer="eric and sai"
|
||||
LABEL com.github.actions.name="Test kata-deploy in an AKS cluster"
|
||||
LABEL com.github.actions.description="Wow. Where do i start. Create an AKS cluster with containerd+runtimeclass, then deploys kata onto it and even might start a workload. nbd"
|
||||
|
||||
ARG AKS_ENGINE_VER="v0.36.4"
|
||||
|
||||
ENV GITHUB_ACTION_NAME="Test kata-deploy in an AKS cluster"
|
||||
|
||||
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl \
|
||||
&& chmod +x ./kubectl \
|
||||
&& mv ./kubectl /usr/local/bin/kubectl
|
||||
|
||||
RUN curl -LO https://github.com/Azure/aks-engine/releases/download/${AKS_ENGINE_VER}/aks-engine-${AKS_ENGINE_VER}-linux-amd64.tar.gz \
|
||||
&& tar xvf aks-engine-${AKS_ENGINE_VER}-linux-amd64.tar.gz \
|
||||
&& mv aks-engine-${AKS_ENGINE_VER}-linux-amd64/aks-engine /usr/local/bin/aks-engine \
|
||||
&& rm aks-engine-${AKS_ENGINE_VER}-linux-amd64.tar.gz
|
||||
|
||||
COPY kubernetes-containerd.json /
|
||||
COPY setup-aks.sh test-kata.sh entrypoint.sh /
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
22
.github/kata-deploy-action/entrypoint.sh
vendored
22
.github/kata-deploy-action/entrypoint.sh
vendored
@@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
die() {
|
||||
msg="$*"
|
||||
echo "ERROR: $msg" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Since this is the entrypoint for the container image, we know that the AKS and Kata setup/testing
|
||||
# scripts are located at root.
|
||||
source /setup-aks.sh
|
||||
source /test-kata.sh
|
||||
|
||||
trap destroy_aks EXIT
|
||||
|
||||
setup_aks
|
||||
|
||||
test_kata
|
||||
@@ -1,41 +0,0 @@
|
||||
{
|
||||
"apiVersion": "vlabs",
|
||||
"properties": {
|
||||
"orchestratorProfile": {
|
||||
"orchestratorType": "Kubernetes",
|
||||
"orchestratorVersion": "1.14.1",
|
||||
"kubernetesConfig": {
|
||||
"networkPlugin": "flannel",
|
||||
"containerRuntime": "containerd",
|
||||
"containerdVersion": "1.2.4"
|
||||
}
|
||||
},
|
||||
"masterProfile": {
|
||||
"count": 1,
|
||||
"dnsPrefix": "",
|
||||
"vmSize": "Standard_D2_v2"
|
||||
},
|
||||
"agentPoolProfiles": [
|
||||
{
|
||||
"name": "agentpool",
|
||||
"count": 1,
|
||||
"vmSize": "Standard_D4s_v3",
|
||||
"availabilityProfile": "AvailabilitySet"
|
||||
}
|
||||
],
|
||||
"linuxProfile": {
|
||||
"adminUsername": "azureuser",
|
||||
"ssh": {
|
||||
"publicKeys": [
|
||||
{
|
||||
"keyData": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"servicePrincipalProfile": {
|
||||
"clientId": "",
|
||||
"secret": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
44
.github/kata-deploy-action/setup-aks.sh
vendored
44
.github/kata-deploy-action/setup-aks.sh
vendored
@@ -1,44 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
export AZURE_HTTP_USER_AGENT="GITHUBACTIONS_${GITHUB_ACTION_NAME}_${GITHUB_REPOSITORY}"
|
||||
|
||||
LOCATION=${LOCATION:-westus2}
|
||||
DNS_PREFIX=${DNS_PREFIX:-kata-deploy-${GITHUB_SHA:0:10}}
|
||||
CLUSTER_CONFIG=${CLUSTER_CONFIG:-/kubernetes-containerd.json}
|
||||
|
||||
function die() {
|
||||
msg="$*"
|
||||
echo "ERROR: $msg" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
function destroy_aks() {
|
||||
set +x
|
||||
az login --service-principal -u "$AZ_APPID" -p "$AZ_PASSWORD" --tenant "$AZ_TENANT_ID"
|
||||
az group delete --name "$DNS_PREFIX" --yes --no-wait
|
||||
az logout
|
||||
}
|
||||
|
||||
function setup_aks() {
|
||||
|
||||
[[ -z "$AZ_APPID" ]] && die "no Azure service principal ID provided"
|
||||
[[ -z "$AZ_PASSWORD" ]] && die "no Azure service principal secret provided"
|
||||
[[ -z "$AZ_SUBSCRIPTION_ID" ]] && die "no Azure subscription ID provided"
|
||||
[[ -z "$AZ_TENANT_ID" ]] && die "no Azure tenant ID provided"
|
||||
|
||||
# check cluster config existence
|
||||
# TODO
|
||||
|
||||
# Give it a try
|
||||
|
||||
aks-engine deploy --subscription-id "$AZ_SUBSCRIPTION_ID" \
|
||||
--client-id "$AZ_APPID" --client-secret "$AZ_PASSWORD" \
|
||||
--location "$LOCATION" --dns-prefix "$DNS_PREFIX" \
|
||||
--api-model "$CLUSTER_CONFIG" --force-overwrite
|
||||
|
||||
export KUBECONFIG="_output/$DNS_PREFIX/kubeconfig/kubeconfig.$LOCATION.json"
|
||||
}
|
||||
112
.github/kata-deploy-action/test-kata.sh
vendored
112
.github/kata-deploy-action/test-kata.sh
vendored
@@ -1,112 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
|
||||
function waitForProcess() {
|
||||
wait_time="$1"
|
||||
sleep_time="$2"
|
||||
cmd="$3"
|
||||
while [ "$wait_time" -gt 0 ]; do
|
||||
if eval "$cmd"; then
|
||||
return 0
|
||||
else
|
||||
sleep "$sleep_time"
|
||||
wait_time=$((wait_time-sleep_time))
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
|
||||
|
||||
function run_test() {
|
||||
YAMLPATH="https://raw.githubusercontent.com/egernst/kata-deploy/$GITHUB_SHA/kata-deploy"
|
||||
echo "verify connectivity with a pod using Kata"
|
||||
|
||||
deployment=""
|
||||
busybox_pod="test-nginx"
|
||||
busybox_image="busybox"
|
||||
cmd="kubectl get pods | grep $busybox_pod | grep Completed"
|
||||
wait_time=120
|
||||
sleep_time=3
|
||||
|
||||
for deployment in "nginx-deployment-qemu" "nginx-deployment-nemu"; do
|
||||
# start the kata pod:
|
||||
kubectl apply -f "$YAMLPATH/examples/${deployment}.yaml"
|
||||
kubectl wait --timeout=5m --for=condition=Available deployment/${deployment}
|
||||
kubectl wait --timeout=5m --for=condition=Available deployment/${deployment}
|
||||
kubectl expose deployment/${deployment}
|
||||
|
||||
# test pod connectivity:
|
||||
kubectl run $busybox_pod --restart=Never --image="$busybox_image" -- wget --timeout=5 "$deployment"
|
||||
waitForProcess "$wait_time" "$sleep_time" "$cmd"
|
||||
kubectl logs "$busybox_pod" | grep "index.html"
|
||||
kubectl describe pod "$busybox_pod"
|
||||
|
||||
kubectl delete deployment "$deployment"
|
||||
kubectl delete service "$deployment"
|
||||
kubectl delete pod "$busybox_pod"
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
function test_kata() {
|
||||
set -x
|
||||
#kubectl all the things
|
||||
kubectl get pods --all-namespaces
|
||||
|
||||
YAMLPATH="https://raw.githubusercontent.com/egernst/kata-deploy/$GITHUB_SHA/kata-deploy"
|
||||
|
||||
kubectl apply -f "$YAMLPATH/kata-rbac.yaml"
|
||||
kubectl apply -f "$YAMLPATH/k8s-1.14/kata-nemu-runtimeClass.yaml"
|
||||
kubectl apply -f "$YAMLPATH/k8s-1.14/kata-qemu-runtimeClass.yaml"
|
||||
kubectl apply -f "$YAMLPATH/k8s-1.14/kata-fc-runtimeClass.yaml"
|
||||
|
||||
sleep 5
|
||||
|
||||
kubectl get runtimeclasses
|
||||
|
||||
wget "$YAMLPATH/kata-deploy.yaml"
|
||||
wget "$YAMLPATH/kata-cleanup.yaml"
|
||||
|
||||
# update deployment daemonset to utilize the container under test:
|
||||
sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${GITHUB_SHA}#g" kata-deploy.yaml
|
||||
sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${GITHUB_SHA}#g" kata-cleanup.yaml
|
||||
|
||||
cat kata-deploy.yaml
|
||||
|
||||
sleep 100
|
||||
|
||||
# deploy kata:
|
||||
kubectl apply -f kata-deploy.yaml
|
||||
|
||||
sleep 1
|
||||
|
||||
#wait for kata-deploy to be up
|
||||
kubectl -n kube-system wait --timeout=5m --for=condition=Ready -l name=kata-deploy pod
|
||||
|
||||
#Do I see this?
|
||||
kubectl get pods --all-namespaces --show-labels
|
||||
kubectl get node --show-labels
|
||||
|
||||
run_test
|
||||
|
||||
# remove kata (yeah, we are about to destroy, but good to test this flow as well):
|
||||
kubectl delete -f kata-deploy.yaml
|
||||
kubectl -n kube-system wait --timeout=5m --for=delete -l name=kata-deploy pod
|
||||
kubectl apply -f kata-cleanup.yaml
|
||||
kubectl -n kube-system wait --timeout=5m --for=condition=Ready -l name=kubelet-kata-cleanup pod
|
||||
|
||||
kubectl get pods --all-namespaces --show-labels
|
||||
kubectl get node --show-labels
|
||||
|
||||
kubectl delete -f kata-cleanup.yaml
|
||||
|
||||
rm kata-cleanup.yaml
|
||||
rm kata-deploy.yaml
|
||||
|
||||
set +x
|
||||
}
|
||||
4
.github/kata-deploy-action/trigger
vendored
4
.github/kata-deploy-action/trigger
vendored
@@ -1,4 +0,0 @@
|
||||
VERSION=1.8.0-alpha1
|
||||
git tag --delete $VERSION
|
||||
git push origin :$VERSION
|
||||
git tag -a $VERSION -m "test tag - $VERSION" && git push origin $VERSION
|
||||
1
.github/workflows/README.md
vendored
1
.github/workflows/README.md
vendored
@@ -1 +0,0 @@
|
||||
adding a readme
|
||||
18
.github/workflows/gather-artifacts.sh
vendored
Executable file
18
.github/workflows/gather-artifacts.sh
vendored
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
pushd kata-artifacts >>/dev/null
|
||||
for c in ./*.tar.gz
|
||||
do
|
||||
echo "untarring tarball $c"
|
||||
tar -xvf $c
|
||||
done
|
||||
|
||||
tar cvfJ ../kata-static.tar.xz ./opt
|
||||
popd >>/dev/null
|
||||
36
.github/workflows/generate-artifact-tarball.sh
vendored
Executable file
36
.github/workflows/generate-artifact-tarball.sh
vendored
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
|
||||
main() {
|
||||
artifact_stage=${1:-}
|
||||
artifact=$(echo ${artifact_stage} | sed -n -e 's/^install_//p' | sed -r 's/_/-/g')
|
||||
if [ -z "${artifact}" ]; then
|
||||
"Scripts needs artifact name to build"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
export GOPATH=$HOME/go
|
||||
|
||||
go get github.com/kata-containers/packaging || true
|
||||
pushd $GOPATH/src/github.com/kata-containers/packaging/release >>/dev/null
|
||||
git checkout $tag
|
||||
pushd ../obs-packaging
|
||||
./gen_versions_txt.sh $tag
|
||||
popd
|
||||
|
||||
source ./kata-deploy-binaries.sh
|
||||
${artifact_stage} $tag
|
||||
popd
|
||||
|
||||
mv $HOME/go/src/github.com/kata-containers/packaging/release/kata-static-${artifact}.tar.gz .
|
||||
}
|
||||
|
||||
main $@
|
||||
41
.github/workflows/hacking.yml
vendored
41
.github/workflows/hacking.yml
vendored
@@ -1,41 +0,0 @@
|
||||
# When a release page is published, start the release artifact process
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
name: Build, Test, and Publish kata-deploy
|
||||
|
||||
jobs:
|
||||
# create image and upload to release page (can we get branch information from release tag?
|
||||
publish-artifacts:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name : the ok ok
|
||||
run: |
|
||||
echo "hello worold"
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y curl git
|
||||
echo "still?"
|
||||
# for test development:
|
||||
git clone https://github.com/egernst/packaging-1
|
||||
cd packaging-1
|
||||
echo `pwd`
|
||||
echo `ls`
|
||||
cd release
|
||||
echo `pwd`
|
||||
echo `ls`
|
||||
|
||||
#./build-artifacts $GITHUB_TAG
|
||||
echo "github tag" $GITHUB_TAG
|
||||
echo "github sha"
|
||||
echo $GITHUB_SHA
|
||||
|
||||
echo "done...."
|
||||
|
||||
# docker run alpine sh -c date
|
||||
# cd obs-packaging
|
||||
#
|
||||
#./gen_versions_txt.sh "stable-1.8"
|
||||
# cd ../release
|
||||
# ./publish-kata-image.sh "1.8.2"
|
||||
# ./kata-deploy-binaries.sh "1.8.2"
|
||||
67
.github/workflows/kata-release.noworking
vendored
67
.github/workflows/kata-release.noworking
vendored
@@ -1,67 +0,0 @@
|
||||
# When a release page is published, start the release artifact process
|
||||
on: release
|
||||
name: Build, Test, and Publish kata-deploy
|
||||
|
||||
jobs:
|
||||
# create image and upload to release page (can we get branch information from release tag?
|
||||
publish-artifacts:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: install-dependencies
|
||||
- run: |
|
||||
apt-get upate
|
||||
apt-get install -y docker-ce git
|
||||
git clone https://github.com/kata-containers/packaging
|
||||
cd packaging/obs-packages
|
||||
./gen_versions_file.txt
|
||||
cd ../release
|
||||
echo "maybe it worked"
|
||||
tree
|
||||
- name: publish-images
|
||||
- run : wget all the things
|
||||
- name: publish-images
|
||||
- run : ./publish-images.sh
|
||||
with:
|
||||
args: tag? sha?
|
||||
- name: create-static-binaries
|
||||
- uses: TBD
|
||||
with: tag?
|
||||
|
||||
# test the artifacts
|
||||
kata-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: tag-filter
|
||||
uses: actions/bin/filter@master
|
||||
with:
|
||||
args: tag
|
||||
- name: docker-build
|
||||
uses: actions/docker/cli@master
|
||||
with:
|
||||
args: build --build-arg KATA_VER=${GITHUB_REF##*/} -t katadocker/kata-deploy-ci:${{
|
||||
github.sha }} ./kata-deploy
|
||||
- name: docker-login
|
||||
uses: actions/docker/login@master
|
||||
env:
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
- name: docker-push-sha
|
||||
uses: actions/docker/cli@master
|
||||
with:
|
||||
args: push katadocker/kata-deploy-ci:${{ github.sha }}
|
||||
- name: aks-test
|
||||
uses: ./kata-deploy/action
|
||||
env:
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
- name: docker-tag-ref
|
||||
uses: actions/docker/cli@master
|
||||
with:
|
||||
args: tag katadocker/kata-deploy-ci:${{ github.sha }} katadocker/kata-deploy:${GITHUB_REF##*/}
|
||||
- name: docker-push-ref
|
||||
uses: actions/docker/cli@master
|
||||
with:
|
||||
args: push katadocker/kata-deploy:${GITHUB_REF##*/}
|
||||
349
.github/workflows/main.yaml
vendored
Normal file
349
.github/workflows/main.yaml
vendored
Normal file
@@ -0,0 +1,349 @@
|
||||
name: Publish release tarball
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
get-artifact-list:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: get the list
|
||||
run: |
|
||||
git clone https://github.com/kata-containers/packaging
|
||||
pushd packaging
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
git checkout $tag
|
||||
popd
|
||||
./packaging/artifact-list.sh > artifact-list.txt
|
||||
- name: save-artifact-list
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
path: artifact-list.txt
|
||||
|
||||
build-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-kernel
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kernel.tar.gz
|
||||
|
||||
build-experimental-kernel:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_experimental_kernel"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- run: |
|
||||
sudo apt-get update && sudo apt install -y flex bison libelf-dev bc iptables
|
||||
- name: build-experimental-kernel
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-experimental-kernel.tar.gz
|
||||
|
||||
build-qemu:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_qemu"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-qemu
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-qemu.tar.gz
|
||||
|
||||
build-nemu:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_nemu"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-nemu
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-nemu.tar.gz
|
||||
|
||||
# Job for building the QEMU binaries with virtiofs support
|
||||
build-qemu-virtiofsd:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_qemu_virtiofsd"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-qemu-virtiofsd
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-qemu-virtiofsd.tar.gz
|
||||
|
||||
# Job for building the image
|
||||
build-image:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_image"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-image
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-image.tar.gz
|
||||
|
||||
# Job for building firecracker hypervisor
|
||||
build-firecracker:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_firecracker"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-firecracker
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-firecracker.tar.gz
|
||||
|
||||
# Job for building cloud-hypervisor
|
||||
build-clh:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_clh"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-clh
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-clh.tar.gz
|
||||
|
||||
# Job for building kata components
|
||||
build-kata-components:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: get-artifact-list
|
||||
env:
|
||||
buildstr: "install_kata_components"
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifact-list
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: artifact-list
|
||||
- name: build-kata-components
|
||||
run: |
|
||||
if grep -q $buildstr ./artifact-list/artifact-list.txt; then
|
||||
$GITHUB_WORKSPACE/.github/workflows/generate-artifact-tarball.sh $buildstr
|
||||
echo ::set-env name=artifact-built::true
|
||||
else
|
||||
echo ::set-env name=artifact-built::false
|
||||
fi
|
||||
- name: store-artifacts
|
||||
if: env.artifact-built == 'true'
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
path: kata-static-kata-components.tar.gz
|
||||
|
||||
gather-artifacts:
|
||||
runs-on: ubuntu-16.04
|
||||
needs: [build-experimental-kernel, build-kernel, build-qemu, build-qemu-virtiofsd, build-image, build-firecracker, build-kata-components, build-nemu, build-clh]
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: kata-artifacts
|
||||
- name: colate-artifacts
|
||||
run: |
|
||||
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
|
||||
- name: store-artifacts
|
||||
uses: actions/upload-artifact@master
|
||||
with:
|
||||
name: release-candidate
|
||||
path: kata-static.tar.xz
|
||||
|
||||
kata-deploy:
|
||||
needs: gather-artifacts
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: get-artifacts
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: release-candidate
|
||||
- name: build-and-push-kata-deploy-ci
|
||||
id: build-and-push-kata-deploy-ci
|
||||
run: |
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
git clone https://github.com/kata-containers/packaging
|
||||
pushd packaging
|
||||
git checkout $tag
|
||||
pkg_sha=$(git rev-parse HEAD)
|
||||
popd
|
||||
mv release-candidate/kata-static.tar.xz ./packaging/kata-deploy/kata-static.tar.xz
|
||||
docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:$pkg_sha ./packaging/kata-deploy
|
||||
docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker push katadocker/kata-deploy-ci:$pkg_sha
|
||||
|
||||
echo "##[set-output name=PKG_SHA;]${pkg_sha}"
|
||||
echo ::set-env name=TAG::$tag
|
||||
- name: test-kata-deploy-ci-in-aks
|
||||
uses: ./packaging/kata-deploy/action
|
||||
with:
|
||||
packaging-sha: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
|
||||
env:
|
||||
PKG_SHA: ${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}}
|
||||
AZ_APPID: ${{ secrets.AZ_APPID }}
|
||||
AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
|
||||
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
|
||||
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
|
||||
- name: push-tarball
|
||||
run: |
|
||||
# tag the container image we created and push to DockerHub
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
docker tag katadocker/kata-deploy-ci:${{steps.build-and-push-kata-deploy-ci.outputs.PKG_SHA}} katadocker/kata-deploy:${tag}
|
||||
docker push katadocker/kata-deploy:${tag}
|
||||
|
||||
upload-static-tarball:
|
||||
needs: kata-deploy
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: download-artifacts
|
||||
uses: actions/download-artifact@master
|
||||
with:
|
||||
name: release-candidate
|
||||
- name: install hub
|
||||
run: |
|
||||
HUB_VER=$(curl -s "https://api.github.com/repos/github/hub/releases/latest" | jq -r .tag_name | sed 's/^v//')
|
||||
wget -q -O- https://github.com/github/hub/releases/download/v$HUB_VER/hub-linux-amd64-$HUB_VER.tgz | \
|
||||
tar xz --strip-components=2 --wildcards '*/bin/hub' && sudo mv hub /usr/local/bin/hub
|
||||
- name: push static tarball to github
|
||||
run: |
|
||||
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
|
||||
tarball="kata-static-$tag-x86_64.tar.xz"
|
||||
repo="https://github.com/kata-containers/runtime.git"
|
||||
mv release-candidate/kata-static.tar.xz "release-candidate/${tarball}"
|
||||
git clone "${repo}"
|
||||
cd runtime
|
||||
echo "uploading asset '${tarball}' to '${repo}' tag: ${tag}"
|
||||
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "../release-candidate/${tarball}" "${tag}"
|
||||
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
**/*.bk
|
||||
**/target
|
||||
45
.travis.yml
Normal file
45
.travis.yml
Normal file
@@ -0,0 +1,45 @@
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
dist: bionic
|
||||
os: linux
|
||||
language: go
|
||||
go: 1.13.9
|
||||
env: target_branch=$TRAVIS_BRANCH
|
||||
|
||||
before_install:
|
||||
- git remote set-branches --add origin "${TRAVIS_BRANCH}"
|
||||
- git fetch
|
||||
- "ci/setup.sh"
|
||||
|
||||
# we use install to run check agent
|
||||
# so that it is easy to skip for non-amd64 platform
|
||||
install:
|
||||
- "ci/install_rust.sh"
|
||||
- export PATH=$PATH:"$HOME/.cargo/bin"
|
||||
- export RUST_AGENT=yes
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
|
||||
|
||||
before_script:
|
||||
- "ci/install_go.sh"
|
||||
- "ci/install_vc.sh"
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime
|
||||
- make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
|
||||
|
||||
script:
|
||||
- "ci/static-checks.sh"
|
||||
|
||||
jobs:
|
||||
include:
|
||||
- name: x86_64 test
|
||||
os: linux
|
||||
- name: ppc64le test
|
||||
os: linux-ppc64le
|
||||
install: skip
|
||||
allow_failures:
|
||||
- name: ppc64le test
|
||||
fast_finish: true
|
||||
23
Makefile
Normal file
23
Makefile
Normal file
@@ -0,0 +1,23 @@
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
default: runtime agent
|
||||
|
||||
runtime:
|
||||
make -C src/runtime
|
||||
|
||||
agent:
|
||||
make -C src/agent
|
||||
|
||||
test-runtime:
|
||||
make -C src/runtime test
|
||||
|
||||
test-agent:
|
||||
make -C src/agent check
|
||||
|
||||
test: test-runtime test-agent
|
||||
|
||||
generate-protocols:
|
||||
make -C src/agent generate-protocols
|
||||
@@ -66,7 +66,7 @@ the first place to go if you want to use or contribute to the project.
|
||||
|
||||
##### Agent
|
||||
|
||||
The [`kata-agent`](https://github.com/kata-containers/agent) runs inside the
|
||||
The [`kata-agent`](src/agent/README.md) runs inside the
|
||||
virtual machine and sets up the container environment.
|
||||
|
||||
##### KSM throttler
|
||||
@@ -83,7 +83,7 @@ virtual machine.
|
||||
|
||||
##### Runtime
|
||||
|
||||
The [`kata-runtime`](https://github.com/kata-containers/runtime) is usually
|
||||
The [`kata-runtime`](src/runtime/README.md) is usually
|
||||
invoked by a container manager and provides high-level verbs to manage
|
||||
containers.
|
||||
|
||||
@@ -128,7 +128,7 @@ as the component it tests).
|
||||
|
||||
#### OS builder
|
||||
|
||||
The [osbuilder](https://github.com/kata-containers/osbuilder) tool can create
|
||||
The [osbuilder](tools/osbuilder/README.md) tool can create
|
||||
a rootfs and a "mini O/S" image. This image is used by the hypervisor to setup
|
||||
the environment before switching to the workload.
|
||||
|
||||
|
||||
11
ci/go-test.sh
Executable file
11
ci/go-test.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#
|
||||
# Copyright (c) 2020 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
run_go_test
|
||||
22
ci/install_go.sh
Executable file
22
ci/install_go.sh
Executable file
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
new_goroot=/usr/local/go
|
||||
|
||||
pushd "${tests_repo_dir}"
|
||||
# Force overwrite the current version of golang
|
||||
[ -z "${GOROOT}" ] || rm -rf "${GOROOT}"
|
||||
.ci/install_go.sh -p -f -d "$(dirname ${new_goroot})"
|
||||
[ -z "${GOROOT}" ] || sudo ln -sf "${new_goroot}" "${GOROOT}"
|
||||
go version
|
||||
popd
|
||||
16
ci/install_rust.sh
Executable file
16
ci/install_rust.sh
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd ${tests_repo_dir}
|
||||
.ci/install_rust.sh
|
||||
popd
|
||||
19
ci/install_vc.sh
Executable file
19
ci/install_vc.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
vcdir="${cidir}/../src/runtime/virtcontainers/"
|
||||
source "${cidir}/lib.sh"
|
||||
export CI_JOB="${CI_JOB:-default}"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
if [ "${CI_JOB}" != "PODMAN" ]; then
|
||||
echo "Install virtcontainers"
|
||||
make -C "${vcdir}" && chronic sudo make -C "${vcdir}" install
|
||||
fi
|
||||
31
ci/lib.sh
Normal file
31
ci/lib.sh
Normal file
@@ -0,0 +1,31 @@
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
|
||||
export tests_repo_dir="$GOPATH/src/$tests_repo"
|
||||
|
||||
clone_tests_repo()
|
||||
{
|
||||
# KATA_CI_NO_NETWORK is (has to be) ignored if there is
|
||||
# no existing clone.
|
||||
if [ -d "$tests_repo_dir" -a -n "$KATA_CI_NO_NETWORK" ]
|
||||
then
|
||||
return
|
||||
fi
|
||||
|
||||
go get -d -u "$tests_repo" || true
|
||||
}
|
||||
|
||||
run_static_checks()
|
||||
{
|
||||
clone_tests_repo
|
||||
bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
|
||||
}
|
||||
|
||||
run_go_test()
|
||||
{
|
||||
clone_tests_repo
|
||||
bash "$tests_repo_dir/.ci/go-test.sh"
|
||||
}
|
||||
16
ci/run.sh
Executable file
16
ci/run.sh
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2019 Ant Financial
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
set -e
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd ${tests_repo_dir}
|
||||
.ci/run.sh
|
||||
popd
|
||||
16
ci/setup.sh
Executable file
16
ci/setup.sh
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
clone_tests_repo
|
||||
|
||||
pushd "${tests_repo_dir}"
|
||||
.ci/setup.sh
|
||||
popd
|
||||
12
ci/static-checks.sh
Executable file
12
ci/static-checks.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2017-2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
run_static_checks
|
||||
15
src/agent/.cargo/config
Normal file
15
src/agent/.cargo/config
Normal file
@@ -0,0 +1,15 @@
|
||||
## Copyright (c) 2020 ARM Limited
|
||||
##
|
||||
## SPDX-License-Identifier: Apache-2.0
|
||||
##
|
||||
|
||||
[target.aarch64-unknown-linux-musl]
|
||||
## Only setting linker with `aarch64-linux-musl-gcc`, the
|
||||
## `rust-agent` could be totally statically linked.
|
||||
linker = "aarch64-linux-musl-gcc"
|
||||
|
||||
## The __addtf3, __subtf3 and __multf3 symbols are used by aarch64-musl,
|
||||
## but are not provided by rust compiler-builtins.
|
||||
## For now, the only functional workaround accepted by rust communities
|
||||
## is to get them from libgcc.
|
||||
rustflags = [ "-C", "link-arg=-lgcc" ]
|
||||
740
src/agent/Cargo.lock
generated
Normal file
740
src/agent/Cargo.lock
generated
Normal file
@@ -0,0 +1,740 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
[[package]]
|
||||
name = "addr2line"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a49806b9dadc843c61e7c97e72490ad7f7220ae249012fbda9ad0609457c0543"
|
||||
dependencies = [
|
||||
"gimli",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "0.4.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62"
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0df2f85c8a2abbe3b7d7e748052fdd9b76a0458fdeb16ad4223f5eca78c7c130"
|
||||
dependencies = [
|
||||
"addr2line",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"object",
|
||||
"rustc-demangle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bitflags"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
|
||||
|
||||
[[package]]
|
||||
name = "caps"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bf6a638a1f7f409f1e545ff0036b8aa5541692c775dd36b48b75bbde50d83d1c"
|
||||
dependencies = [
|
||||
"errno",
|
||||
"error-chain",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.54"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
|
||||
dependencies = [
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
"time",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-channel"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
"maybe-uninit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"cfg-if",
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b480f641ccf0faf324e20c1d3e53d81b7484c698b42ea677f6907ae4db195371"
|
||||
dependencies = [
|
||||
"errno-dragonfly",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "errno-dragonfly"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067"
|
||||
dependencies = [
|
||||
"gcc",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "error-chain"
|
||||
version = "0.12.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d371106cc88ffdfb1eabd7111e432da544f16f3e2d7bf1dfe8bf575f1df045cd"
|
||||
dependencies = [
|
||||
"backtrace",
|
||||
"version_check",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures"
|
||||
version = "0.1.29"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef"
|
||||
|
||||
[[package]]
|
||||
name = "gcc"
|
||||
version = "0.3.55"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"wasi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "gimli"
|
||||
version = "0.21.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c"
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
|
||||
|
||||
[[package]]
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"error-chain",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"logging",
|
||||
"netlink",
|
||||
"nix 0.17.0",
|
||||
"oci",
|
||||
"prctl",
|
||||
"protobuf",
|
||||
"protocols",
|
||||
"regex",
|
||||
"rustjail",
|
||||
"scan_fmt",
|
||||
"scopeguard",
|
||||
"serde_json",
|
||||
"signal-hook",
|
||||
"slog",
|
||||
"slog-scope",
|
||||
"tempfile",
|
||||
"ttrpc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.70"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "logging"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"serde_json",
|
||||
"slog",
|
||||
"slog-async",
|
||||
"slog-json",
|
||||
"slog-scope",
|
||||
"tempfile",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "maybe-uninit"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
|
||||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
|
||||
|
||||
[[package]]
|
||||
name = "netlink"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"nix 0.17.0",
|
||||
"protobuf",
|
||||
"protocols",
|
||||
"rustjail",
|
||||
"scan_fmt",
|
||||
"slog",
|
||||
"slog-async",
|
||||
"slog-json",
|
||||
"slog-scope",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nix"
|
||||
version = "0.16.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd0eaf8df8bab402257e0a5c17a254e4cc1f72a93588a1ddfb5d356c801aa7cb"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"void",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "nix"
|
||||
version = "0.17.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"void",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.42"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.19.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2"
|
||||
|
||||
[[package]]
|
||||
name = "oci"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "path-absolutize"
|
||||
version = "1.2.0"
|
||||
source = "git+git://github.com/magiclen/path-absolutize.git?tag=v1.2.0#a8fea23c20192218bdbb141a0940d13ffba7ec86"
|
||||
dependencies = [
|
||||
"path-dedot",
|
||||
"slash-formatter",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "path-dedot"
|
||||
version = "1.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "45c58ab1edb03f77d0bb3f08e4a179dd43ce9bc8eab9867ec53a78285ea3039b"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
|
||||
|
||||
[[package]]
|
||||
name = "prctl"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "059a34f111a9dee2ce1ac2826a68b24601c4298cfeb1a587c3cb493d5ab46f52"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"nix 0.17.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1502d12e458c49a4c9cbff560d0fe0060c252bc29799ed94ca2ed4bb665a0101"
|
||||
dependencies = [
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "2.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485"
|
||||
|
||||
[[package]]
|
||||
name = "protobuf-codegen"
|
||||
version = "2.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "de113bba758ccf2c1ef816b127c958001b7831136c9bc3f8e9ec695ac4e82b0c"
|
||||
dependencies = [
|
||||
"protobuf",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf-codegen-pure"
|
||||
version = "2.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d1a4febc73bf0cada1d77c459a0c8e5973179f1cfd5b0f1ab789d45b17b6440"
|
||||
dependencies = [
|
||||
"protobuf",
|
||||
"protobuf-codegen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protocols"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"futures",
|
||||
"protobuf",
|
||||
"ttrpc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "quote"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
"libc",
|
||||
"rand_chacha",
|
||||
"rand_core",
|
||||
"rand_hc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_chacha"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
|
||||
dependencies = [
|
||||
"ppv-lite86",
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_core"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rand_hc"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
|
||||
dependencies = [
|
||||
"rand_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "redox_syscall"
|
||||
version = "0.1.56"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
"regex-syntax",
|
||||
"thread_local",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex-syntax"
|
||||
version = "0.6.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
|
||||
|
||||
[[package]]
|
||||
name = "remove_dir_all"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783"
|
||||
|
||||
[[package]]
|
||||
name = "rustjail"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"caps",
|
||||
"error-chain",
|
||||
"lazy_static",
|
||||
"libc",
|
||||
"nix 0.17.0",
|
||||
"oci",
|
||||
"path-absolutize",
|
||||
"prctl",
|
||||
"protobuf",
|
||||
"protocols",
|
||||
"regex",
|
||||
"scan_fmt",
|
||||
"scopeguard",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"slog",
|
||||
"slog-scope",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1"
|
||||
|
||||
[[package]]
|
||||
name = "scan_fmt"
|
||||
version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "248286eec0f55678879ef1caec3d76276643ebcb5460d8cb6e732ef40f50aabe"
|
||||
dependencies = [
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.110"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c"
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.110"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.53"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2"
|
||||
dependencies = [
|
||||
"itoa",
|
||||
"ryu",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook"
|
||||
version = "0.1.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ff2db2112d6c761e12522c65f7768548bd6e8cd23d2a9dae162520626629bd6"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"signal-hook-registry",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-registry"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slash-formatter"
|
||||
version = "2.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2f7fb98e76e2022054673f3ebc43a4e12890ec6272530629df6237cafbb70569"
|
||||
|
||||
[[package]]
|
||||
name = "slog"
|
||||
version = "2.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1cc9c640a4adbfbcc11ffb95efe5aa7af7309e002adab54b185507dbf2377b99"
|
||||
|
||||
[[package]]
|
||||
name = "slog-async"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51b3336ce47ce2f96673499fc07eb85e3472727b9a7a2959964b002c2ce8fbbb"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"slog",
|
||||
"take_mut",
|
||||
"thread_local",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slog-json"
|
||||
version = "2.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ddc0d2aff1f8f325ef660d9a0eb6e6dcd20b30b3f581a5897f58bf42d061c37a"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"slog",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slog-scope"
|
||||
version = "4.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7c44c89dd8b0ae4537d1ae318353eaf7840b4869c536e31c41e963d1ea523ee6"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"lazy_static",
|
||||
"slog",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f14a640819f79b72a710c0be059dce779f9339ae046c8bef12c361d56702146f"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "take_mut"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"rand",
|
||||
"redox_syscall",
|
||||
"remove_dir_all",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thread_local"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.1.43"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ttrpc"
|
||||
version = "0.3.0"
|
||||
source = "git+https://github.com/containerd/ttrpc-rust.git?branch=0.3.0#ba1efe3bbb8f8af4895b7623ed1d11561e70e566"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"libc",
|
||||
"log",
|
||||
"nix 0.16.1",
|
||||
"protobuf",
|
||||
"protobuf-codegen-pure",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
|
||||
|
||||
[[package]]
|
||||
name = "void"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.9.0+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu",
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
41
src/agent/Cargo.toml
Normal file
41
src/agent/Cargo.toml
Normal file
@@ -0,0 +1,41 @@
|
||||
[package]
|
||||
name = "kata-agent"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <bo@hyper.sh>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
oci = { path = "oci" }
|
||||
logging = { path = "logging" }
|
||||
rustjail = { path = "rustjail" }
|
||||
protocols = { path = "protocols" }
|
||||
netlink = { path = "netlink" }
|
||||
lazy_static = "1.3.0"
|
||||
error-chain = "0.12.1"
|
||||
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
|
||||
protobuf = "=2.14.0"
|
||||
libc = "0.2.58"
|
||||
nix = "0.17.0"
|
||||
prctl = "1.0.0"
|
||||
serde_json = "1.0.39"
|
||||
signal-hook = "0.1.9"
|
||||
scan_fmt = "0.2.3"
|
||||
scopeguard = "1.0.0"
|
||||
regex = "1"
|
||||
# slog:
|
||||
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
|
||||
# - The 'max_*' features allow changing the log level at runtime
|
||||
# (by stopping the compiler from removing log calls).
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
|
||||
slog-scope = "4.1.2"
|
||||
# for testing
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[workspace]
|
||||
members = [
|
||||
"logging",
|
||||
"netlink",
|
||||
"oci",
|
||||
"protocols",
|
||||
"rustjail",
|
||||
]
|
||||
202
src/agent/LICENSE
Normal file
202
src/agent/LICENSE
Normal file
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
132
src/agent/Makefile
Normal file
132
src/agent/Makefile
Normal file
@@ -0,0 +1,132 @@
|
||||
# Copyright (c) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
PROJECT_NAME = Kata Containers
|
||||
PROJECT_URL = https://github.com/kata-containers
|
||||
PROJECT_COMPONENT = kata-agent
|
||||
|
||||
TARGET = $(PROJECT_COMPONENT)
|
||||
|
||||
SOURCES := \
|
||||
$(shell find . 2>&1 | grep -E '.*\.rs$$') \
|
||||
Cargo.toml
|
||||
|
||||
VERSION_FILE := ./VERSION
|
||||
VERSION := $(shell grep -v ^\# $(VERSION_FILE))
|
||||
COMMIT_NO := $(shell git rev-parse HEAD 2>/dev/null || true)
|
||||
COMMIT_NO_SHORT := $(shell git rev-parse --short HEAD 2>/dev/null || true)
|
||||
COMMIT := $(if $(shell git status --porcelain --untracked-files=no 2>/dev/null || true),${COMMIT_NO}-dirty,${COMMIT_NO})
|
||||
COMMIT_MSG = $(if $(COMMIT),$(COMMIT),unknown)
|
||||
|
||||
# Exported to allow cargo to see it
|
||||
export VERSION_COMMIT := $(if $(COMMIT),$(VERSION)-$(COMMIT),$(VERSION))
|
||||
|
||||
BUILD_TYPE = release
|
||||
|
||||
# set proto file to generate
|
||||
ifdef proto
|
||||
PROTO_FILE=${proto}
|
||||
endif
|
||||
|
||||
ARCH = $(shell uname -m)
|
||||
LIBC = musl
|
||||
TRIPLE = $(ARCH)-unknown-linux-$(LIBC)
|
||||
|
||||
TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
|
||||
|
||||
DESTDIR :=
|
||||
BINDIR := /usr/bin
|
||||
|
||||
# Define if agent will be installed as init
|
||||
INIT := no
|
||||
|
||||
# Path to systemd unit directory if installed as not init.
|
||||
UNIT_DIR := /usr/lib/systemd/system
|
||||
|
||||
GENERATED_FILES :=
|
||||
|
||||
ifeq ($(INIT),no)
|
||||
# Unit file to start kata agent in systemd systems
|
||||
UNIT_FILES = kata-agent.service
|
||||
GENERATED_FILES := $(UNIT_FILES)
|
||||
# Target to be reached in systemd services
|
||||
UNIT_FILES += kata-containers.target
|
||||
endif
|
||||
|
||||
# Display name of command and it's version (or a message if not available).
|
||||
#
|
||||
# Arguments:
|
||||
#
|
||||
# 1: Name of command
|
||||
define get_command_version
|
||||
$(shell printf "%s: %s\\n" $(1) "$(or $(shell $(1) --version 2>/dev/null), (not available))")
|
||||
endef
|
||||
|
||||
define get_toolchain_version
|
||||
$(shell printf "%s: %s\\n" "toolchain" "$(or $(shell rustup show active-toolchain 2>/dev/null), (unknown))")
|
||||
endef
|
||||
|
||||
define INSTALL_FILE
|
||||
install -D -m 644 $1 $(DESTDIR)$2/$1 || exit 1;
|
||||
endef
|
||||
|
||||
default: $(TARGET) show-header
|
||||
|
||||
$(TARGET): $(TARGET_PATH)
|
||||
|
||||
$(TARGET_PATH): $(SOURCES) | show-summary
|
||||
@cargo build --target $(TRIPLE) --$(BUILD_TYPE)
|
||||
|
||||
show-header:
|
||||
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
|
||||
|
||||
$(GENERATED_FILES): %: %.in
|
||||
@sed \
|
||||
-e 's|[@]bindir[@]|$(BINDIR)|g' \
|
||||
-e 's|[@]kata-agent[@]|$(TARGET)|g' \
|
||||
"$<" > "$@"
|
||||
|
||||
install: build-service
|
||||
@install -D $(TARGET_PATH) $(DESTDIR)/$(BINDIR)/$(TARGET)
|
||||
|
||||
clean:
|
||||
@cargo clean
|
||||
|
||||
check:
|
||||
@cargo test --all --target $(TRIPLE)
|
||||
|
||||
run:
|
||||
@cargo run --target $(TRIPLE)
|
||||
|
||||
build-service: $(GENERATED_FILES)
|
||||
ifeq ($(INIT),no)
|
||||
@echo "Installing systemd unit files..."
|
||||
$(foreach f,$(UNIT_FILES),$(call INSTALL_FILE,$f,$(UNIT_DIR)))
|
||||
endif
|
||||
|
||||
show-summary: show-header
|
||||
@printf "project:\n"
|
||||
@printf " name: $(PROJECT_NAME)\n"
|
||||
@printf " url: $(PROJECT_URL)\n"
|
||||
@printf " component: $(PROJECT_COMPONENT)\n"
|
||||
@printf "target: $(TARGET)\n"
|
||||
@printf "architecture:\n"
|
||||
@printf " host: $(ARCH)\n"
|
||||
@printf "rust:\n"
|
||||
@printf " %s\n" "$(call get_command_version,cargo)"
|
||||
@printf " %s\n" "$(call get_command_version,rustc)"
|
||||
@printf " %s\n" "$(call get_command_version,rustup)"
|
||||
@printf " %s\n" "$(call get_toolchain_version)"
|
||||
@printf "\n"
|
||||
|
||||
help: show-summary
|
||||
|
||||
.PHONY: \
|
||||
help \
|
||||
show-header \
|
||||
show-summary
|
||||
|
||||
generate-protocols:
|
||||
protocols/hack/update-generated-proto.sh "${PROTO_FILE}"
|
||||
67
src/agent/README.md
Normal file
67
src/agent/README.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Kata Agent in Rust
|
||||
|
||||
This is a rust version of the [`kata-agent`](https://github.com/kata-containers/kata-agent).
|
||||
|
||||
In Denver PTG, [we discussed about re-writing agent in rust](https://etherpad.openstack.org/p/katacontainers-2019-ptg-denver-agenda):
|
||||
|
||||
> In general, we all think about re-write agent in rust to reduce the footprint of agent. Moreover, Eric mentioned the possibility to stop using gRPC, which may have some impact on footprint. We may begin to do some PoC to show how much we could save by re-writing agent in rust.
|
||||
|
||||
After that, we drafted the initial code here, and any contributions are welcome.
|
||||
|
||||
## Features
|
||||
|
||||
| Feature | Status |
|
||||
| :--|:--:|
|
||||
| **OCI Behaviors** |
|
||||
| create/start containers | :white_check_mark: |
|
||||
| signal/wait process | :white_check_mark: |
|
||||
| exec/list process | :white_check_mark: |
|
||||
| I/O stream | :white_check_mark: |
|
||||
| Cgroups | :white_check_mark: |
|
||||
| Capabilities, rlimit, readonly path, masked path, users | :white_check_mark: |
|
||||
| container stats (`stats_container`) | :white_check_mark: |
|
||||
| Hooks | :white_check_mark: |
|
||||
| **Agent Features & APIs** |
|
||||
| run agent as `init` (mount fs, udev, setup `lo`) | :white_check_mark: |
|
||||
| block device as root device | :white_check_mark: |
|
||||
| Health API | :white_check_mark: |
|
||||
| network, interface/routes (`update_container`) | :white_check_mark: |
|
||||
| File transfer API (`copy_file`) | :white_check_mark: |
|
||||
| Device APIs (`reseed_random_device`, , `online_cpu_memory`, `mem_hotplug_probe`, `set_guet_data_time`) | :white_check_mark: |
|
||||
| vsock support | :white_check_mark: |
|
||||
| virtio-serial support | :heavy_multiplication_x: |
|
||||
| OCI Spec validator | :white_check_mark: |
|
||||
| **Infrastructures**|
|
||||
| Debug Console | :white_check_mark: |
|
||||
| Command line | :white_check_mark: |
|
||||
| Tracing | :heavy_multiplication_x: |
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Dependencies
|
||||
The `rust-agent` depends on [`grpc-rs`](https://github.com/pingcap/grpc-rs) by PingCAP. However, the upstream `grpc-rs` and [gRPC](https://github.com/grpc/grpc) need some changes to be used here, which may take some time to be landed. Therefore, we created a temporary fork or `grpc-rs` here:
|
||||
- https://github.com/alipay/grpc-rs/tree/rust_agent
|
||||
|
||||
### Build from Source
|
||||
The rust-agent need to be built with rust nightly, and static linked with musl.
|
||||
```bash
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
git submodule update --init --recursive
|
||||
sudo ln -s /usr/bin/g++ /bin/musl-g++
|
||||
cargo build --target x86_64-unknown-linux-musl --release
|
||||
```
|
||||
|
||||
## Run Kata CI with rust-agent
|
||||
* Firstly, install kata as noted by ["how to install Kata"](https://github.com/kata-containers/documentation/blob/master/install/README.md)
|
||||
* Secondly, build your own kata initrd/image following the steps in ["how to build your own initrd/image"](https://github.com/kata-containers/documentation/blob/master/Developer-Guide.md#create-and-install-rootfs-and-initrd-image).
|
||||
notes: Please use your rust agent instead of the go agent when building your initrd/image.
|
||||
* Clone the kata ci test cases from: https://github.com/kata-containers/tests.git, and then run the cri test with:
|
||||
|
||||
```bash
|
||||
$sudo -E PATH=$PATH -E GOPATH=$GOPATH integration/containerd/shimv2/shimv2-tests.sh
|
||||
```
|
||||
|
||||
## Mini Benchmark
|
||||
The memory of 'RssAnon' consumed by the go-agent and rust-agent as below:
|
||||
go-agent: about 11M
|
||||
rust-agent: about 1.1M
|
||||
1
src/agent/VERSION
Normal file
1
src/agent/VERSION
Normal file
@@ -0,0 +1 @@
|
||||
0.0.1
|
||||
22
src/agent/kata-agent.service.in
Normal file
22
src/agent/kata-agent.service.in
Normal file
@@ -0,0 +1,22 @@
|
||||
#
|
||||
# Copyright (c) 2018-2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
[Unit]
|
||||
Description=Kata Containers Agent
|
||||
Documentation=https://github.com/kata-containers/kata-containers
|
||||
Wants=kata-containers.target
|
||||
|
||||
[Service]
|
||||
# Send agent output to tty to allow capture debug logs
|
||||
# from a VM vsock port
|
||||
StandardOutput=tty
|
||||
Type=simple
|
||||
ExecStart=@bindir@/@kata-agent@
|
||||
LimitNOFILE=infinity
|
||||
# ExecStop is required for static agent tracing; in all other scenarios
|
||||
# the runtime handles shutting down the VM.
|
||||
ExecStop=/bin/sync ; /usr/bin/systemctl --force poweroff
|
||||
FailureAction=poweroff
|
||||
15
src/agent/kata-containers.target
Normal file
15
src/agent/kata-containers.target
Normal file
@@ -0,0 +1,15 @@
|
||||
#
|
||||
# Copyright (c) 2018-2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
[Unit]
|
||||
Description=Kata Containers Agent Target
|
||||
Requires=basic.target
|
||||
Requires=tmp.mount
|
||||
Wants=chronyd.service
|
||||
Requires=kata-agent.service
|
||||
Conflicts=rescue.service rescue.target
|
||||
After=basic.target rescue.service rescue.target
|
||||
AllowIsolate=yes
|
||||
20
src/agent/logging/Cargo.toml
Normal file
20
src/agent/logging/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "logging"
|
||||
version = "0.1.0"
|
||||
authors = ["Tim Zhang <tim@hyper.sh>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
serde_json = "1.0.39"
|
||||
# slog:
|
||||
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
|
||||
# - The 'max_*' features allow changing the log level at runtime
|
||||
# (by stopping the compiler from removing log calls).
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
|
||||
slog-json = "2.3.0"
|
||||
slog-async = "2.3.0"
|
||||
slog-scope = "4.1.2"
|
||||
# for testing
|
||||
tempfile = "3.1.0"
|
||||
252
src/agent/logging/src/lib.rs
Normal file
252
src/agent/logging/src/lib.rs
Normal file
@@ -0,0 +1,252 @@
|
||||
// Copyright (c) 2019 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
|
||||
use slog::{BorrowedKV, Drain, Key, OwnedKV, OwnedKVList, Record, KV};
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::process;
|
||||
use std::result;
|
||||
use std::sync::Mutex;
|
||||
|
||||
// XXX: 'writer' param used to make testing possible.
|
||||
pub fn create_logger<W>(name: &str, source: &str, level: slog::Level, writer: W) -> slog::Logger
|
||||
where
|
||||
W: Write + Send + Sync + 'static,
|
||||
{
|
||||
let json_drain = slog_json::Json::new(writer)
|
||||
.add_default_keys()
|
||||
.build()
|
||||
.fuse();
|
||||
|
||||
// Ensure only a unique set of key/value fields is logged
|
||||
let unique_drain = UniqueDrain::new(json_drain).fuse();
|
||||
|
||||
// Allow runtime filtering of records by log level
|
||||
let filter_drain = RuntimeLevelFilter::new(unique_drain, level).fuse();
|
||||
|
||||
// Ensure the logger is thread-safe
|
||||
let async_drain = slog_async::Async::new(filter_drain).build().fuse();
|
||||
|
||||
// Add some "standard" fields
|
||||
slog::Logger::root(
|
||||
async_drain.fuse(),
|
||||
o!("version" => env!("CARGO_PKG_VERSION"),
|
||||
"subsystem" => "root",
|
||||
"pid" => process::id().to_string(),
|
||||
"name" => name.to_string(),
|
||||
"source" => source.to_string()),
|
||||
)
|
||||
}
|
||||
|
||||
// Used to convert an slog::OwnedKVList into a hash map.
|
||||
struct HashSerializer {
|
||||
fields: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl HashSerializer {
|
||||
fn new() -> HashSerializer {
|
||||
HashSerializer {
|
||||
fields: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_field(&mut self, key: String, value: String) {
|
||||
// Take care to only add the first instance of a key. This matters for loggers (but not
|
||||
// Records) since a child loggers have parents and the loggers are serialised child first
|
||||
// meaning the *newest* fields are serialised first.
|
||||
if !self.fields.contains_key(&key) {
|
||||
self.fields.insert(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_field(&mut self, key: &str) {
|
||||
self.fields.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
impl KV for HashSerializer {
|
||||
fn serialize(&self, _record: &Record, serializer: &mut dyn slog::Serializer) -> slog::Result {
|
||||
for (key, value) in self.fields.iter() {
|
||||
serializer.emit_str(Key::from(key.to_string()), value)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl slog::Serializer for HashSerializer {
|
||||
fn emit_arguments(&mut self, key: Key, value: &std::fmt::Arguments) -> slog::Result {
|
||||
self.add_field(format!("{}", key), format!("{}", value));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UniqueDrain<D> {
|
||||
drain: D,
|
||||
}
|
||||
|
||||
impl<D> UniqueDrain<D> {
|
||||
fn new(drain: D) -> Self {
|
||||
UniqueDrain { drain }
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Drain for UniqueDrain<D>
|
||||
where
|
||||
D: Drain,
|
||||
{
|
||||
type Ok = ();
|
||||
type Err = io::Error;
|
||||
|
||||
fn log(&self, record: &Record, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
|
||||
let mut logger_serializer = HashSerializer::new();
|
||||
values.serialize(record, &mut logger_serializer)?;
|
||||
|
||||
let mut record_serializer = HashSerializer::new();
|
||||
record.kv().serialize(record, &mut record_serializer)?;
|
||||
|
||||
for (key, _) in record_serializer.fields.iter() {
|
||||
logger_serializer.remove_field(key);
|
||||
}
|
||||
|
||||
let record_owned_kv = OwnedKV(record_serializer);
|
||||
let record_static = record_static!(record.level(), "");
|
||||
let new_record = Record::new(&record_static, record.msg(), BorrowedKV(&record_owned_kv));
|
||||
|
||||
let logger_owned_kv = OwnedKV(logger_serializer);
|
||||
|
||||
let result = self
|
||||
.drain
|
||||
.log(&new_record, &OwnedKVList::from(logger_owned_kv));
|
||||
|
||||
match result {
|
||||
Ok(_t) => Ok(()),
|
||||
Err(_e) => Err(std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
"failed to drain log".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A RuntimeLevelFilter will discard all log records whose log level is less than the level
|
||||
// specified in the struct.
|
||||
struct RuntimeLevelFilter<D> {
|
||||
drain: D,
|
||||
level: Mutex<slog::Level>,
|
||||
}
|
||||
|
||||
impl<D> RuntimeLevelFilter<D> {
|
||||
fn new(drain: D, level: slog::Level) -> Self {
|
||||
RuntimeLevelFilter {
|
||||
drain,
|
||||
level: Mutex::new(level),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> Drain for RuntimeLevelFilter<D>
|
||||
where
|
||||
D: Drain,
|
||||
{
|
||||
type Ok = Option<D::Ok>;
|
||||
type Err = Option<D::Err>;
|
||||
|
||||
fn log(
|
||||
&self,
|
||||
record: &slog::Record,
|
||||
values: &slog::OwnedKVList,
|
||||
) -> result::Result<Self::Ok, Self::Err> {
|
||||
let log_level = self.level.lock().unwrap();
|
||||
|
||||
if record.level().is_at_least(*log_level) {
|
||||
self.drain.log(record, values)?;
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde_json::Value;
|
||||
use std::io::prelude::*;
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
#[test]
|
||||
fn test_create_logger_write_to_tmpfile() {
|
||||
// Create a writer for the logger drain to use
|
||||
let writer = NamedTempFile::new().expect("failed to create tempfile");
|
||||
|
||||
// Used to check file contents before the temp file is unlinked
|
||||
let mut writer_ref = writer.reopen().expect("failed to clone tempfile");
|
||||
|
||||
let level = slog::Level::Trace;
|
||||
let name = "name";
|
||||
let source = "source";
|
||||
let record_subsystem = "record-subsystem";
|
||||
|
||||
let record_key = "record-key-1";
|
||||
let record_value = "record-key-2";
|
||||
|
||||
let logger = create_logger(name, source, level, writer);
|
||||
|
||||
let msg = "foo, bar, baz";
|
||||
|
||||
// Call the logger (which calls the drain)
|
||||
info!(logger, "{}", msg; "subsystem" => record_subsystem, record_key => record_value);
|
||||
|
||||
// Force temp file to be flushed
|
||||
drop(logger);
|
||||
|
||||
let mut contents = String::new();
|
||||
writer_ref
|
||||
.read_to_string(&mut contents)
|
||||
.expect("failed to read tempfile contents");
|
||||
|
||||
// Convert file to JSON
|
||||
let fields: Value =
|
||||
serde_json::from_str(&contents).expect("failed to convert logfile to json");
|
||||
|
||||
// Check the expected JSON fields
|
||||
|
||||
let field_ts = fields.get("ts").expect("failed to find timestamp field");
|
||||
assert_ne!(field_ts, "");
|
||||
|
||||
let field_version = fields.get("version").expect("failed to find version field");
|
||||
assert_eq!(field_version, env!("CARGO_PKG_VERSION"));
|
||||
|
||||
let field_pid = fields.get("pid").expect("failed to find pid field");
|
||||
assert_ne!(field_pid, "");
|
||||
|
||||
let field_level = fields.get("level").expect("failed to find level field");
|
||||
assert_eq!(field_level, "INFO");
|
||||
|
||||
let field_msg = fields.get("msg").expect("failed to find msg field");
|
||||
assert_eq!(field_msg, msg);
|
||||
|
||||
let field_name = fields.get("name").expect("failed to find name field");
|
||||
assert_eq!(field_name, name);
|
||||
|
||||
let field_source = fields.get("source").expect("failed to find source field");
|
||||
assert_eq!(field_source, source);
|
||||
|
||||
let field_subsystem = fields
|
||||
.get("subsystem")
|
||||
.expect("failed to find subsystem field");
|
||||
|
||||
// The records field should take priority over the loggers field of the same name
|
||||
assert_eq!(field_subsystem, record_subsystem);
|
||||
|
||||
let field_record_value = fields
|
||||
.get(record_key)
|
||||
.expect("failed to find record key field");
|
||||
assert_eq!(field_record_value, record_value);
|
||||
}
|
||||
}
|
||||
19
src/agent/netlink/Cargo.toml
Normal file
19
src/agent/netlink/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
name = "netlink"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <yb203166@antfin.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
libc = "0.2.58"
|
||||
nix = "0.17.0"
|
||||
protobuf = "=2.14.0"
|
||||
rustjail = { path = "../rustjail" }
|
||||
protocols = { path = "../protocols" }
|
||||
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
|
||||
slog-json = "2.3.0"
|
||||
slog-async = "2.3.0"
|
||||
slog-scope = "4.1.2"
|
||||
scan_fmt = "0.2.3"
|
||||
2868
src/agent/netlink/src/lib.rs
Normal file
2868
src/agent/netlink/src/lib.rs
Normal file
File diff suppressed because it is too large
Load Diff
251
src/agent/netlink/src/neigh.rs
Normal file
251
src/agent/netlink/src/neigh.rs
Normal file
@@ -0,0 +1,251 @@
|
||||
// Copyright (c) 2020 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::{
|
||||
__s32, __u16, __u8, addattr_var, ifinfomsg, nlmsghdr, parse_ipaddr, IFA_F_PERMANENT,
|
||||
NLMSG_ALIGNTO, NLM_F_CREATE, NLM_F_EXCL, NLM_F_REQUEST, RTM_NEWNEIGH,
|
||||
};
|
||||
use crate::{NLMSG_ALIGN, NLMSG_DATA, NLMSG_HDRLEN, NLMSG_LENGTH};
|
||||
use protocols::types::ARPNeighbor;
|
||||
use rustjail::errors::*;
|
||||
use std::mem;
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy)]
|
||||
pub struct ndmsg {
|
||||
ndm_family: __u8,
|
||||
ndm_pad1: __u8,
|
||||
ndm_pad: __u16,
|
||||
ndm_ifindex: __s32,
|
||||
ndm_state: __u16,
|
||||
ndm_flags: __u8,
|
||||
ndm_type: __u8,
|
||||
}
|
||||
|
||||
pub const NDA_UNSPEC: __u16 = 0;
|
||||
pub const NDA_DST: __u16 = 1;
|
||||
pub const NDA_LLADDR: __u16 = 2;
|
||||
pub const NDA_CACHEINFO: __u16 = 3;
|
||||
pub const NDA_PROBES: __u16 = 4;
|
||||
pub const NDA_VLAN: __u16 = 5;
|
||||
pub const NDA_PORT: __u16 = 6;
|
||||
pub const NDA_VNI: __u16 = 7;
|
||||
pub const NDA_IFINDEX: __u16 = 8;
|
||||
pub const NDA_MASTER: __u16 = 9;
|
||||
pub const NDA_LINK_NETNSID: __u16 = 10;
|
||||
pub const NDA_SRC_VNI: __u16 = 11;
|
||||
pub const __NDA_MAX: __u16 = 12;
|
||||
|
||||
impl Clone for ndmsg {
|
||||
fn clone(&self) -> Self {
|
||||
Self { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ndmsg {
|
||||
fn default() -> Self {
|
||||
unsafe { mem::zeroed::<Self>() }
|
||||
}
|
||||
}
|
||||
|
||||
impl crate::RtnlHandle {
|
||||
pub fn add_arp_neighbors(&mut self, neighs: &Vec<ARPNeighbor>) -> Result<()> {
|
||||
for neigh in neighs {
|
||||
self.add_one_arp_neighbor(&neigh)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
pub fn add_one_arp_neighbor(&mut self, neigh: &ARPNeighbor) -> Result<()> {
|
||||
let dev: ifinfomsg;
|
||||
|
||||
match self.find_link_by_name(&neigh.device) {
|
||||
Ok(d) => dev = d,
|
||||
Err(e) => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Could not find link from device {}: {}",
|
||||
neigh.device, e
|
||||
))
|
||||
.into());
|
||||
}
|
||||
}
|
||||
|
||||
if neigh.toIPAddress.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("toIPAddress is required".to_string()).into());
|
||||
}
|
||||
|
||||
let to_ip = &neigh.toIPAddress.as_ref().unwrap().address;
|
||||
if to_ip.is_empty() {
|
||||
return Err(ErrorKind::ErrorCode("toIPAddress.address is required".to_string()).into());
|
||||
}
|
||||
|
||||
let mut v: Vec<u8> = vec![0; 2048];
|
||||
unsafe {
|
||||
// init
|
||||
let mut nlh: *mut nlmsghdr = v.as_mut_ptr() as *mut nlmsghdr;
|
||||
let mut ndm: *mut ndmsg = NLMSG_DATA!(nlh) as *mut ndmsg;
|
||||
|
||||
(*nlh).nlmsg_len = NLMSG_LENGTH!(mem::size_of::<ndmsg>()) as u32;
|
||||
(*nlh).nlmsg_type = RTM_NEWNEIGH;
|
||||
(*nlh).nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
|
||||
|
||||
self.seq += 1;
|
||||
self.dump = self.seq;
|
||||
(*nlh).nlmsg_seq = self.seq;
|
||||
|
||||
(*ndm).ndm_family = libc::AF_UNSPEC as __u8;
|
||||
(*ndm).ndm_state = IFA_F_PERMANENT as __u16;
|
||||
|
||||
// process lladdr
|
||||
if neigh.lladdr != "" {
|
||||
let llabuf = parse_mac(&neigh.lladdr)?;
|
||||
|
||||
addattr_var(nlh, NDA_LLADDR, llabuf.as_ptr() as *const u8, llabuf.len());
|
||||
}
|
||||
|
||||
// process destination
|
||||
let (family, ip_data) = parse_addr(&to_ip)?;
|
||||
(*ndm).ndm_family = family;
|
||||
addattr_var(nlh, NDA_DST, ip_data.as_ptr() as *const u8, ip_data.len());
|
||||
|
||||
// process state
|
||||
if neigh.state != 0 {
|
||||
(*ndm).ndm_state = neigh.state as __u16;
|
||||
}
|
||||
|
||||
// process flags
|
||||
(*ndm).ndm_flags = (*ndm).ndm_flags | neigh.flags as __u8;
|
||||
|
||||
// process dev
|
||||
(*ndm).ndm_ifindex = dev.ifi_index;
|
||||
|
||||
// send
|
||||
self.rtnl_talk(v.as_mut_slice(), false)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_mac(hwaddr: &str) -> Result<Vec<u8>> {
|
||||
let mut hw: Vec<u8> = vec![0; 6];
|
||||
|
||||
let (hw0, hw1, hw2, hw3, hw4, hw5) = scan_fmt!(hwaddr, "{x}:{x}:{x}:{x}:{x}:{x}",
|
||||
[hex u8], [hex u8], [hex u8], [hex u8], [hex u8],
|
||||
[hex u8])?;
|
||||
|
||||
hw[0] = hw0;
|
||||
hw[1] = hw1;
|
||||
hw[2] = hw2;
|
||||
hw[3] = hw3;
|
||||
hw[4] = hw4;
|
||||
hw[5] = hw5;
|
||||
|
||||
Ok(hw)
|
||||
}
|
||||
|
||||
fn parse_addr(ip_address: &str) -> Result<(__u8, Vec<u8>)> {
|
||||
let ip_data = parse_ipaddr(ip_address)?;
|
||||
let family: __u8;
|
||||
|
||||
// ipv6
|
||||
if ip_data.len() == 16 {
|
||||
family = libc::AF_INET6 as __u8;
|
||||
} else {
|
||||
family = libc::AF_INET as __u8;
|
||||
}
|
||||
|
||||
Ok((family, ip_data))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{RtnlHandle, NETLINK_ROUTE};
|
||||
use protocols::types::IPAddress;
|
||||
use std::process::Command;
|
||||
|
||||
fn clean_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
|
||||
// ip link delete dummy
|
||||
Command::new("ip")
|
||||
.args(&["link", "delete", dummy_name])
|
||||
.output()
|
||||
.expect("prepare: failed to delete dummy");
|
||||
|
||||
// ip neigh del dev dummy ip
|
||||
Command::new("ip")
|
||||
.args(&["neigh", "del", dummy_name, ip])
|
||||
.output()
|
||||
.expect("prepare: failed to delete neigh");
|
||||
}
|
||||
|
||||
fn prepare_env_for_test_add_one_arp_neighbor(dummy_name: &str, ip: &str) {
|
||||
clean_env_for_test_add_one_arp_neighbor(dummy_name, ip);
|
||||
// modprobe dummy
|
||||
Command::new("modprobe")
|
||||
.arg("dummy")
|
||||
.output()
|
||||
.expect("failed to run modprobe dummy");
|
||||
|
||||
// ip link add dummy type dummy
|
||||
Command::new("ip")
|
||||
.args(&["link", "add", dummy_name, "type", "dummy"])
|
||||
.output()
|
||||
.expect("failed to add dummy interface");
|
||||
|
||||
// ip addr add 192.168.0.2/16 dev dummy
|
||||
Command::new("ip")
|
||||
.args(&["addr", "add", "192.168.0.2/16", "dev", dummy_name])
|
||||
.output()
|
||||
.expect("failed to add ip for dummy");
|
||||
|
||||
// ip link set dummy up;
|
||||
Command::new("ip")
|
||||
.args(&["link", "set", dummy_name, "up"])
|
||||
.output()
|
||||
.expect("failed to up dummy");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_one_arp_neighbor() {
|
||||
// skip_if_not_root
|
||||
if !nix::unistd::Uid::effective().is_root() {
|
||||
println!("INFO: skipping {} which needs root", module_path!());
|
||||
return;
|
||||
}
|
||||
|
||||
let mac = "6a:92:3a:59:70:aa";
|
||||
let to_ip = "169.254.1.1";
|
||||
let dummy_name = "dummy_for_arp";
|
||||
|
||||
prepare_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);
|
||||
|
||||
let mut ip_address = IPAddress::new();
|
||||
ip_address.set_address(to_ip.to_string());
|
||||
|
||||
let mut neigh = ARPNeighbor::new();
|
||||
neigh.set_toIPAddress(ip_address);
|
||||
neigh.set_device(dummy_name.to_string());
|
||||
neigh.set_lladdr(mac.to_string());
|
||||
neigh.set_state(0x80);
|
||||
|
||||
let mut rtnl = RtnlHandle::new(NETLINK_ROUTE, 0).unwrap();
|
||||
|
||||
rtnl.add_one_arp_neighbor(&neigh).unwrap();
|
||||
|
||||
// ip neigh show dev dummy ip
|
||||
let stdout = Command::new("ip")
|
||||
.args(&["neigh", "show", "dev", dummy_name, to_ip])
|
||||
.output()
|
||||
.expect("failed to show neigh")
|
||||
.stdout;
|
||||
|
||||
let stdout = std::str::from_utf8(&stdout).expect("failed to conveert stdout");
|
||||
|
||||
assert_eq!(stdout, format!("{} lladdr {} PERMANENT\n", to_ip, mac));
|
||||
|
||||
clean_env_for_test_add_one_arp_neighbor(dummy_name, to_ip);
|
||||
}
|
||||
}
|
||||
11
src/agent/oci/Cargo.toml
Normal file
11
src/agent/oci/Cargo.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
[package]
|
||||
name = "oci"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <bo@hyper.sh>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
serde = "1.0.91"
|
||||
serde_derive = "1.0.91"
|
||||
serde_json = "1.0.39"
|
||||
libc = "0.2.58"
|
||||
1616
src/agent/oci/src/lib.rs
Normal file
1616
src/agent/oci/src/lib.rs
Normal file
File diff suppressed because it is too large
Load Diff
88
src/agent/oci/src/serialize.rs
Normal file
88
src/agent/oci/src/serialize.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json;
|
||||
|
||||
use std::error;
|
||||
use std::fmt::{Display, Formatter, Result as FmtResult};
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
Io(io::Error),
|
||||
Json(serde_json::Error),
|
||||
}
|
||||
|
||||
impl Display for Error {
|
||||
fn fmt(&self, f: &mut Formatter) -> FmtResult {
|
||||
match *self {
|
||||
Error::Io(ref e) => e.fmt(f),
|
||||
Error::Json(ref e) => e.fmt(f),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for Error {
|
||||
fn description(&self) -> &str {
|
||||
match *self {
|
||||
Error::Io(ref e) => e.description(),
|
||||
Error::Json(ref e) => e.description(),
|
||||
}
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&dyn error::Error> {
|
||||
match *self {
|
||||
Error::Io(ref e) => Some(e),
|
||||
Error::Json(ref e) => Some(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for Error {
|
||||
fn from(e: io::Error) -> Error {
|
||||
Error::Io(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for Error {
|
||||
fn from(e: serde_json::Error) -> Error {
|
||||
Error::Json(e)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_writer<W, T>(o: &T, w: W) -> Result<()>
|
||||
where
|
||||
W: io::Write,
|
||||
T: Serialize,
|
||||
{
|
||||
Ok(serde_json::to_writer(w, o)?)
|
||||
}
|
||||
|
||||
pub fn serialize<T>(o: &T, path: &str) -> Result<()>
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
let f = File::create(path)?;
|
||||
Ok(serde_json::to_writer(f, o)?)
|
||||
}
|
||||
|
||||
pub fn to_string<T>(o: &T) -> Result<String>
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
Ok(serde_json::to_string(o)?)
|
||||
}
|
||||
|
||||
pub fn deserialize<T>(path: &str) -> Result<T>
|
||||
where
|
||||
for<'a> T: Deserialize<'a>,
|
||||
{
|
||||
let f = File::open(path)?;
|
||||
Ok(serde_json::from_reader(f)?)
|
||||
}
|
||||
10
src/agent/protocols/Cargo.toml
Normal file
10
src/agent/protocols/Cargo.toml
Normal file
@@ -0,0 +1,10 @@
|
||||
[package]
|
||||
name = "protocols"
|
||||
version = "0.1.0"
|
||||
authors = ["Hui Zhu <teawater@hyper.sh>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" }
|
||||
protobuf = "=2.14.0"
|
||||
futures = "0.1.27"
|
||||
142
src/agent/protocols/hack/update-generated-proto.sh
Executable file
142
src/agent/protocols/hack/update-generated-proto.sh
Executable file
@@ -0,0 +1,142 @@
|
||||
#!/bin/bash
|
||||
|
||||
# //
|
||||
# // Copyright 2020 Ant Financial
|
||||
# //
|
||||
# // SPDX-License-Identifier: Apache-2.0
|
||||
# //
|
||||
|
||||
die() {
|
||||
cat <<EOT >&2
|
||||
====================================================================
|
||||
==== compile protocols failed ====
|
||||
|
||||
$1
|
||||
|
||||
====================================================================
|
||||
EOT
|
||||
exit 1
|
||||
}
|
||||
|
||||
show_succeed_msg() {
|
||||
echo "===================================================================="
|
||||
echo "==== ===="
|
||||
echo "==== compile protocols succeed ===="
|
||||
echo "==== ===="
|
||||
echo "===================================================================="
|
||||
}
|
||||
|
||||
show_usage() {
|
||||
echo "===================================================================="
|
||||
echo ""
|
||||
echo " USAGE: make PROTO_FILE=<xyz.proto> generate-protocols"
|
||||
echo ""
|
||||
echo " Where PROTO_FILE may be:"
|
||||
echo " all: will compile all protocol buffer files"
|
||||
echo ""
|
||||
echo " Or compile individually by using the exact proto file:"
|
||||
|
||||
# iterate over proto files
|
||||
for file in "$@"
|
||||
do
|
||||
echo " $file"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "===================================================================="
|
||||
}
|
||||
|
||||
generate_go_sources() {
|
||||
local cmd="protoc -I$GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
--gogottrpc_out=plugins=ttrpc+fieldpath,\
|
||||
import_path=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols,\
|
||||
\
|
||||
Mgithub.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto=github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc,\
|
||||
\
|
||||
Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/field_mask.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types,Mgoogle/rpc/status.proto=github.com/gogo/googleapis/google/rpc\
|
||||
:$GOPATH/src \
|
||||
$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos/$1"
|
||||
|
||||
echo $cmd
|
||||
$cmd
|
||||
[ $? -eq 0 ] || die "Failed to generate golang file from $1"
|
||||
}
|
||||
|
||||
generate_rust_sources() {
|
||||
local cmd="protoc --rust_out=./protocols/src/ \
|
||||
--ttrpc_out=./protocols/src/,plugins=ttrpc:./protocols/src/ \
|
||||
--plugin=protoc-gen-ttrpc=`which ttrpc_rust_plugin` \
|
||||
-I $GOPATH/src/github.com/kata-containers/agent/vendor/github.com/gogo/protobuf:$GOPATH/src/github.com/kata-containers/agent/vendor:$GOPATH/src/github.com/gogo/protobuf:$GOPATH/src/github.com/gogo/googleapis:$GOPATH/src:$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos \
|
||||
$GOPATH/src/github.com/kata-containers/kata-containers/src/agent/protocols/protos/$1"
|
||||
|
||||
echo $cmd
|
||||
$cmd
|
||||
[ $? -eq 0 ] || die "Failed to generate rust file from $1"
|
||||
|
||||
if [ "$1" = "oci.proto" ]; then
|
||||
# Need change Box<Self> to ::std::boxed::Box<Self> because there is another struct Box
|
||||
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<::std::any::Any> {/g' ./protocols/src/oci.rs > ./protocols/src/new_oci.rs
|
||||
sed 's/fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {/fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {/g' ./protocols/src/oci.rs > ./protocols/src/new_oci.rs
|
||||
mv ./protocols/src/new_oci.rs ./protocols/src/oci.rs
|
||||
fi;
|
||||
}
|
||||
|
||||
if [ "$(basename $(pwd))" != "agent" ]; then
|
||||
die "Please go to directory of protocols before execute this shell"
|
||||
fi
|
||||
|
||||
# Protocol buffer files required to generate golang/rust bindings.
|
||||
proto_files_list=(agent.proto health.proto oci.proto github.com/kata-containers/agent/pkg/types/types.proto)
|
||||
|
||||
if [ "$1" = "" ]; then
|
||||
show_usage "${proto_files_list[@]}"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
# pre-requirement check
|
||||
which protoc
|
||||
[ $? -eq 0 ] || die "Please install protoc from github.com/protocolbuffers/protobuf"
|
||||
|
||||
which protoc-gen-rust
|
||||
[ $? -eq 0 ] || die "Please install protobuf-codegen from github.com/pingcap/grpc-rs"
|
||||
|
||||
which ttrpc_rust_plugin
|
||||
[ $? -eq 0 ] || die "Please install ttrpc_rust_plugin from https://github.com/containerd/ttrpc-rust"
|
||||
|
||||
which protoc-gen-gogottrpc
|
||||
[ $? -eq 0 ] || die "Please install protoc-gen-gogottrpc from https://github.com/containerd/ttrpc"
|
||||
|
||||
# do generate work
|
||||
target=$1
|
||||
|
||||
# compile all proto files
|
||||
if [ "$target" = "all" ]; then
|
||||
# compile all proto files
|
||||
for f in ${proto_files_list[@]}; do
|
||||
echo -e "\n [golang] compiling ${f} ..."
|
||||
generate_go_sources $f
|
||||
echo -e " [golang] ${f} compiled\n"
|
||||
|
||||
echo -e "\n [rust] compiling ${f} ..."
|
||||
generate_rust_sources $f
|
||||
echo -e " [rust] ${f} compiled\n"
|
||||
done
|
||||
else
|
||||
# compile individual proto file
|
||||
for f in ${proto_files_list[@]}; do
|
||||
if [ "$target" = "$f" ]; then
|
||||
echo -e "\n [golang] compiling ${target} ..."
|
||||
generate_go_sources $target
|
||||
echo -e " [golang] ${target} compiled\n"
|
||||
|
||||
echo -e "\n [rust] compiling ${target} ..."
|
||||
generate_rust_sources $target
|
||||
echo -e " [rust] ${target} compiled\n"
|
||||
fi
|
||||
done
|
||||
fi;
|
||||
|
||||
# if have no errors, compilation will succeed
|
||||
show_succeed_msg
|
||||
509
src/agent/protocols/protos/agent.proto
Normal file
509
src/agent/protocols/protos/agent.proto
Normal file
@@ -0,0 +1,509 @@
|
||||
//
|
||||
// Copyright 2017 HyperHQ Inc.
|
||||
// Copyright 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc";
|
||||
|
||||
package grpc;
|
||||
|
||||
import "github.com/kata-containers/kata-containers/src/agent/protocols/protos/oci.proto";
|
||||
import "github.com/kata-containers/kata-containers/src/agent/protocols/protos/github.com/kata-containers/agent/pkg/types/types.proto";
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
// unstable
|
||||
service AgentService {
|
||||
// execution
|
||||
rpc CreateContainer(CreateContainerRequest) returns (google.protobuf.Empty);
|
||||
rpc StartContainer(StartContainerRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// RemoveContainer will tear down an existing container by forcibly terminating
|
||||
// all processes running inside that container and releasing all internal
|
||||
// resources associated with it.
|
||||
// RemoveContainer will wait for all processes termination before returning.
|
||||
// If any process can not be killed or if it can not be killed after
|
||||
// the RemoveContainerRequest timeout, RemoveContainer will return an error.
|
||||
rpc RemoveContainer(RemoveContainerRequest) returns (google.protobuf.Empty);
|
||||
rpc ExecProcess(ExecProcessRequest) returns (google.protobuf.Empty);
|
||||
rpc SignalProcess(SignalProcessRequest) returns (google.protobuf.Empty);
|
||||
rpc WaitProcess(WaitProcessRequest) returns (WaitProcessResponse); // wait & reap like waitpid(2)
|
||||
rpc ListProcesses(ListProcessesRequest) returns (ListProcessesResponse);
|
||||
rpc UpdateContainer(UpdateContainerRequest) returns (google.protobuf.Empty);
|
||||
rpc StatsContainer(StatsContainerRequest) returns (StatsContainerResponse);
|
||||
rpc PauseContainer(PauseContainerRequest) returns (google.protobuf.Empty);
|
||||
rpc ResumeContainer(ResumeContainerRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// stdio
|
||||
rpc WriteStdin(WriteStreamRequest) returns (WriteStreamResponse);
|
||||
rpc ReadStdout(ReadStreamRequest) returns (ReadStreamResponse);
|
||||
rpc ReadStderr(ReadStreamRequest) returns (ReadStreamResponse);
|
||||
rpc CloseStdin(CloseStdinRequest) returns (google.protobuf.Empty);
|
||||
rpc TtyWinResize(TtyWinResizeRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// networking
|
||||
rpc UpdateInterface(UpdateInterfaceRequest) returns (types.Interface);
|
||||
rpc UpdateRoutes(UpdateRoutesRequest) returns (Routes);
|
||||
rpc ListInterfaces(ListInterfacesRequest) returns(Interfaces);
|
||||
rpc ListRoutes(ListRoutesRequest) returns (Routes);
|
||||
rpc AddARPNeighbors(AddARPNeighborsRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// tracing
|
||||
rpc StartTracing(StartTracingRequest) returns (google.protobuf.Empty);
|
||||
rpc StopTracing(StopTracingRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// misc (TODO: some rpcs can be replaced by hyperstart-exec)
|
||||
rpc CreateSandbox(CreateSandboxRequest) returns (google.protobuf.Empty);
|
||||
rpc DestroySandbox(DestroySandboxRequest) returns (google.protobuf.Empty);
|
||||
rpc OnlineCPUMem(OnlineCPUMemRequest) returns (google.protobuf.Empty);
|
||||
rpc ReseedRandomDev(ReseedRandomDevRequest) returns (google.protobuf.Empty);
|
||||
rpc GetGuestDetails(GuestDetailsRequest) returns (GuestDetailsResponse);
|
||||
rpc MemHotplugByProbe(MemHotplugByProbeRequest) returns (google.protobuf.Empty);
|
||||
rpc SetGuestDateTime(SetGuestDateTimeRequest) returns (google.protobuf.Empty);
|
||||
rpc CopyFile(CopyFileRequest) returns (google.protobuf.Empty);
|
||||
}
|
||||
|
||||
message CreateContainerRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
StringUser string_user = 3;
|
||||
repeated Device devices = 4;
|
||||
repeated Storage storages = 5;
|
||||
Spec OCI = 6;
|
||||
|
||||
// This field is used to indicate if the container needs to join
|
||||
// sandbox shared pid ns or create a new namespace. This field is
|
||||
// meant to override the NEWPID config settings in the OCI spec.
|
||||
// The agent would receive an OCI spec with PID namespace cleared
|
||||
// out altogether and not just the pid ns path.
|
||||
bool sandbox_pidns = 7;
|
||||
}
|
||||
|
||||
message StartContainerRequest {
|
||||
string container_id = 1;
|
||||
}
|
||||
|
||||
message RemoveContainerRequest {
|
||||
string container_id = 1;
|
||||
|
||||
// RemoveContainer will return an error if
|
||||
// it could not kill some container processes
|
||||
// after timeout seconds.
|
||||
// Setting timeout to 0 means RemoveContainer will
|
||||
// wait for ever.
|
||||
uint32 timeout = 2;
|
||||
}
|
||||
|
||||
message ExecProcessRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
StringUser string_user = 3;
|
||||
Process process = 4;
|
||||
}
|
||||
|
||||
message SignalProcessRequest {
|
||||
string container_id = 1;
|
||||
|
||||
// Special case for SignalProcess(): exec_id can be empty(""),
|
||||
// which means to send the signal to all the processes including their descendants.
|
||||
// Other APIs with exec_id should treat empty exec_id as an invalid request.
|
||||
string exec_id = 2;
|
||||
uint32 signal = 3;
|
||||
}
|
||||
|
||||
message WaitProcessRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
}
|
||||
|
||||
message WaitProcessResponse {
|
||||
int32 status = 1;
|
||||
}
|
||||
|
||||
// ListProcessesRequest contains the options used to list running processes inside the container
|
||||
message ListProcessesRequest {
|
||||
string container_id = 1;
|
||||
string format = 2;
|
||||
repeated string args = 3;
|
||||
}
|
||||
|
||||
// ListProcessesResponse represents the list of running processes inside the container
|
||||
message ListProcessesResponse {
|
||||
bytes process_list = 1;
|
||||
}
|
||||
|
||||
message UpdateContainerRequest {
|
||||
string container_id = 1;
|
||||
LinuxResources resources = 2;
|
||||
}
|
||||
|
||||
message StatsContainerRequest {
|
||||
string container_id = 1;
|
||||
}
|
||||
|
||||
message PauseContainerRequest {
|
||||
string container_id = 1;
|
||||
}
|
||||
|
||||
message ResumeContainerRequest {
|
||||
string container_id = 1;
|
||||
}
|
||||
|
||||
message CpuUsage {
|
||||
uint64 total_usage = 1;
|
||||
repeated uint64 percpu_usage = 2;
|
||||
uint64 usage_in_kernelmode = 3;
|
||||
uint64 usage_in_usermode = 4;
|
||||
}
|
||||
|
||||
message ThrottlingData {
|
||||
uint64 periods = 1;
|
||||
uint64 throttled_periods = 2;
|
||||
uint64 throttled_time = 3;
|
||||
}
|
||||
|
||||
message CpuStats {
|
||||
CpuUsage cpu_usage = 1;
|
||||
ThrottlingData throttling_data = 2;
|
||||
}
|
||||
|
||||
message PidsStats {
|
||||
uint64 current = 1;
|
||||
uint64 limit = 2;
|
||||
}
|
||||
|
||||
message MemoryData {
|
||||
uint64 usage = 1;
|
||||
uint64 max_usage = 2;
|
||||
uint64 failcnt = 3;
|
||||
uint64 limit = 4;
|
||||
}
|
||||
|
||||
message MemoryStats {
|
||||
uint64 cache = 1;
|
||||
MemoryData usage = 2;
|
||||
MemoryData swap_usage = 3;
|
||||
MemoryData kernel_usage = 4;
|
||||
bool use_hierarchy = 5;
|
||||
map<string, uint64> stats = 6;
|
||||
}
|
||||
|
||||
|
||||
message BlkioStatsEntry {
|
||||
uint64 major = 1;
|
||||
uint64 minor = 2;
|
||||
string op = 3;
|
||||
uint64 value = 4;
|
||||
}
|
||||
|
||||
message BlkioStats {
|
||||
repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
|
||||
repeated BlkioStatsEntry io_serviced_recursive = 2;
|
||||
repeated BlkioStatsEntry io_queued_recursive = 3;
|
||||
repeated BlkioStatsEntry io_service_time_recursive = 4;
|
||||
repeated BlkioStatsEntry io_wait_time_recursive = 5;
|
||||
repeated BlkioStatsEntry io_merged_recursive = 6;
|
||||
repeated BlkioStatsEntry io_time_recursive = 7;
|
||||
repeated BlkioStatsEntry sectors_recursive = 8;
|
||||
}
|
||||
|
||||
message HugetlbStats {
|
||||
uint64 usage = 1;
|
||||
uint64 max_usage = 2;
|
||||
uint64 failcnt = 3;
|
||||
}
|
||||
|
||||
message CgroupStats {
|
||||
CpuStats cpu_stats = 1;
|
||||
MemoryStats memory_stats = 2;
|
||||
PidsStats pids_stats = 3;
|
||||
BlkioStats blkio_stats = 4;
|
||||
map<string, HugetlbStats> hugetlb_stats = 5; // the map is in the format "size of hugepage: stats of the hugepage"
|
||||
|
||||
}
|
||||
|
||||
message NetworkStats {
|
||||
string name = 1;
|
||||
uint64 rx_bytes = 2;
|
||||
uint64 rx_packets = 3;
|
||||
uint64 rx_errors = 4;
|
||||
uint64 rx_dropped = 5;
|
||||
uint64 tx_bytes = 6;
|
||||
uint64 tx_packets = 7;
|
||||
uint64 tx_errors = 8;
|
||||
uint64 tx_dropped = 9;
|
||||
}
|
||||
|
||||
message StatsContainerResponse {
|
||||
CgroupStats cgroup_stats = 1;
|
||||
repeated NetworkStats network_stats = 2;
|
||||
}
|
||||
|
||||
message WriteStreamRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
bytes data = 3;
|
||||
}
|
||||
|
||||
message WriteStreamResponse {
|
||||
uint32 len = 1;
|
||||
}
|
||||
|
||||
message ReadStreamRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
uint32 len = 3;
|
||||
}
|
||||
|
||||
message ReadStreamResponse {
|
||||
bytes data = 1;
|
||||
}
|
||||
|
||||
message CloseStdinRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
}
|
||||
|
||||
message TtyWinResizeRequest {
|
||||
string container_id = 1;
|
||||
string exec_id = 2;
|
||||
uint32 row = 3;
|
||||
uint32 column = 4;
|
||||
}
|
||||
|
||||
message KernelModule {
|
||||
// This field is the name of the kernel module.
|
||||
string name = 1;
|
||||
// This field are the parameters for the kernel module which are
|
||||
// whitespace-delimited key=value pairs passed to modprobe(8).
|
||||
repeated string parameters = 2;
|
||||
}
|
||||
|
||||
message CreateSandboxRequest {
|
||||
string hostname = 1;
|
||||
repeated string dns = 2;
|
||||
repeated Storage storages = 3;
|
||||
|
||||
// This field means that a pause process needs to be created by the
|
||||
// agent. This pid namespace of the pause process will be treated as
|
||||
// a shared pid namespace. All containers created will join this shared
|
||||
// pid namespace.
|
||||
bool sandbox_pidns = 4;
|
||||
// SandboxId identifies which sandbox is using the agent. We allow only
|
||||
// one sandbox per agent and implicitly require that CreateSandbox is
|
||||
// called before other sandbox/network calls.
|
||||
string sandbox_id = 5;
|
||||
// This field, if non-empty, designates an absolute path to a directory
|
||||
// that the agent will search for OCI hooks to run within the guest.
|
||||
string guest_hook_path = 6;
|
||||
// This field is the list of kernel modules to be loaded in the guest kernel.
|
||||
repeated KernelModule kernel_modules = 7;
|
||||
}
|
||||
|
||||
message DestroySandboxRequest {
|
||||
}
|
||||
|
||||
message Interfaces {
|
||||
repeated types.Interface Interfaces = 1;
|
||||
}
|
||||
|
||||
message Routes {
|
||||
repeated types.Route Routes = 1;
|
||||
}
|
||||
|
||||
message UpdateInterfaceRequest {
|
||||
types.Interface interface = 1;
|
||||
}
|
||||
|
||||
message UpdateRoutesRequest {
|
||||
Routes routes = 1;
|
||||
}
|
||||
|
||||
message ListInterfacesRequest {
|
||||
}
|
||||
|
||||
message ListRoutesRequest {
|
||||
}
|
||||
|
||||
message ARPNeighbors {
|
||||
repeated types.ARPNeighbor ARPNeighbors = 1;
|
||||
}
|
||||
|
||||
message AddARPNeighborsRequest {
|
||||
ARPNeighbors neighbors = 1;
|
||||
}
|
||||
|
||||
message OnlineCPUMemRequest {
|
||||
// Wait specifies if the caller waits for the agent to online all resources.
|
||||
// If true the agent returns once all resources have been connected, otherwise all
|
||||
// resources are connected asynchronously and the agent returns immediately.
|
||||
bool wait = 1;
|
||||
|
||||
// NbCpus specifies the number of CPUs that were added and the agent has to online.
|
||||
uint32 nb_cpus = 2;
|
||||
|
||||
// CpuOnly specifies whether only online CPU or not.
|
||||
bool cpu_only = 3;
|
||||
}
|
||||
|
||||
message ReseedRandomDevRequest {
|
||||
// Data specifies the random data used to reseed the guest crng.
|
||||
bytes data = 2;
|
||||
}
|
||||
|
||||
// AgentDetails provides information to the client about the running agent.
|
||||
message AgentDetails {
|
||||
// Semantic version of agent (see https://semver.org).
|
||||
string version = 1;
|
||||
|
||||
// Set if the agent is running as PID 1.
|
||||
bool init_daemon = 2;
|
||||
|
||||
// List of available device handlers.
|
||||
repeated string device_handlers = 3;
|
||||
|
||||
// List of available storage handlers.
|
||||
repeated string storage_handlers = 4;
|
||||
|
||||
// Set only if the agent is built with seccomp support and the guest
|
||||
// environment supports seccomp.
|
||||
bool supports_seccomp = 5;
|
||||
}
|
||||
|
||||
message GuestDetailsRequest {
|
||||
// MemBlockSize asks server to return the system memory block size that can be used
|
||||
// for memory hotplug alignment. Typically the server returns what's in
|
||||
// /sys/devices/system/memory/block_size_bytes.
|
||||
bool mem_block_size = 1;
|
||||
|
||||
// MemoryHotplugProbe asks server to return whether guest kernel supports memory hotplug
|
||||
// via probeinterface. Typically the server will check if the path
|
||||
// /sys/devices/system/memory/probe exists.
|
||||
bool mem_hotplug_probe = 2;
|
||||
}
|
||||
|
||||
message GuestDetailsResponse {
|
||||
// MemBlockSizeBytes returns the system memory block size in bytes.
|
||||
uint64 mem_block_size_bytes = 1;
|
||||
|
||||
AgentDetails agent_details = 2;
|
||||
|
||||
bool support_mem_hotplug_probe = 3;
|
||||
}
|
||||
|
||||
message MemHotplugByProbeRequest {
|
||||
// server needs to send the value of memHotplugProbeAddr into file /sys/devices/system/memory/probe,
|
||||
// in order to notify the guest kernel about hot-add memory event
|
||||
repeated uint64 memHotplugProbeAddr = 1;
|
||||
}
|
||||
|
||||
message SetGuestDateTimeRequest {
|
||||
// Sec the second since the Epoch.
|
||||
int64 Sec = 1;
|
||||
// Usec the microseconds portion of time since the Epoch.
|
||||
int64 Usec = 2;
|
||||
}
|
||||
|
||||
// Storage represents both the rootfs of the container, and any volume that
|
||||
// could have been defined through the Mount list of the OCI specification.
|
||||
message Storage {
|
||||
// Driver is used to define the way the storage is passed through the
|
||||
// virtual machine. It can be "9p", "blk", or something else, but for
|
||||
// all cases, this will define if some extra steps are required before
|
||||
// this storage gets mounted into the container.
|
||||
string driver = 1;
|
||||
// DriverOptions allows the caller to define a list of options such
|
||||
// as block sizes, numbers of luns, ... which are very specific to
|
||||
// every device and cannot be generalized through extra fields.
|
||||
repeated string driver_options = 2;
|
||||
// Source can be anything representing the source of the storage. This
|
||||
// will be handled by the proper handler based on the Driver used.
|
||||
// For instance, it can be a very simple path if the caller knows the
|
||||
// name of device inside the VM, or it can be some sort of identifier
|
||||
// to let the agent find the device inside the VM.
|
||||
string source = 3;
|
||||
// Fstype represents the filesystem that needs to be used to mount the
|
||||
// storage inside the VM. For instance, it could be "xfs" for block
|
||||
// device, "9p" for shared filesystem, or "tmpfs" for shared /dev/shm.
|
||||
string fstype = 4;
|
||||
// Options describes the additional options that might be needed to
|
||||
// mount properly the storage filesytem.
|
||||
repeated string options = 5;
|
||||
// MountPoint refers to the path where the storage should be mounted
|
||||
// inside the VM.
|
||||
string mount_point = 6;
|
||||
}
|
||||
|
||||
// Device represents only the devices that could have been defined through the
|
||||
// Linux Device list of the OCI specification.
|
||||
message Device {
|
||||
// Id can be used to identify the device inside the VM. Some devices
|
||||
// might not need it to be identified on the VM, and will rely on the
|
||||
// provided VmPath instead.
|
||||
string id = 1;
|
||||
// Type defines the type of device described. This can be "blk",
|
||||
// "scsi", "vfio", ...
|
||||
// Particularly, this should be used to trigger the use of the
|
||||
// appropriate device handler.
|
||||
string type = 2;
|
||||
// VmPath can be used by the caller to provide directly the path of
|
||||
// the device as it will appear inside the VM. For some devices, the
|
||||
// device id or the list of options passed might not be enough to find
|
||||
// the device. In those cases, the caller should predict and provide
|
||||
// this vm_path.
|
||||
string vm_path = 3;
|
||||
// ContainerPath defines the path where the device should be found inside
|
||||
// the container. This path should match the path of the device from
|
||||
// the device list listed inside the OCI spec. This is used in order
|
||||
// to identify the right device in the spec and update it with the
|
||||
// right options such as major/minor numbers as they appear inside
|
||||
// the VM for instance. Note that an empty ctr_path should be used
|
||||
// to make sure the device handler inside the agent is called, but
|
||||
// no spec update needs to be performed. This has to happen for the
|
||||
// case of rootfs, when a device has to be waited for after it has
|
||||
// been hotplugged. An equivalent Storage entry should be defined if
|
||||
// any mount needs to be performed afterwards.
|
||||
string container_path = 4;
|
||||
// Options allows the caller to define a list of options such as block
|
||||
// sizes, numbers of luns, ... which are very specific to every device
|
||||
// and cannot be generalized through extra fields.
|
||||
repeated string options = 5;
|
||||
}
|
||||
|
||||
message StringUser {
|
||||
string uid = 1;
|
||||
string gid = 2;
|
||||
repeated string additionalGids = 3;
|
||||
}
|
||||
|
||||
message CopyFileRequest {
|
||||
// Path is the destination file in the guest. It must be absolute,
|
||||
// canonical and below /run.
|
||||
string path = 1;
|
||||
// FileSize is the expected file size, for security reasons write operations
|
||||
// are made in a temporary file, once it has the expected size, it's moved
|
||||
// to the destination path.
|
||||
int64 file_size = 2;
|
||||
// FileMode is the file mode.
|
||||
uint32 file_mode = 3;
|
||||
// DirMode is the mode for the parent directories of destination path.
|
||||
uint32 dir_mode = 4;
|
||||
// Uid is the numeric user id.
|
||||
int32 uid = 5;
|
||||
// Gid is the numeric group id.
|
||||
int32 gid = 6;
|
||||
// Offset for the next write operation.
|
||||
int64 offset = 7;
|
||||
// Data to write in the destination file.
|
||||
bytes data = 8;
|
||||
}
|
||||
|
||||
message StartTracingRequest {
|
||||
}
|
||||
|
||||
message StopTracingRequest {
|
||||
}
|
||||
@@ -0,0 +1,144 @@
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto2";
|
||||
package gogoproto;
|
||||
|
||||
import "google/protobuf/descriptor.proto";
|
||||
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "GoGoProtos";
|
||||
option go_package = "github.com/gogo/protobuf/gogoproto";
|
||||
|
||||
extend google.protobuf.EnumOptions {
|
||||
optional bool goproto_enum_prefix = 62001;
|
||||
optional bool goproto_enum_stringer = 62021;
|
||||
optional bool enum_stringer = 62022;
|
||||
optional string enum_customname = 62023;
|
||||
optional bool enumdecl = 62024;
|
||||
}
|
||||
|
||||
extend google.protobuf.EnumValueOptions {
|
||||
optional string enumvalue_customname = 66001;
|
||||
}
|
||||
|
||||
extend google.protobuf.FileOptions {
|
||||
optional bool goproto_getters_all = 63001;
|
||||
optional bool goproto_enum_prefix_all = 63002;
|
||||
optional bool goproto_stringer_all = 63003;
|
||||
optional bool verbose_equal_all = 63004;
|
||||
optional bool face_all = 63005;
|
||||
optional bool gostring_all = 63006;
|
||||
optional bool populate_all = 63007;
|
||||
optional bool stringer_all = 63008;
|
||||
optional bool onlyone_all = 63009;
|
||||
|
||||
optional bool equal_all = 63013;
|
||||
optional bool description_all = 63014;
|
||||
optional bool testgen_all = 63015;
|
||||
optional bool benchgen_all = 63016;
|
||||
optional bool marshaler_all = 63017;
|
||||
optional bool unmarshaler_all = 63018;
|
||||
optional bool stable_marshaler_all = 63019;
|
||||
|
||||
optional bool sizer_all = 63020;
|
||||
|
||||
optional bool goproto_enum_stringer_all = 63021;
|
||||
optional bool enum_stringer_all = 63022;
|
||||
|
||||
optional bool unsafe_marshaler_all = 63023;
|
||||
optional bool unsafe_unmarshaler_all = 63024;
|
||||
|
||||
optional bool goproto_extensions_map_all = 63025;
|
||||
optional bool goproto_unrecognized_all = 63026;
|
||||
optional bool gogoproto_import = 63027;
|
||||
optional bool protosizer_all = 63028;
|
||||
optional bool compare_all = 63029;
|
||||
optional bool typedecl_all = 63030;
|
||||
optional bool enumdecl_all = 63031;
|
||||
|
||||
optional bool goproto_registration = 63032;
|
||||
optional bool messagename_all = 63033;
|
||||
|
||||
optional bool goproto_sizecache_all = 63034;
|
||||
optional bool goproto_unkeyed_all = 63035;
|
||||
}
|
||||
|
||||
extend google.protobuf.MessageOptions {
|
||||
optional bool goproto_getters = 64001;
|
||||
optional bool goproto_stringer = 64003;
|
||||
optional bool verbose_equal = 64004;
|
||||
optional bool face = 64005;
|
||||
optional bool gostring = 64006;
|
||||
optional bool populate = 64007;
|
||||
optional bool stringer = 67008;
|
||||
optional bool onlyone = 64009;
|
||||
|
||||
optional bool equal = 64013;
|
||||
optional bool description = 64014;
|
||||
optional bool testgen = 64015;
|
||||
optional bool benchgen = 64016;
|
||||
optional bool marshaler = 64017;
|
||||
optional bool unmarshaler = 64018;
|
||||
optional bool stable_marshaler = 64019;
|
||||
|
||||
optional bool sizer = 64020;
|
||||
|
||||
optional bool unsafe_marshaler = 64023;
|
||||
optional bool unsafe_unmarshaler = 64024;
|
||||
|
||||
optional bool goproto_extensions_map = 64025;
|
||||
optional bool goproto_unrecognized = 64026;
|
||||
|
||||
optional bool protosizer = 64028;
|
||||
optional bool compare = 64029;
|
||||
|
||||
optional bool typedecl = 64030;
|
||||
|
||||
optional bool messagename = 64033;
|
||||
|
||||
optional bool goproto_sizecache = 64034;
|
||||
optional bool goproto_unkeyed = 64035;
|
||||
}
|
||||
|
||||
extend google.protobuf.FieldOptions {
|
||||
optional bool nullable = 65001;
|
||||
optional bool embed = 65002;
|
||||
optional string customtype = 65003;
|
||||
optional string customname = 65004;
|
||||
optional string jsontag = 65005;
|
||||
optional string moretags = 65006;
|
||||
optional string casttype = 65007;
|
||||
optional string castkey = 65008;
|
||||
optional string castvalue = 65009;
|
||||
|
||||
optional bool stdtime = 65010;
|
||||
optional bool stdduration = 65011;
|
||||
optional bool wktpointer = 65012;
|
||||
|
||||
}
|
||||
@@ -0,0 +1,59 @@
|
||||
//
|
||||
// Copyright 2018 Intel Corporation.
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols";
|
||||
|
||||
package types;
|
||||
|
||||
enum IPFamily {
|
||||
v4 = 0;
|
||||
v6 = 1;
|
||||
}
|
||||
|
||||
message IPAddress {
|
||||
IPFamily family = 1;
|
||||
string address = 2;
|
||||
string mask = 3;
|
||||
}
|
||||
|
||||
message Interface {
|
||||
string device = 1;
|
||||
string name = 2;
|
||||
repeated IPAddress IPAddresses = 3;
|
||||
uint64 mtu = 4;
|
||||
string hwAddr = 5;
|
||||
|
||||
// pciAddr is the PCI address in the format "bridgeAddr/deviceAddr".
|
||||
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
|
||||
// while deviceAddr is the address at which the network device is attached on the bridge.
|
||||
string pciAddr = 6;
|
||||
|
||||
// Type defines the type of interface described by this structure.
|
||||
// The expected values are the one that are defined by the netlink
|
||||
// library, regarding each type of link. Here is a non exhaustive
|
||||
// list: "veth", "macvtap", "vlan", "macvlan", "tap", ...
|
||||
string type = 7;
|
||||
uint32 raw_flags = 8;
|
||||
}
|
||||
|
||||
message Route {
|
||||
string dest = 1;
|
||||
string gateway = 2;
|
||||
string device = 3;
|
||||
string source = 4;
|
||||
uint32 scope = 5;
|
||||
}
|
||||
|
||||
message ARPNeighbor {
|
||||
IPAddress toIPAddress = 1;
|
||||
string device = 2;
|
||||
string lladdr = 3;
|
||||
int32 state = 4;
|
||||
int32 flags = 5;
|
||||
}
|
||||
887
src/agent/protocols/protos/google/protobuf/descriptor.proto
Normal file
887
src/agent/protocols/protos/google/protobuf/descriptor.proto
Normal file
@@ -0,0 +1,887 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Author: kenton@google.com (Kenton Varda)
|
||||
// Based on original Protocol Buffers design by
|
||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
||||
//
|
||||
// The messages in this file describe the definitions found in .proto files.
|
||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
||||
// without any other information (e.g. without reading its imports).
|
||||
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DescriptorProtos";
|
||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
||||
option objc_class_prefix = "GPB";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// descriptor.proto must be optimized for speed because reflection-based
|
||||
// algorithms don't work during bootstrapping.
|
||||
option optimize_for = SPEED;
|
||||
|
||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
||||
// files it parses.
|
||||
message FileDescriptorSet {
|
||||
repeated FileDescriptorProto file = 1;
|
||||
}
|
||||
|
||||
// Describes a complete .proto file.
|
||||
message FileDescriptorProto {
|
||||
optional string name = 1; // file name, relative to root of source tree
|
||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
||||
|
||||
// Names of files imported by this file.
|
||||
repeated string dependency = 3;
|
||||
// Indexes of the public imported files in the dependency list above.
|
||||
repeated int32 public_dependency = 10;
|
||||
// Indexes of the weak imported files in the dependency list.
|
||||
// For Google-internal migration only. Do not use.
|
||||
repeated int32 weak_dependency = 11;
|
||||
|
||||
// All top-level definitions in this file.
|
||||
repeated DescriptorProto message_type = 4;
|
||||
repeated EnumDescriptorProto enum_type = 5;
|
||||
repeated ServiceDescriptorProto service = 6;
|
||||
repeated FieldDescriptorProto extension = 7;
|
||||
|
||||
optional FileOptions options = 8;
|
||||
|
||||
// This field contains optional information about the original source code.
|
||||
// You may safely remove this entire field without harming runtime
|
||||
// functionality of the descriptors -- the information is needed only by
|
||||
// development tools.
|
||||
optional SourceCodeInfo source_code_info = 9;
|
||||
|
||||
// The syntax of the proto file.
|
||||
// The supported values are "proto2" and "proto3".
|
||||
optional string syntax = 12;
|
||||
}
|
||||
|
||||
// Describes a message type.
|
||||
message DescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated FieldDescriptorProto field = 2;
|
||||
repeated FieldDescriptorProto extension = 6;
|
||||
|
||||
repeated DescriptorProto nested_type = 3;
|
||||
repeated EnumDescriptorProto enum_type = 4;
|
||||
|
||||
message ExtensionRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Exclusive.
|
||||
|
||||
optional ExtensionRangeOptions options = 3;
|
||||
}
|
||||
repeated ExtensionRange extension_range = 5;
|
||||
|
||||
repeated OneofDescriptorProto oneof_decl = 8;
|
||||
|
||||
optional MessageOptions options = 7;
|
||||
|
||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
||||
// fields or extension ranges in the same message. Reserved ranges may
|
||||
// not overlap.
|
||||
message ReservedRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Exclusive.
|
||||
}
|
||||
repeated ReservedRange reserved_range = 9;
|
||||
// Reserved field names, which may not be used by fields in the same message.
|
||||
// A given name may only be reserved once.
|
||||
repeated string reserved_name = 10;
|
||||
}
|
||||
|
||||
message ExtensionRangeOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
// Describes a field within a message.
|
||||
message FieldDescriptorProto {
|
||||
enum Type {
|
||||
// 0 is reserved for errors.
|
||||
// Order is weird for historical reasons.
|
||||
TYPE_DOUBLE = 1;
|
||||
TYPE_FLOAT = 2;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
||||
// negative values are likely.
|
||||
TYPE_INT64 = 3;
|
||||
TYPE_UINT64 = 4;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
||||
// negative values are likely.
|
||||
TYPE_INT32 = 5;
|
||||
TYPE_FIXED64 = 6;
|
||||
TYPE_FIXED32 = 7;
|
||||
TYPE_BOOL = 8;
|
||||
TYPE_STRING = 9;
|
||||
// Tag-delimited aggregate.
|
||||
// Group type is deprecated and not supported in proto3. However, Proto3
|
||||
// implementations should still be able to parse the group wire format and
|
||||
// treat group fields as unknown fields.
|
||||
TYPE_GROUP = 10;
|
||||
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
||||
|
||||
// New in version 2.
|
||||
TYPE_BYTES = 12;
|
||||
TYPE_UINT32 = 13;
|
||||
TYPE_ENUM = 14;
|
||||
TYPE_SFIXED32 = 15;
|
||||
TYPE_SFIXED64 = 16;
|
||||
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
||||
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
||||
}
|
||||
|
||||
enum Label {
|
||||
// 0 is reserved for errors
|
||||
LABEL_OPTIONAL = 1;
|
||||
LABEL_REQUIRED = 2;
|
||||
LABEL_REPEATED = 3;
|
||||
}
|
||||
|
||||
optional string name = 1;
|
||||
optional int32 number = 3;
|
||||
optional Label label = 4;
|
||||
|
||||
// If type_name is set, this need not be set. If both this and type_name
|
||||
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
||||
optional Type type = 5;
|
||||
|
||||
// For message and enum types, this is the name of the type. If the name
|
||||
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
||||
// rules are used to find the type (i.e. first the nested types within this
|
||||
// message are searched, then within the parent, on up to the root
|
||||
// namespace).
|
||||
optional string type_name = 6;
|
||||
|
||||
// For extensions, this is the name of the type being extended. It is
|
||||
// resolved in the same manner as type_name.
|
||||
optional string extendee = 2;
|
||||
|
||||
// For numeric types, contains the original text representation of the value.
|
||||
// For booleans, "true" or "false".
|
||||
// For strings, contains the default text contents (not escaped in any way).
|
||||
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
||||
// TODO(kenton): Base-64 encode?
|
||||
optional string default_value = 7;
|
||||
|
||||
// If set, gives the index of a oneof in the containing type's oneof_decl
|
||||
// list. This field is a member of that oneof.
|
||||
optional int32 oneof_index = 9;
|
||||
|
||||
// JSON name of this field. The value is set by protocol compiler. If the
|
||||
// user has set a "json_name" option on this field, that option's value
|
||||
// will be used. Otherwise, it's deduced from the field's name by converting
|
||||
// it to camelCase.
|
||||
optional string json_name = 10;
|
||||
|
||||
optional FieldOptions options = 8;
|
||||
}
|
||||
|
||||
// Describes a oneof.
|
||||
message OneofDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional OneofOptions options = 2;
|
||||
}
|
||||
|
||||
// Describes an enum type.
|
||||
message EnumDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated EnumValueDescriptorProto value = 2;
|
||||
|
||||
optional EnumOptions options = 3;
|
||||
|
||||
// Range of reserved numeric values. Reserved values may not be used by
|
||||
// entries in the same enum. Reserved ranges may not overlap.
|
||||
//
|
||||
// Note that this is distinct from DescriptorProto.ReservedRange in that it
|
||||
// is inclusive such that it can appropriately represent the entire int32
|
||||
// domain.
|
||||
message EnumReservedRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Inclusive.
|
||||
}
|
||||
|
||||
// Range of reserved numeric values. Reserved numeric values may not be used
|
||||
// by enum values in the same enum declaration. Reserved ranges may not
|
||||
// overlap.
|
||||
repeated EnumReservedRange reserved_range = 4;
|
||||
|
||||
// Reserved enum value names, which may not be reused. A given name may only
|
||||
// be reserved once.
|
||||
repeated string reserved_name = 5;
|
||||
}
|
||||
|
||||
// Describes a value within an enum.
|
||||
message EnumValueDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional int32 number = 2;
|
||||
|
||||
optional EnumValueOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a service.
|
||||
message ServiceDescriptorProto {
|
||||
optional string name = 1;
|
||||
repeated MethodDescriptorProto method = 2;
|
||||
|
||||
optional ServiceOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a method of a service.
|
||||
message MethodDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
// Input and output type names. These are resolved in the same way as
|
||||
// FieldDescriptorProto.type_name, but must refer to a message type.
|
||||
optional string input_type = 2;
|
||||
optional string output_type = 3;
|
||||
|
||||
optional MethodOptions options = 4;
|
||||
|
||||
// Identifies if client streams multiple client messages
|
||||
optional bool client_streaming = 5 [default = false];
|
||||
// Identifies if server streams multiple server messages
|
||||
optional bool server_streaming = 6 [default = false];
|
||||
}
|
||||
|
||||
|
||||
// ===================================================================
|
||||
// Options
|
||||
|
||||
// Each of the definitions above may have "options" attached. These are
|
||||
// just annotations which may cause code to be generated slightly differently
|
||||
// or may contain hints for code that manipulates protocol messages.
|
||||
//
|
||||
// Clients may define custom options as extensions of the *Options messages.
|
||||
// These extensions may not yet be known at parsing time, so the parser cannot
|
||||
// store the values in them. Instead it stores them in a field in the *Options
|
||||
// message called uninterpreted_option. This field must have the same name
|
||||
// across all *Options messages. We then use this field to populate the
|
||||
// extensions when we build a descriptor, at which point all protos have been
|
||||
// parsed and so all extensions are known.
|
||||
//
|
||||
// Extension numbers for custom options may be chosen as follows:
|
||||
// * For options which will only be used within a single application or
|
||||
// organization, or for experimental options, use field numbers 50000
|
||||
// through 99999. It is up to you to ensure that you do not use the
|
||||
// same number for multiple options.
|
||||
// * For options which will be published and used publicly by multiple
|
||||
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
||||
// to reserve extension numbers. Simply provide your project name (e.g.
|
||||
// Objective-C plugin) and your project website (if available) -- there's no
|
||||
// need to explain how you intend to use them. Usually you only need one
|
||||
// extension number. You can declare multiple options with only one extension
|
||||
// number by putting them in a sub-message. See the Custom Options section of
|
||||
// the docs for examples:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto#options
|
||||
// If this turns out to be popular, a web service will be set up
|
||||
// to automatically assign option numbers.
|
||||
|
||||
message FileOptions {
|
||||
|
||||
// Sets the Java package where classes generated from this .proto will be
|
||||
// placed. By default, the proto package is used, but this is often
|
||||
// inappropriate because proto packages do not normally start with backwards
|
||||
// domain names.
|
||||
optional string java_package = 1;
|
||||
|
||||
|
||||
// If set, all the classes from the .proto file are wrapped in a single
|
||||
// outer class with the given name. This applies to both Proto1
|
||||
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
||||
// a .proto always translates to a single class, but you may want to
|
||||
// explicitly choose the class name).
|
||||
optional string java_outer_classname = 8;
|
||||
|
||||
// If set true, then the Java code generator will generate a separate .java
|
||||
// file for each top-level message, enum, and service defined in the .proto
|
||||
// file. Thus, these types will *not* be nested inside the outer class
|
||||
// named by java_outer_classname. However, the outer class will still be
|
||||
// generated to contain the file's getDescriptor() method as well as any
|
||||
// top-level extensions defined in the file.
|
||||
optional bool java_multiple_files = 10 [default = false];
|
||||
|
||||
// This option does nothing.
|
||||
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
|
||||
|
||||
// If set true, then the Java2 code generator will generate code that
|
||||
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
||||
// byte sequence to a string field.
|
||||
// Message reflection will do the same.
|
||||
// However, an extension field still accepts non-UTF-8 byte sequences.
|
||||
// This option has no effect on when used with the lite runtime.
|
||||
optional bool java_string_check_utf8 = 27 [default = false];
|
||||
|
||||
|
||||
// Generated classes can be optimized for speed or code size.
|
||||
enum OptimizeMode {
|
||||
SPEED = 1; // Generate complete code for parsing, serialization,
|
||||
// etc.
|
||||
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
||||
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
||||
}
|
||||
optional OptimizeMode optimize_for = 9 [default = SPEED];
|
||||
|
||||
// Sets the Go package where structs generated from this .proto will be
|
||||
// placed. If omitted, the Go package will be derived from the following:
|
||||
// - The basename of the package import path, if provided.
|
||||
// - Otherwise, the package statement in the .proto file, if present.
|
||||
// - Otherwise, the basename of the .proto file, without extension.
|
||||
optional string go_package = 11;
|
||||
|
||||
|
||||
|
||||
|
||||
// Should generic services be generated in each language? "Generic" services
|
||||
// are not specific to any particular RPC system. They are generated by the
|
||||
// main code generators in each language (without additional plugins).
|
||||
// Generic services were the only kind of service generation supported by
|
||||
// early versions of google.protobuf.
|
||||
//
|
||||
// Generic services are now considered deprecated in favor of using plugins
|
||||
// that generate code specific to your particular RPC system. Therefore,
|
||||
// these default to false. Old code which depends on generic services should
|
||||
// explicitly set them to true.
|
||||
optional bool cc_generic_services = 16 [default = false];
|
||||
optional bool java_generic_services = 17 [default = false];
|
||||
optional bool py_generic_services = 18 [default = false];
|
||||
optional bool php_generic_services = 42 [default = false];
|
||||
|
||||
// Is this file deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for everything in the file, or it will be completely ignored; in the very
|
||||
// least, this is a formalization for deprecating files.
|
||||
optional bool deprecated = 23 [default = false];
|
||||
|
||||
// Enables the use of arenas for the proto messages in this file. This applies
|
||||
// only to generated classes for C++.
|
||||
optional bool cc_enable_arenas = 31 [default = false];
|
||||
|
||||
|
||||
// Sets the objective c class prefix which is prepended to all objective c
|
||||
// generated classes from this .proto. There is no default.
|
||||
optional string objc_class_prefix = 36;
|
||||
|
||||
// Namespace for generated classes; defaults to the package.
|
||||
optional string csharp_namespace = 37;
|
||||
|
||||
// By default Swift generators will take the proto package and CamelCase it
|
||||
// replacing '.' with underscore and use that to prefix the types/symbols
|
||||
// defined. When this options is provided, they will use this value instead
|
||||
// to prefix the types/symbols defined.
|
||||
optional string swift_prefix = 39;
|
||||
|
||||
// Sets the php class prefix which is prepended to all php generated classes
|
||||
// from this .proto. Default is empty.
|
||||
optional string php_class_prefix = 40;
|
||||
|
||||
// Use this option to change the namespace of php generated classes. Default
|
||||
// is empty. When this option is empty, the package name will be used for
|
||||
// determining the namespace.
|
||||
optional string php_namespace = 41;
|
||||
|
||||
// Use this option to change the namespace of php generated metadata classes.
|
||||
// Default is empty. When this option is empty, the proto file name will be
|
||||
// used for determining the namespace.
|
||||
optional string php_metadata_namespace = 44;
|
||||
|
||||
// Use this option to change the package of ruby generated classes. Default
|
||||
// is empty. When this option is not set, the package name will be used for
|
||||
// determining the ruby package.
|
||||
optional string ruby_package = 45;
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here.
|
||||
// See the documentation for the "Options" section above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message.
|
||||
// See the documentation for the "Options" section above.
|
||||
extensions 1000 to max;
|
||||
|
||||
reserved 38;
|
||||
}
|
||||
|
||||
message MessageOptions {
|
||||
// Set true to use the old proto1 MessageSet wire format for extensions.
|
||||
// This is provided for backwards-compatibility with the MessageSet wire
|
||||
// format. You should not use this for any other reason: It's less
|
||||
// efficient, has fewer features, and is more complicated.
|
||||
//
|
||||
// The message must be defined exactly as follows:
|
||||
// message Foo {
|
||||
// option message_set_wire_format = true;
|
||||
// extensions 4 to max;
|
||||
// }
|
||||
// Note that the message cannot have any defined fields; MessageSets only
|
||||
// have extensions.
|
||||
//
|
||||
// All extensions of your type must be singular messages; e.g. they cannot
|
||||
// be int32s, enums, or repeated messages.
|
||||
//
|
||||
// Because this is an option, the above two restrictions are not enforced by
|
||||
// the protocol compiler.
|
||||
optional bool message_set_wire_format = 1 [default = false];
|
||||
|
||||
// Disables the generation of the standard "descriptor()" accessor, which can
|
||||
// conflict with a field of the same name. This is meant to make migration
|
||||
// from proto1 easier; new code should avoid fields named "descriptor".
|
||||
optional bool no_standard_descriptor_accessor = 2 [default = false];
|
||||
|
||||
// Is this message deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the message, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating messages.
|
||||
optional bool deprecated = 3 [default = false];
|
||||
|
||||
// Whether the message is an automatically generated map entry type for the
|
||||
// maps field.
|
||||
//
|
||||
// For maps fields:
|
||||
// map<KeyType, ValueType> map_field = 1;
|
||||
// The parsed descriptor looks like:
|
||||
// message MapFieldEntry {
|
||||
// option map_entry = true;
|
||||
// optional KeyType key = 1;
|
||||
// optional ValueType value = 2;
|
||||
// }
|
||||
// repeated MapFieldEntry map_field = 1;
|
||||
//
|
||||
// Implementations may choose not to generate the map_entry=true message, but
|
||||
// use a native map in the target language to hold the keys and values.
|
||||
// The reflection APIs in such implementations still need to work as
|
||||
// if the field is a repeated message field.
|
||||
//
|
||||
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
||||
// instead. The option should only be implicitly set by the proto compiler
|
||||
// parser.
|
||||
optional bool map_entry = 7;
|
||||
|
||||
reserved 8; // javalite_serializable
|
||||
reserved 9; // javanano_as_lite
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message FieldOptions {
|
||||
// The ctype option instructs the C++ code generator to use a different
|
||||
// representation of the field than it normally would. See the specific
|
||||
// options below. This option is not yet implemented in the open source
|
||||
// release -- sorry, we'll try to include it in a future version!
|
||||
optional CType ctype = 1 [default = STRING];
|
||||
enum CType {
|
||||
// Default mode.
|
||||
STRING = 0;
|
||||
|
||||
CORD = 1;
|
||||
|
||||
STRING_PIECE = 2;
|
||||
}
|
||||
// The packed option can be enabled for repeated primitive fields to enable
|
||||
// a more efficient representation on the wire. Rather than repeatedly
|
||||
// writing the tag and type for each element, the entire array is encoded as
|
||||
// a single length-delimited blob. In proto3, only explicit setting it to
|
||||
// false will avoid using packed encoding.
|
||||
optional bool packed = 2;
|
||||
|
||||
// The jstype option determines the JavaScript type used for values of the
|
||||
// field. The option is permitted only for 64 bit integral and fixed types
|
||||
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
||||
// is represented as JavaScript string, which avoids loss of precision that
|
||||
// can happen when a large value is converted to a floating point JavaScript.
|
||||
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
||||
// use the JavaScript "number" type. The behavior of the default option
|
||||
// JS_NORMAL is implementation dependent.
|
||||
//
|
||||
// This option is an enum to permit additional types to be added, e.g.
|
||||
// goog.math.Integer.
|
||||
optional JSType jstype = 6 [default = JS_NORMAL];
|
||||
enum JSType {
|
||||
// Use the default type.
|
||||
JS_NORMAL = 0;
|
||||
|
||||
// Use JavaScript strings.
|
||||
JS_STRING = 1;
|
||||
|
||||
// Use JavaScript numbers.
|
||||
JS_NUMBER = 2;
|
||||
}
|
||||
|
||||
// Should this field be parsed lazily? Lazy applies only to message-type
|
||||
// fields. It means that when the outer message is initially parsed, the
|
||||
// inner message's contents will not be parsed but instead stored in encoded
|
||||
// form. The inner message will actually be parsed when it is first accessed.
|
||||
//
|
||||
// This is only a hint. Implementations are free to choose whether to use
|
||||
// eager or lazy parsing regardless of the value of this option. However,
|
||||
// setting this option true suggests that the protocol author believes that
|
||||
// using lazy parsing on this field is worth the additional bookkeeping
|
||||
// overhead typically needed to implement it.
|
||||
//
|
||||
// This option does not affect the public interface of any generated code;
|
||||
// all method signatures remain the same. Furthermore, thread-safety of the
|
||||
// interface is not affected by this option; const methods remain safe to
|
||||
// call from multiple threads concurrently, while non-const methods continue
|
||||
// to require exclusive access.
|
||||
//
|
||||
//
|
||||
// Note that implementations may choose not to check required fields within
|
||||
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
||||
// may return true even if the inner message has missing required fields.
|
||||
// This is necessary because otherwise the inner message would have to be
|
||||
// parsed in order to perform the check, defeating the purpose of lazy
|
||||
// parsing. An implementation which chooses not to check required fields
|
||||
// must be consistent about it. That is, for any particular sub-message, the
|
||||
// implementation must either *always* check its required fields, or *never*
|
||||
// check its required fields, regardless of whether or not the message has
|
||||
// been parsed.
|
||||
optional bool lazy = 5 [default = false];
|
||||
|
||||
// Is this field deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for accessors, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating fields.
|
||||
optional bool deprecated = 3 [default = false];
|
||||
|
||||
// For Google-internal migration only. Do not use.
|
||||
optional bool weak = 10 [default = false];
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
|
||||
reserved 4; // removed jtype
|
||||
}
|
||||
|
||||
message OneofOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumOptions {
|
||||
|
||||
// Set this option to true to allow mapping different tag names to the same
|
||||
// value.
|
||||
optional bool allow_alias = 2;
|
||||
|
||||
// Is this enum deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating enums.
|
||||
optional bool deprecated = 3 [default = false];
|
||||
|
||||
reserved 5; // javanano_as_lite
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumValueOptions {
|
||||
// Is this enum value deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum value, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating enum values.
|
||||
optional bool deprecated = 1 [default = false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message ServiceOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this service deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the service, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating services.
|
||||
optional bool deprecated = 33 [default = false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message MethodOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this method deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the method, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating methods.
|
||||
optional bool deprecated = 33 [default = false];
|
||||
|
||||
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
||||
// or neither? HTTP based RPC implementation may choose GET verb for safe
|
||||
// methods, and PUT verb for idempotent methods instead of the default POST.
|
||||
enum IdempotencyLevel {
|
||||
IDEMPOTENCY_UNKNOWN = 0;
|
||||
NO_SIDE_EFFECTS = 1; // implies idempotent
|
||||
IDEMPOTENT = 2; // idempotent, but may have side effects
|
||||
}
|
||||
optional IdempotencyLevel idempotency_level = 34
|
||||
[default = IDEMPOTENCY_UNKNOWN];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
|
||||
// A message representing a option the parser does not recognize. This only
|
||||
// appears in options protos created by the compiler::Parser class.
|
||||
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
||||
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
||||
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
||||
// in them.
|
||||
message UninterpretedOption {
|
||||
// The name of the uninterpreted option. Each string represents a segment in
|
||||
// a dot-separated name. is_extension is true iff a segment represents an
|
||||
// extension (denoted with parentheses in options specs in .proto files).
|
||||
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
||||
// "foo.(bar.baz).qux".
|
||||
message NamePart {
|
||||
required string name_part = 1;
|
||||
required bool is_extension = 2;
|
||||
}
|
||||
repeated NamePart name = 2;
|
||||
|
||||
// The value of the uninterpreted option, in whatever type the tokenizer
|
||||
// identified it as during parsing. Exactly one of these should be set.
|
||||
optional string identifier_value = 3;
|
||||
optional uint64 positive_int_value = 4;
|
||||
optional int64 negative_int_value = 5;
|
||||
optional double double_value = 6;
|
||||
optional bytes string_value = 7;
|
||||
optional string aggregate_value = 8;
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
// Optional source code info
|
||||
|
||||
// Encapsulates information about the original source file from which a
|
||||
// FileDescriptorProto was generated.
|
||||
message SourceCodeInfo {
|
||||
// A Location identifies a piece of source code in a .proto file which
|
||||
// corresponds to a particular definition. This information is intended
|
||||
// to be useful to IDEs, code indexers, documentation generators, and similar
|
||||
// tools.
|
||||
//
|
||||
// For example, say we have a file like:
|
||||
// message Foo {
|
||||
// optional string foo = 1;
|
||||
// }
|
||||
// Let's look at just the field definition:
|
||||
// optional string foo = 1;
|
||||
// ^ ^^ ^^ ^ ^^^
|
||||
// a bc de f ghi
|
||||
// We have the following locations:
|
||||
// span path represents
|
||||
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
||||
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
||||
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
||||
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
||||
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
||||
//
|
||||
// Notes:
|
||||
// - A location may refer to a repeated field itself (i.e. not to any
|
||||
// particular index within it). This is used whenever a set of elements are
|
||||
// logically enclosed in a single code segment. For example, an entire
|
||||
// extend block (possibly containing multiple extension definitions) will
|
||||
// have an outer location whose path refers to the "extensions" repeated
|
||||
// field without an index.
|
||||
// - Multiple locations may have the same path. This happens when a single
|
||||
// logical declaration is spread out across multiple places. The most
|
||||
// obvious example is the "extend" block again -- there may be multiple
|
||||
// extend blocks in the same scope, each of which will have the same path.
|
||||
// - A location's span is not always a subset of its parent's span. For
|
||||
// example, the "extendee" of an extension declaration appears at the
|
||||
// beginning of the "extend" block and is shared by all extensions within
|
||||
// the block.
|
||||
// - Just because a location's span is a subset of some other location's span
|
||||
// does not mean that it is a descendant. For example, a "group" defines
|
||||
// both a type and a field in a single declaration. Thus, the locations
|
||||
// corresponding to the type and field and their components will overlap.
|
||||
// - Code which tries to interpret locations should probably be designed to
|
||||
// ignore those that it doesn't understand, as more types of locations could
|
||||
// be recorded in the future.
|
||||
repeated Location location = 1;
|
||||
message Location {
|
||||
// Identifies which part of the FileDescriptorProto was defined at this
|
||||
// location.
|
||||
//
|
||||
// Each element is a field number or an index. They form a path from
|
||||
// the root FileDescriptorProto to the place where the definition. For
|
||||
// example, this path:
|
||||
// [ 4, 3, 2, 7, 1 ]
|
||||
// refers to:
|
||||
// file.message_type(3) // 4, 3
|
||||
// .field(7) // 2, 7
|
||||
// .name() // 1
|
||||
// This is because FileDescriptorProto.message_type has field number 4:
|
||||
// repeated DescriptorProto message_type = 4;
|
||||
// and DescriptorProto.field has field number 2:
|
||||
// repeated FieldDescriptorProto field = 2;
|
||||
// and FieldDescriptorProto.name has field number 1:
|
||||
// optional string name = 1;
|
||||
//
|
||||
// Thus, the above path gives the location of a field name. If we removed
|
||||
// the last element:
|
||||
// [ 4, 3, 2, 7 ]
|
||||
// this path refers to the whole field declaration (from the beginning
|
||||
// of the label to the terminating semicolon).
|
||||
repeated int32 path = 1 [packed = true];
|
||||
|
||||
// Always has exactly three or four elements: start line, start column,
|
||||
// end line (optional, otherwise assumed same as start line), end column.
|
||||
// These are packed into a single field for efficiency. Note that line
|
||||
// and column numbers are zero-based -- typically you will want to add
|
||||
// 1 to each before displaying to a user.
|
||||
repeated int32 span = 2 [packed = true];
|
||||
|
||||
// If this SourceCodeInfo represents a complete declaration, these are any
|
||||
// comments appearing before and after the declaration which appear to be
|
||||
// attached to the declaration.
|
||||
//
|
||||
// A series of line comments appearing on consecutive lines, with no other
|
||||
// tokens appearing on those lines, will be treated as a single comment.
|
||||
//
|
||||
// leading_detached_comments will keep paragraphs of comments that appear
|
||||
// before (but not connected to) the current element. Each paragraph,
|
||||
// separated by empty lines, will be one comment element in the repeated
|
||||
// field.
|
||||
//
|
||||
// Only the comment content is provided; comment markers (e.g. //) are
|
||||
// stripped out. For block comments, leading whitespace and an asterisk
|
||||
// will be stripped from the beginning of each line other than the first.
|
||||
// Newlines are included in the output.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// optional int32 foo = 1; // Comment attached to foo.
|
||||
// // Comment attached to bar.
|
||||
// optional int32 bar = 2;
|
||||
//
|
||||
// optional string baz = 3;
|
||||
// // Comment attached to baz.
|
||||
// // Another line attached to baz.
|
||||
//
|
||||
// // Comment attached to qux.
|
||||
// //
|
||||
// // Another line attached to qux.
|
||||
// optional double qux = 4;
|
||||
//
|
||||
// // Detached comment for corge. This is not leading or trailing comments
|
||||
// // to qux or corge because there are blank lines separating it from
|
||||
// // both.
|
||||
//
|
||||
// // Detached comment for corge paragraph 2.
|
||||
//
|
||||
// optional string corge = 5;
|
||||
// /* Block comment attached
|
||||
// * to corge. Leading asterisks
|
||||
// * will be removed. */
|
||||
// /* Block comment attached to
|
||||
// * grault. */
|
||||
// optional int32 grault = 6;
|
||||
//
|
||||
// // ignored detached comments.
|
||||
optional string leading_comments = 3;
|
||||
optional string trailing_comments = 4;
|
||||
repeated string leading_detached_comments = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// Describes the relationship between generated code and its original source
|
||||
// file. A GeneratedCodeInfo message is associated with only one generated
|
||||
// source file, but may contain references to different source .proto files.
|
||||
message GeneratedCodeInfo {
|
||||
// An Annotation connects some span of text in generated code to an element
|
||||
// of its generating .proto file.
|
||||
repeated Annotation annotation = 1;
|
||||
message Annotation {
|
||||
// Identifies the element in the original source .proto file. This field
|
||||
// is formatted the same as SourceCodeInfo.Location.path.
|
||||
repeated int32 path = 1 [packed = true];
|
||||
|
||||
// Identifies the filesystem path to the original source .proto.
|
||||
optional string source_file = 2;
|
||||
|
||||
// Identifies the starting offset in bytes in the generated code
|
||||
// that relates to the identified object.
|
||||
optional int32 begin = 3;
|
||||
|
||||
// Identifies the ending offset in bytes in the generated code that
|
||||
// relates to the identified offset. The end offset should be one past
|
||||
// the last relevant byte (so the length of the text = end - begin).
|
||||
optional int32 end = 4;
|
||||
}
|
||||
}
|
||||
52
src/agent/protocols/protos/google/protobuf/empty.proto
Normal file
52
src/agent/protocols/protos/google/protobuf/empty.proto
Normal file
@@ -0,0 +1,52 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "types";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "EmptyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||
message Empty {}
|
||||
120
src/agent/protocols/protos/google/protobuf/wrappers.proto
Normal file
120
src/agent/protocols/protos/google/protobuf/wrappers.proto
Normal file
@@ -0,0 +1,120 @@
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Wrappers for primitive (non-message) types. These types are useful
|
||||
// for embedding primitives in the `google.protobuf.Any` type and for places
|
||||
// where we need to distinguish between the absence of a primitive
|
||||
// typed field and its default value.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "github.com/golang/protobuf/ptypes/wrappers";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "WrappersProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// Wrapper message for `double`.
|
||||
//
|
||||
// The JSON representation for `DoubleValue` is JSON number.
|
||||
message DoubleValue {
|
||||
// The double value.
|
||||
double value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `float`.
|
||||
//
|
||||
// The JSON representation for `FloatValue` is JSON number.
|
||||
message FloatValue {
|
||||
// The float value.
|
||||
float value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `int64`.
|
||||
//
|
||||
// The JSON representation for `Int64Value` is JSON string.
|
||||
message Int64Value {
|
||||
// The int64 value.
|
||||
int64 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `uint64`.
|
||||
//
|
||||
// The JSON representation for `UInt64Value` is JSON string.
|
||||
message UInt64Value {
|
||||
// The uint64 value.
|
||||
uint64 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `int32`.
|
||||
//
|
||||
// The JSON representation for `Int32Value` is JSON number.
|
||||
message Int32Value {
|
||||
// The int32 value.
|
||||
int32 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `uint32`.
|
||||
//
|
||||
// The JSON representation for `UInt32Value` is JSON number.
|
||||
message UInt32Value {
|
||||
// The uint32 value.
|
||||
uint32 value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `bool`.
|
||||
//
|
||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
||||
message BoolValue {
|
||||
// The bool value.
|
||||
bool value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `string`.
|
||||
//
|
||||
// The JSON representation for `StringValue` is JSON string.
|
||||
message StringValue {
|
||||
// The string value.
|
||||
string value = 1;
|
||||
}
|
||||
|
||||
// Wrapper message for `bytes`.
|
||||
//
|
||||
// The JSON representation for `BytesValue` is JSON string.
|
||||
message BytesValue {
|
||||
// The bytes value.
|
||||
bytes value = 1;
|
||||
}
|
||||
42
src/agent/protocols/protos/health.proto
Normal file
42
src/agent/protocols/protos/health.proto
Normal file
@@ -0,0 +1,42 @@
|
||||
//
|
||||
// Copyright 2017 HyperHQ Inc.
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc";
|
||||
|
||||
package grpc;
|
||||
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
|
||||
option (gogoproto.equal_all) = true;
|
||||
option (gogoproto.populate_all) = true;
|
||||
option (gogoproto.testgen_all) = true;
|
||||
option (gogoproto.benchgen_all) = true;
|
||||
|
||||
message CheckRequest {
|
||||
string service = 1;
|
||||
}
|
||||
|
||||
message HealthCheckResponse {
|
||||
enum ServingStatus {
|
||||
UNKNOWN = 0;
|
||||
SERVING = 1;
|
||||
NOT_SERVING = 2;
|
||||
}
|
||||
ServingStatus status = 1;
|
||||
}
|
||||
|
||||
message VersionCheckResponse {
|
||||
string grpc_version = 1;
|
||||
string agent_version = 2;
|
||||
}
|
||||
|
||||
service Health {
|
||||
rpc Check(CheckRequest) returns (HealthCheckResponse);
|
||||
rpc Version(CheckRequest) returns (VersionCheckResponse);
|
||||
}
|
||||
465
src/agent/protocols/protos/oci.proto
Normal file
465
src/agent/protocols/protos/oci.proto
Normal file
@@ -0,0 +1,465 @@
|
||||
//
|
||||
// Copyright (c) 2017 Intel Corporation
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc";
|
||||
|
||||
package grpc;
|
||||
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
option (gogoproto.equal_all) = true;
|
||||
option (gogoproto.populate_all) = true;
|
||||
option (gogoproto.testgen_all) = true;
|
||||
option (gogoproto.benchgen_all) = true;
|
||||
|
||||
message Spec {
|
||||
// Version of the Open Container Initiative Runtime Specification with which the bundle complies.
|
||||
string Version = 1;
|
||||
|
||||
// Process configures the container process.
|
||||
Process Process = 2;
|
||||
|
||||
// Root configures the container's root filesystem.
|
||||
Root Root = 3;
|
||||
|
||||
// Hostname configures the container's hostname.
|
||||
string Hostname = 4;
|
||||
|
||||
// Mounts configures additional mounts (on top of Root).
|
||||
repeated Mount Mounts = 5 [(gogoproto.nullable) = false];
|
||||
|
||||
// Hooks configures callbacks for container lifecycle events.
|
||||
Hooks Hooks = 6;
|
||||
|
||||
// Annotations contains arbitrary metadata for the container.
|
||||
map<string, string> Annotations = 7;
|
||||
|
||||
// Linux is platform-specific configuration for Linux based containers.
|
||||
Linux Linux = 8;
|
||||
|
||||
// Solaris is platform-specific configuration for Solaris based containers.
|
||||
Solaris Solaris = 9;
|
||||
// Windows is platform-specific configuration for Windows based containers.
|
||||
Windows Windows = 10;
|
||||
}
|
||||
|
||||
message Process {
|
||||
// Terminal creates an interactive terminal for the container.
|
||||
bool Terminal = 1;
|
||||
|
||||
// ConsoleSize specifies the size of the console.
|
||||
Box ConsoleSize = 2;
|
||||
|
||||
// User specifies user information for the process.
|
||||
User User = 3 [(gogoproto.nullable) = false];
|
||||
|
||||
// Args specifies the binary and arguments for the application to execute.
|
||||
repeated string Args = 4;
|
||||
|
||||
// Env populates the process environment for the process.
|
||||
repeated string Env = 5;
|
||||
|
||||
// Cwd is the current working directory for the process and must be
|
||||
// relative to the container's root.
|
||||
string Cwd = 6;
|
||||
|
||||
// Capabilities are Linux capabilities that are kept for the process.
|
||||
LinuxCapabilities Capabilities = 7;
|
||||
|
||||
// Rlimits specifies rlimit options to apply to the process.
|
||||
repeated POSIXRlimit Rlimits = 8 [(gogoproto.nullable) = false];
|
||||
|
||||
// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
|
||||
bool NoNewPrivileges = 9;
|
||||
|
||||
// ApparmorProfile specifies the apparmor profile for the container.
|
||||
string ApparmorProfile = 10;
|
||||
|
||||
// Specify an oom_score_adj for the container.
|
||||
int64 OOMScoreAdj = 11;
|
||||
|
||||
// SelinuxLabel specifies the selinux context that the container process is run as.
|
||||
string SelinuxLabel = 12;
|
||||
}
|
||||
|
||||
message Box {
|
||||
// Height is the vertical dimension of a box.
|
||||
uint32 Height = 1;
|
||||
|
||||
// Width is the horizontal dimension of a box.
|
||||
uint32 Width = 2;
|
||||
}
|
||||
|
||||
message User {
|
||||
// UID is the user id.
|
||||
uint32 UID = 1;
|
||||
|
||||
// GID is the group id.
|
||||
uint32 GID = 2;
|
||||
|
||||
// AdditionalGids are additional group ids set for the container's process.
|
||||
repeated uint32 AdditionalGids = 3;
|
||||
|
||||
// Username is the user name.
|
||||
string Username = 4;
|
||||
}
|
||||
|
||||
message LinuxCapabilities {
|
||||
// Bounding is the set of capabilities checked by the kernel.
|
||||
repeated string Bounding = 1;
|
||||
|
||||
// Effective is the set of capabilities checked by the kernel.
|
||||
repeated string Effective = 2;
|
||||
|
||||
// Inheritable is the capabilities preserved across execve.
|
||||
repeated string Inheritable = 3;
|
||||
|
||||
// Permitted is the limiting superset for effective capabilities.
|
||||
repeated string Permitted = 4;
|
||||
|
||||
// Ambient is the ambient set of capabilities that are kept.
|
||||
repeated string Ambient = 5;
|
||||
}
|
||||
|
||||
message POSIXRlimit {
|
||||
// Type of the rlimit to set
|
||||
string Type = 1;
|
||||
|
||||
// Hard is the hard limit for the specified type
|
||||
uint64 Hard = 2;
|
||||
|
||||
// Soft is the soft limit for the specified type
|
||||
uint64 Soft = 3;
|
||||
}
|
||||
|
||||
message Mount {
|
||||
// destination is the path inside the container expect when it starts with "tmp:/"
|
||||
string destination = 1;
|
||||
|
||||
// source is the path inside the container expect when it starts with "vm:/dev/" or "tmp:/"
|
||||
// the path which starts with "vm:/dev/" refers the guest vm's "/dev",
|
||||
// especially, "vm:/dev/hostfs/" refers to the shared filesystem.
|
||||
// "tmp:/" is a temporary directory which is used for temporary mounts.
|
||||
string source = 2;
|
||||
string type = 3;
|
||||
repeated string options = 4;
|
||||
}
|
||||
|
||||
message Root {
|
||||
// Path is the absolute path to the container's root filesystem.
|
||||
string Path = 1;
|
||||
|
||||
// Readonly makes the root filesystem for the container readonly before the process is executed.
|
||||
bool Readonly = 2;
|
||||
}
|
||||
|
||||
message Hooks {
|
||||
// Prestart is a list of hooks to be run before the container process is executed.
|
||||
repeated Hook Prestart = 1 [(gogoproto.nullable) = false];
|
||||
|
||||
// Poststart is a list of hooks to be run after the container process is started.
|
||||
repeated Hook Poststart = 2 [(gogoproto.nullable) = false];
|
||||
|
||||
// Poststop is a list of hooks to be run after the container process exits.
|
||||
repeated Hook Poststop = 3 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message Hook {
|
||||
string Path = 1;
|
||||
repeated string Args = 2;
|
||||
repeated string Env = 3;
|
||||
int64 Timeout = 4;
|
||||
}
|
||||
|
||||
message Linux {
|
||||
// UIDMapping specifies user mappings for supporting user namespaces.
|
||||
repeated LinuxIDMapping UIDMappings = 1 [(gogoproto.nullable) = false];
|
||||
|
||||
// GIDMapping specifies group mappings for supporting user namespaces.
|
||||
repeated LinuxIDMapping GIDMappings = 2 [(gogoproto.nullable) = false];
|
||||
|
||||
// Sysctl are a set of key value pairs that are set for the container on start
|
||||
map<string, string> Sysctl = 3;
|
||||
|
||||
// Resources contain cgroup information for handling resource constraints
|
||||
// for the container
|
||||
LinuxResources Resources = 4;
|
||||
|
||||
// CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
|
||||
// The path is expected to be relative to the cgroups mountpoint.
|
||||
// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
|
||||
string CgroupsPath = 5;
|
||||
|
||||
// Namespaces contains the namespaces that are created and/or joined by the container
|
||||
repeated LinuxNamespace Namespaces = 6 [(gogoproto.nullable) = false];
|
||||
|
||||
// Devices are a list of device nodes that are created for the container
|
||||
repeated LinuxDevice Devices = 7 [(gogoproto.nullable) = false];
|
||||
|
||||
// Seccomp specifies the seccomp security settings for the container.
|
||||
LinuxSeccomp Seccomp = 8;
|
||||
|
||||
// RootfsPropagation is the rootfs mount propagation mode for the container.
|
||||
string RootfsPropagation = 9;
|
||||
|
||||
// MaskedPaths masks over the provided paths inside the container.
|
||||
repeated string MaskedPaths = 10;
|
||||
|
||||
// ReadonlyPaths sets the provided paths as RO inside the container.
|
||||
repeated string ReadonlyPaths = 11;
|
||||
|
||||
// MountLabel specifies the selinux context for the mounts in the container.
|
||||
string MountLabel = 12;
|
||||
|
||||
// IntelRdt contains Intel Resource Director Technology (RDT) information
|
||||
// for handling resource constraints (e.g., L3 cache) for the container
|
||||
LinuxIntelRdt IntelRdt = 13;
|
||||
}
|
||||
|
||||
message Windows {
|
||||
// Dummy string, never used.
|
||||
string dummy = 1;
|
||||
}
|
||||
|
||||
message Solaris {
|
||||
// Dummy string, never used.
|
||||
string dummy = 1;
|
||||
}
|
||||
|
||||
message LinuxIDMapping {
|
||||
// HostID is the starting UID/GID on the host to be mapped to 'ContainerID'
|
||||
uint32 HostID = 1;
|
||||
|
||||
// ContainerID is the starting UID/GID in the container
|
||||
uint32 ContainerID = 2;
|
||||
|
||||
// Size is the number of IDs to be mapped
|
||||
uint32 Size = 3;
|
||||
}
|
||||
|
||||
message LinuxNamespace {
|
||||
// Type is the type of namespace
|
||||
string Type = 1;
|
||||
|
||||
// Path is a path to an existing namespace persisted on disk that can be joined
|
||||
// and is of the same type
|
||||
string Path = 2;
|
||||
}
|
||||
|
||||
message LinuxDevice {
|
||||
// Path to the device.
|
||||
string Path = 1;
|
||||
|
||||
// Device type, block, char, etc.
|
||||
string Type = 2;
|
||||
|
||||
// Major is the device's major number.
|
||||
int64 Major = 3;
|
||||
|
||||
// Minor is the device's minor number.
|
||||
int64 Minor = 4;
|
||||
|
||||
// FileMode permission bits for the device.
|
||||
uint32 FileMode = 5;
|
||||
|
||||
// UID of the device.
|
||||
uint32 UID = 6;
|
||||
|
||||
// Gid of the device.
|
||||
uint32 GID = 7;
|
||||
}
|
||||
|
||||
message LinuxResources {
|
||||
// Devices configures the device whitelist.
|
||||
repeated LinuxDeviceCgroup Devices = 1 [(gogoproto.nullable) = false];
|
||||
|
||||
// Memory restriction configuration
|
||||
LinuxMemory Memory = 2;
|
||||
|
||||
// CPU resource restriction configuration
|
||||
LinuxCPU CPU = 3;
|
||||
|
||||
// Task resource restriction configuration.
|
||||
LinuxPids Pids = 4;
|
||||
|
||||
// BlockIO restriction configuration
|
||||
LinuxBlockIO BlockIO = 5;
|
||||
|
||||
// Hugetlb limit (in bytes)
|
||||
repeated LinuxHugepageLimit HugepageLimits = 6 [(gogoproto.nullable) = false];
|
||||
|
||||
// Network restriction configuration
|
||||
LinuxNetwork Network = 7;
|
||||
}
|
||||
|
||||
message LinuxMemory {
|
||||
// Memory limit (in bytes).
|
||||
int64 Limit = 1;
|
||||
|
||||
// Memory reservation or soft_limit (in bytes).
|
||||
int64 Reservation = 2;
|
||||
|
||||
// Total memory limit (memory + swap).
|
||||
int64 Swap = 3;
|
||||
|
||||
// Kernel memory limit (in bytes).
|
||||
int64 Kernel = 4;
|
||||
|
||||
// Kernel memory limit for tcp (in bytes)
|
||||
int64 KernelTCP = 5;
|
||||
|
||||
// How aggressive the kernel will swap memory pages.
|
||||
uint64 Swappiness = 6;
|
||||
|
||||
// DisableOOMKiller disables the OOM killer for out of memory conditions
|
||||
bool DisableOOMKiller = 7;
|
||||
}
|
||||
|
||||
message LinuxCPU {
|
||||
// CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
|
||||
uint64 Shares = 1;
|
||||
|
||||
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
|
||||
int64 Quota = 2;
|
||||
|
||||
// CPU period to be used for hardcapping (in usecs).
|
||||
uint64 Period = 3;
|
||||
|
||||
// How much time realtime scheduling may use (in usecs).
|
||||
int64 RealtimeRuntime = 4;
|
||||
|
||||
// CPU period to be used for realtime scheduling (in usecs).
|
||||
uint64 RealtimePeriod = 5;
|
||||
|
||||
// CPUs to use within the cpuset. Default is to use any CPU available.
|
||||
string Cpus = 6;
|
||||
|
||||
// List of memory nodes in the cpuset. Default is to use any available memory node.
|
||||
string Mems = 7;
|
||||
}
|
||||
|
||||
message LinuxWeightDevice {
|
||||
// Major is the device's major number.
|
||||
int64 Major = 1;
|
||||
|
||||
// Minor is the device's minor number.
|
||||
int64 Minor = 2;
|
||||
|
||||
// Weight is the bandwidth rate for the device.
|
||||
uint32 Weight = 3;
|
||||
|
||||
// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only
|
||||
uint32 LeafWeight = 4;
|
||||
}
|
||||
|
||||
message LinuxThrottleDevice {
|
||||
// Major is the device's major number.
|
||||
int64 Major = 1;
|
||||
|
||||
// Minor is the device's minor number.
|
||||
int64 Minor = 2;
|
||||
|
||||
// Rate is the IO rate limit per cgroup per device
|
||||
uint64 Rate = 3;
|
||||
}
|
||||
|
||||
message LinuxBlockIO {
|
||||
// Specifies per cgroup weight
|
||||
uint32 Weight = 1;
|
||||
|
||||
// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only
|
||||
uint32 LeafWeight = 2;
|
||||
|
||||
// Weight per cgroup per device, can override BlkioWeight
|
||||
repeated LinuxWeightDevice WeightDevice = 3 [(gogoproto.nullable) = false];
|
||||
|
||||
// IO read rate limit per cgroup per device, bytes per second
|
||||
repeated LinuxThrottleDevice ThrottleReadBpsDevice = 4 [(gogoproto.nullable) = false];
|
||||
|
||||
// IO write rate limit per cgroup per device, bytes per second
|
||||
repeated LinuxThrottleDevice ThrottleWriteBpsDevice = 5 [(gogoproto.nullable) = false];
|
||||
|
||||
// IO read rate limit per cgroup per device, IO per second
|
||||
repeated LinuxThrottleDevice ThrottleReadIOPSDevice = 6 [(gogoproto.nullable) = false];
|
||||
|
||||
// IO write rate limit per cgroup per device, IO per second
|
||||
repeated LinuxThrottleDevice ThrottleWriteIOPSDevice = 7 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message LinuxPids {
|
||||
// Maximum number of PIDs. Default is "no limit".
|
||||
int64 Limit = 1;
|
||||
}
|
||||
|
||||
message LinuxDeviceCgroup {
|
||||
// Allow or deny
|
||||
bool Allow = 1;
|
||||
|
||||
// Device type, block, char, etc.
|
||||
string Type = 2;
|
||||
|
||||
// Major is the device's major number.
|
||||
int64 Major = 3;
|
||||
|
||||
// Minor is the device's minor number.
|
||||
int64 Minor = 4;
|
||||
|
||||
// Cgroup access permissions format, rwm.
|
||||
string Access = 5;
|
||||
}
|
||||
|
||||
message LinuxNetwork {
|
||||
// Set class identifier for container's network packets
|
||||
uint32 ClassID = 1;
|
||||
|
||||
// Set priority of network traffic for container
|
||||
repeated LinuxInterfacePriority Priorities = 2 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message LinuxHugepageLimit {
|
||||
// Pagesize is the hugepage size
|
||||
string Pagesize = 1;
|
||||
|
||||
// Limit is the limit of "hugepagesize" hugetlb usage
|
||||
uint64 Limit = 2;
|
||||
}
|
||||
|
||||
message LinuxInterfacePriority {
|
||||
// Name is the name of the network interface
|
||||
string Name = 1;
|
||||
|
||||
// Priority for the interface
|
||||
uint32 Priority = 2;
|
||||
}
|
||||
|
||||
message LinuxSeccomp {
|
||||
string DefaultAction = 1;
|
||||
repeated string Architectures = 2;
|
||||
repeated LinuxSyscall Syscalls = 3 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message LinuxSeccompArg {
|
||||
uint64 Index = 1;
|
||||
uint64 Value = 2;
|
||||
uint64 ValueTwo = 3;
|
||||
string Op = 4;
|
||||
}
|
||||
|
||||
message LinuxSyscall {
|
||||
repeated string Names = 1;
|
||||
string Action = 2;
|
||||
repeated LinuxSeccompArg Args = 3 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message LinuxIntelRdt {
|
||||
// The schema for L3 cache id and capacity bitmask (CBM)
|
||||
// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
|
||||
string L3CacheSchema = 1;
|
||||
}
|
||||
14012
src/agent/protocols/src/agent.rs
Normal file
14012
src/agent/protocols/src/agent.rs
Normal file
File diff suppressed because it is too large
Load Diff
761
src/agent/protocols/src/agent_ttrpc.rs
Normal file
761
src/agent/protocols/src/agent_ttrpc.rs
Normal file
@@ -0,0 +1,761 @@
|
||||
// Copyright (c) 2020 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// This file is generated by ttrpc-compiler 0.2.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/Manishearth/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clipto_camel_casepy)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
use protobuf::{CodedInputStream, CodedOutputStream, Message};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AgentServiceClient {
|
||||
client: ::ttrpc::Client,
|
||||
}
|
||||
|
||||
impl AgentServiceClient {
|
||||
pub fn new(client: ::ttrpc::Client) -> Self {
|
||||
AgentServiceClient {
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_container(&self, req: &super::agent::CreateContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CreateContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn start_container(&self, req: &super::agent::StartContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StartContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn remove_container(&self, req: &super::agent::RemoveContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "RemoveContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn exec_process(&self, req: &super::agent::ExecProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ExecProcess", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn signal_process(&self, req: &super::agent::SignalProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "SignalProcess", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn wait_process(&self, req: &super::agent::WaitProcessRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::WaitProcessResponse> {
|
||||
let mut cres = super::agent::WaitProcessResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "WaitProcess", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn list_processes(&self, req: &super::agent::ListProcessesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ListProcessesResponse> {
|
||||
let mut cres = super::agent::ListProcessesResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListProcesses", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn update_container(&self, req: &super::agent::UpdateContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn stats_container(&self, req: &super::agent::StatsContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::StatsContainerResponse> {
|
||||
let mut cres = super::agent::StatsContainerResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StatsContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn pause_container(&self, req: &super::agent::PauseContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "PauseContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn resume_container(&self, req: &super::agent::ResumeContainerRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ResumeContainer", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn write_stdin(&self, req: &super::agent::WriteStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::WriteStreamResponse> {
|
||||
let mut cres = super::agent::WriteStreamResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "WriteStdin", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn read_stdout(&self, req: &super::agent::ReadStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
let mut cres = super::agent::ReadStreamResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReadStdout", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn read_stderr(&self, req: &super::agent::ReadStreamRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
let mut cres = super::agent::ReadStreamResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReadStderr", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn close_stdin(&self, req: &super::agent::CloseStdinRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CloseStdin", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn tty_win_resize(&self, req: &super::agent::TtyWinResizeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "TtyWinResize", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn update_interface(&self, req: &super::agent::UpdateInterfaceRequest, timeout_nano: i64) -> ::ttrpc::Result<super::types::Interface> {
|
||||
let mut cres = super::types::Interface::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateInterface", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn update_routes(&self, req: &super::agent::UpdateRoutesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
let mut cres = super::agent::Routes::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "UpdateRoutes", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn list_interfaces(&self, req: &super::agent::ListInterfacesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Interfaces> {
|
||||
let mut cres = super::agent::Interfaces::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListInterfaces", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn list_routes(&self, req: &super::agent::ListRoutesRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
let mut cres = super::agent::Routes::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ListRoutes", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn add_arp_neighbors(&self, req: &super::agent::AddARPNeighborsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "AddARPNeighbors", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn start_tracing(&self, req: &super::agent::StartTracingRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StartTracing", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn stop_tracing(&self, req: &super::agent::StopTracingRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "StopTracing", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn create_sandbox(&self, req: &super::agent::CreateSandboxRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CreateSandbox", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn destroy_sandbox(&self, req: &super::agent::DestroySandboxRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "DestroySandbox", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn online_cpu_mem(&self, req: &super::agent::OnlineCPUMemRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "OnlineCPUMem", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn reseed_random_dev(&self, req: &super::agent::ReseedRandomDevRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "ReseedRandomDev", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn get_guest_details(&self, req: &super::agent::GuestDetailsRequest, timeout_nano: i64) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
|
||||
let mut cres = super::agent::GuestDetailsResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "GetGuestDetails", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn mem_hotplug_by_probe(&self, req: &super::agent::MemHotplugByProbeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "MemHotplugByProbe", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn set_guest_date_time(&self, req: &super::agent::SetGuestDateTimeRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "SetGuestDateTime", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn copy_file(&self, req: &super::agent::CopyFileRequest, timeout_nano: i64) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
let mut cres = super::empty::Empty::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.AgentService", "CopyFile", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
}
|
||||
|
||||
struct CreateContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CreateContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CreateContainerRequest, create_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StartContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StartContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StartContainerRequest, start_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct RemoveContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for RemoveContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, RemoveContainerRequest, remove_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ExecProcessMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ExecProcessMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ExecProcessRequest, exec_process);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct SignalProcessMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for SignalProcessMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, SignalProcessRequest, signal_process);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct WaitProcessMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for WaitProcessMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, WaitProcessRequest, wait_process);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ListProcessesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ListProcessesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ListProcessesRequest, list_processes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for UpdateContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, UpdateContainerRequest, update_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StatsContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StatsContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StatsContainerRequest, stats_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct PauseContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for PauseContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, PauseContainerRequest, pause_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ResumeContainerMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ResumeContainerMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ResumeContainerRequest, resume_container);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct WriteStdinMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for WriteStdinMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, WriteStreamRequest, write_stdin);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ReadStdoutMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ReadStdoutMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ReadStreamRequest, read_stdout);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ReadStderrMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ReadStderrMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ReadStreamRequest, read_stderr);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct CloseStdinMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CloseStdinMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CloseStdinRequest, close_stdin);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct TtyWinResizeMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for TtyWinResizeMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, TtyWinResizeRequest, tty_win_resize);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateInterfaceMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for UpdateInterfaceMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, UpdateInterfaceRequest, update_interface);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateRoutesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for UpdateRoutesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, UpdateRoutesRequest, update_routes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ListInterfacesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ListInterfacesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ListInterfacesRequest, list_interfaces);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ListRoutesMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ListRoutesMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ListRoutesRequest, list_routes);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct AddArpNeighborsMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for AddArpNeighborsMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, AddARPNeighborsRequest, add_arp_neighbors);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StartTracingMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StartTracingMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StartTracingRequest, start_tracing);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct StopTracingMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for StopTracingMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, StopTracingRequest, stop_tracing);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct CreateSandboxMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CreateSandboxMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CreateSandboxRequest, create_sandbox);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct DestroySandboxMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for DestroySandboxMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, DestroySandboxRequest, destroy_sandbox);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct OnlineCpuMemMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for OnlineCpuMemMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, OnlineCPUMemRequest, online_cpu_mem);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct ReseedRandomDevMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for ReseedRandomDevMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, ReseedRandomDevRequest, reseed_random_dev);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct GetGuestDetailsMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for GetGuestDetailsMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, GuestDetailsRequest, get_guest_details);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct MemHotplugByProbeMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for MemHotplugByProbeMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, MemHotplugByProbeRequest, mem_hotplug_by_probe);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct SetGuestDateTimeMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for SetGuestDateTimeMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, SetGuestDateTimeRequest, set_guest_date_time);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct CopyFileMethod {
|
||||
service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CopyFileMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, agent, CopyFileRequest, copy_file);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AgentService {
|
||||
fn create_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CreateContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CreateContainer is not supported".to_string())))
|
||||
}
|
||||
fn start_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StartContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StartContainer is not supported".to_string())))
|
||||
}
|
||||
fn remove_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::RemoveContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/RemoveContainer is not supported".to_string())))
|
||||
}
|
||||
fn exec_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ExecProcessRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ExecProcess is not supported".to_string())))
|
||||
}
|
||||
fn signal_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::SignalProcessRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/SignalProcess is not supported".to_string())))
|
||||
}
|
||||
fn wait_process(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::WaitProcessRequest) -> ::ttrpc::Result<super::agent::WaitProcessResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/WaitProcess is not supported".to_string())))
|
||||
}
|
||||
fn list_processes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListProcessesRequest) -> ::ttrpc::Result<super::agent::ListProcessesResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListProcesses is not supported".to_string())))
|
||||
}
|
||||
fn update_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateContainer is not supported".to_string())))
|
||||
}
|
||||
fn stats_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StatsContainerRequest) -> ::ttrpc::Result<super::agent::StatsContainerResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StatsContainer is not supported".to_string())))
|
||||
}
|
||||
fn pause_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::PauseContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/PauseContainer is not supported".to_string())))
|
||||
}
|
||||
fn resume_container(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ResumeContainerRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ResumeContainer is not supported".to_string())))
|
||||
}
|
||||
fn write_stdin(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::WriteStreamRequest) -> ::ttrpc::Result<super::agent::WriteStreamResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/WriteStdin is not supported".to_string())))
|
||||
}
|
||||
fn read_stdout(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReadStreamRequest) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReadStdout is not supported".to_string())))
|
||||
}
|
||||
fn read_stderr(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReadStreamRequest) -> ::ttrpc::Result<super::agent::ReadStreamResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReadStderr is not supported".to_string())))
|
||||
}
|
||||
fn close_stdin(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CloseStdinRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CloseStdin is not supported".to_string())))
|
||||
}
|
||||
fn tty_win_resize(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::TtyWinResizeRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/TtyWinResize is not supported".to_string())))
|
||||
}
|
||||
fn update_interface(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateInterfaceRequest) -> ::ttrpc::Result<super::types::Interface> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateInterface is not supported".to_string())))
|
||||
}
|
||||
fn update_routes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::UpdateRoutesRequest) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/UpdateRoutes is not supported".to_string())))
|
||||
}
|
||||
fn list_interfaces(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListInterfacesRequest) -> ::ttrpc::Result<super::agent::Interfaces> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListInterfaces is not supported".to_string())))
|
||||
}
|
||||
fn list_routes(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ListRoutesRequest) -> ::ttrpc::Result<super::agent::Routes> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ListRoutes is not supported".to_string())))
|
||||
}
|
||||
fn add_arp_neighbors(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::AddARPNeighborsRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/AddARPNeighbors is not supported".to_string())))
|
||||
}
|
||||
fn start_tracing(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StartTracingRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StartTracing is not supported".to_string())))
|
||||
}
|
||||
fn stop_tracing(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::StopTracingRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/StopTracing is not supported".to_string())))
|
||||
}
|
||||
fn create_sandbox(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CreateSandboxRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CreateSandbox is not supported".to_string())))
|
||||
}
|
||||
fn destroy_sandbox(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::DestroySandboxRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/DestroySandbox is not supported".to_string())))
|
||||
}
|
||||
fn online_cpu_mem(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::OnlineCPUMemRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/OnlineCPUMem is not supported".to_string())))
|
||||
}
|
||||
fn reseed_random_dev(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::ReseedRandomDevRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/ReseedRandomDev is not supported".to_string())))
|
||||
}
|
||||
fn get_guest_details(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::GuestDetailsRequest) -> ::ttrpc::Result<super::agent::GuestDetailsResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/GetGuestDetails is not supported".to_string())))
|
||||
}
|
||||
fn mem_hotplug_by_probe(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::MemHotplugByProbeRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/MemHotplugByProbe is not supported".to_string())))
|
||||
}
|
||||
fn set_guest_date_time(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::SetGuestDateTimeRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/SetGuestDateTime is not supported".to_string())))
|
||||
}
|
||||
fn copy_file(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::agent::CopyFileRequest) -> ::ttrpc::Result<super::empty::Empty> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.AgentService/CopyFile is not supported".to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_agent_service(service: Arc<std::boxed::Box<dyn AgentService + Send + Sync>>) -> HashMap <String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
|
||||
let mut methods = HashMap::new();
|
||||
|
||||
methods.insert("/grpc.AgentService/CreateContainer".to_string(),
|
||||
std::boxed::Box::new(CreateContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StartContainer".to_string(),
|
||||
std::boxed::Box::new(StartContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/RemoveContainer".to_string(),
|
||||
std::boxed::Box::new(RemoveContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ExecProcess".to_string(),
|
||||
std::boxed::Box::new(ExecProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/SignalProcess".to_string(),
|
||||
std::boxed::Box::new(SignalProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/WaitProcess".to_string(),
|
||||
std::boxed::Box::new(WaitProcessMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ListProcesses".to_string(),
|
||||
std::boxed::Box::new(ListProcessesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/UpdateContainer".to_string(),
|
||||
std::boxed::Box::new(UpdateContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StatsContainer".to_string(),
|
||||
std::boxed::Box::new(StatsContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/PauseContainer".to_string(),
|
||||
std::boxed::Box::new(PauseContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ResumeContainer".to_string(),
|
||||
std::boxed::Box::new(ResumeContainerMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/WriteStdin".to_string(),
|
||||
std::boxed::Box::new(WriteStdinMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ReadStdout".to_string(),
|
||||
std::boxed::Box::new(ReadStdoutMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ReadStderr".to_string(),
|
||||
std::boxed::Box::new(ReadStderrMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/CloseStdin".to_string(),
|
||||
std::boxed::Box::new(CloseStdinMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/TtyWinResize".to_string(),
|
||||
std::boxed::Box::new(TtyWinResizeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/UpdateInterface".to_string(),
|
||||
std::boxed::Box::new(UpdateInterfaceMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/UpdateRoutes".to_string(),
|
||||
std::boxed::Box::new(UpdateRoutesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ListInterfaces".to_string(),
|
||||
std::boxed::Box::new(ListInterfacesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ListRoutes".to_string(),
|
||||
std::boxed::Box::new(ListRoutesMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/AddARPNeighbors".to_string(),
|
||||
std::boxed::Box::new(AddArpNeighborsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StartTracing".to_string(),
|
||||
std::boxed::Box::new(StartTracingMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/StopTracing".to_string(),
|
||||
std::boxed::Box::new(StopTracingMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/CreateSandbox".to_string(),
|
||||
std::boxed::Box::new(CreateSandboxMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/DestroySandbox".to_string(),
|
||||
std::boxed::Box::new(DestroySandboxMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/OnlineCPUMem".to_string(),
|
||||
std::boxed::Box::new(OnlineCpuMemMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/ReseedRandomDev".to_string(),
|
||||
std::boxed::Box::new(ReseedRandomDevMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/GetGuestDetails".to_string(),
|
||||
std::boxed::Box::new(GetGuestDetailsMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/MemHotplugByProbe".to_string(),
|
||||
std::boxed::Box::new(MemHotplugByProbeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/SetGuestDateTime".to_string(),
|
||||
std::boxed::Box::new(SetGuestDateTimeMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.AgentService/CopyFile".to_string(),
|
||||
std::boxed::Box::new(CopyFileMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods
|
||||
}
|
||||
242
src/agent/protocols/src/empty.rs
Normal file
242
src/agent/protocols/src/empty.rs
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// This file is generated by rust-protobuf 2.14.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/rust-lang/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
//! Generated file from `google/protobuf/empty.proto`
|
||||
|
||||
use protobuf::Message as Message_imported_for_functions;
|
||||
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
|
||||
|
||||
/// Generated files are compatible only with the same version
|
||||
/// of protobuf runtime.
|
||||
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct Empty {
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a Empty {
|
||||
fn default() -> &'a Empty {
|
||||
<Empty as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl Empty {
|
||||
pub fn new() -> Empty {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for Empty {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> Empty {
|
||||
Empty::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let fields = ::std::vec::Vec::new();
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<Empty>(
|
||||
"Empty",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static Empty {
|
||||
static mut instance: ::protobuf::lazy::Lazy<Empty> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(Empty::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for Empty {
|
||||
fn clear(&mut self) {
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for Empty {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for Empty {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
static file_descriptor_proto_data: &'static [u8] = b"\
|
||||
\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05Empt\
|
||||
yBT\n\x13com.google.protobufB\nEmptyProtoP\x01Z\x05types\xf8\x01\x01\xa2\
|
||||
\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesJ\xa9\x14\n\x06\x12\
|
||||
\x04\x1e\03\x10\n\xcc\x0c\n\x01\x0c\x12\x03\x1e\0\x122\xc1\x0c\x20Protoc\
|
||||
ol\x20Buffers\x20-\x20Google's\x20data\x20interchange\x20format\n\x20Cop\
|
||||
yright\x202008\x20Google\x20Inc.\x20\x20All\x20rights\x20reserved.\n\x20\
|
||||
https://developers.google.com/protocol-buffers/\n\n\x20Redistribution\
|
||||
\x20and\x20use\x20in\x20source\x20and\x20binary\x20forms,\x20with\x20or\
|
||||
\x20without\n\x20modification,\x20are\x20permitted\x20provided\x20that\
|
||||
\x20the\x20following\x20conditions\x20are\n\x20met:\n\n\x20\x20\x20\x20\
|
||||
\x20*\x20Redistributions\x20of\x20source\x20code\x20must\x20retain\x20th\
|
||||
e\x20above\x20copyright\n\x20notice,\x20this\x20list\x20of\x20conditions\
|
||||
\x20and\x20the\x20following\x20disclaimer.\n\x20\x20\x20\x20\x20*\x20Red\
|
||||
istributions\x20in\x20binary\x20form\x20must\x20reproduce\x20the\x20abov\
|
||||
e\n\x20copyright\x20notice,\x20this\x20list\x20of\x20conditions\x20and\
|
||||
\x20the\x20following\x20disclaimer\n\x20in\x20the\x20documentation\x20an\
|
||||
d/or\x20other\x20materials\x20provided\x20with\x20the\n\x20distribution.\
|
||||
\n\x20\x20\x20\x20\x20*\x20Neither\x20the\x20name\x20of\x20Google\x20Inc\
|
||||
.\x20nor\x20the\x20names\x20of\x20its\n\x20contributors\x20may\x20be\x20\
|
||||
used\x20to\x20endorse\x20or\x20promote\x20products\x20derived\x20from\n\
|
||||
\x20this\x20software\x20without\x20specific\x20prior\x20written\x20permi\
|
||||
ssion.\n\n\x20THIS\x20SOFTWARE\x20IS\x20PROVIDED\x20BY\x20THE\x20COPYRIG\
|
||||
HT\x20HOLDERS\x20AND\x20CONTRIBUTORS\n\x20\"AS\x20IS\"\x20AND\x20ANY\x20\
|
||||
EXPRESS\x20OR\x20IMPLIED\x20WARRANTIES,\x20INCLUDING,\x20BUT\x20NOT\n\
|
||||
\x20LIMITED\x20TO,\x20THE\x20IMPLIED\x20WARRANTIES\x20OF\x20MERCHANTABIL\
|
||||
ITY\x20AND\x20FITNESS\x20FOR\n\x20A\x20PARTICULAR\x20PURPOSE\x20ARE\x20D\
|
||||
ISCLAIMED.\x20IN\x20NO\x20EVENT\x20SHALL\x20THE\x20COPYRIGHT\n\x20OWNER\
|
||||
\x20OR\x20CONTRIBUTORS\x20BE\x20LIABLE\x20FOR\x20ANY\x20DIRECT,\x20INDIR\
|
||||
ECT,\x20INCIDENTAL,\n\x20SPECIAL,\x20EXEMPLARY,\x20OR\x20CONSEQUENTIAL\
|
||||
\x20DAMAGES\x20(INCLUDING,\x20BUT\x20NOT\n\x20LIMITED\x20TO,\x20PROCUREM\
|
||||
ENT\x20OF\x20SUBSTITUTE\x20GOODS\x20OR\x20SERVICES;\x20LOSS\x20OF\x20USE\
|
||||
,\n\x20DATA,\x20OR\x20PROFITS;\x20OR\x20BUSINESS\x20INTERRUPTION)\x20HOW\
|
||||
EVER\x20CAUSED\x20AND\x20ON\x20ANY\n\x20THEORY\x20OF\x20LIABILITY,\x20WH\
|
||||
ETHER\x20IN\x20CONTRACT,\x20STRICT\x20LIABILITY,\x20OR\x20TORT\n\x20(INC\
|
||||
LUDING\x20NEGLIGENCE\x20OR\x20OTHERWISE)\x20ARISING\x20IN\x20ANY\x20WAY\
|
||||
\x20OUT\x20OF\x20THE\x20USE\n\x20OF\x20THIS\x20SOFTWARE,\x20EVEN\x20IF\
|
||||
\x20ADVISED\x20OF\x20THE\x20POSSIBILITY\x20OF\x20SUCH\x20DAMAGE.\n\n\x08\
|
||||
\n\x01\x02\x12\x03\x20\x08\x17\n\x08\n\x01\x08\x12\x03\"\0;\n\x0b\n\x04\
|
||||
\x08\xe7\x07\0\x12\x03\"\0;\n\x0c\n\x05\x08\xe7\x07\0\x02\x12\x03\"\x07\
|
||||
\x17\n\r\n\x06\x08\xe7\x07\0\x02\0\x12\x03\"\x07\x17\n\x0e\n\x07\x08\xe7\
|
||||
\x07\0\x02\0\x01\x12\x03\"\x07\x17\n\x0c\n\x05\x08\xe7\x07\0\x07\x12\x03\
|
||||
\"\x1a:\n\x08\n\x01\x08\x12\x03#\0\x1c\n\x0b\n\x04\x08\xe7\x07\x01\x12\
|
||||
\x03#\0\x1c\n\x0c\n\x05\x08\xe7\x07\x01\x02\x12\x03#\x07\x11\n\r\n\x06\
|
||||
\x08\xe7\x07\x01\x02\0\x12\x03#\x07\x11\n\x0e\n\x07\x08\xe7\x07\x01\x02\
|
||||
\0\x01\x12\x03#\x07\x11\n\x0c\n\x05\x08\xe7\x07\x01\x07\x12\x03#\x14\x1b\
|
||||
\n\x08\n\x01\x08\x12\x03$\0,\n\x0b\n\x04\x08\xe7\x07\x02\x12\x03$\0,\n\
|
||||
\x0c\n\x05\x08\xe7\x07\x02\x02\x12\x03$\x07\x13\n\r\n\x06\x08\xe7\x07\
|
||||
\x02\x02\0\x12\x03$\x07\x13\n\x0e\n\x07\x08\xe7\x07\x02\x02\0\x01\x12\
|
||||
\x03$\x07\x13\n\x0c\n\x05\x08\xe7\x07\x02\x07\x12\x03$\x16+\n\x08\n\x01\
|
||||
\x08\x12\x03%\0+\n\x0b\n\x04\x08\xe7\x07\x03\x12\x03%\0+\n\x0c\n\x05\x08\
|
||||
\xe7\x07\x03\x02\x12\x03%\x07\x1b\n\r\n\x06\x08\xe7\x07\x03\x02\0\x12\
|
||||
\x03%\x07\x1b\n\x0e\n\x07\x08\xe7\x07\x03\x02\0\x01\x12\x03%\x07\x1b\n\
|
||||
\x0c\n\x05\x08\xe7\x07\x03\x07\x12\x03%\x1e*\n\x08\n\x01\x08\x12\x03&\0\
|
||||
\"\n\x0b\n\x04\x08\xe7\x07\x04\x12\x03&\0\"\n\x0c\n\x05\x08\xe7\x07\x04\
|
||||
\x02\x12\x03&\x07\x1a\n\r\n\x06\x08\xe7\x07\x04\x02\0\x12\x03&\x07\x1a\n\
|
||||
\x0e\n\x07\x08\xe7\x07\x04\x02\0\x01\x12\x03&\x07\x1a\n\x0c\n\x05\x08\
|
||||
\xe7\x07\x04\x03\x12\x03&\x1d!\n\x08\n\x01\x08\x12\x03'\0!\n\x0b\n\x04\
|
||||
\x08\xe7\x07\x05\x12\x03'\0!\n\x0c\n\x05\x08\xe7\x07\x05\x02\x12\x03'\
|
||||
\x07\x18\n\r\n\x06\x08\xe7\x07\x05\x02\0\x12\x03'\x07\x18\n\x0e\n\x07\
|
||||
\x08\xe7\x07\x05\x02\0\x01\x12\x03'\x07\x18\n\x0c\n\x05\x08\xe7\x07\x05\
|
||||
\x07\x12\x03'\x1b\x20\n\x08\n\x01\x08\x12\x03(\0\x1f\n\x0b\n\x04\x08\xe7\
|
||||
\x07\x06\x12\x03(\0\x1f\n\x0c\n\x05\x08\xe7\x07\x06\x02\x12\x03(\x07\x17\
|
||||
\n\r\n\x06\x08\xe7\x07\x06\x02\0\x12\x03(\x07\x17\n\x0e\n\x07\x08\xe7\
|
||||
\x07\x06\x02\0\x01\x12\x03(\x07\x17\n\x0c\n\x05\x08\xe7\x07\x06\x03\x12\
|
||||
\x03(\x1a\x1e\n\xfb\x02\n\x02\x04\0\x12\x033\0\x10\x1a\xef\x02\x20A\x20g\
|
||||
eneric\x20empty\x20message\x20that\x20you\x20can\x20re-use\x20to\x20avoi\
|
||||
d\x20defining\x20duplicated\n\x20empty\x20messages\x20in\x20your\x20APIs\
|
||||
.\x20A\x20typical\x20example\x20is\x20to\x20use\x20it\x20as\x20the\x20re\
|
||||
quest\n\x20or\x20the\x20response\x20type\x20of\x20an\x20API\x20method.\
|
||||
\x20For\x20instance:\n\n\x20\x20\x20\x20\x20service\x20Foo\x20{\n\x20\
|
||||
\x20\x20\x20\x20\x20\x20rpc\x20Bar(google.protobuf.Empty)\x20returns\x20\
|
||||
(google.protobuf.Empty);\n\x20\x20\x20\x20\x20}\n\n\x20The\x20JSON\x20re\
|
||||
presentation\x20for\x20`Empty`\x20is\x20empty\x20JSON\x20object\x20`{}`.\
|
||||
\n\n\n\n\x03\x04\0\x01\x12\x033\x08\rb\x06proto3\
|
||||
";
|
||||
|
||||
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;
|
||||
|
||||
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
|
||||
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
|
||||
}
|
||||
|
||||
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
|
||||
unsafe {
|
||||
file_descriptor_proto_lazy.get(|| {
|
||||
parse_descriptor_proto()
|
||||
})
|
||||
}
|
||||
}
|
||||
685
src/agent/protocols/src/health.rs
Normal file
685
src/agent/protocols/src/health.rs
Normal file
@@ -0,0 +1,685 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// This file is generated by rust-protobuf 2.14.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/rust-lang/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
//! Generated file from `health.proto`
|
||||
|
||||
use protobuf::Message as Message_imported_for_functions;
|
||||
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
|
||||
|
||||
/// Generated files are compatible only with the same version
|
||||
/// of protobuf runtime.
|
||||
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct CheckRequest {
|
||||
// message fields
|
||||
pub service: ::std::string::String,
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a CheckRequest {
|
||||
fn default() -> &'a CheckRequest {
|
||||
<CheckRequest as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl CheckRequest {
|
||||
pub fn new() -> CheckRequest {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
|
||||
// string service = 1;
|
||||
|
||||
|
||||
pub fn get_service(&self) -> &str {
|
||||
&self.service
|
||||
}
|
||||
pub fn clear_service(&mut self) {
|
||||
self.service.clear();
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_service(&mut self, v: ::std::string::String) {
|
||||
self.service = v;
|
||||
}
|
||||
|
||||
// Mutable pointer to the field.
|
||||
// If field is not initialized, it is initialized with default value first.
|
||||
pub fn mut_service(&mut self) -> &mut ::std::string::String {
|
||||
&mut self.service
|
||||
}
|
||||
|
||||
// Take field
|
||||
pub fn take_service(&mut self) -> ::std::string::String {
|
||||
::std::mem::replace(&mut self.service, ::std::string::String::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for CheckRequest {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
1 => {
|
||||
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.service)?;
|
||||
},
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
if !self.service.is_empty() {
|
||||
my_size += ::protobuf::rt::string_size(1, &self.service);
|
||||
}
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
if !self.service.is_empty() {
|
||||
os.write_string(1, &self.service)?;
|
||||
}
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> CheckRequest {
|
||||
CheckRequest::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let mut fields = ::std::vec::Vec::new();
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
|
||||
"service",
|
||||
|m: &CheckRequest| { &m.service },
|
||||
|m: &mut CheckRequest| { &mut m.service },
|
||||
));
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<CheckRequest>(
|
||||
"CheckRequest",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static CheckRequest {
|
||||
static mut instance: ::protobuf::lazy::Lazy<CheckRequest> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(CheckRequest::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for CheckRequest {
|
||||
fn clear(&mut self) {
|
||||
self.service.clear();
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for CheckRequest {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for CheckRequest {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct HealthCheckResponse {
|
||||
// message fields
|
||||
pub status: HealthCheckResponse_ServingStatus,
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a HealthCheckResponse {
|
||||
fn default() -> &'a HealthCheckResponse {
|
||||
<HealthCheckResponse as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl HealthCheckResponse {
|
||||
pub fn new() -> HealthCheckResponse {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
|
||||
// .grpc.HealthCheckResponse.ServingStatus status = 1;
|
||||
|
||||
|
||||
pub fn get_status(&self) -> HealthCheckResponse_ServingStatus {
|
||||
self.status
|
||||
}
|
||||
pub fn clear_status(&mut self) {
|
||||
self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_status(&mut self, v: HealthCheckResponse_ServingStatus) {
|
||||
self.status = v;
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for HealthCheckResponse {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
1 => {
|
||||
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.status, 1, &mut self.unknown_fields)?
|
||||
},
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
|
||||
my_size += ::protobuf::rt::enum_size(1, self.status);
|
||||
}
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
if self.status != HealthCheckResponse_ServingStatus::UNKNOWN {
|
||||
os.write_enum(1, self.status.value())?;
|
||||
}
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> HealthCheckResponse {
|
||||
HealthCheckResponse::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let mut fields = ::std::vec::Vec::new();
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<HealthCheckResponse_ServingStatus>>(
|
||||
"status",
|
||||
|m: &HealthCheckResponse| { &m.status },
|
||||
|m: &mut HealthCheckResponse| { &mut m.status },
|
||||
));
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<HealthCheckResponse>(
|
||||
"HealthCheckResponse",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static HealthCheckResponse {
|
||||
static mut instance: ::protobuf::lazy::Lazy<HealthCheckResponse> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(HealthCheckResponse::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for HealthCheckResponse {
|
||||
fn clear(&mut self) {
|
||||
self.status = HealthCheckResponse_ServingStatus::UNKNOWN;
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for HealthCheckResponse {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
|
||||
pub enum HealthCheckResponse_ServingStatus {
|
||||
UNKNOWN = 0,
|
||||
SERVING = 1,
|
||||
NOT_SERVING = 2,
|
||||
}
|
||||
|
||||
impl ::protobuf::ProtobufEnum for HealthCheckResponse_ServingStatus {
|
||||
fn value(&self) -> i32 {
|
||||
*self as i32
|
||||
}
|
||||
|
||||
fn from_i32(value: i32) -> ::std::option::Option<HealthCheckResponse_ServingStatus> {
|
||||
match value {
|
||||
0 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::UNKNOWN),
|
||||
1 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::SERVING),
|
||||
2 => ::std::option::Option::Some(HealthCheckResponse_ServingStatus::NOT_SERVING),
|
||||
_ => ::std::option::Option::None
|
||||
}
|
||||
}
|
||||
|
||||
fn values() -> &'static [Self] {
|
||||
static values: &'static [HealthCheckResponse_ServingStatus] = &[
|
||||
HealthCheckResponse_ServingStatus::UNKNOWN,
|
||||
HealthCheckResponse_ServingStatus::SERVING,
|
||||
HealthCheckResponse_ServingStatus::NOT_SERVING,
|
||||
];
|
||||
values
|
||||
}
|
||||
|
||||
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
::protobuf::reflect::EnumDescriptor::new_pb_name::<HealthCheckResponse_ServingStatus>("HealthCheckResponse.ServingStatus", file_descriptor_proto())
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::marker::Copy for HealthCheckResponse_ServingStatus {
|
||||
}
|
||||
|
||||
impl ::std::default::Default for HealthCheckResponse_ServingStatus {
|
||||
fn default() -> Self {
|
||||
HealthCheckResponse_ServingStatus::UNKNOWN
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for HealthCheckResponse_ServingStatus {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Enum(self.descriptor())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq,Clone,Default)]
|
||||
pub struct VersionCheckResponse {
|
||||
// message fields
|
||||
pub grpc_version: ::std::string::String,
|
||||
pub agent_version: ::std::string::String,
|
||||
// special fields
|
||||
pub unknown_fields: ::protobuf::UnknownFields,
|
||||
pub cached_size: ::protobuf::CachedSize,
|
||||
}
|
||||
|
||||
impl<'a> ::std::default::Default for &'a VersionCheckResponse {
|
||||
fn default() -> &'a VersionCheckResponse {
|
||||
<VersionCheckResponse as ::protobuf::Message>::default_instance()
|
||||
}
|
||||
}
|
||||
|
||||
impl VersionCheckResponse {
|
||||
pub fn new() -> VersionCheckResponse {
|
||||
::std::default::Default::default()
|
||||
}
|
||||
|
||||
// string grpc_version = 1;
|
||||
|
||||
|
||||
pub fn get_grpc_version(&self) -> &str {
|
||||
&self.grpc_version
|
||||
}
|
||||
pub fn clear_grpc_version(&mut self) {
|
||||
self.grpc_version.clear();
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_grpc_version(&mut self, v: ::std::string::String) {
|
||||
self.grpc_version = v;
|
||||
}
|
||||
|
||||
// Mutable pointer to the field.
|
||||
// If field is not initialized, it is initialized with default value first.
|
||||
pub fn mut_grpc_version(&mut self) -> &mut ::std::string::String {
|
||||
&mut self.grpc_version
|
||||
}
|
||||
|
||||
// Take field
|
||||
pub fn take_grpc_version(&mut self) -> ::std::string::String {
|
||||
::std::mem::replace(&mut self.grpc_version, ::std::string::String::new())
|
||||
}
|
||||
|
||||
// string agent_version = 2;
|
||||
|
||||
|
||||
pub fn get_agent_version(&self) -> &str {
|
||||
&self.agent_version
|
||||
}
|
||||
pub fn clear_agent_version(&mut self) {
|
||||
self.agent_version.clear();
|
||||
}
|
||||
|
||||
// Param is passed by value, moved
|
||||
pub fn set_agent_version(&mut self, v: ::std::string::String) {
|
||||
self.agent_version = v;
|
||||
}
|
||||
|
||||
// Mutable pointer to the field.
|
||||
// If field is not initialized, it is initialized with default value first.
|
||||
pub fn mut_agent_version(&mut self) -> &mut ::std::string::String {
|
||||
&mut self.agent_version
|
||||
}
|
||||
|
||||
// Take field
|
||||
pub fn take_agent_version(&mut self) -> ::std::string::String {
|
||||
::std::mem::replace(&mut self.agent_version, ::std::string::String::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Message for VersionCheckResponse {
|
||||
fn is_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
while !is.eof()? {
|
||||
let (field_number, wire_type) = is.read_tag_unpack()?;
|
||||
match field_number {
|
||||
1 => {
|
||||
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.grpc_version)?;
|
||||
},
|
||||
2 => {
|
||||
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.agent_version)?;
|
||||
},
|
||||
_ => {
|
||||
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
|
||||
},
|
||||
};
|
||||
}
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
// Compute sizes of nested messages
|
||||
#[allow(unused_variables)]
|
||||
fn compute_size(&self) -> u32 {
|
||||
let mut my_size = 0;
|
||||
if !self.grpc_version.is_empty() {
|
||||
my_size += ::protobuf::rt::string_size(1, &self.grpc_version);
|
||||
}
|
||||
if !self.agent_version.is_empty() {
|
||||
my_size += ::protobuf::rt::string_size(2, &self.agent_version);
|
||||
}
|
||||
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
|
||||
self.cached_size.set(my_size);
|
||||
my_size
|
||||
}
|
||||
|
||||
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
|
||||
if !self.grpc_version.is_empty() {
|
||||
os.write_string(1, &self.grpc_version)?;
|
||||
}
|
||||
if !self.agent_version.is_empty() {
|
||||
os.write_string(2, &self.agent_version)?;
|
||||
}
|
||||
os.write_unknown_fields(self.get_unknown_fields())?;
|
||||
::std::result::Result::Ok(())
|
||||
}
|
||||
|
||||
fn get_cached_size(&self) -> u32 {
|
||||
self.cached_size.get()
|
||||
}
|
||||
|
||||
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
|
||||
&self.unknown_fields
|
||||
}
|
||||
|
||||
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
|
||||
&mut self.unknown_fields
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn (::std::any::Any) {
|
||||
self as &dyn (::std::any::Any)
|
||||
}
|
||||
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
|
||||
self as &mut dyn (::std::any::Any)
|
||||
}
|
||||
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
|
||||
self
|
||||
}
|
||||
|
||||
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
Self::descriptor_static()
|
||||
}
|
||||
|
||||
fn new() -> VersionCheckResponse {
|
||||
VersionCheckResponse::new()
|
||||
}
|
||||
|
||||
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
|
||||
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
descriptor.get(|| {
|
||||
let mut fields = ::std::vec::Vec::new();
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
|
||||
"grpc_version",
|
||||
|m: &VersionCheckResponse| { &m.grpc_version },
|
||||
|m: &mut VersionCheckResponse| { &mut m.grpc_version },
|
||||
));
|
||||
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
|
||||
"agent_version",
|
||||
|m: &VersionCheckResponse| { &m.agent_version },
|
||||
|m: &mut VersionCheckResponse| { &mut m.agent_version },
|
||||
));
|
||||
::protobuf::reflect::MessageDescriptor::new_pb_name::<VersionCheckResponse>(
|
||||
"VersionCheckResponse",
|
||||
fields,
|
||||
file_descriptor_proto()
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn default_instance() -> &'static VersionCheckResponse {
|
||||
static mut instance: ::protobuf::lazy::Lazy<VersionCheckResponse> = ::protobuf::lazy::Lazy::INIT;
|
||||
unsafe {
|
||||
instance.get(VersionCheckResponse::new)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::Clear for VersionCheckResponse {
|
||||
fn clear(&mut self) {
|
||||
self.grpc_version.clear();
|
||||
self.agent_version.clear();
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Debug for VersionCheckResponse {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
|
||||
::protobuf::text_format::fmt(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl ::protobuf::reflect::ProtobufValue for VersionCheckResponse {
|
||||
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
|
||||
::protobuf::reflect::ReflectValueRef::Message(self)
|
||||
}
|
||||
}
|
||||
|
||||
static file_descriptor_proto_data: &'static [u8] = b"\
|
||||
\n\x0chealth.proto\x12\x04grpc\x1a-github.com/gogo/protobuf/gogoproto/go\
|
||||
go.proto\"(\n\x0cCheckRequest\x12\x18\n\x07service\x18\x01\x20\x01(\tR\
|
||||
\x07service\"\x92\x01\n\x13HealthCheckResponse\x12?\n\x06status\x18\x01\
|
||||
\x20\x01(\x0e2'.grpc.HealthCheckResponse.ServingStatusR\x06status\":\n\r\
|
||||
ServingStatus\x12\x0b\n\x07UNKNOWN\x10\0\x12\x0b\n\x07SERVING\x10\x01\
|
||||
\x12\x0f\n\x0bNOT_SERVING\x10\x02\"^\n\x14VersionCheckResponse\x12!\n\
|
||||
\x0cgrpc_version\x18\x01\x20\x01(\tR\x0bgrpcVersion\x12#\n\ragent_versio\
|
||||
n\x18\x02\x20\x01(\tR\x0cagentVersion2{\n\x06Health\x126\n\x05Check\x12\
|
||||
\x12.grpc.CheckRequest\x1a\x19.grpc.HealthCheckResponse\x129\n\x07Versio\
|
||||
n\x12\x12.grpc.CheckRequest\x1a\x1a.grpc.VersionCheckResponseB\x10\xa8\
|
||||
\xe2\x1e\x01\xb8\xe2\x1e\x01\xc0\xe2\x1e\x01\xf8\xe1\x1e\x01J\xe7\x08\n\
|
||||
\x06\x12\x04\x07\0'\x01\nq\n\x01\x0c\x12\x03\x07\0\x122g\n\x20Copyright\
|
||||
\x202017\x20HyperHQ\x20Inc.\n\x20Copyright\x20(c)\x202019\x20Ant\x20Fina\
|
||||
ncial\n\n\x20SPDX-License-Identifier:\x20Apache-2.0\n\n\n\x08\n\x01\x02\
|
||||
\x12\x03\t\x08\x0c\n\t\n\x02\x03\0\x12\x03\x0b\x076\n\x08\n\x01\x08\x12\
|
||||
\x03\r\0$\n\x0b\n\x04\x08\xe7\x07\0\x12\x03\r\0$\n\x0c\n\x05\x08\xe7\x07\
|
||||
\0\x02\x12\x03\r\x07\x1c\n\r\n\x06\x08\xe7\x07\0\x02\0\x12\x03\r\x07\x1c\
|
||||
\n\x0e\n\x07\x08\xe7\x07\0\x02\0\x01\x12\x03\r\x08\x1b\n\x0c\n\x05\x08\
|
||||
\xe7\x07\0\x03\x12\x03\r\x1f#\n\x08\n\x01\x08\x12\x03\x0e\0'\n\x0b\n\x04\
|
||||
\x08\xe7\x07\x01\x12\x03\x0e\0'\n\x0c\n\x05\x08\xe7\x07\x01\x02\x12\x03\
|
||||
\x0e\x07\x1f\n\r\n\x06\x08\xe7\x07\x01\x02\0\x12\x03\x0e\x07\x1f\n\x0e\n\
|
||||
\x07\x08\xe7\x07\x01\x02\0\x01\x12\x03\x0e\x08\x1e\n\x0c\n\x05\x08\xe7\
|
||||
\x07\x01\x03\x12\x03\x0e\"&\n\x08\n\x01\x08\x12\x03\x0f\0&\n\x0b\n\x04\
|
||||
\x08\xe7\x07\x02\x12\x03\x0f\0&\n\x0c\n\x05\x08\xe7\x07\x02\x02\x12\x03\
|
||||
\x0f\x07\x1e\n\r\n\x06\x08\xe7\x07\x02\x02\0\x12\x03\x0f\x07\x1e\n\x0e\n\
|
||||
\x07\x08\xe7\x07\x02\x02\0\x01\x12\x03\x0f\x08\x1d\n\x0c\n\x05\x08\xe7\
|
||||
\x07\x02\x03\x12\x03\x0f!%\n\x08\n\x01\x08\x12\x03\x10\0'\n\x0b\n\x04\
|
||||
\x08\xe7\x07\x03\x12\x03\x10\0'\n\x0c\n\x05\x08\xe7\x07\x03\x02\x12\x03\
|
||||
\x10\x07\x1f\n\r\n\x06\x08\xe7\x07\x03\x02\0\x12\x03\x10\x07\x1f\n\x0e\n\
|
||||
\x07\x08\xe7\x07\x03\x02\0\x01\x12\x03\x10\x08\x1e\n\x0c\n\x05\x08\xe7\
|
||||
\x07\x03\x03\x12\x03\x10\"&\n\n\n\x02\x04\0\x12\x04\x12\0\x14\x01\n\n\n\
|
||||
\x03\x04\0\x01\x12\x03\x12\x08\x14\n\x0b\n\x04\x04\0\x02\0\x12\x03\x13\
|
||||
\x08\x1b\n\r\n\x05\x04\0\x02\0\x04\x12\x04\x13\x08\x12\x16\n\x0c\n\x05\
|
||||
\x04\0\x02\0\x05\x12\x03\x13\x08\x0e\n\x0c\n\x05\x04\0\x02\0\x01\x12\x03\
|
||||
\x13\x0f\x16\n\x0c\n\x05\x04\0\x02\0\x03\x12\x03\x13\x19\x1a\n\n\n\x02\
|
||||
\x04\x01\x12\x04\x16\0\x1d\x01\n\n\n\x03\x04\x01\x01\x12\x03\x16\x08\x1b\
|
||||
\n\x0c\n\x04\x04\x01\x04\0\x12\x04\x17\x08\x1b\t\n\x0c\n\x05\x04\x01\x04\
|
||||
\0\x01\x12\x03\x17\r\x1a\n\r\n\x06\x04\x01\x04\0\x02\0\x12\x03\x18\x10\
|
||||
\x1c\n\x0e\n\x07\x04\x01\x04\0\x02\0\x01\x12\x03\x18\x10\x17\n\x0e\n\x07\
|
||||
\x04\x01\x04\0\x02\0\x02\x12\x03\x18\x1a\x1b\n\r\n\x06\x04\x01\x04\0\x02\
|
||||
\x01\x12\x03\x19\x10\x1c\n\x0e\n\x07\x04\x01\x04\0\x02\x01\x01\x12\x03\
|
||||
\x19\x10\x17\n\x0e\n\x07\x04\x01\x04\0\x02\x01\x02\x12\x03\x19\x1a\x1b\n\
|
||||
\r\n\x06\x04\x01\x04\0\x02\x02\x12\x03\x1a\x10\x20\n\x0e\n\x07\x04\x01\
|
||||
\x04\0\x02\x02\x01\x12\x03\x1a\x10\x1b\n\x0e\n\x07\x04\x01\x04\0\x02\x02\
|
||||
\x02\x12\x03\x1a\x1e\x1f\n\x0b\n\x04\x04\x01\x02\0\x12\x03\x1c\x08!\n\r\
|
||||
\n\x05\x04\x01\x02\0\x04\x12\x04\x1c\x08\x1b\t\n\x0c\n\x05\x04\x01\x02\0\
|
||||
\x06\x12\x03\x1c\x08\x15\n\x0c\n\x05\x04\x01\x02\0\x01\x12\x03\x1c\x16\
|
||||
\x1c\n\x0c\n\x05\x04\x01\x02\0\x03\x12\x03\x1c\x1f\x20\n\n\n\x02\x04\x02\
|
||||
\x12\x04\x1f\0\"\x01\n\n\n\x03\x04\x02\x01\x12\x03\x1f\x08\x1c\n\x0b\n\
|
||||
\x04\x04\x02\x02\0\x12\x03\x20\x08\x20\n\r\n\x05\x04\x02\x02\0\x04\x12\
|
||||
\x04\x20\x08\x1f\x1e\n\x0c\n\x05\x04\x02\x02\0\x05\x12\x03\x20\x08\x0e\n\
|
||||
\x0c\n\x05\x04\x02\x02\0\x01\x12\x03\x20\x0f\x1b\n\x0c\n\x05\x04\x02\x02\
|
||||
\0\x03\x12\x03\x20\x1e\x1f\n\x0b\n\x04\x04\x02\x02\x01\x12\x03!\x08!\n\r\
|
||||
\n\x05\x04\x02\x02\x01\x04\x12\x04!\x08\x20\x20\n\x0c\n\x05\x04\x02\x02\
|
||||
\x01\x05\x12\x03!\x08\x0e\n\x0c\n\x05\x04\x02\x02\x01\x01\x12\x03!\x0f\
|
||||
\x1c\n\x0c\n\x05\x04\x02\x02\x01\x03\x12\x03!\x1f\x20\n\n\n\x02\x06\0\
|
||||
\x12\x04$\0'\x01\n\n\n\x03\x06\0\x01\x12\x03$\x08\x0e\n\x0b\n\x04\x06\0\
|
||||
\x02\0\x12\x03%\x08>\n\x0c\n\x05\x06\0\x02\0\x01\x12\x03%\x0c\x11\n\x0c\
|
||||
\n\x05\x06\0\x02\0\x02\x12\x03%\x12\x1e\n\x0c\n\x05\x06\0\x02\0\x03\x12\
|
||||
\x03%)<\n\x0b\n\x04\x06\0\x02\x01\x12\x03&\x08A\n\x0c\n\x05\x06\0\x02\
|
||||
\x01\x01\x12\x03&\x0c\x13\n\x0c\n\x05\x06\0\x02\x01\x02\x12\x03&\x14\x20\
|
||||
\n\x0c\n\x05\x06\0\x02\x01\x03\x12\x03&+?b\x06proto3\
|
||||
";
|
||||
|
||||
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;
|
||||
|
||||
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
|
||||
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
|
||||
}
|
||||
|
||||
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
|
||||
unsafe {
|
||||
file_descriptor_proto_lazy.get(|| {
|
||||
parse_descriptor_proto()
|
||||
})
|
||||
}
|
||||
}
|
||||
94
src/agent/protocols/src/health_ttrpc.rs
Normal file
94
src/agent/protocols/src/health_ttrpc.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
// This file is generated by ttrpc-compiler 0.2.0. Do not edit
|
||||
// @generated
|
||||
|
||||
// https://github.com/Manishearth/rust-clippy/issues/702
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clipto_camel_casepy)]
|
||||
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
#![allow(box_pointers)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(missing_docs)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(trivial_casts)]
|
||||
#![allow(unsafe_code)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_results)]
|
||||
use protobuf::{CodedInputStream, CodedOutputStream, Message};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct HealthClient {
|
||||
client: ::ttrpc::Client,
|
||||
}
|
||||
|
||||
impl HealthClient {
|
||||
pub fn new(client: ::ttrpc::Client) -> Self {
|
||||
HealthClient {
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check(&self, req: &super::health::CheckRequest, timeout_nano: i64) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
|
||||
let mut cres = super::health::HealthCheckResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.Health", "Check", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
|
||||
pub fn version(&self, req: &super::health::CheckRequest, timeout_nano: i64) -> ::ttrpc::Result<super::health::VersionCheckResponse> {
|
||||
let mut cres = super::health::VersionCheckResponse::new();
|
||||
::ttrpc::client_request!(self, req, timeout_nano, "grpc.Health", "Version", cres);
|
||||
Ok(cres)
|
||||
}
|
||||
}
|
||||
|
||||
struct CheckMethod {
|
||||
service: Arc<std::boxed::Box<dyn Health + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for CheckMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, health, CheckRequest, check);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct VersionMethod {
|
||||
service: Arc<std::boxed::Box<dyn Health + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ::ttrpc::MethodHandler for VersionMethod {
|
||||
fn handler(&self, ctx: ::ttrpc::TtrpcContext, req: ::ttrpc::Request) -> ::ttrpc::Result<()> {
|
||||
::ttrpc::request_handler!(self, ctx, req, health, CheckRequest, version);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait Health {
|
||||
fn check(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::health::CheckRequest) -> ::ttrpc::Result<super::health::HealthCheckResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.Health/Check is not supported".to_string())))
|
||||
}
|
||||
fn version(&self, _ctx: &::ttrpc::TtrpcContext, _req: super::health::CheckRequest) -> ::ttrpc::Result<super::health::VersionCheckResponse> {
|
||||
Err(::ttrpc::Error::RpcStatus(::ttrpc::get_status(::ttrpc::Code::NOT_FOUND, "/grpc.Health/Version is not supported".to_string())))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_health(service: Arc<std::boxed::Box<dyn Health + Send + Sync>>) -> HashMap <String, Box<dyn ::ttrpc::MethodHandler + Send + Sync>> {
|
||||
let mut methods = HashMap::new();
|
||||
|
||||
methods.insert("/grpc.Health/Check".to_string(),
|
||||
std::boxed::Box::new(CheckMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods.insert("/grpc.Health/Version".to_string(),
|
||||
std::boxed::Box::new(VersionMethod{service: service.clone()}) as std::boxed::Box<dyn ::ttrpc::MethodHandler + Send + Sync>);
|
||||
|
||||
methods
|
||||
}
|
||||
21
src/agent/protocols/src/lib.rs
Normal file
21
src/agent/protocols/src/lib.rs
Normal file
@@ -0,0 +1,21 @@
|
||||
// Copyright (c) 2020 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
#![allow(bare_trait_objects)]
|
||||
|
||||
pub mod agent;
|
||||
pub mod agent_ttrpc;
|
||||
pub mod health;
|
||||
pub mod health_ttrpc;
|
||||
pub mod oci;
|
||||
pub mod types;
|
||||
pub mod empty;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn it_works() {
|
||||
assert_eq!(2 + 2, 4);
|
||||
}
|
||||
}
|
||||
10375
src/agent/protocols/src/oci.rs
Normal file
10375
src/agent/protocols/src/oci.rs
Normal file
File diff suppressed because it is too large
Load Diff
1563
src/agent/protocols/src/types.rs
Normal file
1563
src/agent/protocols/src/types.rs
Normal file
File diff suppressed because it is too large
Load Diff
25
src/agent/rustjail/Cargo.toml
Normal file
25
src/agent/rustjail/Cargo.toml
Normal file
@@ -0,0 +1,25 @@
|
||||
[package]
|
||||
name = "rustjail"
|
||||
version = "0.1.0"
|
||||
authors = ["Yang Bo <bo@hyper.sh>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
error-chain = "0.12.1"
|
||||
serde = "1.0.91"
|
||||
serde_json = "1.0.39"
|
||||
serde_derive = "1.0.91"
|
||||
oci = { path = "../oci" }
|
||||
protocols = { path ="../protocols" }
|
||||
caps = "0.3.0"
|
||||
nix = "0.17.0"
|
||||
scopeguard = "1.0.0"
|
||||
prctl = "1.0.0"
|
||||
lazy_static = "1.3.0"
|
||||
libc = "0.2.58"
|
||||
protobuf = "2.8.1"
|
||||
slog = "2.5.2"
|
||||
slog-scope = "4.1.2"
|
||||
scan_fmt = "0.2"
|
||||
regex = "1.1"
|
||||
path-absolutize = { git = "git://github.com/magiclen/path-absolutize.git", tag= "v1.2.0" }
|
||||
135
src/agent/rustjail/src/capabilities.rs
Normal file
135
src/agent/rustjail/src/capabilities.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// looks like we can use caps to manipulate capabilities
|
||||
// conveniently, use caps to do it directly.. maybe
|
||||
|
||||
use lazy_static;
|
||||
|
||||
use crate::errors::*;
|
||||
use crate::log_child;
|
||||
use crate::sync::write_count;
|
||||
use caps::{self, CapSet, Capability, CapsHashSet};
|
||||
use oci::LinuxCapabilities;
|
||||
use std::collections::HashMap;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref CAPSMAP: HashMap<String, Capability> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert("CAP_CHOWN".to_string(), Capability::CAP_CHOWN);
|
||||
m.insert("CAP_DAC_OVERRIDE".to_string(), Capability::CAP_DAC_OVERRIDE);
|
||||
m.insert(
|
||||
"CAP_DAC_READ_SEARCH".to_string(),
|
||||
Capability::CAP_DAC_READ_SEARCH,
|
||||
);
|
||||
m.insert("CAP_FOWNER".to_string(), Capability::CAP_FOWNER);
|
||||
m.insert("CAP_FSETID".to_string(), Capability::CAP_FSETID);
|
||||
m.insert("CAP_KILL".to_string(), Capability::CAP_KILL);
|
||||
m.insert("CAP_SETGID".to_string(), Capability::CAP_SETGID);
|
||||
m.insert("CAP_SETUID".to_string(), Capability::CAP_SETUID);
|
||||
m.insert("CAP_SETPCAP".to_string(), Capability::CAP_SETPCAP);
|
||||
m.insert(
|
||||
"CAP_LINUX_IMMUTABLE".to_string(),
|
||||
Capability::CAP_LINUX_IMMUTABLE,
|
||||
);
|
||||
m.insert(
|
||||
"CAP_NET_BIND_SERVICE".to_string(),
|
||||
Capability::CAP_NET_BIND_SERVICE,
|
||||
);
|
||||
m.insert(
|
||||
"CAP_NET_BROADCAST".to_string(),
|
||||
Capability::CAP_NET_BROADCAST,
|
||||
);
|
||||
m.insert("CAP_NET_ADMIN".to_string(), Capability::CAP_NET_ADMIN);
|
||||
m.insert("CAP_NET_RAW".to_string(), Capability::CAP_NET_RAW);
|
||||
m.insert("CAP_IPC_LOCK".to_string(), Capability::CAP_IPC_LOCK);
|
||||
m.insert("CAP_IPC_OWNER".to_string(), Capability::CAP_IPC_OWNER);
|
||||
m.insert("CAP_SYS_MODULE".to_string(), Capability::CAP_SYS_MODULE);
|
||||
m.insert("CAP_SYS_RAWIO".to_string(), Capability::CAP_SYS_RAWIO);
|
||||
m.insert("CAP_SYS_CHROOT".to_string(), Capability::CAP_SYS_CHROOT);
|
||||
m.insert("CAP_SYS_PTRACE".to_string(), Capability::CAP_SYS_PTRACE);
|
||||
m.insert("CAP_SYS_PACCT".to_string(), Capability::CAP_SYS_PACCT);
|
||||
m.insert("CAP_SYS_ADMIN".to_string(), Capability::CAP_SYS_ADMIN);
|
||||
m.insert("CAP_SYS_BOOT".to_string(), Capability::CAP_SYS_BOOT);
|
||||
m.insert("CAP_SYS_NICE".to_string(), Capability::CAP_SYS_NICE);
|
||||
m.insert("CAP_SYS_RESOURCE".to_string(), Capability::CAP_SYS_RESOURCE);
|
||||
m.insert("CAP_SYS_TIME".to_string(), Capability::CAP_SYS_TIME);
|
||||
m.insert(
|
||||
"CAP_SYS_TTY_CONFIG".to_string(),
|
||||
Capability::CAP_SYS_TTY_CONFIG,
|
||||
);
|
||||
m.insert("CAP_MKNOD".to_string(), Capability::CAP_MKNOD);
|
||||
m.insert("CAP_LEASE".to_string(), Capability::CAP_LEASE);
|
||||
m.insert("CAP_AUDIT_WRITE".to_string(), Capability::CAP_AUDIT_WRITE);
|
||||
m.insert("CAP_AUDIT_CONTROL".to_string(), Capability::CAP_AUDIT_WRITE);
|
||||
m.insert("CAP_SETFCAP".to_string(), Capability::CAP_SETFCAP);
|
||||
m.insert("CAP_MAC_OVERRIDE".to_string(), Capability::CAP_MAC_OVERRIDE);
|
||||
m.insert("CAP_SYSLOG".to_string(), Capability::CAP_SYSLOG);
|
||||
m.insert("CAP_WAKE_ALARM".to_string(), Capability::CAP_WAKE_ALARM);
|
||||
m.insert(
|
||||
"CAP_BLOCK_SUSPEND".to_string(),
|
||||
Capability::CAP_BLOCK_SUSPEND,
|
||||
);
|
||||
m.insert("CAP_AUDIT_READ".to_string(), Capability::CAP_AUDIT_READ);
|
||||
m
|
||||
};
|
||||
}
|
||||
|
||||
fn to_capshashset(cfd_log: RawFd, caps: &[String]) -> CapsHashSet {
|
||||
let mut r = CapsHashSet::new();
|
||||
|
||||
for cap in caps.iter() {
|
||||
let c = CAPSMAP.get(cap);
|
||||
|
||||
if c.is_none() {
|
||||
log_child!(cfd_log, "{} is not a cap", cap);
|
||||
continue;
|
||||
}
|
||||
|
||||
r.insert(*c.unwrap());
|
||||
}
|
||||
|
||||
r
|
||||
}
|
||||
|
||||
pub fn reset_effective() -> Result<()> {
|
||||
caps::set(None, CapSet::Effective, caps::all())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn drop_priviledges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
|
||||
let all = caps::all();
|
||||
|
||||
for c in all.difference(&to_capshashset(cfd_log, caps.bounding.as_ref())) {
|
||||
caps::drop(None, CapSet::Bounding, *c)?;
|
||||
}
|
||||
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Effective,
|
||||
to_capshashset(cfd_log, caps.effective.as_ref()),
|
||||
)?;
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Permitted,
|
||||
to_capshashset(cfd_log, caps.permitted.as_ref()),
|
||||
)?;
|
||||
caps::set(
|
||||
None,
|
||||
CapSet::Inheritable,
|
||||
to_capshashset(cfd_log, caps.inheritable.as_ref()),
|
||||
)?;
|
||||
|
||||
if let Err(_) = caps::set(
|
||||
None,
|
||||
CapSet::Ambient,
|
||||
to_capshashset(cfd_log, caps.ambient.as_ref()),
|
||||
) {
|
||||
log_child!(cfd_log, "failed to set ambient capability");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
1474
src/agent/rustjail/src/cgroups/fs/mod.rs
Normal file
1474
src/agent/rustjail/src/cgroups/fs/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
49
src/agent/rustjail/src/cgroups/mod.rs
Normal file
49
src/agent/rustjail/src/cgroups/mod.rs
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::errors::*;
|
||||
// use crate::configs::{FreezerState, Config};
|
||||
use oci::LinuxResources;
|
||||
use protocols::agent::CgroupStats;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub mod fs;
|
||||
pub mod systemd;
|
||||
|
||||
pub type FreezerState = &'static str;
|
||||
|
||||
pub trait Manager {
|
||||
fn apply(&self, _pid: i32) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn get_pids(&self) -> Result<Vec<i32>> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn get_all_pids(&self) -> Result<Vec<i32>> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn get_stats(&self) -> Result<CgroupStats> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn freeze(&self, _state: FreezerState) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn destroy(&mut self) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn get_paths(&self) -> Result<HashMap<String, String>> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
|
||||
fn set(&self, _container: &LinuxResources, _update: bool) -> Result<()> {
|
||||
Err(ErrorKind::ErrorCode("not supported!".to_string()).into())
|
||||
}
|
||||
}
|
||||
10
src/agent/rustjail/src/cgroups/systemd.rs
Normal file
10
src/agent/rustjail/src/cgroups/systemd.rs
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::cgroups::Manager as CgroupManager;
|
||||
|
||||
pub struct Manager {}
|
||||
|
||||
impl CgroupManager for Manager {}
|
||||
56
src/agent/rustjail/src/configs/device.rs
Normal file
56
src/agent/rustjail/src/configs/device.rs
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use libc::*;
|
||||
use serde;
|
||||
#[macro_use]
|
||||
use serde_derive;
|
||||
use serde_json;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Device {
|
||||
#[serde(default)]
|
||||
r#type: char,
|
||||
#[serde(default)]
|
||||
path: String,
|
||||
#[serde(default)]
|
||||
major: i64,
|
||||
#[serde(default)]
|
||||
minor: i64,
|
||||
#[serde(default)]
|
||||
permissions: String,
|
||||
#[serde(default)]
|
||||
file_mode: mode_t,
|
||||
#[serde(default)]
|
||||
uid: i32,
|
||||
#[serde(default)]
|
||||
gid: i32,
|
||||
#[serde(default)]
|
||||
allow: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct BlockIODevice {
|
||||
#[serde(default)]
|
||||
major: i64,
|
||||
#[serde(default)]
|
||||
minor: i64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct WeightDevice {
|
||||
block: BlockIODevice,
|
||||
#[serde(default)]
|
||||
weight: u16,
|
||||
#[serde(default, rename = "leafWeight")]
|
||||
leaf_weight: u16,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ThrottleDevice {
|
||||
block: BlockIODevice,
|
||||
#[serde(default)]
|
||||
rate: u64,
|
||||
}
|
||||
494
src/agent/rustjail/src/configs/mod.rs
Normal file
494
src/agent/rustjail/src/configs/mod.rs
Normal file
@@ -0,0 +1,494 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use serde;
|
||||
#[macro_use]
|
||||
use serde_derive;
|
||||
use serde_json;
|
||||
|
||||
use protocols::oci::State as OCIState;
|
||||
|
||||
use crate::errors::*;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use nix::unistd;
|
||||
|
||||
use self::device::{Device, ThrottleDevice, WeightDevice};
|
||||
use self::namespaces::Namespaces;
|
||||
use crate::specconv::CreateOpts;
|
||||
|
||||
pub mod device;
|
||||
pub mod namespaces;
|
||||
pub mod validator;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Rlimit {
|
||||
#[serde(default)]
|
||||
r#type: i32,
|
||||
#[serde(default)]
|
||||
hard: i32,
|
||||
#[serde(default)]
|
||||
soft: i32,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct IDMap {
|
||||
#[serde(default)]
|
||||
container_id: i32,
|
||||
#[serde(default)]
|
||||
host_id: i32,
|
||||
#[serde(default)]
|
||||
size: i32,
|
||||
}
|
||||
|
||||
type Action = i32;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Seccomp {
|
||||
#[serde(default)]
|
||||
default_action: Action,
|
||||
#[serde(default)]
|
||||
architectures: Vec<String>,
|
||||
#[serde(default)]
|
||||
syscalls: Vec<Syscall>,
|
||||
}
|
||||
|
||||
type Operator = i32;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Arg {
|
||||
#[serde(default)]
|
||||
index: u32,
|
||||
#[serde(default)]
|
||||
value: u64,
|
||||
#[serde(default)]
|
||||
value_two: u64,
|
||||
#[serde(default)]
|
||||
op: Operator,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Syscall {
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
name: String,
|
||||
#[serde(default)]
|
||||
action: Action,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
args: Vec<Arg>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Config<'a> {
|
||||
#[serde(default)]
|
||||
no_pivot_root: bool,
|
||||
#[serde(default)]
|
||||
parent_death_signal: i32,
|
||||
#[serde(default)]
|
||||
rootfs: String,
|
||||
#[serde(default)]
|
||||
readonlyfs: bool,
|
||||
#[serde(default, rename = "rootPropagation")]
|
||||
root_propagation: i32,
|
||||
#[serde(default)]
|
||||
mounts: Vec<Mount>,
|
||||
#[serde(default)]
|
||||
devices: Vec<Device>,
|
||||
#[serde(default)]
|
||||
mount_label: String,
|
||||
#[serde(default)]
|
||||
hostname: String,
|
||||
#[serde(default)]
|
||||
namespaces: Namespaces,
|
||||
#[serde(default)]
|
||||
capabilities: Option<Capabilities>,
|
||||
#[serde(default)]
|
||||
networks: Vec<Network>,
|
||||
#[serde(default)]
|
||||
routes: Vec<Route>,
|
||||
#[serde(default)]
|
||||
cgroups: Option<Cgroup<'a>>,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
apparmor_profile: String,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
process_label: String,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
rlimits: Vec<Rlimit>,
|
||||
#[serde(default)]
|
||||
oom_score_adj: Option<i32>,
|
||||
#[serde(default)]
|
||||
uid_mappings: Vec<IDMap>,
|
||||
#[serde(default)]
|
||||
gid_mappings: Vec<IDMap>,
|
||||
#[serde(default)]
|
||||
mask_paths: Vec<String>,
|
||||
#[serde(default)]
|
||||
readonly_paths: Vec<String>,
|
||||
#[serde(default)]
|
||||
sysctl: HashMap<String, String>,
|
||||
#[serde(default)]
|
||||
seccomp: Option<Seccomp>,
|
||||
#[serde(default)]
|
||||
no_new_privileges: bool,
|
||||
hooks: Option<Hooks>,
|
||||
#[serde(default)]
|
||||
version: String,
|
||||
#[serde(default)]
|
||||
labels: Vec<String>,
|
||||
#[serde(default)]
|
||||
no_new_keyring: bool,
|
||||
#[serde(default)]
|
||||
intel_rdt: Option<IntelRdt>,
|
||||
#[serde(default)]
|
||||
rootless_euid: bool,
|
||||
#[serde(default)]
|
||||
rootless_cgroups: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Hooks {
|
||||
prestart: Vec<Box<Hook>>,
|
||||
poststart: Vec<Box<Hook>>,
|
||||
poststop: Vec<Box<Hook>>,
|
||||
}
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Capabilities {
|
||||
bounding: Vec<String>,
|
||||
effective: Vec<String>,
|
||||
inheritable: Vec<String>,
|
||||
permitted: Vec<String>,
|
||||
ambient: Vec<String>,
|
||||
}
|
||||
|
||||
pub trait Hook {
|
||||
fn run(&self, state: &OCIState) -> Result<()>;
|
||||
}
|
||||
|
||||
pub struct FuncHook {
|
||||
// run: fn(&OCIState) -> Result<()>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Command {
|
||||
#[serde(default)]
|
||||
path: String,
|
||||
#[serde(default)]
|
||||
args: Vec<String>,
|
||||
#[serde(default)]
|
||||
env: Vec<String>,
|
||||
#[serde(default)]
|
||||
dir: String,
|
||||
#[serde(default)]
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
pub struct CommandHook {
|
||||
command: Command,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Mount {
|
||||
#[serde(default)]
|
||||
source: String,
|
||||
#[serde(default)]
|
||||
destination: String,
|
||||
#[serde(default)]
|
||||
device: String,
|
||||
#[serde(default)]
|
||||
flags: i32,
|
||||
#[serde(default)]
|
||||
propagation_flags: Vec<i32>,
|
||||
#[serde(default)]
|
||||
data: String,
|
||||
#[serde(default)]
|
||||
relabel: String,
|
||||
#[serde(default)]
|
||||
extensions: i32,
|
||||
#[serde(default)]
|
||||
premount_cmds: Vec<Command>,
|
||||
#[serde(default)]
|
||||
postmount_cmds: Vec<Command>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct HugepageLimit {
|
||||
#[serde(default)]
|
||||
page_size: String,
|
||||
#[serde(default)]
|
||||
limit: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct IntelRdt {
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
l3_cache_schema: String,
|
||||
#[serde(
|
||||
default,
|
||||
rename = "memBwSchema",
|
||||
skip_serializing_if = "String::is_empty"
|
||||
)]
|
||||
mem_bw_schema: String,
|
||||
}
|
||||
|
||||
pub type FreezerState = String;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Cgroup<'a> {
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
name: String,
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
parent: String,
|
||||
#[serde(default)]
|
||||
path: String,
|
||||
#[serde(default)]
|
||||
scope_prefix: String,
|
||||
paths: HashMap<String, String>,
|
||||
resource: &'a Resources<'a>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Resources<'a> {
|
||||
#[serde(default)]
|
||||
allow_all_devices: bool,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
allowed_devices: Vec<&'a Device>,
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
denied_devices: Vec<&'a Device>,
|
||||
#[serde(default)]
|
||||
devices: Vec<&'a Device>,
|
||||
#[serde(default)]
|
||||
memory: i64,
|
||||
#[serde(default)]
|
||||
memory_reservation: i64,
|
||||
#[serde(default)]
|
||||
memory_swap: i64,
|
||||
#[serde(default)]
|
||||
kernel_memory: i64,
|
||||
#[serde(default)]
|
||||
kernel_memory_tcp: i64,
|
||||
#[serde(default)]
|
||||
cpu_shares: u64,
|
||||
#[serde(default)]
|
||||
cpu_quota: i64,
|
||||
#[serde(default)]
|
||||
cpu_period: u64,
|
||||
#[serde(default)]
|
||||
cpu_rt_quota: i64,
|
||||
#[serde(default)]
|
||||
cpu_rt_period: u64,
|
||||
#[serde(default)]
|
||||
cpuset_cpus: String,
|
||||
#[serde(default)]
|
||||
cpuset_mems: String,
|
||||
#[serde(default)]
|
||||
pids_limit: i64,
|
||||
#[serde(default)]
|
||||
blkio_weight: u64,
|
||||
#[serde(default)]
|
||||
blkio_leaf_weight: u64,
|
||||
#[serde(default)]
|
||||
blkio_weight_device: Vec<&'a WeightDevice>,
|
||||
#[serde(default)]
|
||||
blkio_throttle_read_bps_device: Vec<&'a ThrottleDevice>,
|
||||
#[serde(default)]
|
||||
blkio_throttle_write_bps_device: Vec<&'a ThrottleDevice>,
|
||||
#[serde(default)]
|
||||
blkio_throttle_read_iops_device: Vec<&'a ThrottleDevice>,
|
||||
#[serde(default)]
|
||||
blkio_throttle_write_iops_device: Vec<&'a ThrottleDevice>,
|
||||
#[serde(default)]
|
||||
freezer: FreezerState,
|
||||
#[serde(default)]
|
||||
hugetlb_limit: Vec<&'a HugepageLimit>,
|
||||
#[serde(default)]
|
||||
oom_kill_disable: bool,
|
||||
#[serde(default)]
|
||||
memory_swapiness: u64,
|
||||
#[serde(default)]
|
||||
net_prio_ifpriomap: Vec<&'a IfPrioMap>,
|
||||
#[serde(default)]
|
||||
net_cls_classid_u: u32,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Network {
|
||||
#[serde(default)]
|
||||
r#type: String,
|
||||
#[serde(default)]
|
||||
name: String,
|
||||
#[serde(default)]
|
||||
bridge: String,
|
||||
#[serde(default)]
|
||||
mac_address: String,
|
||||
#[serde(default)]
|
||||
address: String,
|
||||
#[serde(default)]
|
||||
gateway: String,
|
||||
#[serde(default)]
|
||||
ipv6_address: String,
|
||||
#[serde(default)]
|
||||
ipv6_gateway: String,
|
||||
#[serde(default)]
|
||||
mtu: i32,
|
||||
#[serde(default)]
|
||||
txqueuelen: i32,
|
||||
#[serde(default)]
|
||||
host_interface_name: String,
|
||||
#[serde(default)]
|
||||
hairpin_mode: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Route {
|
||||
#[serde(default)]
|
||||
destination: String,
|
||||
#[serde(default)]
|
||||
source: String,
|
||||
#[serde(default)]
|
||||
gateway: String,
|
||||
#[serde(default)]
|
||||
interface_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct IfPrioMap {
|
||||
#[serde(default)]
|
||||
interface: String,
|
||||
#[serde(default)]
|
||||
priority: i32,
|
||||
}
|
||||
|
||||
impl IfPrioMap {
|
||||
fn cgroup_string(&self) -> String {
|
||||
format!("{} {}", self.interface, self.priority)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
impl Config {
|
||||
fn new(opts: &CreateOpts) -> Result<Self> {
|
||||
if opts.spec.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("invalid createopts!".into()));
|
||||
}
|
||||
|
||||
let root = unistd::getcwd().chain_err(|| "cannot getwd")?;
|
||||
let root = root.as_path().canonicalize().chain_err(||
|
||||
"cannot resolve root into absolute path")?;
|
||||
let mut root = root.into();
|
||||
let cwd = root.clone();
|
||||
|
||||
let spec = opts.spec.as_ref().unwrap();
|
||||
if spec.root.is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no root".into()));
|
||||
}
|
||||
|
||||
let rootfs = PathBuf::from(&spec.root.as_ref().unwrap().path);
|
||||
if rootfs.is_relative() {
|
||||
root = format!("{}/{}", root, rootfs.into());
|
||||
}
|
||||
|
||||
// handle annotations
|
||||
let mut label = spec.annotations
|
||||
.iter()
|
||||
.map(|(key, value)| format!("{}={}", key, value)).collect();
|
||||
label.push(format!("bundle={}", cwd));
|
||||
|
||||
let mut config = Config {
|
||||
rootfs: root,
|
||||
no_pivot_root: opts.no_pivot_root,
|
||||
readonlyfs: spec.root.as_ref().unwrap().readonly,
|
||||
hostname: spec.hostname.clone(),
|
||||
labels: label,
|
||||
no_new_keyring: opts.no_new_keyring,
|
||||
rootless_euid: opts.rootless_euid,
|
||||
rootless_cgroups: opts.rootless_cgroups,
|
||||
};
|
||||
|
||||
config.mounts = Vec::new();
|
||||
for m in &spec.mounts {
|
||||
config.mounts.push(Mount::new(&cwd, &m)?);
|
||||
}
|
||||
|
||||
config.devices = create_devices(&spec)?;
|
||||
config.cgroups = Cgroups::new(&opts)?;
|
||||
|
||||
if spec.linux.as_ref().is_none() {
|
||||
return Err(ErrorKind::ErrorCode("no linux configuration".into()));
|
||||
}
|
||||
let linux = spec.linux.as_ref().unwrap();
|
||||
|
||||
let propagation = MOUNTPROPAGATIONMAPPING.get(linux.rootfs_propagation);
|
||||
if propagation.is_none() {
|
||||
Err(ErrorKind::ErrorCode("rootfs propagation not support".into()));
|
||||
}
|
||||
|
||||
config.root_propagation = propagation.unwrap();
|
||||
if config.no_pivot_root && (config.root_propagation & MSFlags::MSPRIVATE != 0) {
|
||||
return Err(ErrorKind::ErrorCode("[r]private is not safe without pivot root".into()));
|
||||
}
|
||||
|
||||
// handle namespaces
|
||||
let m: HashMap<String, String> = HashMap::new();
|
||||
for ns in &linux.namespaces {
|
||||
if NAMESPACEMAPPING.get(&ns.r#type.as_str()).is_none() {
|
||||
return Err(ErrorKind::ErrorCode("namespace don't exist".into()));
|
||||
}
|
||||
|
||||
if m.get(&ns.r#type).is_some() {
|
||||
return Err(ErrorKind::ErrorCode(format!("duplicate ns {}", ns.r#type)));
|
||||
}
|
||||
|
||||
m.insert(ns.r#type, ns.path);
|
||||
}
|
||||
|
||||
if m.contains_key(oci::NETWORKNAMESPACE) {
|
||||
let path = m.get(oci::NETWORKNAMESPACE).unwrap();
|
||||
if path == "" {
|
||||
config.networks = vec![Network {
|
||||
r#type: "loopback",
|
||||
}];
|
||||
}
|
||||
}
|
||||
|
||||
if m.contains_key(oci::USERNAMESPACE) {
|
||||
setup_user_namespace(&spec, &mut config)?;
|
||||
}
|
||||
|
||||
config.namespaces = m.iter().map(|(key, value)| Namespace {
|
||||
r#type: key,
|
||||
path: value,
|
||||
}).collect();
|
||||
config.mask_paths = linux.mask_paths;
|
||||
config.readonly_path = linux.readonly_path;
|
||||
config.mount_label = linux.mount_label;
|
||||
config.sysctl = linux.sysctl;
|
||||
config.seccomp = None;
|
||||
config.intelrdt = None;
|
||||
|
||||
if spec.process.is_some() {
|
||||
let process = spec.process.as_ref().unwrap();
|
||||
config.oom_score_adj = process.oom_score_adj;
|
||||
config.process_label = process.selinux_label.clone();
|
||||
if process.capabilities.as_ref().is_some() {
|
||||
let cap = process.capabilities.as_ref().unwrap();
|
||||
config.capabilities = Some(Capabilities {
|
||||
..cap
|
||||
})
|
||||
}
|
||||
}
|
||||
config.hooks = None;
|
||||
config.version = spec.version;
|
||||
Ok(config)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl Mount {
|
||||
fn new(cwd: &str, m: &oci::Mount) -> Result<Self> {
|
||||
}
|
||||
}
|
||||
*/
|
||||
46
src/agent/rustjail/src/configs/namespaces.rs
Normal file
46
src/agent/rustjail/src/configs/namespaces.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use serde;
|
||||
#[macro_use]
|
||||
use serde_derive;
|
||||
use serde_json;
|
||||
|
||||
use std::collections::HashMap;
|
||||
#[macro_use]
|
||||
use lazy_static;
|
||||
|
||||
pub type NamespaceType = String;
|
||||
pub type Namespaces = Vec<Namespace>;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Namespace {
|
||||
#[serde(default)]
|
||||
r#type: NamespaceType,
|
||||
#[serde(default)]
|
||||
path: String,
|
||||
}
|
||||
|
||||
pub const NEWNET: &'static str = "NEWNET";
|
||||
pub const NEWPID: &'static str = "NEWPID";
|
||||
pub const NEWNS: &'static str = "NEWNS";
|
||||
pub const NEWUTS: &'static str = "NEWUTS";
|
||||
pub const NEWUSER: &'static str = "NEWUSER";
|
||||
pub const NEWCGROUP: &'static str = "NEWCGROUP";
|
||||
pub const NEWIPC: &'static str = "NEWIPC";
|
||||
|
||||
lazy_static! {
|
||||
static ref TYPETONAME: HashMap<&'static str, &'static str> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert("pid", "pid");
|
||||
m.insert("network", "net");
|
||||
m.insert("mount", "mnt");
|
||||
m.insert("user", "user");
|
||||
m.insert("uts", "uts");
|
||||
m.insert("ipc", "ipc");
|
||||
m.insert("cgroup", "cgroup");
|
||||
m
|
||||
};
|
||||
}
|
||||
23
src/agent/rustjail/src/configs/validator.rs
Normal file
23
src/agent/rustjail/src/configs/validator.rs
Normal file
@@ -0,0 +1,23 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::configs::Config;
|
||||
use std::io::Result;
|
||||
|
||||
pub trait Validator {
|
||||
fn validate(&self, config: &Config) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConfigValidator {}
|
||||
|
||||
impl Validator for ConfigValidator {}
|
||||
|
||||
impl ConfigValidator {
|
||||
fn new() -> Self {
|
||||
ConfigValidator {}
|
||||
}
|
||||
}
|
||||
1562
src/agent/rustjail/src/container.rs
Normal file
1562
src/agent/rustjail/src/container.rs
Normal file
File diff suppressed because it is too large
Load Diff
34
src/agent/rustjail/src/errors.rs
Normal file
34
src/agent/rustjail/src/errors.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// define errors here
|
||||
|
||||
error_chain! {
|
||||
types {
|
||||
Error, ErrorKind, ResultExt, Result;
|
||||
}
|
||||
// foreign error conv to chain error
|
||||
foreign_links {
|
||||
Io(std::io::Error);
|
||||
Nix(nix::Error);
|
||||
Ffi(std::ffi::NulError);
|
||||
Caps(caps::errors::Error);
|
||||
Serde(serde_json::Error);
|
||||
FromUTF8(std::string::FromUtf8Error);
|
||||
Parse(std::num::ParseIntError);
|
||||
Scanfmt(scan_fmt::parse::ScanError);
|
||||
Ip(std::net::AddrParseError);
|
||||
Regex(regex::Error);
|
||||
EnvVar(std::env::VarError);
|
||||
UTF8(std::str::Utf8Error);
|
||||
}
|
||||
// define new errors
|
||||
errors {
|
||||
ErrorCode(t: String) {
|
||||
description("Error Code")
|
||||
display("Error Code: '{}'", t)
|
||||
}
|
||||
}
|
||||
}
|
||||
585
src/agent/rustjail/src/lib.rs
Normal file
585
src/agent/rustjail/src/lib.rs
Normal file
@@ -0,0 +1,585 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// #![allow(unused_attributes)]
|
||||
// #![allow(unused_imports)]
|
||||
// #![allow(unused_variables)]
|
||||
// #![allow(unused_mut)]
|
||||
#![allow(dead_code)]
|
||||
// #![allow(deprecated)]
|
||||
// #![allow(unused_must_use)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
// #![allow(unused_comparisons)]
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate caps;
|
||||
extern crate protocols;
|
||||
#[macro_use]
|
||||
extern crate scopeguard;
|
||||
extern crate prctl;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate libc;
|
||||
extern crate protobuf;
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
#[macro_use]
|
||||
extern crate scan_fmt;
|
||||
extern crate oci;
|
||||
extern crate path_absolutize;
|
||||
extern crate regex;
|
||||
|
||||
// Convenience macro to obtain the scope logger
|
||||
macro_rules! sl {
|
||||
() => {
|
||||
slog_scope::logger().new(o!("subsystem" => "rustjail"))
|
||||
};
|
||||
}
|
||||
|
||||
pub mod capabilities;
|
||||
pub mod cgroups;
|
||||
pub mod container;
|
||||
pub mod errors;
|
||||
pub mod mount;
|
||||
pub mod process;
|
||||
pub mod specconv;
|
||||
pub mod sync;
|
||||
pub mod validator;
|
||||
|
||||
// pub mod factory;
|
||||
//pub mod configs;
|
||||
// pub mod devices;
|
||||
// pub mod init;
|
||||
// pub mod rootfs;
|
||||
// pub mod capabilities;
|
||||
// pub mod console;
|
||||
// pub mod stats;
|
||||
// pub mod user;
|
||||
//pub mod intelrdt;
|
||||
|
||||
// construtc ociSpec from grpcSpec, which is needed for hook
|
||||
// execution. since hooks read config.json
|
||||
|
||||
use oci::{
|
||||
Box as ociBox, Hooks as ociHooks, Linux as ociLinux, LinuxCapabilities as ociLinuxCapabilities,
|
||||
Mount as ociMount, POSIXRlimit as ociPOSIXRlimit, Process as ociProcess, Root as ociRoot,
|
||||
Spec as ociSpec, User as ociUser,
|
||||
};
|
||||
use protocols::oci::{
|
||||
Hooks as grpcHooks, Linux as grpcLinux, Mount as grpcMount, Process as grpcProcess,
|
||||
Root as grpcRoot, Spec as grpcSpec,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
pub fn process_grpc_to_oci(p: &grpcProcess) -> ociProcess {
|
||||
let console_size = if p.ConsoleSize.is_some() {
|
||||
let c = p.ConsoleSize.as_ref().unwrap();
|
||||
Some(ociBox {
|
||||
height: c.Height,
|
||||
width: c.Width,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let user = if p.User.is_some() {
|
||||
let u = p.User.as_ref().unwrap();
|
||||
ociUser {
|
||||
uid: u.UID,
|
||||
gid: u.GID,
|
||||
additional_gids: u.AdditionalGids.clone(),
|
||||
username: u.Username.clone(),
|
||||
}
|
||||
} else {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
};
|
||||
|
||||
let capabilities = if p.Capabilities.is_some() {
|
||||
let cap = p.Capabilities.as_ref().unwrap();
|
||||
|
||||
Some(ociLinuxCapabilities {
|
||||
bounding: cap.Bounding.clone().into_vec(),
|
||||
effective: cap.Effective.clone().into_vec(),
|
||||
inheritable: cap.Inheritable.clone().into_vec(),
|
||||
permitted: cap.Permitted.clone().into_vec(),
|
||||
ambient: cap.Ambient.clone().into_vec(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let rlimits = {
|
||||
let mut r = Vec::new();
|
||||
for lm in p.Rlimits.iter() {
|
||||
r.push(ociPOSIXRlimit {
|
||||
r#type: lm.Type.clone(),
|
||||
hard: lm.Hard,
|
||||
soft: lm.Soft,
|
||||
});
|
||||
}
|
||||
r
|
||||
};
|
||||
|
||||
ociProcess {
|
||||
terminal: p.Terminal,
|
||||
console_size,
|
||||
user,
|
||||
args: p.Args.clone().into_vec(),
|
||||
env: p.Env.clone().into_vec(),
|
||||
cwd: p.Cwd.clone(),
|
||||
capabilities,
|
||||
rlimits,
|
||||
no_new_privileges: p.NoNewPrivileges,
|
||||
apparmor_profile: p.ApparmorProfile.clone(),
|
||||
oom_score_adj: Some(p.OOMScoreAdj as i32),
|
||||
selinux_label: p.SelinuxLabel.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
fn process_oci_to_grpc(_p: ociProcess) -> grpcProcess {
|
||||
// dont implement it for now
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
fn root_grpc_to_oci(root: &grpcRoot) -> ociRoot {
|
||||
ociRoot {
|
||||
path: root.Path.clone(),
|
||||
readonly: root.Readonly,
|
||||
}
|
||||
}
|
||||
|
||||
fn root_oci_to_grpc(_root: &ociRoot) -> grpcRoot {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
fn mount_grpc_to_oci(m: &grpcMount) -> ociMount {
|
||||
ociMount {
|
||||
destination: m.destination.clone(),
|
||||
r#type: m.field_type.clone(),
|
||||
source: m.source.clone(),
|
||||
options: m.options.clone().into_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
fn mount_oci_to_grpc(_m: &ociMount) -> grpcMount {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
use oci::Hook as ociHook;
|
||||
use protocols::oci::Hook as grpcHook;
|
||||
|
||||
fn hook_grpc_to_oci(h: &[grpcHook]) -> Vec<ociHook> {
|
||||
let mut r = Vec::new();
|
||||
for e in h.iter() {
|
||||
r.push(ociHook {
|
||||
path: e.Path.clone(),
|
||||
args: e.Args.clone().into_vec(),
|
||||
env: e.Env.clone().into_vec(),
|
||||
timeout: Some(e.Timeout as i32),
|
||||
});
|
||||
}
|
||||
r
|
||||
}
|
||||
|
||||
fn hooks_grpc_to_oci(h: &grpcHooks) -> ociHooks {
|
||||
let prestart = hook_grpc_to_oci(h.Prestart.as_ref());
|
||||
|
||||
let poststart = hook_grpc_to_oci(h.Poststart.as_ref());
|
||||
|
||||
let poststop = hook_grpc_to_oci(h.Poststop.as_ref());
|
||||
|
||||
ociHooks {
|
||||
prestart,
|
||||
poststart,
|
||||
poststop,
|
||||
}
|
||||
}
|
||||
|
||||
fn hooks_oci_to_grpc(_h: &ociHooks) -> grpcHooks {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
use oci::{
|
||||
LinuxDevice as ociLinuxDevice, LinuxIDMapping as ociLinuxIDMapping,
|
||||
LinuxIntelRdt as ociLinuxIntelRdt, LinuxNamespace as ociLinuxNamespace,
|
||||
LinuxResources as ociLinuxResources, LinuxSeccomp as ociLinuxSeccomp,
|
||||
};
|
||||
use protocols::oci::{
|
||||
LinuxIDMapping as grpcLinuxIDMapping, LinuxResources as grpcLinuxResources,
|
||||
LinuxSeccomp as grpcLinuxSeccomp,
|
||||
};
|
||||
|
||||
fn idmap_grpc_to_oci(im: &grpcLinuxIDMapping) -> ociLinuxIDMapping {
|
||||
ociLinuxIDMapping {
|
||||
container_id: im.ContainerID,
|
||||
host_id: im.HostID,
|
||||
size: im.Size,
|
||||
}
|
||||
}
|
||||
|
||||
fn idmaps_grpc_to_oci(ims: &[grpcLinuxIDMapping]) -> Vec<ociLinuxIDMapping> {
|
||||
let mut r = Vec::new();
|
||||
for im in ims.iter() {
|
||||
r.push(idmap_grpc_to_oci(im));
|
||||
}
|
||||
r
|
||||
}
|
||||
|
||||
use oci::{
|
||||
LinuxBlockIO as ociLinuxBlockIO, LinuxBlockIODevice as ociLinuxBlockIODevice,
|
||||
LinuxCPU as ociLinuxCPU, LinuxDeviceCgroup as ociLinuxDeviceCgroup,
|
||||
LinuxHugepageLimit as ociLinuxHugepageLimit,
|
||||
LinuxInterfacePriority as ociLinuxInterfacePriority, LinuxMemory as ociLinuxMemory,
|
||||
LinuxNetwork as ociLinuxNetwork, LinuxPids as ociLinuxPids,
|
||||
LinuxThrottleDevice as ociLinuxThrottleDevice, LinuxWeightDevice as ociLinuxWeightDevice,
|
||||
};
|
||||
use protocols::oci::{
|
||||
LinuxBlockIO as grpcLinuxBlockIO, LinuxThrottleDevice as grpcLinuxThrottleDevice,
|
||||
LinuxWeightDevice as grpcLinuxWeightDevice,
|
||||
};
|
||||
|
||||
fn throttle_devices_grpc_to_oci(tds: &[grpcLinuxThrottleDevice]) -> Vec<ociLinuxThrottleDevice> {
|
||||
let mut r = Vec::new();
|
||||
for td in tds.iter() {
|
||||
r.push(ociLinuxThrottleDevice {
|
||||
blk: ociLinuxBlockIODevice {
|
||||
major: td.Major,
|
||||
minor: td.Minor,
|
||||
},
|
||||
rate: td.Rate,
|
||||
});
|
||||
}
|
||||
r
|
||||
}
|
||||
|
||||
fn weight_devices_grpc_to_oci(wds: &[grpcLinuxWeightDevice]) -> Vec<ociLinuxWeightDevice> {
|
||||
let mut r = Vec::new();
|
||||
for wd in wds.iter() {
|
||||
r.push(ociLinuxWeightDevice {
|
||||
blk: ociLinuxBlockIODevice {
|
||||
major: wd.Major,
|
||||
minor: wd.Minor,
|
||||
},
|
||||
weight: Some(wd.Weight as u16),
|
||||
leaf_weight: Some(wd.LeafWeight as u16),
|
||||
});
|
||||
}
|
||||
r
|
||||
}
|
||||
|
||||
fn blockio_grpc_to_oci(blk: &grpcLinuxBlockIO) -> ociLinuxBlockIO {
|
||||
let weight_device = weight_devices_grpc_to_oci(blk.WeightDevice.as_ref());
|
||||
let throttle_read_bps_device = throttle_devices_grpc_to_oci(blk.ThrottleReadBpsDevice.as_ref());
|
||||
let throttle_write_bps_device =
|
||||
throttle_devices_grpc_to_oci(blk.ThrottleWriteBpsDevice.as_ref());
|
||||
let throttle_read_iops_device =
|
||||
throttle_devices_grpc_to_oci(blk.ThrottleReadIOPSDevice.as_ref());
|
||||
let throttle_write_iops_device =
|
||||
throttle_devices_grpc_to_oci(blk.ThrottleWriteIOPSDevice.as_ref());
|
||||
|
||||
ociLinuxBlockIO {
|
||||
weight: Some(blk.Weight as u16),
|
||||
leaf_weight: Some(blk.LeafWeight as u16),
|
||||
weight_device,
|
||||
throttle_read_bps_device,
|
||||
throttle_write_bps_device,
|
||||
throttle_read_iops_device,
|
||||
throttle_write_iops_device,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resources_grpc_to_oci(res: &grpcLinuxResources) -> ociLinuxResources {
|
||||
let devices = {
|
||||
let mut d = Vec::new();
|
||||
for dev in res.Devices.iter() {
|
||||
let major = if dev.Major == -1 {
|
||||
None
|
||||
} else {
|
||||
Some(dev.Major)
|
||||
};
|
||||
|
||||
let minor = if dev.Minor == -1 {
|
||||
None
|
||||
} else {
|
||||
Some(dev.Minor)
|
||||
};
|
||||
d.push(ociLinuxDeviceCgroup {
|
||||
allow: dev.Allow,
|
||||
r#type: dev.Type.clone(),
|
||||
major,
|
||||
minor,
|
||||
access: dev.Access.clone(),
|
||||
});
|
||||
}
|
||||
d
|
||||
};
|
||||
|
||||
let memory = if res.Memory.is_some() {
|
||||
let mem = res.Memory.as_ref().unwrap();
|
||||
Some(ociLinuxMemory {
|
||||
limit: Some(mem.Limit),
|
||||
reservation: Some(mem.Reservation),
|
||||
swap: Some(mem.Swap),
|
||||
kernel: Some(mem.Kernel),
|
||||
kernel_tcp: Some(mem.KernelTCP),
|
||||
swapiness: Some(mem.Swappiness as i64),
|
||||
disable_oom_killer: Some(mem.DisableOOMKiller),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let cpu = if res.CPU.is_some() {
|
||||
let c = res.CPU.as_ref().unwrap();
|
||||
Some(ociLinuxCPU {
|
||||
shares: Some(c.Shares),
|
||||
quota: Some(c.Quota),
|
||||
period: Some(c.Period),
|
||||
realtime_runtime: Some(c.RealtimeRuntime),
|
||||
realtime_period: Some(c.RealtimePeriod),
|
||||
cpus: c.Cpus.clone(),
|
||||
mems: c.Mems.clone(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let pids = if res.Pids.is_some() {
|
||||
let p = res.Pids.as_ref().unwrap();
|
||||
Some(ociLinuxPids { limit: p.Limit })
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let block_io = if res.BlockIO.is_some() {
|
||||
let blk = res.BlockIO.as_ref().unwrap();
|
||||
// copy LinuxBlockIO
|
||||
Some(blockio_grpc_to_oci(blk))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let hugepage_limits = {
|
||||
let mut r = Vec::new();
|
||||
for hl in res.HugepageLimits.iter() {
|
||||
r.push(ociLinuxHugepageLimit {
|
||||
page_size: hl.Pagesize.clone(),
|
||||
limit: hl.Limit,
|
||||
});
|
||||
}
|
||||
r
|
||||
};
|
||||
|
||||
let network = if res.Network.is_some() {
|
||||
let net = res.Network.as_ref().unwrap();
|
||||
let priorities = {
|
||||
let mut r = Vec::new();
|
||||
for pr in net.Priorities.iter() {
|
||||
r.push(ociLinuxInterfacePriority {
|
||||
name: pr.Name.clone(),
|
||||
priority: pr.Priority,
|
||||
});
|
||||
}
|
||||
r
|
||||
};
|
||||
Some(ociLinuxNetwork {
|
||||
class_id: Some(net.ClassID),
|
||||
priorities,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
ociLinuxResources {
|
||||
devices,
|
||||
memory,
|
||||
cpu,
|
||||
pids,
|
||||
block_io,
|
||||
hugepage_limits,
|
||||
network,
|
||||
rdma: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
use oci::{LinuxSeccompArg as ociLinuxSeccompArg, LinuxSyscall as ociLinuxSyscall};
|
||||
|
||||
fn seccomp_grpc_to_oci(sec: &grpcLinuxSeccomp) -> ociLinuxSeccomp {
|
||||
let syscalls = {
|
||||
let mut r = Vec::new();
|
||||
|
||||
for sys in sec.Syscalls.iter() {
|
||||
let mut args = Vec::new();
|
||||
|
||||
for arg in sys.Args.iter() {
|
||||
args.push(ociLinuxSeccompArg {
|
||||
index: arg.Index as u32,
|
||||
value: arg.Value,
|
||||
value_two: arg.ValueTwo,
|
||||
op: arg.Op.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
r.push(ociLinuxSyscall {
|
||||
names: sys.Names.clone().into_vec(),
|
||||
action: sys.Action.clone(),
|
||||
args,
|
||||
});
|
||||
}
|
||||
r
|
||||
};
|
||||
|
||||
ociLinuxSeccomp {
|
||||
default_action: sec.DefaultAction.clone(),
|
||||
architectures: sec.Architectures.clone().into_vec(),
|
||||
syscalls,
|
||||
}
|
||||
}
|
||||
|
||||
fn linux_grpc_to_oci(l: &grpcLinux) -> ociLinux {
|
||||
let uid_mappings = idmaps_grpc_to_oci(l.UIDMappings.as_ref());
|
||||
let gid_mappings = idmaps_grpc_to_oci(l.GIDMappings.as_ref());
|
||||
|
||||
let resources = if l.Resources.is_some() {
|
||||
Some(resources_grpc_to_oci(l.Resources.as_ref().unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let seccomp = if l.Seccomp.is_some() {
|
||||
Some(seccomp_grpc_to_oci(l.Seccomp.as_ref().unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let namespaces = {
|
||||
let mut r = Vec::new();
|
||||
|
||||
for ns in l.Namespaces.iter() {
|
||||
r.push(ociLinuxNamespace {
|
||||
r#type: ns.Type.clone(),
|
||||
path: ns.Path.clone(),
|
||||
});
|
||||
}
|
||||
r
|
||||
};
|
||||
|
||||
let devices = {
|
||||
let mut r = Vec::new();
|
||||
|
||||
for d in l.Devices.iter() {
|
||||
r.push(ociLinuxDevice {
|
||||
path: d.Path.clone(),
|
||||
r#type: d.Type.clone(),
|
||||
major: d.Major,
|
||||
minor: d.Minor,
|
||||
file_mode: Some(d.FileMode),
|
||||
uid: Some(d.UID),
|
||||
gid: Some(d.GID),
|
||||
});
|
||||
}
|
||||
r
|
||||
};
|
||||
|
||||
let intel_rdt = if l.IntelRdt.is_some() {
|
||||
let rdt = l.IntelRdt.as_ref().unwrap();
|
||||
|
||||
Some(ociLinuxIntelRdt {
|
||||
l3_cache_schema: rdt.L3CacheSchema.clone(),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
ociLinux {
|
||||
uid_mappings,
|
||||
gid_mappings,
|
||||
sysctl: l.Sysctl.clone(),
|
||||
resources,
|
||||
cgroups_path: l.CgroupsPath.clone(),
|
||||
namespaces,
|
||||
devices,
|
||||
seccomp,
|
||||
rootfs_propagation: l.RootfsPropagation.clone(),
|
||||
masked_paths: l.MaskedPaths.clone().into_vec(),
|
||||
readonly_paths: l.ReadonlyPaths.clone().into_vec(),
|
||||
mount_label: l.MountLabel.clone(),
|
||||
intel_rdt,
|
||||
}
|
||||
}
|
||||
|
||||
/// Reverse (OCI -> gRPC) conversion of the Linux section.
/// Not implemented: the agent only consumes specs arriving over gRPC, so
/// this stub returns an empty message.
fn linux_oci_to_grpc(_l: &ociLinux) -> grpcLinux {
    grpcLinux::default()
}
|
||||
|
||||
pub fn grpc_to_oci(grpc: &grpcSpec) -> ociSpec {
|
||||
// process
|
||||
let process = if grpc.Process.is_some() {
|
||||
Some(process_grpc_to_oci(grpc.Process.as_ref().unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// root
|
||||
let root = if grpc.Root.is_some() {
|
||||
Some(root_grpc_to_oci(grpc.Root.as_ref().unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// mounts
|
||||
let mounts = {
|
||||
let mut r = Vec::new();
|
||||
for m in grpc.Mounts.iter() {
|
||||
r.push(mount_grpc_to_oci(m));
|
||||
}
|
||||
r
|
||||
};
|
||||
|
||||
// hooks
|
||||
let hooks = if grpc.Hooks.is_some() {
|
||||
Some(hooks_grpc_to_oci(grpc.Hooks.as_ref().unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Linux
|
||||
let linux = if grpc.Linux.is_some() {
|
||||
Some(linux_grpc_to_oci(grpc.Linux.as_ref().unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
ociSpec {
|
||||
version: grpc.Version.clone(),
|
||||
process,
|
||||
root,
|
||||
hostname: grpc.Hostname.clone(),
|
||||
mounts,
|
||||
hooks,
|
||||
annotations: grpc.Annotations.clone(),
|
||||
linux,
|
||||
solaris: None,
|
||||
windows: None,
|
||||
vm: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn oci_to_grpc(_oci: &ociSpec) -> grpcSpec {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Placeholder module: real conversion round-trip tests are still TODO.
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}
|
||||
744
src/agent/rustjail/src/mount.rs
Normal file
744
src/agent/rustjail/src/mount.rs
Normal file
@@ -0,0 +1,744 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use libc::uid_t;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::mount::{self, MntFlags, MsFlags};
|
||||
use nix::sys::stat::{self, Mode, SFlag};
|
||||
use nix::unistd::{self, Gid, Uid};
|
||||
use nix::NixPath;
|
||||
use oci::{LinuxDevice, Mount, Spec};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::os::unix;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use path_absolutize::*;
|
||||
use scan_fmt;
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
|
||||
use crate::container::DEFAULT_DEVICES;
|
||||
use crate::errors::*;
|
||||
use crate::sync::write_count;
|
||||
use lazy_static;
|
||||
use std::string::ToString;
|
||||
|
||||
use crate::log_child;
|
||||
|
||||
// Info reveals information about a particular mounted filesystem. This
|
||||
// struct is populated from the content in the /proc/<pid>/mountinfo file.
|
||||
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
pub struct Info {
    id: i32,             // unique mount ID
    parent: i32,         // mount ID of the parent mount
    major: i32,          // major number of the backing device
    minor: i32,          // minor number of the backing device
    root: String,        // root of the mount within the filesystem
    mount_point: String, // mount point relative to the process root
    opts: String,        // per-mount options
    optional: String,    // optional fields (empty when the field was "-")
    fstype: String,      // filesystem type
    source: String,      // filesystem-specific source
    vfs_opts: String,    // per-superblock options
}
|
||||
|
||||
// scan_fmt pattern for the first half of a /proc/<pid>/mountinfo line:
// "<id> <parent> <major>:<minor> <root> <mount point> <opts> <optional...>".
const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
|
||||
|
||||
lazy_static! {
    // Maps an OCI rootfs_propagation string to the mount(2) propagation
    // flags applied to "/" during rootfs setup.
    static ref PROPAGATION: HashMap<&'static str, MsFlags> = {
        let mut m = HashMap::new();
        m.insert("shared", MsFlags::MS_SHARED | MsFlags::MS_REC);
        m.insert("private", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
        m.insert("slave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
        m
    };
    // Maps an OCI mount option string to (clear, flag): when `clear` is
    // true the flag is removed from the accumulated mount flags (e.g. "rw"
    // clears MS_RDONLY), otherwise it is added. Options not present here
    // are passed through as filesystem-specific data.
    static ref OPTIONS: HashMap<&'static str, (bool, MsFlags)> = {
        let mut m = HashMap::new();
        m.insert("defaults", (false, MsFlags::empty()));
        m.insert("ro", (false, MsFlags::MS_RDONLY));
        m.insert("rw", (true, MsFlags::MS_RDONLY));
        m.insert("suid", (true, MsFlags::MS_NOSUID));
        m.insert("nosuid", (false, MsFlags::MS_NOSUID));
        m.insert("dev", (true, MsFlags::MS_NODEV));
        m.insert("nodev", (false, MsFlags::MS_NODEV));
        m.insert("exec", (true, MsFlags::MS_NOEXEC));
        m.insert("noexec", (false, MsFlags::MS_NOEXEC));
        m.insert("sync", (false, MsFlags::MS_SYNCHRONOUS));
        m.insert("async", (true, MsFlags::MS_SYNCHRONOUS));
        m.insert("dirsync", (false, MsFlags::MS_DIRSYNC));
        m.insert("remount", (false, MsFlags::MS_REMOUNT));
        m.insert("mand", (false, MsFlags::MS_MANDLOCK));
        m.insert("nomand", (true, MsFlags::MS_MANDLOCK));
        m.insert("atime", (true, MsFlags::MS_NOATIME));
        m.insert("noatime", (false, MsFlags::MS_NOATIME));
        m.insert("diratime", (true, MsFlags::MS_NODIRATIME));
        m.insert("nodiratime", (false, MsFlags::MS_NODIRATIME));
        m.insert("bind", (false, MsFlags::MS_BIND));
        m.insert("rbind", (false, MsFlags::MS_BIND | MsFlags::MS_REC));
        m.insert("unbindable", (false, MsFlags::MS_UNBINDABLE));
        m.insert(
            "runbindable",
            (false, MsFlags::MS_UNBINDABLE | MsFlags::MS_REC),
        );
        m.insert("private", (false, MsFlags::MS_PRIVATE));
        m.insert("rprivate", (false, MsFlags::MS_PRIVATE | MsFlags::MS_REC));
        m.insert("shared", (false, MsFlags::MS_SHARED));
        m.insert("rshared", (false, MsFlags::MS_SHARED | MsFlags::MS_REC));
        m.insert("slave", (false, MsFlags::MS_SLAVE));
        m.insert("rslave", (false, MsFlags::MS_SLAVE | MsFlags::MS_REC));
        m.insert("relatime", (false, MsFlags::MS_RELATIME));
        m.insert("norelatime", (true, MsFlags::MS_RELATIME));
        m.insert("strictatime", (false, MsFlags::MS_STRICTATIME));
        m.insert("nostrictatime", (true, MsFlags::MS_STRICTATIME));
        m
    };
}
|
||||
|
||||
/// Sets up the container rootfs: applies the spec's rootfs propagation,
/// bind-mounts the rootfs onto itself, performs every mount listed in the
/// spec (cgroup mounts get special handling), then creates the default
/// symlinks, device nodes and /dev/ptmx inside the rootfs.
///
/// `cpath` maps cgroup subsystem names to host cgroup paths and `mounts`
/// maps subsystem names to host mount points; both are only used for
/// cgroup mounts. `bind_device` selects bind-mounting host device nodes
/// instead of mknod(2). `cfd_log` is the pipe used by log_child!.
pub fn init_rootfs(
    cfd_log: RawFd,
    spec: &Spec,
    cpath: &HashMap<String, String>,
    mounts: &HashMap<String, String>,
    bind_device: bool,
) -> Result<()> {
    // Force the lazy statics to be built before any mount work starts.
    lazy_static::initialize(&OPTIONS);
    lazy_static::initialize(&PROPAGATION);
    lazy_static::initialize(&LINUXDEVICETYPE);

    let linux = spec.linux.as_ref().unwrap();
    let mut flags = MsFlags::MS_REC;
    // Unknown or empty propagation falls back to rslave so container
    // mount events do not propagate back to the host.
    match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
        Some(fl) => flags |= *fl,
        None => flags |= MsFlags::MS_SLAVE,
    }

    let rootfs = spec.root.as_ref().unwrap().path.as_str();
    let root = fs::canonicalize(rootfs)?;
    let rootfs = root.to_str().unwrap();

    // Apply the chosen propagation to "/", then turn the rootfs into a
    // mount point of its own (required for pivot_root later).
    mount::mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
    mount::mount(
        Some(rootfs),
        rootfs,
        None::<&str>,
        MsFlags::MS_BIND | MsFlags::MS_REC,
        None::<&str>,
    )?;

    for m in &spec.mounts {
        let (mut flags, data) = parse_mount(&m);
        // Destinations must be absolute and must not escape the rootfs.
        if !m.destination.starts_with("/") || m.destination.contains("..") {
            return Err(ErrorKind::Nix(nix::Error::Sys(Errno::EINVAL)).into());
        }
        if m.r#type == "cgroup" {
            mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
        } else {
            if m.destination == "/dev" {
                // /dev must stay writable here so device nodes can be
                // created below; finish_rootfs re-applies read-only later.
                flags &= !MsFlags::MS_RDONLY;
            }

            mount_from(cfd_log, &m, &rootfs, flags, &data, "")?;
        }
    }

    // The symlink/device helpers operate on paths relative to the rootfs.
    let olddir = unistd::getcwd()?;
    unistd::chdir(rootfs)?;

    default_symlinks()?;
    create_devices(&linux.devices, bind_device)?;
    ensure_ptmx()?;

    unistd::chdir(&olddir)?;

    Ok(())
}
|
||||
|
||||
/// Mounts the cgroup hierarchy for the container: a tmpfs at the mount's
/// destination, then one bind mount per cgroup subsystem from the host
/// paths in `cpath`/`mounts`, with compatibility symlinks for combined
/// subsystems (e.g. "cpu,cpuacct"). Finally remounts the tree read-only
/// when the original mount asked for MS_RDONLY.
fn mount_cgroups(
    cfd_log: RawFd,
    m: &Mount,
    rootfs: &str,
    flags: MsFlags,
    _data: &str,
    cpath: &HashMap<String, String>,
    mounts: &HashMap<String, String>,
) -> Result<()> {
    // mount tmpfs
    let ctm = Mount {
        source: "tmpfs".to_string(),
        r#type: "tmpfs".to_string(),
        destination: m.destination.clone(),
        options: Vec::new(),
    };

    let cflags = MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
    // info!(logger, "tmpfs");
    mount_from(cfd_log, &ctm, rootfs, cflags, "", "")?;
    let olddir = unistd::getcwd()?;

    // Symlink targets below are relative to the rootfs root.
    unistd::chdir(rootfs)?;

    // Host cgroup paths already bind-mounted (combined subsystems share one).
    let mut srcs: HashSet<String> = HashSet::new();

    // bind mount cgroups
    for (key, mount) in mounts.iter() {
        log_child!(cfd_log, "mount cgroup subsystem {}", key);
        let source = if cpath.get(key).is_some() {
            cpath.get(key).unwrap()
        } else {
            // Subsystem not present on the host: skip it.
            continue;
        };

        // Last path component of the host mount point, e.g. "cpu,cpuacct".
        let base = if let Some(o) = mount.rfind('/') {
            &mount[o + 1..]
        } else {
            &mount[..]
        };

        let destination = format!("{}/{}", m.destination.as_str(), base);

        if srcs.contains(source) {
            // already mounted, xxx,yyy style cgroup
            if key != base {
                let src = format!("{}/{}", m.destination.as_str(), key);
                unix::fs::symlink(destination.as_str(), &src[1..])?;
            }

            continue;
        }

        srcs.insert(source.to_string());

        log_child!(cfd_log, "mount destination: {}", destination.as_str());

        let bm = Mount {
            source: source.to_string(),
            r#type: "bind".to_string(),
            destination: destination.clone(),
            options: Vec::new(),
        };

        let mut mount_flags: MsFlags = flags | MsFlags::MS_REC | MsFlags::MS_BIND;
        if key.contains("systemd") {
            // The systemd cgroup must stay writable for systemd inside
            // the container.
            mount_flags &= !MsFlags::MS_RDONLY;
        }
        mount_from(cfd_log, &bm, rootfs, mount_flags, "", "")?;

        if key != base {
            // Symlink the individual subsystem name to the combined mount.
            let src = format!("{}/{}", m.destination.as_str(), key);
            match unix::fs::symlink(destination.as_str(), &src[1..]) {
                Err(e) => {
                    log_child!(
                        cfd_log,
                        "symlink: {} {} err: {}",
                        key,
                        destination.as_str(),
                        e.to_string()
                    );

                    return Err(e.into());
                }
                Ok(_) => {}
            }
        }
    }

    unistd::chdir(&olddir)?;

    if flags.contains(MsFlags::MS_RDONLY) {
        // Remount the cgroup tmpfs read-only now that everything is set up.
        let dest = format!("{}{}", rootfs, m.destination.as_str());
        mount::mount(
            Some(dest.as_str()),
            dest.as_str(),
            None::<&str>,
            flags | MsFlags::MS_BIND | MsFlags::MS_REMOUNT,
            None::<&str>,
        )?;
    }

    Ok(())
}
|
||||
|
||||
/// Switches the root filesystem to `path` using pivot_root(2) with
/// new_root == put_old, then lazily detaches the old root. Stacking the
/// old root on the new one avoids needing a scratch directory for put_old.
pub fn pivot_rootfs<P: ?Sized + NixPath>(path: &P) -> Result<()> {
    let oldroot = fcntl::open("/", OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
    defer!(unistd::close(oldroot).unwrap());
    let newroot = fcntl::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
    defer!(unistd::close(newroot).unwrap());
    // pivot_root(path, path): the old root ends up mounted on top of the
    // new root at the same location...
    unistd::pivot_root(path, path)?;
    // ...so detaching "/" peels the old root off and exposes the new one.
    mount::umount2("/", MntFlags::MNT_DETACH)?;
    unistd::fchdir(newroot)?;
    // Restore a conventional umask after the mount dance.
    stat::umask(Mode::from_bits_truncate(0o022));
    Ok(())
}
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
|
||||
// bind mounts
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
/// Returns one `Info` per line of /proc/self/mountinfo. Each line has the
/// form "<fixed fields> - <fstype> <source> <vfs opts>"; the part before
/// " - " is parsed with MOUNTINFOFORMAT, the part after with a second
/// scan_fmt pass. Fails if any line does not split into exactly two halves.
fn parse_mount_table() -> Result<Vec<Info>> {
    let file = File::open("/proc/self/mountinfo")?;
    let reader = BufReader::new(file);
    let mut infos = Vec::new();

    for (_index, line) in reader.lines().enumerate() {
        let line = line?;

        let (id, parent, major, minor, root, mount_point, opts, optional) = scan_fmt!(
            &line,
            MOUNTINFOFORMAT,
            i32,
            i32,
            i32,
            i32,
            String,
            String,
            String,
            String
        )?;

        // The separator " - " splits the fixed fields from the
        // filesystem-specific tail.
        let fields: Vec<&str> = line.split(" - ").collect();
        if fields.len() == 2 {
            let (fstype, source, vfs_opts) =
                scan_fmt!(fields[1], "{} {} {}", String, String, String)?;

            // "-" marks "no optional fields"; normalize it to empty.
            let mut optional_new = String::new();
            if optional != "-" {
                optional_new = optional;
            }

            let info = Info {
                id,
                parent,
                major,
                minor,
                root,
                mount_point,
                opts,
                optional: optional_new,
                fstype,
                source,
                vfs_opts,
            };

            infos.push(info);
        } else {
            return Err(ErrorKind::ErrorCode("failed to parse mount info file".to_string()).into());
        }
    }

    Ok(infos)
}
|
||||
|
||||
/// Makes `rootfs` the new root with MS_MOVE + chroot instead of
/// pivot_root (used when pivot_root is unavailable, e.g. on initramfs).
/// First unmounts every proc/sysfs mount that lives outside the new
/// rootfs so they do not leak through; mounts that cannot be unmounted
/// (e.g. rootless) are covered with an empty tmpfs instead.
pub fn ms_move_root(rootfs: &str) -> Result<bool> {
    unistd::chdir(rootfs)?;
    let mount_infos = parse_mount_table()?;

    let root_path = Path::new(rootfs);
    let abs_root_buf = root_path.absolutize()?;
    let abs_root = abs_root_buf.to_str().ok_or::<Error>(
        ErrorKind::ErrorCode(format!("failed to parse {} to absolute path", rootfs)).into(),
    )?;

    for info in mount_infos.iter() {
        let mount_point = Path::new(&info.mount_point);
        let abs_mount_buf = mount_point.absolutize()?;
        let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(
            ErrorKind::ErrorCode(format!(
                "failed to parse {} to absolute path",
                info.mount_point
            ))
            .into(),
        )?;
        let abs_mount_point_string = String::from(abs_mount_point);

        // Umount every syfs and proc file systems, except those under the container rootfs
        if (info.fstype != "proc" && info.fstype != "sysfs")
            || abs_mount_point_string.starts_with(abs_root)
        {
            continue;
        }

        // Be sure umount events are not propagated to the host.
        mount::mount(
            None::<&str>,
            abs_mount_point,
            None::<&str>,
            MsFlags::MS_SLAVE | MsFlags::MS_REC,
            None::<&str>,
        )?;
        match mount::umount2(abs_mount_point, MntFlags::MNT_DETACH) {
            Ok(_) => (),
            Err(e) => {
                // EINVAL/EPERM are tolerated (not a mount point, or no
                // privilege); anything else is a real failure.
                if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
                    return Err(ErrorKind::ErrorCode(e.to_string()).into());
                }

                // If we have not privileges for umounting (e.g. rootless), then
                // cover the path.
                mount::mount(
                    Some("tmpfs"),
                    abs_mount_point,
                    Some("tmpfs"),
                    MsFlags::empty(),
                    None::<&str>,
                )?;
            }
        }
    }

    // Move the rootfs over "/" and re-anchor the process there.
    mount::mount(
        Some(abs_root),
        "/",
        None::<&str>,
        MsFlags::MS_MOVE,
        None::<&str>,
    )?;
    unistd::chroot(".")?;
    unistd::chdir("/")?;

    Ok(true)
}
|
||||
|
||||
fn parse_mount(m: &Mount) -> (MsFlags, String) {
|
||||
let mut flags = MsFlags::empty();
|
||||
let mut data = Vec::new();
|
||||
|
||||
for o in &m.options {
|
||||
match OPTIONS.get(o.as_str()) {
|
||||
Some(v) => {
|
||||
let (clear, fl) = *v;
|
||||
if clear {
|
||||
flags &= !fl;
|
||||
} else {
|
||||
flags |= fl;
|
||||
}
|
||||
}
|
||||
|
||||
None => data.push(o.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
(flags, data.join(","))
|
||||
}
|
||||
|
||||
/// Performs one mount from the spec into the rootfs: prepares the target
/// (creating directories, and an empty file for file bind mounts), calls
/// mount(2), and — for bind mounts that carry flags a plain bind ignores
/// (e.g. MS_RDONLY, MS_NOEXEC) — follows up with a MS_REMOUNT to apply
/// them. Errors are logged through `cfd_log` before being returned.
fn mount_from(
    cfd_log: RawFd,
    m: &Mount,
    rootfs: &str,
    flags: MsFlags,
    data: &str,
    _label: &str,
) -> Result<()> {
    let d = String::from(data);
    let dest = format!("{}{}", rootfs, &m.destination);

    let src = if m.r#type.as_str() == "bind" {
        let src = fs::canonicalize(m.source.as_str())?;
        // For a file bind mount, create the parent directory; for a
        // directory bind mount, create the target directory itself.
        let dir = if src.is_file() {
            Path::new(&dest).parent().unwrap()
        } else {
            Path::new(&dest)
        };

        // let _ = fs::create_dir_all(&dir);
        match fs::create_dir_all(&dir) {
            Ok(_) => {}
            Err(e) => {
                log_child!(
                    cfd_log,
                    "creat dir {}: {}",
                    dir.to_str().unwrap(),
                    e.to_string()
                );
            }
        }

        // make sure file exists so we can bind over it
        if src.is_file() {
            let _ = OpenOptions::new().create(true).write(true).open(&dest);
        }
        src
    } else {
        let _ = fs::create_dir_all(&dest);
        PathBuf::from(&m.source)
    };

    // ignore this check since some mount's src didn't been a directory
    // such as tmpfs.
    /*
    match stat::stat(src.to_str().unwrap()) {
        Ok(_) => {}
        Err(e) => {
            info!("{}: {}", src.to_str().unwrap(), e.as_errno().unwrap().desc());
        }
    }
    */

    // Diagnostic only: a missing destination is logged, not fatal, since
    // mount(2) below reports the authoritative error.
    match stat::stat(dest.as_str()) {
        Ok(_) => {}
        Err(e) => {
            log_child!(
                cfd_log,
                "{}: {}",
                dest.as_str(),
                e.as_errno().unwrap().desc()
            );
        }
    }

    match mount::mount(
        Some(src.to_str().unwrap()),
        dest.as_str(),
        Some(m.r#type.as_str()),
        flags,
        Some(d.as_str()),
    ) {
        Ok(_) => {}
        Err(e) => {
            log_child!(cfd_log, "mount error: {}", e.as_errno().unwrap().desc());
            return Err(e.into());
        }
    }

    // A bind mount ignores everything except MS_BIND/MS_REC and the
    // propagation flags; if any other flag was requested, remount in
    // place to apply it.
    if flags.contains(MsFlags::MS_BIND)
        && flags.intersects(
            !(MsFlags::MS_REC
                | MsFlags::MS_REMOUNT
                | MsFlags::MS_BIND
                | MsFlags::MS_PRIVATE
                | MsFlags::MS_SHARED
                | MsFlags::MS_SLAVE),
        )
    {
        match mount::mount(
            Some(dest.as_str()),
            dest.as_str(),
            None::<&str>,
            flags | MsFlags::MS_REMOUNT,
            None::<&str>,
        ) {
            Err(e) => {
                log_child!(
                    cfd_log,
                    "remout {}: {}",
                    dest.as_str(),
                    e.as_errno().unwrap().desc()
                );
                return Err(e.into());
            }
            Ok(_) => {}
        }
    }
    Ok(())
}
|
||||
|
||||
// Default (source, destination) symlinks created inside the container
// rootfs; destinations are relative because cwd is the rootfs root when
// they are created.
static SYMLINKS: &'static [(&'static str, &'static str)] = &[
    ("/proc/self/fd", "dev/fd"),
    ("/proc/self/fd/0", "dev/stdin"),
    ("/proc/self/fd/1", "dev/stdout"),
    ("/proc/self/fd/2", "dev/stderr"),
];
|
||||
|
||||
fn default_symlinks() -> Result<()> {
|
||||
if Path::new("/proc/kcore").exists() {
|
||||
unix::fs::symlink("/proc/kcore", "dev/kcore")?;
|
||||
}
|
||||
for &(src, dst) in SYMLINKS {
|
||||
unix::fs::symlink(src, dst)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
/// Creates the default device nodes plus the spec's extra devices under
/// ./dev (cwd must be the rootfs root). When `bind` is true, devices are
/// bind-mounted from the host instead of created with mknod(2).
fn create_devices(devices: &[LinuxDevice], bind: bool) -> Result<()> {
    let op: fn(&LinuxDevice) -> Result<()> = if bind { bind_dev } else { mknod_dev };
    // Clear the umask so node permissions come out exactly as requested.
    let old = stat::umask(Mode::from_bits_truncate(0o000));
    for dev in DEFAULT_DEVICES.iter() {
        op(dev)?;
    }
    for dev in devices {
        // Device paths must live under /dev and must not escape it.
        if !dev.path.starts_with("/dev") || dev.path.contains("..") {
            let msg = format!("{} is not a valid device path", dev.path);
            bail!(ErrorKind::ErrorCode(msg));
        }
        op(dev)?;
    }
    stat::umask(old);
    Ok(())
}
|
||||
|
||||
fn ensure_ptmx() -> Result<()> {
|
||||
let _ = fs::remove_file("dev/ptmx");
|
||||
unix::fs::symlink("pts/ptmx", "dev/ptmx")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Encodes (major, minor) into a Linux dev_t, mirroring glibc's makedev():
/// minor's low byte in bits 0-7, major's low 12 bits in bits 8-19, the
/// remaining minor bits starting at bit 20, the remaining major bits at
/// bit 32.
fn makedev(major: u64, minor: u64) -> u64 {
    let mut dev = minor & 0xff;
    dev |= (major & 0xfff) << 8;
    dev |= (minor & !0xff) << 12;
    dev |= (major & !0xfff) << 32;
    dev
}
|
||||
|
||||
lazy_static! {
    // Maps the OCI device "type" string to the mknod(2) file-type bits:
    // "c" = character device, "b" = block device, "p" = FIFO.
    static ref LINUXDEVICETYPE: HashMap<&'static str, SFlag> = {
        let mut m = HashMap::new();
        m.insert("c", SFlag::S_IFCHR);
        m.insert("b", SFlag::S_IFBLK);
        m.insert("p", SFlag::S_IFIFO);
        m
    };
}
|
||||
|
||||
/// Creates the device node for `dev` with mknod(2) and sets its owner.
/// Paths are made relative (leading '/' stripped) so the node lands
/// inside the rootfs, which must be the current working directory.
fn mknod_dev(dev: &LinuxDevice) -> Result<()> {
    let f = match LINUXDEVICETYPE.get(dev.r#type.as_str()) {
        Some(v) => v,
        None => return Err(ErrorKind::ErrorCode("invalid spec".to_string()).into()),
    };

    stat::mknod(
        &dev.path[1..], // strip leading '/' to stay inside the rootfs
        *f,
        Mode::from_bits_truncate(dev.file_mode.unwrap_or(0)),
        makedev(dev.major as u64, dev.minor as u64),
    )?;

    unistd::chown(
        &dev.path[1..],
        Some(Uid::from_raw(dev.uid.unwrap_or(0) as uid_t)),
        // NOTE(review): the gid is cast through uid_t; both are u32 on
        // Linux so the value is unchanged, but libc::gid_t would state
        // the intent better — confirm.
        Some(Gid::from_raw(dev.gid.unwrap_or(0) as uid_t)),
    )?;

    Ok(())
}
|
||||
|
||||
/// Bind-mounts the host device node `dev.path` onto the same path inside
/// the rootfs (cwd must be the rootfs root). An empty placeholder file is
/// created first so there is something to mount over.
fn bind_dev(dev: &LinuxDevice) -> Result<()> {
    let fd = fcntl::open(
        &dev.path[1..], // relative: target inside the rootfs
        OFlag::O_RDWR | OFlag::O_CREAT,
        Mode::from_bits_truncate(0o644),
    )?;

    // Only needed the file to exist; close immediately.
    unistd::close(fd)?;

    mount::mount(
        Some(&*dev.path),  // absolute: source node on the host
        &dev.path[1..],
        None::<&str>,
        MsFlags::MS_BIND,
        None::<&str>,
    )?;
    Ok(())
}
|
||||
|
||||
/// Final rootfs pass run after pivoting into the container root: applies
/// the spec's masked and read-only paths, re-applies a read-only /dev if
/// the spec asked for one, and remounts "/" read-only when root.readonly
/// is set. Restores cwd and a conventional umask before returning.
pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
    let olddir = unistd::getcwd()?;
    log_child!(cfd_log, "old cwd: {}", olddir.to_str().unwrap());
    unistd::chdir("/")?;
    if spec.linux.is_some() {
        let linux = spec.linux.as_ref().unwrap();

        for path in linux.masked_paths.iter() {
            mask_path(path)?;
        }

        for path in linux.readonly_paths.iter() {
            readonly_path(path)?;
        }
    }

    for m in spec.mounts.iter() {
        if m.destination == "/dev" {
            // init_rootfs forced /dev writable to create device nodes;
            // honor a requested read-only /dev now.
            let (flags, _) = parse_mount(m);
            if flags.contains(MsFlags::MS_RDONLY) {
                mount::mount(
                    Some("/dev"),
                    "/dev",
                    None::<&str>,
                    flags | MsFlags::MS_REMOUNT,
                    None::<&str>,
                )?;
            }
        }
    }

    if spec.root.as_ref().unwrap().readonly {
        let flags = MsFlags::MS_BIND | MsFlags::MS_RDONLY | MsFlags::MS_NODEV | MsFlags::MS_REMOUNT;

        mount::mount(Some("/"), "/", None::<&str>, flags, None::<&str>)?;
    }
    stat::umask(Mode::from_bits_truncate(0o022));
    unistd::chdir(&olddir)?;

    Ok(())
}
|
||||
|
||||
fn mask_path(path: &str) -> Result<()> {
|
||||
if !path.starts_with("/") || path.contains("..") {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
}
|
||||
|
||||
//info!("{}", path);
|
||||
|
||||
match mount::mount(
|
||||
Some("/dev/null"),
|
||||
path,
|
||||
None::<&str>,
|
||||
MsFlags::MS_BIND,
|
||||
None::<&str>,
|
||||
) {
|
||||
Err(nix::Error::Sys(e)) => {
|
||||
if e != Errno::ENOENT && e != Errno::ENOTDIR {
|
||||
//info!("{}: {}", path, e.desc());
|
||||
return Err(nix::Error::Sys(e).into());
|
||||
}
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
//info!("{}: {}", path, e.as_errno().unwrap().desc());
|
||||
return Err(e.into());
|
||||
}
|
||||
|
||||
Ok(_) => {}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn readonly_path(path: &str) -> Result<()> {
|
||||
if !path.starts_with("/") || path.contains("..") {
|
||||
return Err(nix::Error::Sys(Errno::EINVAL).into());
|
||||
}
|
||||
|
||||
//info!("{}", path);
|
||||
|
||||
match mount::mount(
|
||||
Some(&path[1..]),
|
||||
path,
|
||||
None::<&str>,
|
||||
MsFlags::MS_BIND | MsFlags::MS_REC,
|
||||
None::<&str>,
|
||||
) {
|
||||
Err(nix::Error::Sys(e)) => {
|
||||
if e == Errno::ENOENT {
|
||||
return Ok(());
|
||||
} else {
|
||||
//info!("{}: {}", path, e.desc());
|
||||
return Err(nix::Error::Sys(e).into());
|
||||
}
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
//info!("{}: {}", path, e.as_errno().unwrap().desc());
|
||||
return Err(e.into());
|
||||
}
|
||||
|
||||
Ok(_) => {}
|
||||
}
|
||||
|
||||
mount::mount(
|
||||
Some(&path[1..]),
|
||||
&path[1..],
|
||||
None::<&str>,
|
||||
MsFlags::MS_BIND | MsFlags::MS_REC | MsFlags::MS_RDONLY | MsFlags::MS_REMOUNT,
|
||||
None::<&str>,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
161
src/agent/rustjail/src/process.rs
Normal file
161
src/agent/rustjail/src/process.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// use std::process::{Stdio, Command, ExitStatus};
|
||||
use libc::pid_t;
|
||||
use std::fs::File;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
// use crate::configs::{Capabilities, Rlimit};
|
||||
// use crate::cgroups::Manager as CgroupManager;
|
||||
// use crate::intelrdt::Manager as RdtManager;
|
||||
|
||||
use nix::fcntl::{fcntl, FcntlArg, OFlag};
|
||||
use nix::sys::signal::{self, Signal};
|
||||
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
|
||||
use nix::sys::wait::{self, WaitStatus};
|
||||
use nix::unistd::{self, Pid};
|
||||
use nix::Result;
|
||||
|
||||
use nix::Error;
|
||||
use oci::Process as OCIProcess;
|
||||
use slog::Logger;
|
||||
|
||||
/// State for one container process (the init process or an exec'd one):
/// its stdio file descriptors on both the child and parent side, exit
/// notification pipe, and the OCI process definition it was created from.
#[derive(Debug)]
pub struct Process {
    pub exec_id: String,
    // Child-side stdio fds (None when a terminal is used instead).
    pub stdin: Option<RawFd>,
    pub stdout: Option<RawFd>,
    pub stderr: Option<RawFd>,
    // Pipe used to signal process exit to waiters.
    pub exit_pipe_r: Option<RawFd>,
    pub exit_pipe_w: Option<RawFd>,
    pub extra_files: Vec<File>,
    // pub caps: Capabilities,
    // pub rlimits: Vec<Rlimit>,
    // Master side of the pty when `tty` is true.
    pub term_master: Option<RawFd>,
    pub tty: bool,
    // Parent-side ends of the stdio pipes.
    pub parent_stdin: Option<RawFd>,
    pub parent_stdout: Option<RawFd>,
    pub parent_stderr: Option<RawFd>,
    // True for the container's init process, false for exec'd processes.
    pub init: bool,
    // pid of the init/exec process. since we have no command
    // struct to store pid, we must store pid here.
    pub pid: pid_t,

    pub exit_code: i32,
    pub oci: OCIProcess,
    pub logger: Logger,
}
|
||||
|
||||
/// Minimal process-control surface: query the pid, wait for exit, and
/// deliver a signal.
pub trait ProcessOperations {
    fn pid(&self) -> Pid;
    fn wait(&self) -> Result<WaitStatus>;
    fn signal(&self, sig: Signal) -> Result<()>;
}
|
||||
|
||||
impl ProcessOperations for Process {
    fn pid(&self) -> Pid {
        Pid::from_raw(self.pid)
    }

    // Blocking waitpid(2) on this process only.
    fn wait(&self) -> Result<WaitStatus> {
        wait::waitpid(Some(self.pid()), None)
    }

    fn signal(&self, sig: Signal) -> Result<()> {
        signal::kill(self.pid(), Some(sig))
    }
}
|
||||
|
||||
impl Process {
    /// Builds a `Process` from an OCI process definition.
    ///
    /// When the process has no terminal, three pipes are created for
    /// stdin/stdout/stderr; stdout/stderr pipes are grown to `pipe_size`
    /// bytes when `pipe_size` > 0. The pid is initialized to -1 until the
    /// process is actually spawned.
    pub fn new(
        logger: &Logger,
        ocip: &OCIProcess,
        id: &str,
        init: bool,
        pipe_size: i32,
    ) -> Result<Self> {
        let logger = logger.new(o!("subsystem" => "process"));

        let mut p = Process {
            exec_id: String::from(id),
            stdin: None,
            stdout: None,
            stderr: None,
            exit_pipe_w: None,
            exit_pipe_r: None,
            extra_files: Vec::new(),
            tty: ocip.terminal,
            term_master: None,
            parent_stdin: None,
            parent_stdout: None,
            parent_stderr: None,
            init,
            pid: -1,
            exit_code: 0,
            oci: ocip.clone(),
            logger: logger.clone(),
        };

        info!(logger, "before create console socket!");

        // NOTE(review): this branch creates stdio pipes, not a console
        // socket — the log message below appears stale; confirm.
        if !p.tty {
            info!(logger, "created console socket!");

            let (stdin, pstdin) = unistd::pipe2(OFlag::O_CLOEXEC)?;
            p.parent_stdin = Some(pstdin);
            p.stdin = Some(stdin);

            let (pstdout, stdout) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
            p.parent_stdout = Some(pstdout);
            p.stdout = Some(stdout);

            let (pstderr, stderr) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
            p.parent_stderr = Some(pstderr);
            p.stderr = Some(stderr);
        }
        Ok(p)
    }
}
|
||||
|
||||
fn create_extended_pipe(flags: OFlag, pipe_size: i32) -> Result<(RawFd, RawFd)> {
|
||||
let (r, w) = unistd::pipe2(flags)?;
|
||||
if pipe_size > 0 {
|
||||
fcntl(w, FcntlArg::F_SETPIPE_SZ(pipe_size))?;
|
||||
}
|
||||
Ok((r, w))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::process::create_extended_pipe;
    use nix::fcntl::{fcntl, FcntlArg, OFlag};
    use nix::unistd;
    use std::fs;
    use std::os::unix::io::RawFd;

    // Largest pipe size an unprivileged caller may request.
    fn get_pipe_max_size() -> i32 {
        fs::read_to_string("/proc/sys/fs/pipe-max-size")
            .unwrap()
            .trim()
            .parse::<i32>()
            .unwrap()
    }

    // Current kernel buffer size of a pipe fd.
    fn get_pipe_size(fd: RawFd) -> i32 {
        fcntl(fd, FcntlArg::F_GETPIPE_SZ).unwrap()
    }

    #[test]
    fn test_create_extended_pipe() {
        // Test the default: size 0 means "leave the kernel default alone".
        // Fix: the old test created this pipe but asserted nothing and
        // leaked both fds (unused-variable warnings).
        let (r, w) = create_extended_pipe(OFlag::O_CLOEXEC, 0).unwrap();
        assert!(get_pipe_size(w) > 0);
        unistd::close(r).unwrap();
        unistd::close(w).unwrap();

        // Test setting to the max size.
        let max_size = get_pipe_max_size();
        let (r, w) = create_extended_pipe(OFlag::O_CLOEXEC, max_size).unwrap();
        let actual_size = get_pipe_size(w);
        assert_eq!(max_size, actual_size);
        unistd::close(r).unwrap();
        unistd::close(w).unwrap();
    }
}
|
||||
159
src/agent/rustjail/src/specconv.rs
Normal file
159
src/agent/rustjail/src/specconv.rs
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use oci::Spec;
|
||||
// use crate::configs::namespaces;
|
||||
// use crate::configs::device::Device;
|
||||
|
||||
/// Options controlling container creation.
/// NOTE(review): presumably mirrors runc/libcontainer's CreateOpts — confirm
/// against the consumer of this struct.
#[derive(Debug)]
pub struct CreateOpts {
    // Name of the cgroup the container is placed in.
    pub cgroup_name: String,
    // Manage cgroups through systemd rather than cgroupfs directly.
    pub use_systemd_cgroup: bool,
    // Avoid pivot_root(2) when entering the rootfs.
    pub no_pivot_root: bool,
    // Do not create a new session keyring for the container.
    pub no_new_keyring: bool,
    // Parsed OCI runtime spec; None until one is loaded.
    pub spec: Option<Spec>,
    // Container is run with a non-root effective UID.
    pub rootless_euid: bool,
    // Cgroups are managed without root privileges.
    pub rootless_cgroup: bool,
}
|
||||
/*
|
||||
const WILDCARD: i32 = -1;
|
||||
|
||||
lazy_static! {
|
||||
static ref NAEMSPACEMAPPING: HashMap<&'static str, &'static str> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert(oci::PIDNAMESPACE, namespaces::NEWPID);
|
||||
m.insert(oci::NETWORKNAMESPACE, namespaces::NEWNET);
|
||||
m.insert(oci::UTSNAMESPACE, namespaces::NEWUTS);
|
||||
m.insert(oci::MOUNTNAMESPACE, namespaces::NEWNS);
|
||||
m.insert(oci::IPCNAMESPACE, namespaces::NEWIPC);
|
||||
m.insert(oci::USERNAMESPACE, namespaces::NEWUSER);
|
||||
m.insert(oci::CGROUPNAMESPACE, namespaces::NEWCGROUP);
|
||||
m
|
||||
};
|
||||
|
||||
static ref MOUNTPROPAGATIONMAPPING: HashMap<&'static str, MsFlags> = {
|
||||
let mut m = HashMap::new();
|
||||
m.insert("rprivate", MsFlags::MS_PRIVATE | MsFlags::MS_REC);
|
||||
m.insert("private", MsFlags::MS_PRIVATE);
|
||||
m.insert("rslave", MsFlags::MS_SLAVE | MsFlags::MS_REC);
|
||||
m.insert("slave", MsFlags::MS_SLAVE);
|
||||
m.insert("rshared", MsFlags::MS_SHARED | MsFlags::MS_REC);
|
||||
m.insert("shared", MsFlags::MS_SHARED);
|
||||
m.insert("runbindable", MsFlags::MS_UNBINDABLE | MsFlags::MS_REC);
|
||||
m.insert("unbindable", MsFlags::MS_UNBINDABLE);
|
||||
m
|
||||
};
|
||||
|
||||
static ref ALLOWED_DEVICES: Vec<Device> = {
|
||||
let mut m = Vec::new();
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
major: WILDCARD,
|
||||
minor: WILDCARD,
|
||||
permissions: "m",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'b',
|
||||
major: WILDCARD,
|
||||
minor: WILDCARD,
|
||||
permissions: "m",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: "/dev/null".to_string(),
|
||||
major: 1,
|
||||
minor: 3,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/random"),
|
||||
major: 1,
|
||||
minor: 8,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/full"),
|
||||
major: 1,
|
||||
minor: 7,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/tty"),
|
||||
major: 5,
|
||||
minor: 0,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/zero"),
|
||||
major: 1,
|
||||
minor: 5,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/urandom"),
|
||||
major: 1,
|
||||
minor: 9,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from("/dev/console"),
|
||||
major: 5,
|
||||
minor: 1,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from(""),
|
||||
major: 136,
|
||||
minor: WILDCARD,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from(""),
|
||||
major: 5,
|
||||
minor: 2,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
|
||||
m.push(Device {
|
||||
r#type: 'c',
|
||||
path: String::from(""),
|
||||
major: 10,
|
||||
minor: 200,
|
||||
permissions: "rwm",
|
||||
allow: true,
|
||||
});
|
||||
m
|
||||
};
|
||||
}
|
||||
*/
|
||||
177
src/agent/rustjail/src/sync.rs
Normal file
177
src/agent/rustjail/src/sync.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::errors::*;
|
||||
use nix::errno::Errno;
|
||||
use nix::unistd;
|
||||
use nix::Error;
|
||||
use std::mem;
|
||||
use std::os::unix::io::RawFd;
|
||||
|
||||
// Message types of the parent/child sync-pipe protocol
// (see read_sync/write_sync below).
pub const SYNC_SUCCESS: i32 = 1;
pub const SYNC_FAILED: i32 = 2;
pub const SYNC_DATA: i32 = 3;

// Chunk size used when draining a SYNC_FAILED error message.
const DATA_SIZE: usize = 100;
// Wire size of a type/length header: one big-endian i32.
const MSG_SIZE: usize = mem::size_of::<i32>();
|
||||
|
||||
// Log a formatted line from the child process by writing it, newline
// terminated, straight to the raw log fd.
// NOTE(review): the Result of write_count is discarded — presumably
// intentional so that a failed log write can never abort the child;
// confirm before silencing the unused-result warning differently.
#[macro_export]
macro_rules! log_child {
    ($fd:expr, $($arg:tt)+) => ({
        let lfd = $fd;
        let mut log_str = format_args!($($arg)+).to_string();
        log_str.push('\n');
        write_count(lfd, log_str.as_bytes(), log_str.len());
    })
}
|
||||
|
||||
pub fn write_count(fd: RawFd, buf: &[u8], count: usize) -> Result<usize> {
|
||||
let mut len = 0;
|
||||
|
||||
loop {
|
||||
match unistd::write(fd, &buf[len..]) {
|
||||
Ok(l) => {
|
||||
len += l;
|
||||
if len == count {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
if e != Error::from_errno(Errno::EINTR) {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
|
||||
let mut v: Vec<u8> = vec![0; count];
|
||||
let mut len = 0;
|
||||
|
||||
loop {
|
||||
match unistd::read(fd, &mut v[len..]) {
|
||||
Ok(l) => {
|
||||
len += l;
|
||||
if len == count || l == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
if e != Error::from_errno(Errno::EINTR) {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(v[0..len].to_vec())
|
||||
}
|
||||
|
||||
/// Read one sync-protocol message from `fd`.
///
/// Wire format: a big-endian i32 message type, then
///   - SYNC_SUCCESS: nothing follows; returns an empty Vec.
///   - SYNC_DATA: a big-endian i32 length, then that many payload bytes.
///   - SYNC_FAILED: a UTF-8 error string, returned as an `Err`.
/// Any other type yields a generic error.
pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
    let buf = read_count(fd, MSG_SIZE)?;
    // A short read here means the peer closed before sending a full header.
    if buf.len() != MSG_SIZE {
        return Err(ErrorKind::ErrorCode(format!(
            "process: {} failed to receive sync message from peer: got msg length: {}, expected: {}",
            std::process::id(),
            buf.len(),
            MSG_SIZE
        ))
        .into());
    }
    // Reassemble the 4 header bytes into a big-endian i32 message type.
    let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
    let msg: i32 = i32::from_be_bytes(buf_array);
    match msg {
        SYNC_SUCCESS => return Ok(Vec::new()),
        SYNC_DATA => {
            // A length header (same 4-byte big-endian format) precedes
            // the payload.
            let buf = read_count(fd, MSG_SIZE)?;
            let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
            let msg_length: i32 = i32::from_be_bytes(buf_array);
            let data_buf = read_count(fd, msg_length as usize)?;

            return Ok(data_buf);
        }
        SYNC_FAILED => {
            // Drain the error text in DATA_SIZE chunks. The writer closes
            // its end after a SYNC_FAILED payload (see write_sync), so a
            // short read marks the end of the message.
            let mut error_buf = vec![];
            loop {
                let buf = read_count(fd, DATA_SIZE)?;

                error_buf.extend(&buf);
                if DATA_SIZE == buf.len() {
                    continue;
                } else {
                    break;
                }
            }

            let error_str = match std::str::from_utf8(&error_buf) {
                Ok(v) => v,
                Err(e) => {
                    return Err(ErrorKind::ErrorCode(format!(
                        "receive error message from child process failed: {:?}",
                        e
                    ))
                    .into())
                }
            };

            // Surface the child's error text as this call's error.
            return Err(ErrorKind::ErrorCode(String::from(error_str)).into());
        }
        _ => return Err(ErrorKind::ErrorCode("error in receive sync message".to_string()).into()),
    }
}
|
||||
|
||||
/// Send one sync-protocol message of `msg_type` on `fd`.
///
/// The type is written first as a big-endian i32. Then:
///   - SYNC_FAILED: `data_str` (error text) is written and `fd` is closed
///     so the reader sees EOF after draining the text.
///   - SYNC_DATA: a big-endian i32 length is written, then `data_str`.
///   - any other type: only the header is sent.
pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
    let buf = msg_type.to_be_bytes();

    let count = write_count(fd, &buf, MSG_SIZE)?;
    if count != MSG_SIZE {
        return Err(ErrorKind::ErrorCode("error in send sync message".to_string()).into());
    }

    match msg_type {
        SYNC_FAILED => match write_count(fd, data_str.as_bytes(), data_str.len()) {
            // fd is closed on both paths so the peer's chunked reader
            // terminates (see read_sync's SYNC_FAILED loop).
            Ok(_count) => unistd::close(fd)?,
            // NOTE(review): `e` is unused (compiler warning) — the real
            // write error is discarded in favour of a generic message.
            Err(e) => {
                unistd::close(fd)?;
                return Err(
                    ErrorKind::ErrorCode("error in send message to process".to_string()).into(),
                );
            }
        },
        SYNC_DATA => {
            // Length header first, then the payload itself.
            let length: i32 = data_str.len() as i32;
            match write_count(fd, &length.to_be_bytes(), MSG_SIZE) {
                Ok(_count) => (),
                // NOTE(review): `e` unused here as well; see above.
                Err(e) => {
                    unistd::close(fd)?;
                    return Err(ErrorKind::ErrorCode(
                        "error in send message to process".to_string(),
                    )
                    .into());
                }
            }

            match write_count(fd, data_str.as_bytes(), data_str.len()) {
                Ok(_count) => (),
                Err(e) => {
                    unistd::close(fd)?;
                    return Err(ErrorKind::ErrorCode(
                        "error in send message to process".to_string(),
                    )
                    .into());
                }
            }
        }

        _ => (),
    };

    Ok(())
}
|
||||
309
src/agent/rustjail/src/validator.rs
Normal file
309
src/agent/rustjail/src/validator.rs
Normal file
@@ -0,0 +1,309 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::container::Config;
|
||||
use crate::errors::*;
|
||||
use lazy_static;
|
||||
use nix::errno::Errno;
|
||||
use nix::Error;
|
||||
use oci::{LinuxIDMapping, LinuxNamespace, Spec};
|
||||
use protobuf::RepeatedField;
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Component, PathBuf};
|
||||
|
||||
fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
|
||||
for ns in nses {
|
||||
if ns.r#type.as_str() == key {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> {
|
||||
for ns in nses {
|
||||
if ns.r#type.as_str() == key {
|
||||
return Ok(ns.path.clone());
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into())
|
||||
}
|
||||
|
||||
/// Validate the container rootfs path: it must exist, be absolute, and
/// contain no symbolic link in any component.
fn rootfs(root: &str) -> Result<()> {
    let path = PathBuf::from(root);
    // not absolute path or not exists
    if !path.exists() || !path.is_absolute() {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }

    // symbolic link? ..?
    // Lexically clean the path: resolve `..` components with a stack,
    // without touching the filesystem.
    let mut stack: Vec<String> = Vec::new();
    for c in path.components() {
        if stack.is_empty() {
            // Leading "/" and any leading ".." collapse to the root.
            if c == Component::RootDir || c == Component::ParentDir {
                continue;
            }
        }

        if c == Component::ParentDir {
            stack.pop();
            continue;
        }

        stack.push(c.as_os_str().to_str().unwrap().to_string());
    }

    let mut cleaned = PathBuf::from("/");
    for e in stack.iter() {
        cleaned.push(e);
    }

    // canonicalize() resolves symlinks; if it disagrees with the pure
    // lexical cleaning above, some component must have been a symlink.
    let canon = path.canonicalize()?;
    if cleaned != canon {
        // There is symbolic in path
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }

    Ok(())
}
|
||||
|
||||
/// Placeholder: no network-specific validation is performed yet.
fn network(_oci: &Spec) -> Result<()> {
    Ok(())
}
|
||||
|
||||
fn hostname(oci: &Spec) -> Result<()> {
|
||||
if oci.hostname.is_empty() || oci.hostname == "".to_string() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if oci.linux.is_none() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if !contain_namespace(&linux.namespaces, "uts") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn security(oci: &Spec) -> Result<()> {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !contain_namespace(&linux.namespaces, "mount") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
|
||||
// don't care about selinux at present
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
|
||||
for map in maps {
|
||||
if map.size > 0 {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into())
|
||||
}
|
||||
|
||||
fn usernamespace(oci: &Spec) -> Result<()> {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if contain_namespace(&linux.namespaces, "user") {
|
||||
let user_ns = PathBuf::from("/proc/self/ns/user");
|
||||
if !user_ns.exists() {
|
||||
return Err(ErrorKind::ErrorCode("user namespace not supported!".to_string()).into());
|
||||
}
|
||||
// check if idmappings is correct, at least I saw idmaps
|
||||
// with zero size was passed to agent
|
||||
idmapping(&linux.uid_mappings)?;
|
||||
idmapping(&linux.gid_mappings)?;
|
||||
} else {
|
||||
// no user namespace but idmap
|
||||
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn cgroupnamespace(oci: &Spec) -> Result<()> {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if contain_namespace(&linux.namespaces, "cgroup") {
|
||||
let path = PathBuf::from("/proc/self/ns/cgroup");
|
||||
if !path.exists() {
|
||||
return Err(ErrorKind::ErrorCode("cgroup unsupported!".to_string()).into());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
lazy_static! {
    // IPC-related sysctls that are only legal when the container joins a
    // new IPC namespace (enforced in sysctl() below). The bool value is
    // unused — the map is effectively a set.
    pub static ref SYSCTLS: HashMap<&'static str, bool> = {
        let mut m = HashMap::new();
        m.insert("kernel.msgmax", true);
        m.insert("kernel.msgmnb", true);
        m.insert("kernel.msgmni", true);
        m.insert("kernel.sem", true);
        m.insert("kernel.shmall", true);
        m.insert("kernel.shmmax", true);
        m.insert("kernel.shmmni", true);
        m.insert("kernel.shm_rmid_forced", true);
        m
    };
}
|
||||
|
||||
/// Reject a network-namespace `path` that actually refers to the host's
/// own network namespace, by comparing symlink targets of the /proc
/// namespace entries.
fn check_host_ns(path: &str) -> Result<()> {
    let cpath = PathBuf::from(path);
    let hpath = PathBuf::from("/proc/self/ns/net");

    let real_hpath = hpath.read_link()?;
    let meta = cpath.symlink_metadata()?;
    let file_type = meta.file_type();

    // Only symlinks (/proc/<pid>/ns/net style paths) can be compared;
    // any other file type is accepted as-is.
    if !file_type.is_symlink() {
        return Ok(());
    }
    let real_cpath = cpath.read_link()?;
    if real_cpath == real_hpath {
        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
    }

    Ok(())
}
|
||||
|
||||
fn sysctl(oci: &Spec) -> Result<()> {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
for (key, _) in linux.sysctl.iter() {
|
||||
if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") {
|
||||
if contain_namespace(&linux.namespaces, "ipc") {
|
||||
continue;
|
||||
} else {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
}
|
||||
|
||||
if key.starts_with("net.") {
|
||||
if !contain_namespace(&linux.namespaces, "network") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
|
||||
let net = get_namespace_path(&linux.namespaces, "network")?;
|
||||
if net.is_empty() || net == "".to_string() {
|
||||
continue;
|
||||
}
|
||||
|
||||
check_host_ns(net.as_str())?;
|
||||
}
|
||||
|
||||
if contain_namespace(&linux.namespaces, "uts") {
|
||||
if key == "kernel.domainname" {
|
||||
continue;
|
||||
}
|
||||
|
||||
if key == "kernel.hostname" {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
}
|
||||
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
|
||||
let linux = oci.linux.as_ref().unwrap();
|
||||
if !contain_namespace(&linux.namespaces, "user") {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
|
||||
if linux.gid_mappings.len() == 0 || linux.gid_mappings.len() == 0 {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool {
|
||||
for map in maps {
|
||||
if id >= map.container_id && id < map.container_id + map.size {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// For rootless containers, every `uid=`/`gid=` mount option must name an
/// id covered by the spec's uid/gid mappings.
fn rootless_euid_mount(oci: &Spec) -> Result<()> {
    let linux = oci.linux.as_ref().unwrap();

    for mnt in oci.mounts.iter() {
        for opt in mnt.options.iter() {
            if opt.starts_with("uid=") || opt.starts_with("gid=") {
                // Split "uid=1000" into key and numeric value.
                let fields: Vec<&str> = opt.split('=').collect();

                if fields.len() != 2 {
                    return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
                }

                let id = fields[1].trim().parse::<u32>()?;

                if opt.starts_with("uid=") {
                    if !has_idmapping(&linux.uid_mappings, id) {
                        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
                    }
                }

                if opt.starts_with("gid=") {
                    if !has_idmapping(&linux.gid_mappings, id) {
                        return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
                    }
                }
            }
        }
    }
    Ok(())
}
|
||||
|
||||
/// Combined rootless checks: valid user-namespace id mappings, then mount
/// options that stay inside those mappings.
fn rootless_euid(oci: &Spec) -> Result<()> {
    rootless_euid_mapping(oci)?;
    rootless_euid_mount(oci)?;
    Ok(())
}
|
||||
|
||||
pub fn validate(conf: &Config) -> Result<()> {
|
||||
lazy_static::initialize(&SYSCTLS);
|
||||
let oci = conf.spec.as_ref().unwrap();
|
||||
|
||||
if oci.linux.is_none() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
|
||||
if oci.root.is_none() {
|
||||
return Err(ErrorKind::Nix(Error::from_errno(Errno::EINVAL)).into());
|
||||
}
|
||||
let root = oci.root.as_ref().unwrap().path.as_str();
|
||||
|
||||
rootfs(root)?;
|
||||
network(oci)?;
|
||||
hostname(oci)?;
|
||||
security(oci)?;
|
||||
usernamespace(oci)?;
|
||||
cgroupnamespace(oci)?;
|
||||
sysctl(&oci)?;
|
||||
|
||||
if conf.rootless_euid {
|
||||
rootless_euid(oci)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
881
src/agent/src/config.rs
Normal file
881
src/agent/src/config.rs
Normal file
@@ -0,0 +1,881 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
use rustjail::errors::*;
|
||||
use std::fs;
|
||||
use std::time;
|
||||
|
||||
// Kernel command line flags/options recognised by parse_cmdline().
const DEBUG_CONSOLE_FLAG: &str = "agent.debug_console";
const DEV_MODE_FLAG: &str = "agent.devmode";
const LOG_LEVEL_OPTION: &str = "agent.log";
// NOTE(review): "TIMOUT" typo is only in the constant's name; the option
// string itself is spelled correctly.
const HOTPLUG_TIMOUT_OPTION: &str = "agent.hotplug_timeout";
const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
const LOG_VPORT_OPTION: &str = "agent.log_vport";
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";

// Defaults applied by agentConfig::new().
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
// 0 means "leave the kernel's default pipe size untouched".
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;

// FIXME: unused
const TRACE_MODE_FLAG: &str = "agent.trace";
const USE_VSOCK_FLAG: &str = "agent.use_vsock";
|
||||
|
||||
/// Agent runtime configuration, populated from kernel cmdline parameters.
// NOTE(review): the type name violates UpperCamelCase (compiler warning);
// renaming would break callers outside this view, so it is kept as-is.
#[derive(Debug)]
pub struct agentConfig {
    // Enable the debug console (agent.debug_console).
    pub debug_console: bool,
    // Developer mode (agent.devmode).
    pub dev_mode: bool,
    // Minimum slog level (agent.log).
    pub log_level: slog::Level,
    // Device hotplug timeout (agent.hotplug_timeout, in seconds).
    pub hotplug_timeout: time::Duration,
    // vsock port for the debug console; 0 = unset.
    pub debug_console_vport: i32,
    // vsock port for log forwarding; 0 = unset.
    pub log_vport: i32,
    // Requested pipe size for container stdio; 0 = kernel default.
    pub container_pipe_size: i32,
}
|
||||
|
||||
impl agentConfig {
|
||||
pub fn new() -> agentConfig {
|
||||
agentConfig {
|
||||
debug_console: false,
|
||||
dev_mode: false,
|
||||
log_level: DEFAULT_LOG_LEVEL,
|
||||
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
|
||||
debug_console_vport: 0,
|
||||
log_vport: 0,
|
||||
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_cmdline(&mut self, file: &str) -> Result<()> {
|
||||
let cmdline = fs::read_to_string(file)?;
|
||||
let params: Vec<&str> = cmdline.split_ascii_whitespace().collect();
|
||||
for param in params.iter() {
|
||||
if param.eq(&DEBUG_CONSOLE_FLAG) {
|
||||
self.debug_console = true;
|
||||
}
|
||||
|
||||
if param.eq(&DEV_MODE_FLAG) {
|
||||
self.dev_mode = true;
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", LOG_LEVEL_OPTION).as_str()) {
|
||||
let level = get_log_level(param)?;
|
||||
self.log_level = level;
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", HOTPLUG_TIMOUT_OPTION).as_str()) {
|
||||
let hotplugTimeout = get_hotplug_timeout(param)?;
|
||||
// ensure the timeout is a positive value
|
||||
if hotplugTimeout.as_secs() > 0 {
|
||||
self.hotplug_timeout = hotplugTimeout;
|
||||
}
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", DEBUG_CONSOLE_VPORT_OPTION).as_str()) {
|
||||
let port = get_vsock_port(param)?;
|
||||
if port > 0 {
|
||||
self.debug_console_vport = port;
|
||||
}
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", LOG_VPORT_OPTION).as_str()) {
|
||||
let port = get_vsock_port(param)?;
|
||||
if port > 0 {
|
||||
self.log_vport = port;
|
||||
}
|
||||
}
|
||||
|
||||
if param.starts_with(format!("{}=", CONTAINER_PIPE_SIZE_OPTION).as_str()) {
|
||||
let container_pipe_size = get_container_pipe_size(param)?;
|
||||
self.container_pipe_size = container_pipe_size
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn get_vsock_port(p: &str) -> Result<i32> {
|
||||
let fields: Vec<&str> = p.split("=").collect();
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode("invalid port parameter".to_string()).into());
|
||||
}
|
||||
|
||||
Ok(fields[1].parse::<i32>()?)
|
||||
}
|
||||
|
||||
// Map logrus (https://godoc.org/github.com/sirupsen/logrus)
|
||||
// log level to the equivalent slog log levels.
|
||||
//
|
||||
// Note: Logrus names are used for compatability with the previous
|
||||
// golang-based agent.
|
||||
fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
|
||||
let level = match logrus_level {
|
||||
// Note: different semantics to logrus: log, but don't panic.
|
||||
"fatal" | "panic" => slog::Level::Critical,
|
||||
|
||||
"critical" => slog::Level::Critical,
|
||||
"error" => slog::Level::Error,
|
||||
"warn" | "warning" => slog::Level::Warning,
|
||||
"info" => slog::Level::Info,
|
||||
"debug" => slog::Level::Debug,
|
||||
|
||||
// Not in logrus
|
||||
"trace" => slog::Level::Trace,
|
||||
|
||||
_ => {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid log level")).into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(level)
|
||||
}
|
||||
|
||||
fn get_log_level(param: &str) -> Result<slog::Level> {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid log level parameter")).into());
|
||||
}
|
||||
|
||||
if fields[0] != LOG_LEVEL_OPTION {
|
||||
Err(ErrorKind::ErrorCode(String::from("invalid log level key name")).into())
|
||||
} else {
|
||||
Ok(logrus_to_slog_level(fields[1])?)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid hotplug timeout parameter")).into());
|
||||
}
|
||||
|
||||
let key = fields[0];
|
||||
if key != HOTPLUG_TIMOUT_OPTION {
|
||||
return Err(ErrorKind::ErrorCode(String::from("invalid hotplug timeout key name")).into());
|
||||
}
|
||||
|
||||
let value = fields[1].parse::<u64>();
|
||||
if value.is_err() {
|
||||
return Err(ErrorKind::ErrorCode(String::from("unable to parse hotplug timeout")).into());
|
||||
}
|
||||
|
||||
Ok(time::Duration::from_secs(value.unwrap()))
|
||||
}
|
||||
|
||||
fn get_container_pipe_size(param: &str) -> Result<i32> {
|
||||
let fields: Vec<&str> = param.split("=").collect();
|
||||
|
||||
if fields.len() != 2 {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(String::from("invalid container pipe size parameter")).into(),
|
||||
);
|
||||
}
|
||||
|
||||
let key = fields[0];
|
||||
if key != CONTAINER_PIPE_SIZE_OPTION {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(String::from("invalid container pipe size key name")).into(),
|
||||
);
|
||||
}
|
||||
|
||||
let res = fields[1].parse::<i32>();
|
||||
if res.is_err() {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode(String::from("unable to parse container pipe size")).into(),
|
||||
);
|
||||
}
|
||||
|
||||
let value = res.unwrap();
|
||||
if value < 0 {
|
||||
return Err(ErrorKind::ErrorCode(String::from(
|
||||
"container pipe size should not be negative",
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::time;
|
||||
use tempfile::tempdir;
|
||||
|
||||
const ERR_INVALID_LOG_LEVEL: &str = "invalid log level";
|
||||
const ERR_INVALID_LOG_LEVEL_PARAM: &str = "invalid log level parameter";
|
||||
const ERR_INVALID_LOG_LEVEL_KEY: &str = "invalid log level key name";
|
||||
|
||||
const ERR_INVALID_HOTPLUG_TIMEOUT: &str = "invalid hotplug timeout parameter";
|
||||
const ERR_INVALID_HOTPLUG_TIMEOUT_PARAM: &str = "unable to parse hotplug timeout";
|
||||
const ERR_INVALID_HOTPLUG_TIMEOUT_KEY: &str = "invalid hotplug timeout key name";
|
||||
|
||||
const ERR_INVALID_CONTAINER_PIPE_SIZE: &str = "invalid container pipe size parameter";
|
||||
const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container pipe size";
|
||||
const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
|
||||
const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";
|
||||
|
||||
// helper function to make errors less crazy-long
|
||||
fn make_err(desc: &str) -> Error {
|
||||
ErrorKind::ErrorCode(desc.to_string()).into()
|
||||
}
|
||||
|
||||
// Parameters:
|
||||
//
|
||||
// 1: expected Result
|
||||
// 2: actual Result
|
||||
// 3: string used to identify the test on error
|
||||
macro_rules! assert_result {
|
||||
($expected_result:expr, $actual_result:expr, $msg:expr) => {
|
||||
if $expected_result.is_ok() {
|
||||
let expected_level = $expected_result.as_ref().unwrap();
|
||||
let actual_level = $actual_result.unwrap();
|
||||
assert!(*expected_level == actual_level, $msg);
|
||||
} else {
|
||||
let expected_error = $expected_result.as_ref().unwrap_err();
|
||||
let actual_error = $actual_result.unwrap_err();
|
||||
|
||||
let expected_error_msg = format!("{:?}", expected_error);
|
||||
let actual_error_msg = format!("{:?}", actual_error);
|
||||
|
||||
assert!(expected_error_msg == actual_error_msg, $msg);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_new() {
|
||||
let config = agentConfig::new();
|
||||
assert_eq!(config.debug_console, false);
|
||||
assert_eq!(config.dev_mode, false);
|
||||
assert_eq!(config.log_level, DEFAULT_LOG_LEVEL);
|
||||
assert_eq!(config.hotplug_timeout, DEFAULT_HOTPLUG_TIMEOUT);
|
||||
}
|
||||
|
||||
#[test]
fn test_parse_cmdline() {
    // Each entry specifies a kernel command line and the configuration
    // values expected after parsing it.
    #[derive(Debug)]
    struct TestData<'a> {
        contents: &'a str,
        debug_console: bool,
        dev_mode: bool,
        log_level: slog::Level,
        hotplug_timeout: time::Duration,
        container_pipe_size: i32,
    }

    let tests = &[
        TestData {
            contents: "agent.debug_consolex agent.devmode",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.debug_console agent.devmodex",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.logx=debug",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.log=debug",
            debug_console: false,
            dev_mode: false,
            log_level: slog::Level::Debug,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo bar",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo bar",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent bar",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo debug_console agent bar devmode",
            debug_console: false,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.debug_console",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: " agent.debug_console ",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.debug_console foo",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: " agent.debug_console foo",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent.debug_console bar",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent.debug_console",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent.debug_console ",
            debug_console: true,
            dev_mode: false,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: " agent.devmode ",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode foo",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: " agent.devmode foo",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent.devmode bar",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent.devmode",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "foo agent.devmode ",
            debug_console: false,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode agent.debug_console",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: time::Duration::from_secs(100),
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: 2097152,
        },
        TestData {
            contents: "agent.devmode agent.debug_console agent.container_pipe_size=100",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: 100,
        },
        TestData {
            contents: "agent.devmode agent.debug_console agent.container_pipe_size=0",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
        TestData {
            contents: "agent.devmode agent.debug_console agent.container_pip_siz=100",
            debug_console: true,
            dev_mode: true,
            log_level: DEFAULT_LOG_LEVEL,
            hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
            container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
        },
    ];

    let dir = tempdir().expect("failed to create tmpdir");

    // First, check a missing file is handled
    let file_path = dir.path().join("enoent");

    let filename = file_path.to_str().expect("failed to create filename");

    let mut config = agentConfig::new();
    // FIX: pass the &str directly instead of allocating a temporary String
    // with `&filename.to_owned()` — the call inside the loop below already
    // shows the borrowed form is accepted.
    let result = config.parse_cmdline(filename);
    assert!(result.is_err());

    // Now, test various combinations of file contents
    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let file_path = dir.path().join("cmdline");

        let filename = file_path.to_str().expect("failed to create filename");

        // FIX: unwrap_or_else avoids building the panic message eagerly on
        // every iteration (clippy::expect_fun_call).
        let mut file = File::create(filename)
            .unwrap_or_else(|e| panic!("{}: failed to create file: {}", msg, e));

        file.write_all(d.contents.as_bytes())
            .unwrap_or_else(|e| panic!("{}: failed to write file contents: {}", msg, e));

        // Sanity-check the defaults before parsing. The literal values
        // deliberately pin the expected defaults independently of the
        // named constants.
        let mut config = agentConfig::new();
        assert_eq!(config.debug_console, false, "{}", msg);
        assert_eq!(config.dev_mode, false, "{}", msg);
        assert_eq!(
            config.hotplug_timeout,
            time::Duration::from_secs(3),
            "{}",
            msg
        );
        assert_eq!(config.container_pipe_size, 0, "{}", msg);

        let result = config.parse_cmdline(filename);
        assert!(result.is_ok(), "{}", msg);

        assert_eq!(d.debug_console, config.debug_console, "{}", msg);
        assert_eq!(d.dev_mode, config.dev_mode, "{}", msg);
        assert_eq!(d.log_level, config.log_level, "{}", msg);
        assert_eq!(d.hotplug_timeout, config.hotplug_timeout, "{}", msg);
        assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
    }
}
|
||||
|
||||
#[test]
fn test_logrus_to_slog_level() {
    // Table-driven test mapping logrus level names to slog levels.
    #[derive(Debug)]
    struct TestData<'a> {
        logrus_level: &'a str,
        result: Result<slog::Level>,
    }

    let tests = &[
        // Empty, unknown, and near-miss names are all rejected.
        TestData {
            logrus_level: "",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL)),
        },
        TestData {
            logrus_level: "foo",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL)),
        },
        TestData {
            logrus_level: "debugging",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL)),
        },
        TestData {
            logrus_level: "xdebug",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL)),
        },
        TestData {
            logrus_level: "trace",
            result: Ok(slog::Level::Trace),
        },
        TestData {
            logrus_level: "debug",
            result: Ok(slog::Level::Debug),
        },
        TestData {
            logrus_level: "info",
            result: Ok(slog::Level::Info),
        },
        // "warn" and "warning" are synonyms.
        TestData {
            logrus_level: "warn",
            result: Ok(slog::Level::Warning),
        },
        TestData {
            logrus_level: "warning",
            result: Ok(slog::Level::Warning),
        },
        TestData {
            logrus_level: "error",
            result: Ok(slog::Level::Error),
        },
        // logrus "critical", "fatal" and "panic" all collapse to Critical.
        TestData {
            logrus_level: "critical",
            result: Ok(slog::Level::Critical),
        },
        TestData {
            logrus_level: "fatal",
            result: Ok(slog::Level::Critical),
        },
        TestData {
            logrus_level: "panic",
            result: Ok(slog::Level::Critical),
        },
    ];

    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let result = logrus_to_slog_level(d.logrus_level);

        let msg = format!("{}: result: {:?}", msg, result);

        assert_result!(d.result, result, format!("{}", msg));
    }
}
|
||||
|
||||
#[test]
fn test_get_log_level() {
    // Table-driven test for parsing the "agent.log=<level>" parameter.
    #[derive(Debug)]
    struct TestData<'a> {
        param: &'a str,
        result: Result<slog::Level>,
    }

    let tests = &[
        // No '=' at all: not a key=value parameter.
        TestData {
            param: "",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_PARAM)),
        },
        // Key-side errors: wrong or empty key name.
        TestData {
            param: "=",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_KEY)),
        },
        TestData {
            param: "x=",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_KEY)),
        },
        TestData {
            param: "=y",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_KEY)),
        },
        // Multiple '=' signs make the whole parameter invalid.
        TestData {
            param: "==",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_PARAM)),
        },
        TestData {
            param: "= =",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_PARAM)),
        },
        TestData {
            param: "x=y",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_KEY)),
        },
        TestData {
            param: "agent=debug",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_KEY)),
        },
        TestData {
            param: "agent.logg=debug",
            result: Err(make_err(ERR_INVALID_LOG_LEVEL_KEY)),
        },
        // Valid key with every recognised level name.
        TestData {
            param: "agent.log=trace",
            result: Ok(slog::Level::Trace),
        },
        TestData {
            param: "agent.log=debug",
            result: Ok(slog::Level::Debug),
        },
        TestData {
            param: "agent.log=info",
            result: Ok(slog::Level::Info),
        },
        TestData {
            param: "agent.log=warn",
            result: Ok(slog::Level::Warning),
        },
        TestData {
            param: "agent.log=warning",
            result: Ok(slog::Level::Warning),
        },
        TestData {
            param: "agent.log=error",
            result: Ok(slog::Level::Error),
        },
        TestData {
            param: "agent.log=critical",
            result: Ok(slog::Level::Critical),
        },
        TestData {
            param: "agent.log=fatal",
            result: Ok(slog::Level::Critical),
        },
        TestData {
            param: "agent.log=panic",
            result: Ok(slog::Level::Critical),
        },
    ];

    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let result = get_log_level(d.param);

        let msg = format!("{}: result: {:?}", msg, result);

        assert_result!(d.result, result, format!("{}", msg));
    }
}
|
||||
|
||||
#[test]
fn test_get_hotplug_timeout() {
    // Table-driven test for parsing "agent.hotplug_timeout=<seconds>".
    #[derive(Debug)]
    struct TestData<'a> {
        param: &'a str,
        result: Result<time::Duration>,
    }

    let tests = &[
        // Missing '=' means the parameter cannot carry a value.
        TestData {
            param: "",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT)),
        },
        TestData {
            param: "agent.hotplug_timeout",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT)),
        },
        // Wrong key name (including a near-miss typo).
        TestData {
            param: "foo=bar",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT_KEY)),
        },
        TestData {
            param: "agent.hotplug_timeot=1",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT_KEY)),
        },
        // Valid non-negative integer values, in seconds.
        TestData {
            param: "agent.hotplug_timeout=1",
            result: Ok(time::Duration::from_secs(1)),
        },
        TestData {
            param: "agent.hotplug_timeout=3",
            result: Ok(time::Duration::from_secs(3)),
        },
        TestData {
            param: "agent.hotplug_timeout=3600",
            result: Ok(time::Duration::from_secs(3600)),
        },
        TestData {
            param: "agent.hotplug_timeout=0",
            result: Ok(time::Duration::from_secs(0)),
        },
        // Negative or non-numeric values fail to parse.
        TestData {
            param: "agent.hotplug_timeout=-1",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT_PARAM)),
        },
        TestData {
            param: "agent.hotplug_timeout=4jbsdja",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT_PARAM)),
        },
        TestData {
            param: "agent.hotplug_timeout=foo",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT_PARAM)),
        },
        TestData {
            param: "agent.hotplug_timeout=j",
            result: Err(make_err(ERR_INVALID_HOTPLUG_TIMEOUT_PARAM)),
        },
    ];

    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let result = get_hotplug_timeout(d.param);

        let msg = format!("{}: result: {:?}", msg, result);

        assert_result!(d.result, result, format!("{}", msg));
    }
}
|
||||
|
||||
#[test]
fn test_get_container_pipe_size() {
    // Table-driven test for parsing "agent.container_pipe_size=<bytes>".
    #[derive(Debug)]
    struct TestData<'a> {
        param: &'a str,
        result: Result<i32>,
    }

    let tests = &[
        // Missing '=' means the parameter cannot carry a value.
        TestData {
            param: "",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE)),
        },
        TestData {
            param: "agent.container_pipe_size",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE)),
        },
        // Wrong key name (including a near-miss typo).
        TestData {
            param: "foo=bar",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_KEY)),
        },
        TestData {
            param: "agent.container_pip_siz=1",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_KEY)),
        },
        // Valid non-negative sizes parse to their i32 value.
        TestData {
            param: "agent.container_pipe_size=1",
            result: Ok(1),
        },
        TestData {
            param: "agent.container_pipe_size=3",
            result: Ok(3),
        },
        TestData {
            param: "agent.container_pipe_size=2097152",
            result: Ok(2097152),
        },
        TestData {
            param: "agent.container_pipe_size=0",
            result: Ok(0),
        },
        // Negative values are rejected with a dedicated error.
        TestData {
            param: "agent.container_pipe_size=-1",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_NEGATIVE)),
        },
        // Non-numeric values fail to parse.
        TestData {
            param: "agent.container_pipe_size=foobar",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
        },
        TestData {
            param: "agent.container_pipe_size=j",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
        },
        TestData {
            param: "agent.container_pipe_size=4jbsdja",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
        },
        // Larger than i32::MAX: overflows the i32 target type.
        TestData {
            param: "agent.container_pipe_size=4294967296",
            result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
        },
    ];

    for (i, d) in tests.iter().enumerate() {
        let msg = format!("test[{}]: {:?}", i, d);

        let result = get_container_pipe_size(d.param);

        let msg = format!("{}: result: {:?}", msg, result);

        assert_result!(d.result, result, format!("{}", msg));
    }
}
|
||||
}
|
||||
362
src/agent/src/device.rs
Normal file
362
src/agent/src/device.rs
Normal file
@@ -0,0 +1,362 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use libc::{c_uint, major, minor};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::path::Path;
|
||||
use std::sync::{mpsc, Arc, Mutex};
|
||||
|
||||
use crate::linux_abi::*;
|
||||
use crate::mount::{DRIVERBLKTYPE, DRIVERMMIOBLKTYPE, DRIVERNVDIMMTYPE, DRIVERSCSITYPE};
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::{AGENT_CONFIG, GLOBAL_DEVICE_WATCHER};
|
||||
use oci::Spec;
|
||||
use protocols::agent::Device;
|
||||
use rustjail::errors::*;
|
||||
|
||||
// Convenience macro to obtain the scope logger
macro_rules! sl {
    () => {
        // Tag every log record emitted by this module with its subsystem.
        slog_scope::logger().new(o!("subsystem" => "device"))
    };
}
|
||||
|
||||
// DeviceHandler is the type of callback to be defined to handle every type of device driver.
type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>) -> Result<()>;

// DeviceHandlerList lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)]
lazy_static! {
    static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
        let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new();
        // Map each transport type string (from the mount module) to the
        // function that resolves and registers that kind of device.
        m.insert(DRIVERBLKTYPE, virtio_blk_device_handler);
        m.insert(DRIVERMMIOBLKTYPE, virtiommio_blk_device_handler);
        m.insert(DRIVERNVDIMMTYPE, virtio_nvdimm_device_handler);
        m.insert(DRIVERSCSITYPE, virtio_scsi_device_handler);
        m
    };
}
|
||||
|
||||
// Ask the kernel to rescan the PCI bus so devices hotplugged by the VMM
// become visible in sysfs.
pub fn rescan_pci_bus() -> Result<()> {
    online_device(SYSFS_PCI_BUS_RESCAN_FILE)
}
|
||||
|
||||
pub fn online_device(path: &str) -> Result<()> {
|
||||
fs::write(path, "1")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// get_pci_device_address fetches the complete PCI address in sysfs, based on the PCI
|
||||
// identifier provided. This should be in the format: "bridgeAddr/deviceAddr".
|
||||
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
|
||||
// while deviceAddr is the address at which the device is attached on the bridge.
|
||||
fn get_pci_device_address(pci_id: &str) -> Result<String> {
|
||||
let tokens: Vec<&str> = pci_id.split("/").collect();
|
||||
|
||||
if tokens.len() != 2 {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"PCI Identifier for device should be of format [bridgeAddr/deviceAddr], got {}",
|
||||
pci_id
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
let bridge_id = tokens[0];
|
||||
let device_id = tokens[1];
|
||||
|
||||
// Deduce the complete bridge address based on the bridge address identifier passed
|
||||
// and the fact that bridges are attached on the main bus with function 0.
|
||||
let pci_bridge_addr = format!("0000:00:{}.0", bridge_id);
|
||||
|
||||
// Find out the bus exposed by bridge
|
||||
let bridge_bus_path = format!("{}/{}/pci_bus/", SYSFS_PCI_BUS_PREFIX, pci_bridge_addr);
|
||||
|
||||
let files_slice: Vec<_> = fs::read_dir(&bridge_bus_path)
|
||||
.unwrap()
|
||||
.map(|res| res.unwrap().path())
|
||||
.collect();
|
||||
let bus_num = files_slice.len();
|
||||
|
||||
if bus_num != 1 {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Expected an entry for bus in {}, got {} entries instead",
|
||||
bridge_bus_path, bus_num
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
let bus = files_slice[0].file_name().unwrap().to_str().unwrap();
|
||||
|
||||
// Device address is based on the bus of the bridge to which it is attached.
|
||||
// We do not pass devices as multifunction, hence the trailing 0 in the address.
|
||||
let pci_device_addr = format!("{}:{}.0", bus, device_id);
|
||||
|
||||
let bridge_device_pci_addr = format!("{}/{}", pci_bridge_addr, pci_device_addr);
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"Fetched PCI address for device PCIAddr:{}\n", bridge_device_pci_addr
|
||||
);
|
||||
|
||||
Ok(bridge_device_pci_addr)
|
||||
}
|
||||
|
||||
// Resolve a device address to its "/dev/<name>" path, waiting (bounded by
// the configured hotplug timeout) for the hotplug uevent if the device has
// not appeared yet.
fn get_device_name(sandbox: &Arc<Mutex<Sandbox>>, dev_addr: &str) -> Result<String> {
    // Keep the same lock order as uevent::handle_block_add_event(), otherwise it may cause deadlock.
    let mut w = GLOBAL_DEVICE_WATCHER.lock().unwrap();
    let sb = sandbox.lock().unwrap();
    // Fast path: the hotplug event may already have been processed and
    // recorded in the sandbox's PCI device map.
    for (key, value) in sb.pci_device_map.iter() {
        if key.contains(dev_addr) {
            info!(sl!(), "Device {} found in pci device map", dev_addr);
            return Ok(format!("{}/{}", SYSTEM_DEV_PATH, value));
        }
    }
    drop(sb);

    // If device is not found in the device map, hotplug event has not
    // been received yet, create and add channel to the watchers map.
    // The key of the watchers map is the device we are interested in.
    // Note this is done inside the lock, not to miss any events from the
    // global udev listener.
    let (tx, rx) = mpsc::channel::<String>();
    w.insert(dev_addr.to_string(), tx);
    drop(w);

    info!(sl!(), "Waiting on channel for device notification\n");
    let hotplug_timeout = AGENT_CONFIG.read().unwrap().hotplug_timeout;
    let dev_name = match rx.recv_timeout(hotplug_timeout) {
        Ok(name) => name,
        Err(_) => {
            // Timed out (or sender dropped): clean up our watcher entry so
            // stale channels do not accumulate, then report the timeout.
            GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
            return Err(ErrorKind::ErrorCode(format!(
                "Timeout reached after {:?} waiting for device {}",
                hotplug_timeout, dev_addr
            ))
            .into());
        }
    };

    Ok(format!("{}/{}", SYSTEM_DEV_PATH, &dev_name))
}
|
||||
|
||||
pub fn get_scsi_device_name(sandbox: &Arc<Mutex<Sandbox>>, scsi_addr: &str) -> Result<String> {
|
||||
let dev_sub_path = format!("{}{}/{}", SCSI_HOST_CHANNEL, scsi_addr, SCSI_BLOCK_SUFFIX);
|
||||
|
||||
scan_scsi_bus(scsi_addr)?;
|
||||
get_device_name(sandbox, &dev_sub_path)
|
||||
}
|
||||
|
||||
pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Result<String> {
|
||||
let pci_addr = get_pci_device_address(pci_id)?;
|
||||
|
||||
rescan_pci_bus()?;
|
||||
get_device_name(sandbox, &pci_addr)
|
||||
}
|
||||
|
||||
/// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
|
||||
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
|
||||
let tokens: Vec<&str> = scsi_addr.split(":").collect();
|
||||
if tokens.len() != 2 {
|
||||
return Err(ErrorKind::Msg(format!(
|
||||
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
|
||||
scsi_addr
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
// Scan scsi host passing in the channel, SCSI id and LUN.
|
||||
// Channel is always 0 because we have only one SCSI controller.
|
||||
let scan_data = format!("0 {} {}", tokens[0], tokens[1]);
|
||||
|
||||
for entry in fs::read_dir(SYSFS_SCSI_HOST_PATH)? {
|
||||
let host = entry?.file_name();
|
||||
let scan_path = format!(
|
||||
"{}/{}/{}",
|
||||
SYSFS_SCSI_HOST_PATH,
|
||||
host.to_str().unwrap(),
|
||||
"scan"
|
||||
);
|
||||
|
||||
fs::write(scan_path, &scan_data)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// update_spec_device_list takes a device description provided by the caller,
|
||||
// trying to find it on the guest. Once this device has been identified, the
|
||||
// "real" information that can be read from inside the VM is used to update
|
||||
// the same device in the list of devices provided through the OCI spec.
|
||||
// This is needed to update information about minor/major numbers that cannot
|
||||
// be predicted from the caller.
|
||||
fn update_spec_device_list(device: &Device, spec: &mut Spec) -> Result<()> {
|
||||
let major_id: c_uint;
|
||||
let minor_id: c_uint;
|
||||
|
||||
// If no container_path is provided, we won't be able to match and
|
||||
// update the device in the OCI spec device list. This is an error.
|
||||
if device.container_path == "" {
|
||||
return Err(ErrorKind::Msg(format!(
|
||||
"container_path cannot empty for device {:?}",
|
||||
device
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
let linux = match spec.linux.as_mut() {
|
||||
None => {
|
||||
return Err(
|
||||
ErrorKind::ErrorCode("Spec didn't container linux field".to_string()).into(),
|
||||
)
|
||||
}
|
||||
Some(l) => l,
|
||||
};
|
||||
|
||||
if !Path::new(&device.vm_path).exists() {
|
||||
return Err(ErrorKind::Msg(format!("vm_path:{} doesn't exist", device.vm_path)).into());
|
||||
}
|
||||
|
||||
let meta = fs::metadata(&device.vm_path)?;
|
||||
let dev_id = meta.rdev();
|
||||
unsafe {
|
||||
major_id = major(dev_id);
|
||||
minor_id = minor(dev_id);
|
||||
}
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"got the device: dev_path: {}, major: {}, minor: {}\n", &device.vm_path, major_id, minor_id
|
||||
);
|
||||
|
||||
let devices = linux.devices.as_mut_slice();
|
||||
for dev in devices.iter_mut() {
|
||||
if dev.path == device.container_path {
|
||||
let host_major = dev.major;
|
||||
let host_minor = dev.minor;
|
||||
|
||||
dev.major = major_id as i64;
|
||||
dev.minor = minor_id as i64;
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"change the device from major: {} minor: {} to vm device major: {} minor: {}",
|
||||
host_major,
|
||||
host_minor,
|
||||
major_id,
|
||||
minor_id
|
||||
);
|
||||
|
||||
// Resources must be updated since they are used to identify the
|
||||
// device in the devices cgroup.
|
||||
if let Some(res) = linux.resources.as_mut() {
|
||||
let ds = res.devices.as_mut_slice();
|
||||
for d in ds.iter_mut() {
|
||||
if d.major == Some(host_major) && d.minor == Some(host_minor) {
|
||||
d.major = Some(major_id as i64);
|
||||
d.minor = Some(minor_id as i64);
|
||||
|
||||
info!(
|
||||
sl!(),
|
||||
"set resources for device major: {} minor: {}\n", major_id, minor_id
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// device.Id should be the predicted device name (vda, vdb, ...)
|
||||
// device.VmPath already provides a way to send it in
|
||||
fn virtiommio_blk_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
if device.vm_path == "" {
|
||||
return Err(ErrorKind::Msg("Invalid path for virtio mmio blk device".to_string()).into());
|
||||
}
|
||||
|
||||
update_spec_device_list(device, spec)
|
||||
}
|
||||
|
||||
// device.Id should be the PCI address in the format "bridgeAddr/deviceAddr".
|
||||
// Here, bridgeAddr is the address at which the brige is attached on the root bus,
|
||||
// while deviceAddr is the address at which the device is attached on the bridge.
|
||||
fn virtio_blk_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
let mut dev = device.clone();
|
||||
dev.vm_path = get_pci_device_name(sandbox, &device.id)?;
|
||||
update_spec_device_list(&dev, spec)
|
||||
}
|
||||
|
||||
// device.Id should be the SCSI address of the disk in the format "scsiID:lunID"
|
||||
fn virtio_scsi_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
let mut dev = device.clone();
|
||||
dev.vm_path = get_scsi_device_name(sandbox, &device.id)?;
|
||||
update_spec_device_list(&dev, spec)
|
||||
}
|
||||
|
||||
fn virtio_nvdimm_device_handler(
|
||||
device: &Device,
|
||||
spec: &mut Spec,
|
||||
_sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
if device.vm_path == "" {
|
||||
return Err(ErrorKind::Msg("Invalid path for nvdimm device".to_string()).into());
|
||||
}
|
||||
|
||||
update_spec_device_list(device, spec)
|
||||
}
|
||||
|
||||
pub fn add_devices(
|
||||
devices: &[Device],
|
||||
spec: &mut Spec,
|
||||
sandbox: &Arc<Mutex<Sandbox>>,
|
||||
) -> Result<()> {
|
||||
for device in devices.iter() {
|
||||
add_device(device, spec, sandbox)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn add_device(device: &Device, spec: &mut Spec, sandbox: &Arc<Mutex<Sandbox>>) -> Result<()> {
|
||||
// log before validation to help with debugging gRPC protocol version differences.
|
||||
info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}",
|
||||
device.id, device.field_type, device.vm_path, device.container_path, device.options);
|
||||
|
||||
if device.field_type == "" {
|
||||
return Err(ErrorKind::Msg(format!("invalid type for device {:?}", device)).into());
|
||||
}
|
||||
|
||||
if device.id == "" && device.vm_path == "" {
|
||||
return Err(
|
||||
ErrorKind::Msg(format!("invalid ID and VM path for device {:?}", device)).into(),
|
||||
);
|
||||
}
|
||||
|
||||
if device.container_path == "" {
|
||||
return Err(
|
||||
ErrorKind::Msg(format!("invalid container path for device {:?}", device)).into(),
|
||||
);
|
||||
}
|
||||
|
||||
match DEVICEHANDLERLIST.get(device.field_type.as_str()) {
|
||||
None => Err(ErrorKind::Msg(format!("Unknown device type {}", device.field_type)).into()),
|
||||
Some(dev_handler) => dev_handler(device, spec, sandbox),
|
||||
}
|
||||
}
|
||||
50
src/agent/src/linux_abi.rs
Normal file
50
src/agent/src/linux_abi.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
/// Linux ABI related constants.
|
||||
|
||||
pub const SYSFS_DIR: &str = "/sys";
|
||||
|
||||
pub const SYSFS_PCI_BUS_PREFIX: &str = "/sys/bus/pci/devices";
|
||||
pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
|
||||
#[cfg(any(
|
||||
target_arch = "powerpc64le",
|
||||
target_arch = "s390x",
|
||||
target_arch = "x86_64",
|
||||
target_arch = "x86"
|
||||
))]
|
||||
pub const PCI_ROOT_BUS_PATH: &str = "/devices/pci0000:00";
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
pub const PCI_ROOT_BUS_PATH: &str = "/devices/platform/4010000000.pcie/pci0000:00";
|
||||
|
||||
pub const SYSFS_CPU_ONLINE_PATH: &str = "/sys/devices/system/cpu";
|
||||
|
||||
pub const SYSFS_MEMORY_BLOCK_SIZE_PATH: &str = "/sys/devices/system/memory/block_size_bytes";
|
||||
pub const SYSFS_MEMORY_HOTPLUG_PROBE_PATH: &str = "/sys/devices/system/memory/probe";
|
||||
pub const SYSFS_MEMORY_ONLINE_PATH: &str = "/sys/devices/system/memory";
|
||||
|
||||
// Here in "0:0", the first number is the SCSI host number because
|
||||
// only one SCSI controller has been plugged, while the second number
|
||||
// is always 0.
|
||||
pub const SCSI_HOST_CHANNEL: &str = "0:0:";
|
||||
pub const SCSI_BLOCK_SUFFIX: &str = "block";
|
||||
pub const SYSFS_SCSI_HOST_PATH: &str = "/sys/class/scsi_host";
|
||||
|
||||
pub const SYSFS_CGROUPPATH: &str = "/sys/fs/cgroup";
|
||||
pub const SYSFS_ONLINE_FILE: &str = "online";
|
||||
|
||||
pub const PROC_MOUNTSTATS: &str = "/proc/self/mountstats";
|
||||
pub const PROC_CGROUPS: &str = "/proc/cgroups";
|
||||
|
||||
pub const SYSTEM_DEV_PATH: &str = "/dev";
|
||||
|
||||
// Linux UEvent related consts.
|
||||
pub const U_EVENT_ACTION: &str = "ACTION";
|
||||
pub const U_EVENT_ACTION_ADD: &str = "add";
|
||||
pub const U_EVENT_DEV_PATH: &str = "DEVPATH";
|
||||
pub const U_EVENT_SUB_SYSTEM: &str = "SUBSYSTEM";
|
||||
pub const U_EVENT_SEQ_NUM: &str = "SEQNUM";
|
||||
pub const U_EVENT_DEV_NAME: &str = "DEVNAME";
|
||||
pub const U_EVENT_INTERFACE: &str = "INTERFACE";
|
||||
471
src/agent/src/main.rs
Normal file
471
src/agent/src/main.rs
Normal file
@@ -0,0 +1,471 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(unused_parens)]
|
||||
#![allow(unused_unsafe)]
|
||||
#![allow(dead_code)]
|
||||
#![allow(non_snake_case)]
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate oci;
|
||||
extern crate prctl;
|
||||
extern crate protocols;
|
||||
extern crate regex;
|
||||
extern crate rustjail;
|
||||
extern crate scan_fmt;
|
||||
extern crate serde_json;
|
||||
extern crate signal_hook;
|
||||
|
||||
#[macro_use]
|
||||
extern crate scopeguard;
|
||||
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
#[macro_use]
|
||||
extern crate netlink;
|
||||
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::sys::socket::{self, AddressFamily, SockAddr, SockFlag, SockType};
|
||||
use nix::sys::wait::{self, WaitStatus};
|
||||
use nix::unistd;
|
||||
use prctl::set_child_subreaper;
|
||||
use rustjail::errors::*;
|
||||
use signal_hook::{iterator::Signals, SIGCHLD};
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::fs::{self, File};
|
||||
use std::os::unix::fs as unixfs;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
use std::sync::mpsc::{self, Sender};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::{io, thread};
|
||||
use unistd::Pid;
|
||||
|
||||
mod config;
|
||||
mod device;
|
||||
mod linux_abi;
|
||||
mod mount;
|
||||
mod namespace;
|
||||
mod network;
|
||||
pub mod random;
|
||||
mod sandbox;
|
||||
#[cfg(test)]
|
||||
mod test_utils;
|
||||
mod uevent;
|
||||
mod version;
|
||||
|
||||
use mount::{cgroups_mount, general_mount};
|
||||
use sandbox::Sandbox;
|
||||
use slog::Logger;
|
||||
use uevent::watch_uevents;
|
||||
|
||||
mod rpc;
|
||||
|
||||
const NAME: &str = "kata-agent";
|
||||
const VSOCK_ADDR: &str = "vsock://-1";
|
||||
const VSOCK_PORT: u16 = 1024;
|
||||
const KERNEL_CMDLINE_FILE: &str = "/proc/cmdline";
|
||||
const CONSOLE_PATH: &str = "/dev/console";
|
||||
|
||||
lazy_static! {
|
||||
static ref GLOBAL_DEVICE_WATCHER: Arc<Mutex<HashMap<String, Sender<String>>>> =
|
||||
Arc::new(Mutex::new(HashMap::new()));
|
||||
static ref AGENT_CONFIG: Arc<RwLock<agentConfig>> =
|
||||
Arc::new(RwLock::new(config::agentConfig::new()));
|
||||
}
|
||||
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
fn announce(logger: &Logger) {
|
||||
let commit = match env::var("VERSION_COMMIT") {
|
||||
Ok(s) => s,
|
||||
Err(_) => String::from(""),
|
||||
};
|
||||
|
||||
info!(logger, "announce";
|
||||
"agent-commit" => commit.as_str(),
|
||||
|
||||
// Avoid any possibility of confusion with the old agent
|
||||
"agent-type" => "rust",
|
||||
|
||||
"agent-version" => version::AGENT_VERSION,
|
||||
"api-version" => version::API_VERSION,
|
||||
);
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
if args.len() == 2 && args[1] == "init" {
|
||||
rustjail::container::init_child();
|
||||
exit(0);
|
||||
}
|
||||
|
||||
env::set_var("RUST_BACKTRACE", "full");
|
||||
|
||||
lazy_static::initialize(&SHELLS);
|
||||
|
||||
lazy_static::initialize(&AGENT_CONFIG);
|
||||
|
||||
// support vsock log
|
||||
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
|
||||
let writer = unsafe { File::from_raw_fd(wfd) };
|
||||
|
||||
let agentConfig = AGENT_CONFIG.clone();
|
||||
|
||||
if unistd::getpid() == Pid::from_raw(1) {
|
||||
// Init a temporary logger used by init agent as init process
|
||||
// since before do the base mount, it wouldn't access "/proc/cmdline"
|
||||
// to get the customzied debug level.
|
||||
let logger = logging::create_logger(NAME, "agent", slog::Level::Debug, writer);
|
||||
init_agent_as_init(&logger)?;
|
||||
}
|
||||
|
||||
// once parsed cmdline and set the config, release the write lock
|
||||
// as soon as possible in case other thread would get read lock on
|
||||
// it.
|
||||
{
|
||||
let mut config = agentConfig.write().unwrap();
|
||||
config.parse_cmdline(KERNEL_CMDLINE_FILE)?;
|
||||
}
|
||||
|
||||
let config = agentConfig.read().unwrap();
|
||||
let log_vport = config.log_vport as u32;
|
||||
let log_handle = thread::spawn(move || -> Result<()> {
|
||||
let mut reader = unsafe { File::from_raw_fd(rfd) };
|
||||
if log_vport > 0 {
|
||||
let listenfd = socket::socket(
|
||||
AddressFamily::Vsock,
|
||||
SockType::Stream,
|
||||
SockFlag::SOCK_CLOEXEC,
|
||||
None,
|
||||
)?;
|
||||
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, log_vport);
|
||||
socket::bind(listenfd, &addr)?;
|
||||
socket::listen(listenfd, 1)?;
|
||||
let datafd = socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?;
|
||||
let mut log_writer = unsafe { File::from_raw_fd(datafd) };
|
||||
let _ = io::copy(&mut reader, &mut log_writer)?;
|
||||
let _ = unistd::close(listenfd);
|
||||
let _ = unistd::close(datafd);
|
||||
}
|
||||
// copy log to stdout
|
||||
let mut stdout_writer = io::stdout();
|
||||
let _ = io::copy(&mut reader, &mut stdout_writer)?;
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let writer = unsafe { File::from_raw_fd(wfd) };
|
||||
// Recreate a logger with the log level get from "/proc/cmdline".
|
||||
let logger = logging::create_logger(NAME, "agent", config.log_level, writer);
|
||||
|
||||
announce(&logger);
|
||||
|
||||
if args.len() == 2 && args[1] == "--version" {
|
||||
// force logger to flush
|
||||
drop(logger);
|
||||
|
||||
exit(0);
|
||||
}
|
||||
|
||||
// This "unused" variable is required as it enables the global (and crucially static) logger,
|
||||
// which is required to satisfy the the lifetime constraints of the auto-generated gRPC code.
|
||||
let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "rpc")));
|
||||
|
||||
let shells = SHELLS.clone();
|
||||
let debug_console_vport = config.debug_console_vport as u32;
|
||||
|
||||
let shell_handle = if config.debug_console {
|
||||
let thread_logger = logger.clone();
|
||||
|
||||
thread::spawn(move || {
|
||||
let shells = shells.lock().unwrap();
|
||||
let result = setup_debug_console(shells.to_vec(), debug_console_vport);
|
||||
if result.is_err() {
|
||||
// Report error, but don't fail
|
||||
warn!(thread_logger, "failed to setup debug console";
|
||||
"error" => format!("{}", result.unwrap_err()));
|
||||
}
|
||||
})
|
||||
} else {
|
||||
unsafe { MaybeUninit::zeroed().assume_init() }
|
||||
};
|
||||
|
||||
// Initialize unique sandbox structure.
|
||||
let s = Sandbox::new(&logger).map_err(|e| {
|
||||
error!(logger, "Failed to create sandbox with error: {:?}", e);
|
||||
e
|
||||
})?;
|
||||
|
||||
let sandbox = Arc::new(Mutex::new(s));
|
||||
|
||||
setup_signal_handler(&logger, sandbox.clone()).unwrap();
|
||||
watch_uevents(sandbox.clone());
|
||||
|
||||
let (tx, rx) = mpsc::channel::<i32>();
|
||||
sandbox.lock().unwrap().sender = Some(tx);
|
||||
|
||||
//vsock:///dev/vsock, port
|
||||
let mut server = rpc::start(sandbox.clone(), VSOCK_ADDR, VSOCK_PORT);
|
||||
|
||||
/*
|
||||
let _ = fs::remove_file("/tmp/testagent");
|
||||
let _ = fs::remove_dir_all("/run/agent");
|
||||
let mut server = grpc::start(sandbox.clone(), "unix:///tmp/testagent", 1);
|
||||
*/
|
||||
|
||||
let handle = thread::spawn(move || {
|
||||
// info!("Press ENTER to exit...");
|
||||
// let _ = io::stdin().read(&mut [0]).unwrap();
|
||||
// thread::sleep(Duration::from_secs(3000));
|
||||
|
||||
let _ = rx.recv().unwrap();
|
||||
});
|
||||
// receive something from destroy_sandbox here?
|
||||
// or in the thread above? It depneds whether grpc request
|
||||
// are run in another thread or in the main thead?
|
||||
// let _ = rx.wait();
|
||||
|
||||
let _ = server.start().unwrap();
|
||||
|
||||
handle.join().unwrap();
|
||||
|
||||
server.shutdown();
|
||||
|
||||
let _ = log_handle.join();
|
||||
|
||||
if config.debug_console {
|
||||
shell_handle.join().unwrap();
|
||||
}
|
||||
|
||||
let _ = fs::remove_file("/tmp/testagent");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
use nix::sys::wait::WaitPidFlag;
|
||||
|
||||
fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result<()> {
|
||||
let logger = logger.new(o!("subsystem" => "signals"));
|
||||
|
||||
set_child_subreaper(true).map_err(|err| {
|
||||
format!(
|
||||
"failed to setup agent as a child subreaper, failed with {}",
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
let signals = Signals::new(&[SIGCHLD])?;
|
||||
|
||||
let s = sandbox.clone();
|
||||
|
||||
thread::spawn(move || {
|
||||
'outer: for sig in signals.forever() {
|
||||
info!(logger, "received signal"; "signal" => sig);
|
||||
|
||||
// sevral signals can be combined together
|
||||
// as one. So loop around to reap all
|
||||
// exited children
|
||||
'inner: loop {
|
||||
let wait_status = match wait::waitpid(
|
||||
Some(Pid::from_raw(-1)),
|
||||
Some(WaitPidFlag::WNOHANG | WaitPidFlag::__WALL),
|
||||
) {
|
||||
Ok(s) => {
|
||||
if s == WaitStatus::StillAlive {
|
||||
continue 'outer;
|
||||
}
|
||||
s
|
||||
}
|
||||
Err(e) => {
|
||||
info!(
|
||||
logger,
|
||||
"waitpid reaper failed";
|
||||
"error" => e.as_errno().unwrap().desc()
|
||||
);
|
||||
continue 'outer;
|
||||
}
|
||||
};
|
||||
|
||||
let pid = wait_status.pid();
|
||||
if pid.is_some() {
|
||||
let raw_pid = pid.unwrap().as_raw();
|
||||
let child_pid = format!("{}", raw_pid);
|
||||
|
||||
let logger = logger.new(o!("child-pid" => child_pid));
|
||||
|
||||
let mut sandbox = s.lock().unwrap();
|
||||
let process = sandbox.find_process(raw_pid);
|
||||
if process.is_none() {
|
||||
info!(logger, "child exited unexpectedly");
|
||||
continue 'inner;
|
||||
}
|
||||
|
||||
let mut p = process.unwrap();
|
||||
|
||||
if p.exit_pipe_w.is_none() {
|
||||
error!(logger, "the process's exit_pipe_w isn't set");
|
||||
continue 'inner;
|
||||
}
|
||||
let pipe_write = p.exit_pipe_w.unwrap();
|
||||
let ret: i32;
|
||||
|
||||
match wait_status {
|
||||
WaitStatus::Exited(_, c) => ret = c,
|
||||
WaitStatus::Signaled(_, sig, _) => ret = sig as i32,
|
||||
_ => {
|
||||
info!(logger, "got wrong status for process";
|
||||
"child-status" => format!("{:?}", wait_status));
|
||||
continue 'inner;
|
||||
}
|
||||
}
|
||||
|
||||
p.exit_code = ret;
|
||||
let _ = unistd::close(pipe_write);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// init_agent_as_init will do the initializations such as setting up the rootfs
|
||||
// when this agent has been run as the init process.
|
||||
fn init_agent_as_init(logger: &Logger) -> Result<()> {
|
||||
general_mount(logger)?;
|
||||
cgroups_mount(logger)?;
|
||||
|
||||
fs::remove_file(Path::new("/dev/ptmx"))?;
|
||||
unixfs::symlink(Path::new("/dev/pts/ptmx"), Path::new("/dev/ptmx"))?;
|
||||
|
||||
unistd::setsid()?;
|
||||
|
||||
unsafe {
|
||||
libc::ioctl(io::stdin().as_raw_fd(), libc::TIOCSCTTY, 1);
|
||||
}
|
||||
|
||||
env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref SHELLS: Arc<Mutex<Vec<String>>> = {
|
||||
let mut v = Vec::new();
|
||||
|
||||
if !cfg!(test) {
|
||||
v.push("/bin/bash".to_string());
|
||||
v.push("/bin/sh".to_string());
|
||||
}
|
||||
|
||||
Arc::new(Mutex::new(v))
|
||||
};
|
||||
}
|
||||
|
||||
// pub static mut LOG_LEVEL: ;
|
||||
// pub static mut TRACE_MODE: ;
|
||||
|
||||
use crate::config::agentConfig;
|
||||
use nix::sys::stat::Mode;
|
||||
use std::os::unix::io::{FromRawFd, RawFd};
|
||||
use std::path::PathBuf;
|
||||
use std::process::{exit, Command, Stdio};
|
||||
|
||||
fn setup_debug_console(shells: Vec<String>, port: u32) -> Result<()> {
|
||||
for shell in shells.iter() {
|
||||
let binary = PathBuf::from(shell);
|
||||
if binary.exists() {
|
||||
let f: RawFd = if port > 0 {
|
||||
let listenfd = socket::socket(
|
||||
AddressFamily::Vsock,
|
||||
SockType::Stream,
|
||||
SockFlag::SOCK_CLOEXEC,
|
||||
None,
|
||||
)?;
|
||||
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, port);
|
||||
socket::bind(listenfd, &addr)?;
|
||||
socket::listen(listenfd, 1)?;
|
||||
socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?
|
||||
} else {
|
||||
let mut flags = OFlag::empty();
|
||||
flags.insert(OFlag::O_RDWR);
|
||||
flags.insert(OFlag::O_CLOEXEC);
|
||||
fcntl::open(CONSOLE_PATH, flags, Mode::empty())?
|
||||
};
|
||||
|
||||
let cmd = Command::new(shell)
|
||||
.arg("-i")
|
||||
.stdin(unsafe { Stdio::from_raw_fd(f) })
|
||||
.stdout(unsafe { Stdio::from_raw_fd(f) })
|
||||
.stderr(unsafe { Stdio::from_raw_fd(f) })
|
||||
.spawn();
|
||||
|
||||
let mut cmd = match cmd {
|
||||
Ok(c) => c,
|
||||
Err(_) => {
|
||||
return Err(ErrorKind::ErrorCode("failed to spawn shell".to_string()).into())
|
||||
}
|
||||
};
|
||||
|
||||
cmd.wait()?;
|
||||
|
||||
return Ok(());
|
||||
} else {
|
||||
return Err(ErrorKind::ErrorCode("invalid shell".to_string()).into());
|
||||
}
|
||||
}
|
||||
|
||||
Err(ErrorKind::ErrorCode("no shell".to_string()).into())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn test_setup_debug_console_no_shells() {
|
||||
// Guarantee no shells have been added
|
||||
// (required to avoid racing with
|
||||
// test_setup_debug_console_invalid_shell()).
|
||||
let shells_ref = SHELLS.clone();
|
||||
let mut shells = shells_ref.lock().unwrap();
|
||||
shells.clear();
|
||||
|
||||
let result = setup_debug_console(shells.to_vec(), 0);
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(result.unwrap_err().to_string(), "Error Code: 'no shell'");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_setup_debug_console_invalid_shell() {
|
||||
let shells_ref = SHELLS.clone();
|
||||
let mut shells = shells_ref.lock().unwrap();
|
||||
|
||||
let dir = tempdir().expect("failed to create tmpdir");
|
||||
|
||||
// Add an invalid shell
|
||||
let shell = dir
|
||||
.path()
|
||||
.join("enoent")
|
||||
.to_str()
|
||||
.expect("failed to construct shell path")
|
||||
.to_string();
|
||||
|
||||
shells.push(shell);
|
||||
|
||||
let result = setup_debug_console(shells.to_vec(), 0);
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(
|
||||
result.unwrap_err().to_string(),
|
||||
"Error Code: 'invalid shell'"
|
||||
);
|
||||
}
|
||||
}
|
||||
1238
src/agent/src/mount.rs
Normal file
1238
src/agent/src/mount.rs
Normal file
File diff suppressed because it is too large
Load Diff
239
src/agent/src/namespace.rs
Normal file
239
src/agent/src/namespace.rs
Normal file
@@ -0,0 +1,239 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use nix::mount::MsFlags;
|
||||
use nix::sched::{unshare, CloneFlags};
|
||||
use nix::unistd::{getpid, gettid};
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::thread;
|
||||
|
||||
use crate::mount::{BareMount, FLAGS};
|
||||
use slog::Logger;
|
||||
|
||||
//use container::Process;
|
||||
const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns";
|
||||
pub const NSTYPEIPC: &str = "ipc";
|
||||
pub const NSTYPEUTS: &str = "uts";
|
||||
pub const NSTYPEPID: &str = "pid";
|
||||
|
||||
pub fn get_current_thread_ns_path(ns_type: &str) -> String {
|
||||
format!(
|
||||
"/proc/{}/task/{}/ns/{}",
|
||||
getpid().to_string(),
|
||||
gettid().to_string(),
|
||||
ns_type
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Namespace {
|
||||
logger: Logger,
|
||||
pub path: String,
|
||||
persistent_ns_dir: String,
|
||||
ns_type: NamespaceType,
|
||||
//only used for uts namespace
|
||||
pub hostname: Option<String>,
|
||||
}
|
||||
|
||||
impl Namespace {
|
||||
pub fn new(logger: &Logger) -> Self {
|
||||
Namespace {
|
||||
logger: logger.clone(),
|
||||
path: String::from(""),
|
||||
persistent_ns_dir: String::from(PERSISTENT_NS_DIR),
|
||||
ns_type: NamespaceType::IPC,
|
||||
hostname: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_ipc(mut self) -> Self {
|
||||
self.ns_type = NamespaceType::IPC;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn as_uts(mut self, hostname: &str) -> Self {
|
||||
self.ns_type = NamespaceType::UTS;
|
||||
if hostname != "" {
|
||||
self.hostname = Some(String::from(hostname));
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_root_dir(mut self, dir: &str) -> Self {
|
||||
self.persistent_ns_dir = dir.to_string();
|
||||
self
|
||||
}
|
||||
|
||||
// setup_persistent_ns creates persistent namespace without switching to it.
|
||||
// Note, pid namespaces cannot be persisted.
|
||||
pub fn setup(mut self) -> Result<Self, String> {
|
||||
if let Err(err) = fs::create_dir_all(&self.persistent_ns_dir) {
|
||||
return Err(err.to_string());
|
||||
}
|
||||
|
||||
let ns_path = PathBuf::from(&self.persistent_ns_dir);
|
||||
let ns_type = self.ns_type.clone();
|
||||
let logger = self.logger.clone();
|
||||
|
||||
let new_ns_path = ns_path.join(&ns_type.get());
|
||||
|
||||
if let Err(err) = File::create(new_ns_path.as_path()) {
|
||||
return Err(err.to_string());
|
||||
}
|
||||
|
||||
self.path = new_ns_path.clone().into_os_string().into_string().unwrap();
|
||||
let hostname = self.hostname.clone();
|
||||
|
||||
let new_thread = thread::spawn(move || {
|
||||
let origin_ns_path = get_current_thread_ns_path(&ns_type.get());
|
||||
|
||||
let _origin_ns_fd = match File::open(Path::new(&origin_ns_path)) {
|
||||
Err(err) => return Err(err.to_string()),
|
||||
Ok(file) => file.as_raw_fd(),
|
||||
};
|
||||
|
||||
// Create a new netns on the current thread.
|
||||
let cf = ns_type.get_flags().clone();
|
||||
|
||||
if let Err(err) = unshare(cf) {
|
||||
return Err(err.to_string());
|
||||
}
|
||||
|
||||
if ns_type == NamespaceType::UTS && hostname.is_some() {
|
||||
match nix::unistd::sethostname(hostname.unwrap()) {
|
||||
Err(err) => return Err(err.to_string()),
|
||||
Ok(_) => (),
|
||||
}
|
||||
}
|
||||
// Bind mount the new namespace from the current thread onto the mount point to persist it.
|
||||
let source: &str = origin_ns_path.as_str();
|
||||
let destination: &str = new_ns_path.as_path().to_str().unwrap_or("none");
|
||||
|
||||
let mut flags = MsFlags::empty();
|
||||
|
||||
match FLAGS.get("rbind") {
|
||||
Some(x) => {
|
||||
let (_, f) = *x;
|
||||
flags = flags | f;
|
||||
}
|
||||
None => (),
|
||||
};
|
||||
|
||||
let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
|
||||
if let Err(err) = bare_mount.mount() {
|
||||
return Err(format!(
|
||||
"Failed to mount {} to {} with err:{:?}",
|
||||
source, destination, err
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
match new_thread.join() {
|
||||
Ok(t) => match t {
|
||||
Err(err) => return Err(err),
|
||||
Ok(()) => (),
|
||||
},
|
||||
Err(err) => return Err(format!("Failed to join thread {:?}!", err)),
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the Namespace type.
|
||||
#[derive(Clone, Copy, PartialEq)]
|
||||
enum NamespaceType {
|
||||
IPC,
|
||||
UTS,
|
||||
PID,
|
||||
}
|
||||
|
||||
impl NamespaceType {
|
||||
/// Get the string representation of the namespace type.
|
||||
pub fn get(&self) -> &str {
|
||||
match *self {
|
||||
Self::IPC => "ipc",
|
||||
Self::UTS => "uts",
|
||||
Self::PID => "pid",
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the associate flags with the namespace type.
|
||||
pub fn get_flags(&self) -> CloneFlags {
|
||||
match *self {
|
||||
Self::IPC => CloneFlags::CLONE_NEWIPC,
|
||||
Self::UTS => CloneFlags::CLONE_NEWUTS,
|
||||
Self::PID => CloneFlags::CLONE_NEWPID,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for NamespaceType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.get())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NamespaceType {
|
||||
fn default() -> Self {
|
||||
NamespaceType::IPC
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{Namespace, NamespaceType};
|
||||
use crate::{mount::remove_mounts, skip_if_not_root};
|
||||
use nix::sched::CloneFlags;
|
||||
use tempfile::Builder;
|
||||
|
||||
#[test]
|
||||
fn test_setup_persistent_ns() {
|
||||
skip_if_not_root!();
|
||||
// Create dummy logger and temp folder.
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
|
||||
|
||||
let ns_ipc = Namespace::new(&logger)
|
||||
.as_ipc()
|
||||
.set_root_dir(tmpdir.path().to_str().unwrap())
|
||||
.setup();
|
||||
|
||||
assert!(ns_ipc.is_ok());
|
||||
assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok());
|
||||
|
||||
let logger = slog::Logger::root(slog::Discard, o!());
|
||||
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
|
||||
|
||||
let ns_uts = Namespace::new(&logger)
|
||||
.as_uts("test_hostname")
|
||||
.set_root_dir(tmpdir.path().to_str().unwrap())
|
||||
.setup();
|
||||
|
||||
assert!(ns_uts.is_ok());
|
||||
assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_namespace_type() {
|
||||
let ipc = NamespaceType::IPC;
|
||||
assert_eq!("ipc", ipc.get());
|
||||
assert_eq!(CloneFlags::CLONE_NEWIPC, ipc.get_flags());
|
||||
|
||||
let uts = NamespaceType::UTS;
|
||||
assert_eq!("uts", uts.get());
|
||||
assert_eq!(CloneFlags::CLONE_NEWUTS, uts.get_flags());
|
||||
|
||||
let pid = NamespaceType::PID;
|
||||
assert_eq!("pid", pid.get());
|
||||
assert_eq!(CloneFlags::CLONE_NEWPID, pid.get_flags());
|
||||
}
|
||||
}
|
||||
30
src/agent/src/network.rs
Normal file
30
src/agent/src/network.rs
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use protocols::types::{Interface, Route};
|
||||
use std::collections::HashMap;
|
||||
|
||||
// Network fully describes a sandbox network with its interfaces, routes and dns
|
||||
// related information.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct Network {
|
||||
ifaces: HashMap<String, Interface>,
|
||||
routes: Vec<Route>,
|
||||
dns: Vec<String>,
|
||||
}
|
||||
|
||||
impl Network {
|
||||
pub fn new() -> Network {
|
||||
Network {
|
||||
ifaces: HashMap::new(),
|
||||
routes: Vec::new(),
|
||||
dns: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_dns(&mut self, dns: String) {
|
||||
self.dns.push(dns);
|
||||
}
|
||||
}
|
||||
42
src/agent/src/random.rs
Normal file
42
src/agent/src/random.rs
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use libc;
|
||||
use nix::errno::Errno;
|
||||
use nix::fcntl::{self, OFlag};
|
||||
use nix::sys::stat::Mode;
|
||||
use rustjail::errors::*;
|
||||
use std::fs;
|
||||
|
||||
pub const RNGDEV: &str = "/dev/random";
|
||||
pub const RNDADDTOENTCNT: libc::c_int = 0x40045201;
|
||||
pub const RNDRESEEDRNG: libc::c_int = 0x5207;
|
||||
|
||||
// Handle the differing ioctl(2) request types for different targets
|
||||
#[cfg(target_env = "musl")]
|
||||
type IoctlRequestType = libc::c_int;
|
||||
#[cfg(target_env = "gnu")]
|
||||
type IoctlRequestType = libc::c_ulong;
|
||||
|
||||
pub fn reseed_rng(data: &[u8]) -> Result<()> {
|
||||
let len = data.len() as libc::c_long;
|
||||
fs::write(RNGDEV, data)?;
|
||||
|
||||
let fd = fcntl::open(RNGDEV, OFlag::O_RDWR, Mode::from_bits_truncate(0o022))?;
|
||||
|
||||
let ret = unsafe {
|
||||
libc::ioctl(
|
||||
fd,
|
||||
RNDADDTOENTCNT as IoctlRequestType,
|
||||
&len as *const libc::c_long,
|
||||
)
|
||||
};
|
||||
let _ = Errno::result(ret).map(drop)?;
|
||||
|
||||
let ret = unsafe { libc::ioctl(fd, RNDRESEEDRNG as IoctlRequestType, 0) };
|
||||
let _ = Errno::result(ret).map(drop)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
1655
src/agent/src/rpc.rs
Normal file
1655
src/agent/src/rpc.rs
Normal file
File diff suppressed because it is too large
Load Diff
556
src/agent/src/sandbox.rs
Normal file
556
src/agent/src/sandbox.rs
Normal file
@@ -0,0 +1,556 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
//use crate::container::Container;
|
||||
use crate::linux_abi::*;
|
||||
use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS};
|
||||
use crate::namespace::Namespace;
|
||||
use crate::network::Network;
|
||||
use libc::pid_t;
|
||||
use netlink::{RtnlHandle, NETLINK_ROUTE};
|
||||
use protocols::agent::OnlineCPUMemRequest;
|
||||
use regex::Regex;
|
||||
use rustjail::cgroups;
|
||||
use rustjail::container::BaseContainer;
|
||||
use rustjail::container::LinuxContainer;
|
||||
use rustjail::errors::*;
|
||||
use rustjail::process::Process;
|
||||
use slog::Logger;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::sync::mpsc::Sender;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Sandbox {
|
||||
pub logger: Logger,
|
||||
pub id: String,
|
||||
pub hostname: String,
|
||||
pub containers: HashMap<String, LinuxContainer>,
|
||||
pub network: Network,
|
||||
pub mounts: Vec<String>,
|
||||
pub container_mounts: HashMap<String, Vec<String>>,
|
||||
pub pci_device_map: HashMap<String, String>,
|
||||
pub shared_utsns: Namespace,
|
||||
pub shared_ipcns: Namespace,
|
||||
pub storages: HashMap<String, u32>,
|
||||
pub running: bool,
|
||||
pub no_pivot_root: bool,
|
||||
pub sandbox_pid_ns: bool,
|
||||
pub sender: Option<Sender<i32>>,
|
||||
pub rtnl: Option<RtnlHandle>,
|
||||
}
|
||||
|
||||
impl Sandbox {
|
||||
pub fn new(logger: &Logger) -> Result<Self> {
|
||||
let fs_type = get_mount_fs_type("/")?;
|
||||
let logger = logger.new(o!("subsystem" => "sandbox"));
|
||||
|
||||
Ok(Sandbox {
|
||||
logger: logger.clone(),
|
||||
id: String::new(),
|
||||
hostname: String::new(),
|
||||
network: Network::new(),
|
||||
containers: HashMap::new(),
|
||||
mounts: Vec::new(),
|
||||
container_mounts: HashMap::new(),
|
||||
pci_device_map: HashMap::new(),
|
||||
shared_utsns: Namespace::new(&logger),
|
||||
shared_ipcns: Namespace::new(&logger),
|
||||
storages: HashMap::new(),
|
||||
running: false,
|
||||
no_pivot_root: fs_type.eq(TYPEROOTFS),
|
||||
sandbox_pid_ns: false,
|
||||
sender: None,
|
||||
rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
|
||||
})
|
||||
}
|
||||
|
||||
// set_sandbox_storage sets the sandbox level reference
|
||||
// counter for the sandbox storage.
|
||||
// This method also returns a boolean to let
|
||||
// callers know if the storage already existed or not.
|
||||
// It will return true if storage is new.
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn set_sandbox_storage(&mut self, path: &str) -> bool {
|
||||
match self.storages.get_mut(path) {
|
||||
None => {
|
||||
self.storages.insert(path.to_string(), 1);
|
||||
true
|
||||
}
|
||||
Some(count) => {
|
||||
*count += 1;
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unset_sandbox_storage will decrement the sandbox storage
|
||||
// reference counter. If there aren't any containers using
|
||||
// that sandbox storage, this method will remove the
|
||||
// storage reference from the sandbox and return 'true' to
|
||||
// let the caller know that they can clean up the storage
|
||||
// related directories by calling remove_sandbox_storage
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
|
||||
match self.storages.get_mut(path) {
|
||||
None => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Sandbox storage with path {} not found",
|
||||
path
|
||||
))
|
||||
.into())
|
||||
}
|
||||
Some(count) => {
|
||||
*count -= 1;
|
||||
if *count < 1 {
|
||||
self.storages.remove(path);
|
||||
return Ok(true);
|
||||
}
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remove_sandbox_storage removes the sandbox storage if no
|
||||
// containers are using that storage.
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn remove_sandbox_storage(&self, path: &str) -> Result<()> {
|
||||
let mounts = vec![path.to_string()];
|
||||
remove_mounts(&mounts)?;
|
||||
fs::remove_dir_all(path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// unset_and_remove_sandbox_storage unsets the storage from sandbox
|
||||
// and if there are no containers using this storage it will
|
||||
// remove it from the sandbox.
|
||||
//
|
||||
// It's assumed that caller is calling this method after
|
||||
// acquiring a lock on sandbox.
|
||||
pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
|
||||
match self.unset_sandbox_storage(path) {
|
||||
Ok(res) => {
|
||||
if res {
|
||||
return self.remove_sandbox_storage(path);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_running(&self) -> bool {
|
||||
self.running
|
||||
}
|
||||
|
||||
pub fn set_hostname(&mut self, hostname: String) {
|
||||
self.hostname = hostname;
|
||||
}
|
||||
|
||||
pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
|
||||
// Set up shared IPC namespace
|
||||
self.shared_ipcns = match Namespace::new(&self.logger).as_ipc().setup() {
|
||||
Ok(ns) => ns,
|
||||
Err(err) => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Failed to setup persistent IPC namespace with error: {}",
|
||||
err
|
||||
))
|
||||
.into())
|
||||
}
|
||||
};
|
||||
|
||||
// // Set up shared UTS namespace
|
||||
self.shared_utsns = match Namespace::new(&self.logger)
|
||||
.as_uts(self.hostname.as_str())
|
||||
.setup()
|
||||
{
|
||||
Ok(ns) => ns,
|
||||
Err(err) => {
|
||||
return Err(ErrorKind::ErrorCode(format!(
|
||||
"Failed to setup persistent UTS namespace with error: {}",
|
||||
err
|
||||
))
|
||||
.into())
|
||||
}
|
||||
};
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// Register a container with the sandbox, keyed by its id.
pub fn add_container(&mut self, c: LinuxContainer) {
    self.containers.insert(c.id.clone(), c);
}
|
||||
|
||||
// Look up a container by id, returning a mutable reference when found.
pub fn get_container(&mut self, id: &str) -> Option<&mut LinuxContainer> {
    self.containers.get_mut(id)
}
|
||||
|
||||
// Locate a process by pid across all containers in the sandbox.
//
// Returns a mutable reference to the matching Process, or None when
// no container owns the pid.
pub fn find_process(&mut self, pid: pid_t) -> Option<&mut Process> {
    // Single hash lookup per container: `get_mut` already answers the
    // existence question, so the original `get` + `get_mut` double
    // lookup is unnecessary.
    self.containers
        .values_mut()
        .find_map(|c| c.processes.get_mut(&pid))
}
|
||||
|
||||
// Destroy every container owned by this sandbox, stopping at (and
// returning) the first failure.
pub fn destroy(&mut self) -> Result<()> {
    for ctr in self.containers.values_mut() {
        ctr.destroy()?;
    }
    Ok(())
}
|
||||
|
||||
// Bring hot-plugged CPUs and/or memory online, then propagate the
// refreshed guest cpuset to every container's cgroup.
//
// NOTE(review): `ctr.cgroup_manager` is unwrap()ed below — this
// assumes every container was created with a cgroup manager; confirm
// against the container construction path.
pub fn online_cpu_memory(&self, req: &OnlineCPUMemRequest) -> Result<()> {
    if req.nb_cpus > 0 {
        // online cpus
        online_cpus(&self.logger, req.nb_cpus as i32)?;
    }

    if !req.cpu_only {
        // online memory
        online_memory(&self.logger)?;
    }

    // Re-read the cpuset now that new CPUs may be online.
    let cpuset = cgroups::fs::get_guest_cpuset()?;

    for (_, ctr) in self.containers.iter() {
        info!(self.logger, "updating {}", ctr.id.as_str());
        ctr.cgroup_manager
            .as_ref()
            .unwrap()
            .update_cpuset_path(cpuset.as_str())?;
    }

    Ok(())
}
|
||||
}
|
||||
|
||||
fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Result<i32> {
|
||||
let mut count = 0;
|
||||
let re = Regex::new(pattern)?;
|
||||
|
||||
for e in fs::read_dir(path)? {
|
||||
let entry = e?;
|
||||
let tmpname = entry.file_name();
|
||||
let name = tmpname.to_str().unwrap();
|
||||
let p = entry.path();
|
||||
|
||||
if re.is_match(name) {
|
||||
let file = format!("{}/{}", p.to_str().unwrap(), SYSFS_ONLINE_FILE);
|
||||
info!(logger, "{}", file.as_str());
|
||||
let c = fs::read_to_string(file.as_str())?;
|
||||
|
||||
if c.trim().contains("0") {
|
||||
fs::write(file.as_str(), "1")?;
|
||||
count += 1;
|
||||
|
||||
if num > 0 && count == num {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if num > 0 {
|
||||
return Ok(count);
|
||||
}
|
||||
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
// Online up to `num` offline CPUs under SYSFS_CPU_ONLINE_PATH and
// return how many were actually onlined.
fn online_cpus(logger: &Logger, num: i32) -> Result<i32> {
    online_resources(logger, SYSFS_CPU_ONLINE_PATH, r"cpu[0-9]+", num)
}
|
||||
|
||||
// Online every offline memory block under SYSFS_MEMORY_ONLINE_PATH
// (num = -1 means "no limit"; the count is discarded).
fn online_memory(logger: &Logger) -> Result<()> {
    online_resources(logger, SYSFS_MEMORY_ONLINE_PATH, r"memory[0-9]+", -1)?;
    Ok(())
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //use rustjail::Error;
    use super::Sandbox;
    use crate::{mount::BareMount, skip_if_not_root};
    use nix::mount::MsFlags;
    use oci::{Linux, Root, Spec};
    use rustjail::container::LinuxContainer;
    use rustjail::specconv::CreateOpts;
    use slog::Logger;
    use tempfile::Builder;

    // Helper: bind-mount `src` onto `dst` via the agent's BareMount
    // wrapper. Requires root — callers guard with skip_if_not_root!.
    fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), rustjail::errors::Error> {
        let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", &logger);
        baremount.mount()
    }

    // Verify set_sandbox_storage: first registration returns true with
    // refcount 1; registering the same path again returns false and
    // bumps the refcount to 2.
    #[test]
    fn set_sandbox_storage() {
        let logger = slog::Logger::root(slog::Discard, o!());
        let mut s = Sandbox::new(&logger).unwrap();

        let tmpdir = Builder::new().tempdir().unwrap();
        let tmpdir_path = tmpdir.path().to_str().unwrap();

        // Add a new sandbox storage
        let new_storage = s.set_sandbox_storage(&tmpdir_path);

        // Check the reference counter
        let ref_count = s.storages[tmpdir_path];
        assert_eq!(
            ref_count, 1,
            "Invalid refcount, got {} expected 1.",
            ref_count
        );
        assert_eq!(new_storage, true);

        // Use the existing sandbox storage
        let new_storage = s.set_sandbox_storage(&tmpdir_path);
        assert_eq!(new_storage, false, "Should be false as already exists.");

        // Since we are using existing storage, the reference counter
        // should be 2 by now.
        let ref_count = s.storages[tmpdir_path];
        assert_eq!(
            ref_count, 2,
            "Invalid refcount, got {} expected 2.",
            ref_count
        );
    }

    // Verify remove_sandbox_storage fails on non-mountpoints, empty
    // and non-existent paths, and only succeeds once the directory is
    // both unmounted and deletable.
    #[test]
    fn remove_sandbox_storage() {
        skip_if_not_root!();

        let logger = slog::Logger::root(slog::Discard, o!());
        let s = Sandbox::new(&logger).unwrap();

        let tmpdir = Builder::new().tempdir().unwrap();
        let tmpdir_path = tmpdir.path().to_str().unwrap();

        let srcdir = Builder::new()
            .prefix("src")
            .tempdir_in(tmpdir_path)
            .unwrap();
        let srcdir_path = srcdir.path().to_str().unwrap();

        let destdir = Builder::new()
            .prefix("dest")
            .tempdir_in(tmpdir_path)
            .unwrap();
        let destdir_path = destdir.path().to_str().unwrap();

        let emptydir = Builder::new()
            .prefix("empty")
            .tempdir_in(tmpdir_path)
            .unwrap();

        assert!(
            s.remove_sandbox_storage(&srcdir_path).is_err(),
            "Expect Err as the directory i not a mountpoint"
        );

        assert!(s.remove_sandbox_storage("").is_err());

        let invalid_dir = emptydir.path().join("invalid");

        assert!(s
            .remove_sandbox_storage(invalid_dir.to_str().unwrap())
            .is_err());

        // Now, create a double mount as this guarantees the directory cannot
        // be deleted after the first umount.
        for _i in 0..2 {
            assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
        }

        assert!(
            s.remove_sandbox_storage(destdir_path).is_err(),
            "Expect fail as deletion cannot happen due to the second mount."
        );

        // This time it should work as the previous two calls have undone the double
        // mount.
        assert!(s.remove_sandbox_storage(destdir_path).is_ok());
    }

    // Verify unset_and_remove_sandbox_storage: unknown paths error,
    // a registered bind-mounted path is removed cleanly, and a path
    // whose backing directory vanished errors out.
    #[test]
    #[allow(unused_assignments)]
    fn unset_and_remove_sandbox_storage() {
        skip_if_not_root!();

        let logger = slog::Logger::root(slog::Discard, o!());
        let mut s = Sandbox::new(&logger).unwrap();

        // FIX: This test fails, not sure why yet.
        assert!(
            s.unset_and_remove_sandbox_storage("/tmp/testEphePath")
                .is_err(),
            "Should fail because sandbox storage doesn't exist"
        );

        let tmpdir = Builder::new().tempdir().unwrap();
        let tmpdir_path = tmpdir.path().to_str().unwrap();

        let srcdir = Builder::new()
            .prefix("src")
            .tempdir_in(tmpdir_path)
            .unwrap();
        let srcdir_path = srcdir.path().to_str().unwrap();

        let destdir = Builder::new()
            .prefix("dest")
            .tempdir_in(tmpdir_path)
            .unwrap();
        let destdir_path = destdir.path().to_str().unwrap();

        assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());

        assert_eq!(s.set_sandbox_storage(&destdir_path), true);
        assert!(s.unset_and_remove_sandbox_storage(&destdir_path).is_ok());

        let mut other_dir_str = String::new();
        {
            // Create another folder in a separate scope to ensure that is
            // deleted
            let other_dir = Builder::new()
                .prefix("dir")
                .tempdir_in(tmpdir_path)
                .unwrap();
            let other_dir_path = other_dir.path().to_str().unwrap();
            other_dir_str = other_dir_path.to_string();

            assert_eq!(s.set_sandbox_storage(&other_dir_path), true);
        }

        assert!(s.unset_and_remove_sandbox_storage(&other_dir_str).is_err());
    }

    // Verify unset_sandbox_storage reference counting: it returns
    // false while references remain, true when the last reference is
    // dropped (removing the map entry), and Err once unknown.
    #[test]
    fn unset_sandbox_storage() {
        let logger = slog::Logger::root(slog::Discard, o!());
        let mut s = Sandbox::new(&logger).unwrap();

        let storage_path = "/tmp/testEphe";

        // Add a new sandbox storage
        assert_eq!(s.set_sandbox_storage(&storage_path), true);
        // Use the existing sandbox storage
        assert_eq!(
            s.set_sandbox_storage(&storage_path),
            false,
            "Expects false as the storage is not new."
        );

        assert_eq!(
            s.unset_sandbox_storage(&storage_path).unwrap(),
            false,
            "Expects false as there is still a storage."
        );

        // Reference counter should decrement to 1.
        let ref_count = s.storages[storage_path];
        assert_eq!(
            ref_count, 1,
            "Invalid refcount, got {} expected 1.",
            ref_count
        );

        // Dropping the last reference returns true (storage removed).
        assert_eq!(
            s.unset_sandbox_storage(&storage_path).unwrap(),
            true,
            "Expects true as there is still a storage."
        );

        // Since no container is using this sandbox storage anymore
        // there should not be any reference in sandbox struct
        // for the given storage
        assert!(
            !s.storages.contains_key(storage_path),
            "The storages map should not contain the key {}",
            storage_path
        );

        // If no container is using the sandbox storage, the reference
        // counter for it should not exist.
        assert!(
            s.unset_sandbox_storage(&storage_path).is_err(),
            "Expects false as the reference counter should no exist."
        );
    }

    // Build a minimal CreateOpts (root at "/", default Linux section)
    // — just enough to construct a LinuxContainer for the tests below.
    fn create_dummy_opts() -> CreateOpts {
        let mut root = Root::default();
        root.path = String::from("/");

        let linux = Linux::default();
        let mut spec = Spec::default();
        spec.root = Some(root).into();
        spec.linux = Some(linux).into();

        CreateOpts {
            cgroup_name: "".to_string(),
            use_systemd_cgroup: false,
            no_pivot_root: false,
            no_new_keyring: false,
            spec: Some(spec),
            rootless_euid: false,
            rootless_cgroup: false,
        }
    }

    // Construct a throwaway container with id "some_id" (needs root).
    fn create_linuxcontainer() -> LinuxContainer {
        LinuxContainer::new(
            "some_id",
            "/run/agent",
            create_dummy_opts(),
            &slog_scope::logger(),
        )
        .unwrap()
    }

    #[test]
    fn get_container_entry_exist() {
        skip_if_not_root!();
        let logger = slog::Logger::root(slog::Discard, o!());
        let mut s = Sandbox::new(&logger).unwrap();
        let linux_container = create_linuxcontainer();

        s.containers
            .insert("testContainerID".to_string(), linux_container);
        let cnt = s.get_container("testContainerID");
        assert!(cnt.is_some());
    }

    #[test]
    fn get_container_no_entry() {
        let logger = slog::Logger::root(slog::Discard, o!());
        let mut s = Sandbox::new(&logger).unwrap();

        let cnt = s.get_container("testContainerID");
        assert!(cnt.is_none());
    }

    #[test]
    fn add_and_get_container() {
        skip_if_not_root!();
        let logger = slog::Logger::root(slog::Discard, o!());
        let mut s = Sandbox::new(&logger).unwrap();
        let linux_container = create_linuxcontainer();

        s.add_container(linux_container);
        assert!(s.get_container("some_id").is_some());
    }
}
|
||||
59
src/agent/src/test_utils.rs
Normal file
59
src/agent/src/test_utils.rs
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright (c) 2019 Intel Corporation
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#[cfg(test)]
mod test_utils {
    // Skip the current test when running as root; some tests require
    // an unprivileged user.
    #[macro_export]
    #[allow(unused_macros)]
    macro_rules! skip_if_root {
        () => {
            if nix::unistd::Uid::effective().is_root() {
                println!("INFO: skipping {} which needs non-root", module_path!());
                return;
            }
        };
    }

    // Skip the current test unless running as root.
    #[macro_export]
    #[allow(unused_macros)]
    macro_rules! skip_if_not_root {
        () => {
            if !nix::unistd::Uid::effective().is_root() {
                println!("INFO: skipping {} which needs root", module_path!());
                return;
            }
        };
    }

    // Skip the current loop iteration (labelled by $msg) when running
    // as root. Must be used inside a loop: it expands to `continue`.
    #[macro_export]
    #[allow(unused_macros)]
    macro_rules! skip_loop_if_root {
        ($msg:expr) => {
            if nix::unistd::Uid::effective().is_root() {
                println!(
                    "INFO: skipping loop {} in {} which needs non-root",
                    $msg,
                    module_path!()
                );
                continue;
            }
        };
    }

    // Skip the current loop iteration (labelled by $msg) unless
    // running as root. Must be used inside a loop.
    #[macro_export]
    #[allow(unused_macros)]
    macro_rules! skip_loop_if_not_root {
        ($msg:expr) => {
            if !nix::unistd::Uid::effective().is_root() {
                println!(
                    "INFO: skipping loop {} in {} which needs root",
                    $msg,
                    module_path!()
                );
                continue;
            }
        };
    }
}
|
||||
147
src/agent/src/uevent.rs
Normal file
147
src/agent/src/uevent.rs
Normal file
@@ -0,0 +1,147 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
use crate::device::online_device;
|
||||
use crate::linux_abi::*;
|
||||
use crate::sandbox::Sandbox;
|
||||
use crate::GLOBAL_DEVICE_WATCHER;
|
||||
use netlink::{RtnlHandle, NETLINK_UEVENT};
|
||||
use slog::Logger;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
|
||||
// Parsed representation of a kernel uevent message. Each field is
// populated from the corresponding U_EVENT_* key by Uevent::new();
// unmatched keys leave the field at its Default (empty string).
#[derive(Debug, Default)]
struct Uevent {
    action: String,
    devpath: String,
    devname: String,
    subsystem: String,
    seqnum: String,
    interface: String,
}
|
||||
|
||||
impl Uevent {
    // Parse a raw uevent message: '\0'-separated KEY=VALUE pairs,
    // preceded by a header token which is skipped. Unknown keys are
    // ignored; missing keys leave fields empty.
    fn new(message: &str) -> Self {
        let mut msg_iter = message.split('\0');
        let mut event = Uevent::default();

        msg_iter.next(); // skip the first value
        for arg in msg_iter {
            let key_val: Vec<&str> = arg.splitn(2, '=').collect();
            if key_val.len() == 2 {
                match key_val[0] {
                    U_EVENT_ACTION => event.action = String::from(key_val[1]),
                    U_EVENT_DEV_NAME => event.devname = String::from(key_val[1]),
                    U_EVENT_SUB_SYSTEM => event.subsystem = String::from(key_val[1]),
                    U_EVENT_DEV_PATH => event.devpath = String::from(key_val[1]),
                    U_EVENT_SEQ_NUM => event.seqnum = String::from(key_val[1]),
                    U_EVENT_INTERFACE => event.interface = String::from(key_val[1]),
                    _ => (),
                }
            }
        }

        event
    }

    // Check whether this is a block device hot-add event.
    fn is_block_add_event(&self) -> bool {
        self.action == U_EVENT_ACTION_ADD
            && self.subsystem == "block"
            && self.devpath.starts_with(PCI_ROOT_BUS_PATH)
            && self.devname != ""
    }

    // Record the new block device in the sandbox's PCI device map and
    // notify (then drop) any watcher waiting for that device.
    fn handle_block_add_event(&self, sandbox: &Arc<Mutex<Sandbox>>) {
        // Keep the same lock order as device::get_device_name(), otherwise it may cause deadlock.
        let mut w = GLOBAL_DEVICE_WATCHER.lock().unwrap();
        let mut sb = sandbox.lock().unwrap();

        // Add the device node name to the pci device map.
        sb.pci_device_map
            .insert(self.devpath.clone(), self.devname.clone());

        // Notify watchers that are interested in the udev event.
        // Close the channel after watcher has been notified.
        let devpath = self.devpath.clone();
        let empties: Vec<_> = w
            .iter()
            .filter(|(dev_addr, _)| {
                let pci_p = format!("{}/{}", PCI_ROOT_BUS_PATH, *dev_addr);

                // blk block device
                devpath.starts_with(pci_p.as_str()) ||
                // scsi block device
                {
                    (*dev_addr).ends_with(SCSI_BLOCK_SUFFIX) &&
                    devpath.contains(*dev_addr)
                }
            })
            .map(|(k, sender)| {
                // Send errors are deliberately ignored: the watcher
                // may already have gone away.
                let devname = self.devname.clone();
                let _ = sender.send(devname);
                k.clone()
            })
            .collect();

        // Remove notified nodes from the watcher map.
        for empty in empties {
            w.remove(&empty);
        }
    }

    // Dispatch the event: block hot-add, memory hot-add (onlined via
    // sysfs), or log-and-ignore.
    fn process(&self, logger: &Logger, sandbox: &Arc<Mutex<Sandbox>>) {
        if self.is_block_add_event() {
            return self.handle_block_add_event(sandbox);
        } else if self.action == U_EVENT_ACTION_ADD {
            let online_path = format!("{}/{}/online", SYSFS_DIR, &self.devpath);
            // It's a memory hot-add event.
            if online_path.starts_with(SYSFS_MEMORY_ONLINE_PATH) {
                if let Err(e) = online_device(online_path.as_ref()) {
                    error!(
                        *logger,
                        "failed to online device";
                        "device" => &self.devpath,
                        "error" => format!("{}", e),
                    );
                }
                return;
            }
        }
        debug!(*logger, "ignoring event"; "uevent" => format!("{:?}", self));
    }
}
|
||||
|
||||
// Spawn a background thread that listens on a NETLINK_UEVENT socket
// and feeds every received kernel uevent through Uevent::process().
//
// NOTE(review): the unwrap() below aborts the spawned thread if the
// netlink socket cannot be created — confirm this is the intended
// failure mode rather than a recoverable error.
pub fn watch_uevents(sandbox: Arc<Mutex<Sandbox>>) {
    thread::spawn(move || {
        let rtnl = RtnlHandle::new(NETLINK_UEVENT, 1).unwrap();
        // Derive a per-subsystem logger from the sandbox logger.
        let logger = sandbox
            .lock()
            .unwrap()
            .logger
            .new(o!("subsystem" => "uevent"));

        loop {
            match rtnl.recv_message() {
                Err(e) => {
                    error!(logger, "receive uevent message failed"; "error" => format!("{}", e))
                }
                Ok(data) => {
                    // Messages must be valid UTF-8 before parsing.
                    let text = String::from_utf8(data);
                    match text {
                        Err(e) => {
                            error!(logger, "failed to convert bytes to text"; "error" => format!("{}", e))
                        }
                        Ok(text) => {
                            let event = Uevent::new(&text);
                            info!(logger, "got uevent message"; "event" => format!("{:?}", event));
                            event.process(&logger, &sandbox);
                        }
                    }
                }
            }
        }
    });
}
|
||||
7
src/agent/src/version.rs
Normal file
7
src/agent/src/version.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
// Copyright (c) 2019 Ant Financial
|
||||
//
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
// Agent release version string.
pub const AGENT_VERSION: &str = "1.4.5";
// Version of the agent's API.
pub const API_VERSION: &str = "0.0.1";
|
||||
30
src/runtime/.ci/go-no-os-exit.sh
Executable file
30
src/runtime/.ci/go-no-os-exit.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Check there are no os.Exit() calls creeping into the code
# We don't use that exit path in the Kata codebase.

# Allow the path to check to be over-ridden.
# Default to the current directory.
go_packages=${1:-.}

echo "Checking for no os.Exit() calls for package [${go_packages}]"

# $(...) instead of deprecated backticks. The expansion of
# $candidates below is intentionally unquoted: 'go list' emits one
# glob pattern per package and we rely on word-splitting + globbing.
candidates=$(go list -f '{{.Dir}}/*.go' "$go_packages")
files=""
for f in $candidates; do
	filename=$(basename "$f")
	# skip all go test files
	[[ $filename == *_test.go ]] && continue
	# skip exit.go, the only file we should call os.Exit() from.
	[[ $filename == "exit.go" ]] && continue
	files="$f $files"
done

[ -z "$files" ] && echo "No files to check, skipping" && exit 0

# egrep is deprecated; grep -E is the portable spelling.
if grep -E -n '\<os\.Exit\>' $files; then
	echo "Direct calls to os.Exit() are forbidden, please use exit() so atexit() works"
	exit 1
fi
|
||||
14
src/runtime/.ci/go-test.sh
Executable file
14
src/runtime/.ci/go-test.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e

# Pull in the shared CI helpers relative to this script's location.
cidir=$(dirname "$0")
source "${cidir}/lib.sh"
export CI_JOB="${CI_JOB:-default}"

# Run the Go unit tests for every CI job except PODMAN, which has its
# own test flow.
case "${CI_JOB}" in
	PODMAN) ;;
	*) run_go_test ;;
esac
|
||||
71
src/runtime/.ci/install-yq.sh
Executable file
71
src/runtime/.ci/install-yq.sh
Executable file
@@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
# If we fail for any reason a message will be displayed
# Usage: die <message...>  — prints "ERROR: <message>" on stderr and
# terminates the script with status 1.
die() {
	printf 'ERROR: %s\n' "$*" >&2
	exit 1
}
|
||||
|
||||
# Install the yq yaml query package from the mikefarah github repo
# Install via binary download, as we may not have golang installed at this point
function install_yq() {
	GOPATH=${GOPATH:-${HOME}/go}
	local yq_path="${GOPATH}/bin/yq"
	local yq_pkg="github.com/mikefarah/yq"
	# Nothing to do if a yq binary is already installed.
	[ -x "${GOPATH}/bin/yq" ] && return

	read -r -a sysInfo <<< "$(uname -sm)"

	case "${sysInfo[0]}" in
	"Linux" | "Darwin")
		goos="${sysInfo[0],}"
		;;
	*)
		# Fix: use a real default pattern. The previous '"*")'
		# only matched a literal asterisk, so unsupported OSes
		# fell through silently with goos unset.
		die "OS ${sysInfo[0]} not supported"
		;;
	esac

	case "${sysInfo[1]}" in
	"aarch64")
		goarch=arm64
		;;
	"ppc64le")
		goarch=ppc64le
		;;
	"x86_64")
		goarch=amd64
		;;
	"s390x")
		goarch=s390x
		;;
	*)
		# Fix: same default-pattern bug as the OS case above.
		die "Arch ${sysInfo[1]} not supported"
		;;
	esac

	mkdir -p "${GOPATH}/bin"

	# Check curl
	if ! command -v "curl" >/dev/null; then
		die "Please install curl"
	fi

	local yq_version=3.1.0

	local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
	# Fix: fail directly on curl instead of the fragile
	# 'cmd; [ $? -ne 0 ] && die' pattern, and quote expansions.
	curl -o "${yq_path}" -LSsf "${yq_url}" || die "Download ${yq_url} failed"
	chmod +x "${yq_path}"

	if ! command -v "${yq_path}" >/dev/null; then
		die "Cannot not get ${yq_path} executable"
	fi
}
|
||||
|
||||
# Script entry point: ensure the yq binary is installed.
install_yq
|
||||
16
src/runtime/.ci/install_go.sh
Executable file
16
src/runtime/.ci/install_go.sh
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e

# Load the shared CI helpers from this script's directory.
cidir=$(dirname "$0")
source "${cidir}/lib.sh"

# Make sure the tests repository is available, then delegate to its
# canonical Go installer (-p: use system paths, -f: force —
# presumably; verify against the tests repo script).
clone_tests_repo

pushd "${tests_repo_dir}"
.ci/install_go.sh -p -f
popd
|
||||
34
src/runtime/.ci/lib.sh
Normal file
34
src/runtime/.ci/lib.sh
Normal file
@@ -0,0 +1,34 @@
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Location of the kata-containers tests repository (overridable via
# the environment) and its expected checkout path under GOPATH.
export tests_repo="${tests_repo:-github.com/kata-containers/tests}"
export tests_repo_dir="$GOPATH/src/$tests_repo"
|
||||
|
||||
# Clone (or update) the tests repository under $tests_repo_dir.
# Honors KATA_CI_NO_NETWORK only when a clone already exists, and
# checks out $TRAVIS_BRANCH when running under Travis.
clone_tests_repo()
{
	# KATA_CI_NO_NETWORK is (has to be) ignored if there is
	# no existing clone.
	# Fix: split the deprecated, ambiguous '[ ... -a ... ]' into
	# two quoted tests joined with &&.
	if [ -d "$tests_repo_dir" ] && [ -n "$KATA_CI_NO_NETWORK" ]
	then
		return
	fi

	# Best-effort: 'go get' may fail on an existing checkout.
	go get -d -u "$tests_repo" || true
	if [ -n "${TRAVIS_BRANCH:-}" ]; then
		( cd "${tests_repo_dir}" && git checkout "${TRAVIS_BRANCH}" )
	fi
}
|
||||
|
||||
# Run the shared static-checks suite from the tests repository against
# the runtime package.
run_static_checks()
{
	clone_tests_repo
	bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/runtime"
}
|
||||
|
||||
# Run the shared Go test driver from the tests repository.
run_go_test()
{
	clone_tests_repo
	bash "$tests_repo_dir/.ci/go-test.sh"
}
|
||||
14
src/runtime/.ci/run.sh
Executable file
14
src/runtime/.ci/run.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e

# Load the shared CI helpers from this script's directory.
cidir=$(dirname "$0")
source "${cidir}/lib.sh"

# Delegate to the canonical CI run script in the tests repository.
pushd "${tests_repo_dir}"
.ci/run.sh
popd
|
||||
25
src/runtime/.ci/setup.sh
Executable file
25
src/runtime/.ci/setup.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

set -e

# Load the shared CI helpers and run the common CI setup from the
# tests repository, then (for non-PODMAN jobs) set up and install
# virtcontainers.
cidir=$(dirname "$0")
source "${cidir}/lib.sh"
export CI_JOB="${CI_JOB:-default}"

clone_tests_repo

pushd "${tests_repo_dir}"
.ci/setup.sh
popd

if [ "${CI_JOB}" != "PODMAN" ]; then
	echo "Setup virtcontainers environment"
	# Fix: quote PATH=$PATH so a PATH containing spaces survives
	# word-splitting on the sudo command line.
	chronic sudo -E "PATH=$PATH" bash -c "${cidir}/../virtcontainers/utils/virtcontainers-setup.sh"

	echo "Install virtcontainers"
	make -C "${cidir}/../virtcontainers" && chronic sudo make -C "${cidir}/../virtcontainers" install
fi
|
||||
16
src/runtime/.ci/static-checks.sh
Executable file
16
src/runtime/.ci/static-checks.sh
Executable file
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2018 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -e
|
||||
|
||||
cidir=$(dirname "$0")
|
||||
source "${cidir}/lib.sh"
|
||||
|
||||
# Build kata-runtime before running static checks
|
||||
make -C "${cidir}/../"
|
||||
|
||||
# Run static checks
|
||||
run_static_checks
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user