Mirror of https://github.com/kubeshark/kubeshark.git, synced 2026-02-22 22:53:04 +00:00
Compare commits
673 Commits
Commit range: 4817ed2a80 … 447fcf1b31
.dockerignore (deleted)

```diff
@@ -1,16 +0,0 @@
-# Files
-.dockerignore
-.editorconfig
-.gitignore
-Dockerfile
-Makefile
-LICENSE
-**/*.md
-**/*_test.go
-*.out
-
-# Folders
-.git/
-.github/
-build/
-**/node_modules/
```
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 3 changes)

```diff
@@ -10,6 +10,9 @@ assignees: ''
 **Describe the bug**
 A clear and concise description of what the bug is.
 
+**Provide more information**
+Running on EKS, AKS, GKE, Minikube, Rancher, OpenShift? Number of Nodes? CNI?
+
 **To Reproduce**
 Steps to reproduce the behavior:
 1. Run `kubeshark <command> ...`
```
.github/static/kubeshark.rb.tmpl (vendored, new file, 46 lines)

```diff
@@ -0,0 +1,46 @@
+# typed: false
+# frozen_string_literal: true
+
+class Kubeshark < Formula
+  desc ""
+  homepage "https://github.com/kubeshark/kubeshark"
+  version "${CLEAN_VERSION}"
+
+  on_macos do
+    if Hardware::CPU.arm?
+      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_darwin_arm64"
+      sha256 "${DARWIN_ARM64_SHA256}"
+
+      def install
+        bin.install "kubeshark_darwin_arm64" => "kubeshark"
+      end
+    end
+    if Hardware::CPU.intel?
+      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_darwin_amd64"
+      sha256 "${DARWIN_AMD64_SHA256}"
+
+      def install
+        bin.install "kubeshark_darwin_amd64" => "kubeshark"
+      end
+    end
+  end
+
+  on_linux do
+    if Hardware::CPU.intel?
+      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_linux_amd64"
+      sha256 "${LINUX_AMD64_SHA256}"
+
+      def install
+        bin.install "kubeshark_linux_amd64" => "kubeshark"
+      end
+    end
+    if Hardware::CPU.arm? && Hardware::CPU.is_64_bit?
+      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_linux_arm64"
+      sha256 "${LINUX_ARM64_SHA256}"
+
+      def install
+        bin.install "kubeshark_linux_arm64" => "kubeshark"
+      end
+    end
+  end
+end
```
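The formula body is pure template: each `${...}` token is meant to be substituted before the file is published. A minimal sketch of how that substitution could be done with `envsubst`; the version values and output path are illustrative, not taken from the actual release scripts:

```shell
# Hypothetical rendering of the formula template (values are examples only).
export CLEAN_VERSION="52.1.0"   # assumed: version without the leading "v"
export FULL_VERSION="v52.1.0"   # assumed: the pushed git tag
export DARWIN_ARM64_SHA256="$(shasum -a 256 bin/kubeshark_darwin_arm64 | cut -d' ' -f1)"
export DARWIN_AMD64_SHA256="$(shasum -a 256 bin/kubeshark_darwin_amd64 | cut -d' ' -f1)"
export LINUX_AMD64_SHA256="$(shasum -a 256 bin/kubeshark_linux_amd64 | cut -d' ' -f1)"
export LINUX_ARM64_SHA256="$(shasum -a 256 bin/kubeshark_linux_arm64 | cut -d' ' -f1)"

# envsubst replaces each ${VAR} with the matching environment variable.
envsubst < .github/static/kubeshark.rb.tmpl > kubeshark.rb
```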
.github/workflows/helm.yml (vendored, new file, 36 lines)

```diff
@@ -0,0 +1,36 @@
+on:
+  push:
+    # Sequence of patterns matched against refs/tags
+    tags:
+      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
+
+name: Release Helm Charts
+
+jobs:
+  release:
+    # depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions
+    # see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Install Helm
+        uses: azure/setup-helm@v3
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.5.0
+        with:
+          charts_dir: .
+          charts_repo_url: https://kubeshark.github.io/kubeshark
+        env:
+          CR_TOKEN: "${{ secrets.HELM_TOKEN }}"
```
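chart-releaser publishes the packaged charts to the GitHub Pages repo named in `charts_repo_url`, so consuming the result should be the standard two-step; the release name and namespace below are illustrative:

```shell
# Add the repository the workflow publishes to, then install the chart.
helm repo add kubeshark https://kubeshark.github.io/kubeshark
helm repo update
helm install kubeshark kubeshark/kubeshark -n default
```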
.github/workflows/linter.yml (vendored, 10 changes)

```diff
@@ -16,16 +18,18 @@ jobs:
     name: Golint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - name: Checkout
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2
 
-      - uses: actions/setup-go@v2
+      - name: Set up Go
+        uses: actions/setup-go@v4
         with:
-          go-version: '^1.17'
+          go-version-file: 'go.mod'
 
       - name: Go lint
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3
         with:
           version: latest
           args: --timeout=10m
```
.github/workflows/release.yml (vendored, 58 changes)

```diff
@@ -1,7 +1,8 @@
 on:
   push:
+    # Sequence of patterns matched against refs/tags
     tags:
-      - '*'
+      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
 
 name: Release
 
@@ -13,22 +14,26 @@ jobs:
   release:
     name: Build and publish a new release
     runs-on: ubuntu-latest
+    outputs:
+      version: ${{ steps.version.outputs.tag }}
     steps:
-      - name: Set up Go 1.17
-        uses: actions/setup-go@v2
-        with:
-          go-version: '1.17'
-
       - name: Check out the repo
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version-file: 'go.mod'
 
       - name: Version
         id: version
         shell: bash
         run: |
-          echo "##[set-output name=tag;]$(echo ${GITHUB_REF#refs/*/})"
-          echo "##[set-output name=build_timestamp;]$(echo $(date +%s))"
-          echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+          {
+            echo "tag=${GITHUB_REF#refs/*/}"
+            echo "build_timestamp=$(date +%s)"
+            echo "branch=${GITHUB_REF#refs/heads/}"
+          } >> "$GITHUB_OUTPUT"
 
       - name: Build
         run: make build-all VER='${{ steps.version.outputs.tag }}' BUILD_TIMESTAMP='${{ steps.version.outputs.build_timestamp }}'
@@ -44,32 +49,19 @@ jobs:
           token: ${{ secrets.GITHUB_TOKEN }}
           artifacts: "bin/*"
           tag: ${{ steps.version.outputs.tag }}
-          prerelease: true
+          prerelease: false
+          bodyFile: 'bin/README.md'
 
-  brew-tap:
-    name: Create Homebrew formulae
-    runs-on: ubuntu-latest
+  brew:
+    name: Publish a new Homebrew formulae
+    needs: [release]
+    runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Fetch all tags
-        run: git fetch --force --tags
-
-      - name: Set up Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.17
-
-      - name: Run GoReleaser
-        uses: goreleaser/goreleaser-action@v2
+      - name: Bump core homebrew formula
+        uses: mislav/bump-homebrew-formula-action@v3
         with:
-          distribution: goreleaser
-          version: ${{ env.GITHUB_REF_NAME }}
-          args: release --rm-dist
+          # A PR will be sent to github.com/Homebrew/homebrew-core to update this formula:
+          formula-name: kubeshark
+          push-to: kubeshark/homebrew-core
         env:
-          GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
+          COMMITTER_TOKEN: ${{ secrets.COMMITTER_TOKEN }}
```
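The rewritten Version step is the stock migration from the deprecated `##[set-output]` syntax to the `$GITHUB_OUTPUT` file. The pattern in isolation (the step id and consumer are illustrative):

```shell
# Deprecated: outputs were parsed from stdout.
#   echo "##[set-output name=tag;]${GITHUB_REF#refs/*/}"

# Current: append key=value lines to the file $GITHUB_OUTPUT points at.
echo "tag=${GITHUB_REF#refs/*/}" >> "$GITHUB_OUTPUT"

# A later step (assuming the step's id is "version") would then read:
#   ${{ steps.version.outputs.tag }}
```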
.github/workflows/test.yml (vendored, 10 changes)

```diff
@@ -15,17 +15,17 @@ jobs:
     timeout-minutes: 20
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2
 
-      - name: Set up Go 1.17
-        uses: actions/setup-go@v2
+      - name: Set up Go
+        uses: actions/setup-go@v4
         with:
-          go-version: '^1.17'
+          go-version-file: 'go.mod'
 
       - name: Test
         run: make test
 
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
+        uses: codecov/codecov-action@v3
```
.gitignore (vendored, 7 changes)

```diff
@@ -56,4 +56,11 @@ cypress.env.json
 # Object files
 *.o
 
 # Binaries
+bin
+
+# Scripts
+scripts/
+
+# CWD config YAML
+kubeshark.yaml
```
CONTRIBUTING.md

```diff
@@ -1,4 +1,4 @@
-![kubeshark-logo]
+![kubeshark-logo]
 
 # Contributing to Kubeshark
 
```
Makefile (167 changes)

```diff
@@ -9,21 +9,29 @@ COMMIT_HASH=$(shell git rev-parse HEAD)
 GIT_BRANCH=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
 GIT_VERSION=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
 BUILD_TIMESTAMP=$(shell date +%s)
-export VER?=0.0
+export VER?=0.0.0
 
 help: ## Print this help message.
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
 
-build-debug: ## Build for debuging.
+build-debug: ## Build for debugging.
+	export CGO_ENABLED=1
	export GCLFAGS='-gcflags="all=-N -l"'
	${MAKE} build-base
 
 build: ## Build.
+	export CGO_ENABLED=0
	export LDFLAGS_EXT='-extldflags=-static -s -w'
	${MAKE} build-base
 
+build-race: ## Build with -race flag.
+	export CGO_ENABLED=1
+	export GCLFAGS='-race'
+	export LDFLAGS_EXT='-extldflags=-static -s -w'
+	${MAKE} build-base
+
 build-base: ## Build binary (select the platform via GOOS / GOARCH env variables).
-	CGO_ENABLED=0 go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
+	go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
	-X 'github.com/kubeshark/kubeshark/misc.GitCommitHash=$(COMMIT_HASH)' \
	-X 'github.com/kubeshark/kubeshark/misc.Branch=$(GIT_BRANCH)' \
	-X 'github.com/kubeshark/kubeshark/misc.BuildTimestamp=$(BUILD_TIMESTAMP)' \
@@ -32,15 +40,30 @@ build-base: ## Build binary (select the platform via GOOS / GOARCH env variables
	-o bin/kubeshark_$(SUFFIX) kubeshark.go && \
	cd bin && shasum -a 256 kubeshark_${SUFFIX} > kubeshark_${SUFFIX}.sha256
 
+build-brew: ## Build binary for brew/core CI
+	go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
+	-X 'github.com/kubeshark/kubeshark/misc.GitCommitHash=$(COMMIT_HASH)' \
+	-X 'github.com/kubeshark/kubeshark/misc.Branch=$(GIT_BRANCH)' \
+	-X 'github.com/kubeshark/kubeshark/misc.BuildTimestamp=$(BUILD_TIMESTAMP)' \
+	-X 'github.com/kubeshark/kubeshark/misc.Platform=$(SUFFIX)' \
+	-X 'github.com/kubeshark/kubeshark/misc.Ver=$(VER)'" \
+	-o kubeshark kubeshark.go
+
+build-windows-amd64:
+	$(MAKE) build GOOS=windows GOARCH=amd64 && \
+	mv ./bin/kubeshark_windows_amd64 ./bin/kubeshark.exe && \
+	rm bin/kubeshark_windows_amd64.sha256 && \
+	cd bin && shasum -a 256 kubeshark.exe > kubeshark.exe.sha256
+
 build-all: ## Build for all supported platforms.
	export CGO_ENABLED=0
	echo "Compiling for every OS and Platform" && \
	mkdir -p bin && sed s/_VER_/$(VER)/g RELEASE.md.TEMPLATE > bin/README.md && \
	$(MAKE) build GOOS=linux GOARCH=amd64 && \
	$(MAKE) build GOOS=linux GOARCH=arm64 && \
	$(MAKE) build GOOS=darwin GOARCH=amd64 && \
	$(MAKE) build GOOS=darwin GOARCH=arm64 && \
-	$(MAKE) build GOOS=windows GOARCH=amd64 && \
-	mv ./bin/kubeshark_windows_amd64 ./bin/kubeshark.exe && \
+	$(MAKE) build-windows-amd64 && \
	echo "---------" && \
	find ./bin -ls
@@ -53,3 +76,137 @@ test: ## Run cli tests.
 
 lint: ## Lint the source code.
	golangci-lint run
+
+kubectl-view-all-resources: ## This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
+	./kubectl.sh view-all-resources
+
+kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
+	./kubectl.sh view-kubeshark-resources
+
+generate-helm-values: ## Generate the Helm values from config.yaml
+	mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old; bin/kubeshark__ config>helm-chart/values.yaml;mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml
+	sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml
+
+generate-manifests: ## Generate the manifests from the Helm chart using default configuration
+	helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml
+
+logs-sniffer:
+	export LOGS_POD_PREFIX=kubeshark-worker-
+	export LOGS_CONTAINER='-c sniffer'
+	export LOGS_FOLLOW=
+	${MAKE} logs
+
+logs-sniffer-follow:
+	export LOGS_POD_PREFIX=kubeshark-worker-
+	export LOGS_CONTAINER='-c sniffer'
+	export LOGS_FOLLOW=--follow
+	${MAKE} logs
+
+logs-tracer:
+	export LOGS_POD_PREFIX=kubeshark-worker-
+	export LOGS_CONTAINER='-c tracer'
+	export LOGS_FOLLOW=
+	${MAKE} logs
+
+logs-tracer-follow:
+	export LOGS_POD_PREFIX=kubeshark-worker-
+	export LOGS_CONTAINER='-c tracer'
+	export LOGS_FOLLOW=--follow
+	${MAKE} logs
+
+logs-worker: logs-sniffer
+
+logs-worker-follow: logs-sniffer-follow
+
+logs-hub:
+	export LOGS_POD_PREFIX=kubeshark-hub
+	export LOGS_FOLLOW=
+	${MAKE} logs
+
+logs-hub-follow:
+	export LOGS_POD_PREFIX=kubeshark-hub
+	export LOGS_FOLLOW=--follow
+	${MAKE} logs
+
+logs-front:
+	export LOGS_POD_PREFIX=kubeshark-front
+	export LOGS_FOLLOW=
+	${MAKE} logs
+
+logs-front-follow:
+	export LOGS_POD_PREFIX=kubeshark-front
+	export LOGS_FOLLOW=--follow
+	${MAKE} logs
+
+logs:
+	kubectl logs $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_CONTAINER) $(LOGS_FOLLOW)
+
+ssh-node:
+	kubectl ssh node $$(kubectl get nodes | awk 'END {print $$1}')
+
+exec-worker:
+	export EXEC_POD_PREFIX=kubeshark-worker-
+	${MAKE} exec
+
+exec-hub:
+	export EXEC_POD_PREFIX=kubeshark-hub
+	${MAKE} exec
+
+exec-front:
+	export EXEC_POD_PREFIX=kubeshark-front
+	${MAKE} exec
+
+exec:
+	kubectl exec --stdin --tty $$(kubectl get pods | awk '$$1 ~ /^$(EXEC_POD_PREFIX)/' | awk 'END {print $$1}') -- /bin/sh
+
+helm-install:
+	cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) && cd ..
+
+helm-install-debug:
+	cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.debug=true && cd ..
+
+helm-install-profile:
+	cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.pprof.enabled=true && cd ..
+
+helm-uninstall:
+	helm uninstall kubeshark
+
+proxy:
+	kubeshark proxy
+
+port-forward:
+	kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(POD_PREFIX)/' | awk 'END {print $$1}') $(SRC_PORT):$(DST_PORT)
+
+release:
+	@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
+	@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
+	@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
+	@cd ../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
+	@cd ../kubeshark
+
+soft-release:
+	@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
+	@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
+# 	@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+# 	@cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
+# 	@cd ../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
+# 	@cd ../kubeshark
+
+branch:
+	@cd ../worker && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
+	@cd ../hub && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
+	@cd ../front && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
+
+switch-to-branch:
+	@cd ../worker && git checkout $(name)
+	@cd ../hub && git checkout $(name)
+	@cd ../front && git checkout $(name)
```
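All of the `logs-*` targets are thin wrappers that set a pod-name prefix and delegate to the generic `logs` target, which picks the last matching pod. Expanded by hand, `make logs-hub-follow` amounts to roughly the following (actual pod names depend on the cluster):

```shell
# Roughly what `make logs-hub-follow` executes after variable expansion.
LOGS_POD_PREFIX=kubeshark-hub
pod=$(kubectl get pods | awk -v p="$LOGS_POD_PREFIX" '$1 ~ "^"p' | awk 'END {print $1}')
kubectl logs "$pod" --follow
```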
README.md (85 changes)

````diff
@@ -1,18 +1,15 @@
 <p align="center">
-  <img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic viewer for Kubernetes." height="128px"/>
+  <img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic analyzer for Kubernetes." height="128px"/>
 </p>
 
 <p align="center">
   <a href="https://github.com/kubeshark/kubeshark/blob/main/LICENSE">
     <img alt="GitHub License" src="https://img.shields.io/github/license/kubeshark/kubeshark?logo=GitHub&style=flat-square">
   </a>
   <a href="https://github.com/kubeshark/kubeshark/releases/latest">
     <img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
   </a>
-  <a href="https://hub.docker.com/r/kubeshark/kubeshark">
-    <img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/kubeshark?color=%23099cec&logo=Docker&style=flat-square">
+  <a href="https://hub.docker.com/r/kubeshark/worker">
+    <img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square">
   </a>
-  <a href="https://hub.docker.com/r/kubeshark/kubeshark">
+  <a href="https://hub.docker.com/r/kubeshark/worker">
     <img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
   </a>
   <a href="https://discord.gg/WkvRGMUcx7">
@@ -23,87 +20,61 @@
   </a>
 </p>
 
-<p>
-  Mizu (by UP9) is now Kubeshark, read more about it <a href="https://www.kubeshark.co/mizu-is-now-kubeshark">here</a>.
+<p align="center">
+  <b>
+    Want to see Kubeshark in action, right now? Visit this
+    <a href="https://demo.kubeshark.co/">live demo deployment</a> of Kubeshark.
+  </b>
 </p>
 
-Kubeshark, the API Traffic Viewer for kubernetes, provides deep visibility and monitoring of all API traffic and payloads going in, out and across containers and pods inside a Kubernetes cluster.
-
-Think of a combination of Chrome Dev Tools, TCPDump and Wireshark, re-invented for Kubernetes.
+**Kubeshark** is an API Traffic Analyzer for [**Kubernetes**](https://kubernetes.io/) providing real-time, protocol-level visibility into Kubernetes’ internal network, capturing and monitoring all traffic and payloads going in, out and across containers, pods, nodes and clusters.
 
 ![Simple UI]
 
-## Download
+Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) re-invented for Kubernetes
 
-Kubeshark uses a ~45MB pre-compiled executable binary to communicate with the Kubernetes API. We recommend downloading the `kubeshark` CLI by using one of these options:
+## Getting Started
 
-- Choose the right binary, download and use directly from [the latest stable release](https://github.com/kubeshark/kubeshark/releases/latest).
-
-- Use the shell script below :point_down: to automatically download the right binary for your operating system and CPU architecture:
-
-```shell
-sh <(curl -Ls https://kubeshark.co/install)
-```
-
-- Compile it from source using `make` command then use `./bin/kubeshark__` executable.
-
-## Run
-
-Use the `kubeshark` CLI to capture and view streaming API traffic in real time.
+Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) and run following one of these examples:
 
 ```shell
 kubeshark tap
 ```
 
-### Troubleshooting Installation
-If something doesn't work or simply to play it safe prior to installing;
+```shell
+kubeshark tap -n sock-shop "(catalo*|front-end*)"
+```
 
-> Make sure you have access to https://hub.docker.com/
+Running any of the :point_up: above commands will open the [Web UI](https://docs.kubeshark.co/en/ui) in your browser which streams the traffic in your Kubernetes cluster in real-time.
 
-> Make sure `kubeshark` executable in your `PATH`.
+### Homebrew
 
-### Select Pods
-
-#### Monitoring a Specific Pod:
+[Homebrew](https://brew.sh/) :beer: users install Kubeshark CLI with:
 
 ```shell
-kubeshark tap catalogue-b87b45784-sxc8q
+brew install kubeshark
 ```
 
-#### Monitoring a Set of Pods Using Regex:
+### Helm
+
+Add the helm repository and install the chart:
 
 ```shell
-kubeshark tap "(catalo*|front-end*)"
+helm repo add kubeshark https://helm.kubeshark.co
+helm install kubeshark kubeshark/kubeshark
 ```
 
-### Specify the Namespace
+## Building From Source
 
-By default, Kubeshark targets the `default` namespace.
-To specify a different namespace:
-
-```
-kubeshark tap -n sock-shop
-```
-
-### Specify All Namespaces
-
-The default strategy of Kubeshark waits for the new pods
-to be created. To simply tap all existing namespaces run:
-
-```
-kubeshark tap -A
-```
+Clone this repository and run `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark__`.
 
 ## Documentation
 
-Visit our documentation website: [docs.kubeshark.co](https://docs.kubeshark.co)
-
-The documentation resources are open-source and can be found on GitHub: [kubeshark/docs](https://github.com/kubeshark/docs)
+To learn more, read the [documentation](https://docs.kubeshark.co).
 
 ## Contributing
 
-We ❤️ pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide.
+We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide.
 
 ## Code of Conduct
 
````
RELEASE.md.TEMPLATE

```diff
@@ -1,5 +1,5 @@
 # Kubeshark release _VER_
-Kubeshark CHANGELOG is now part of [Kubeshark wiki](https://github.com/kubeshark/kubeshark/wiki/CHANGELOG)
+Release notes coming soon ..
 
 ## Download Kubeshark for your platform
 
```
cmd/check.go (deleted, 21 lines)

```diff
@@ -1,21 +0,0 @@
-package cmd
-
-import (
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/spf13/cobra"
-)
-
-var checkCmd = &cobra.Command{
-	Use:   "check",
-	Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		runCheck()
-		return nil
-	},
-}
-
-func init() {
-	rootCmd.AddCommand(checkCmd)
-}
```
Deleted file (package check, 123 lines):

```diff
@@ -1,123 +0,0 @@
-package check
-
-import (
-	"context"
-	"fmt"
-	"regexp"
-	"time"
-
-	"github.com/kubeshark/kubeshark/docker"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "image-pull-in-cluster").Msg("Checking:")
-
-	namespace := "default"
-	podName := fmt.Sprintf("%s-test", misc.Program)
-
-	defer func() {
-		if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil {
-			log.Error().
-				Str("namespace", namespace).
-				Str("pod", podName).
-				Err(err).
-				Msg("While removing test pod!")
-		}
-	}()
-
-	if err := createImagePullInClusterPod(ctx, kubernetesProvider, namespace, podName); err != nil {
-		log.Error().
-			Str("namespace", namespace).
-			Str("pod", podName).
-			Err(err).
-			Msg("While creating test pod!")
-		return false
-	}
-
-	if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil {
-		log.Printf("%v cluster is not able to pull %s containers from docker hub, err: %v", misc.Program, fmt.Sprintf(utils.Red, "✗"), err)
-		log.Error().
-			Str("namespace", namespace).
-			Str("pod", podName).
-			Err(err).
-			Msg("Unable to pull images from Docker Hub!")
-		return false
-	}
-
-	log.Info().
-		Str("namespace", namespace).
-		Str("pod", podName).
-		Msg("Pulling images from Docker Hub is passed.")
-	return true
-}
-
-func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
-	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
-	podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{namespace}, podWatchHelper)
-
-	timeAfter := time.After(30 * time.Second)
-
-	for {
-		select {
-		case wEvent, ok := <-eventChan:
-			if !ok {
-				eventChan = nil
-				continue
-			}
-
-			pod, err := wEvent.ToPod()
-			if err != nil {
-				return err
-			}
-
-			if pod.Status.Phase == core.PodRunning {
-				return nil
-			}
-		case err, ok := <-errorChan:
-			if !ok {
-				errorChan = nil
-				continue
-			}
-
-			return err
-		case <-timeAfter:
-			return fmt.Errorf("image not pulled in time")
-		}
-	}
-}
-
-func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
-	image := docker.GetWorkerImage()
-	log.Info().Str("image", image).Msg("Testing image pull:")
-	var zero int64
-	pod := &core.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: podName,
-		},
-		Spec: core.PodSpec{
-			Containers: []core.Container{
-				{
-					Name:            "probe",
-					Image:           image,
-					ImagePullPolicy: "Always",
-					Command:         []string{"cat"},
-					Stdin:           true,
-				},
-			},
-			TerminationGracePeriodSeconds: &zero,
-		},
-	}
-
-	if _, err := kubernetesProvider.CreatePod(ctx, namespace, pod); err != nil {
-		return err
-	}
-
-	return nil
-}
```
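The deleted check boils down to: create a pod from the worker image, wait up to 30 seconds for it to reach Running, then delete it. A rough manual equivalent with `kubectl`; the image reference is an assumption (the code resolves it via `docker.GetWorkerImage()`), and `sleep` stands in for the attached-stdin `cat` that kept the original probe container alive:

```shell
# One-off image-pull probe, mirroring createImagePullInClusterPod/checkImagePulled.
kubectl run kubeshark-test --image=docker.io/kubeshark/worker:latest \
  --image-pull-policy=Always --restart=Never -- sleep 60
kubectl wait pod/kubeshark-test --for=condition=Ready --timeout=30s
kubectl delete pod kubeshark-test --grace-period=0 --force
```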
Deleted file (package check, 28 lines):

```diff
@@ -1,28 +0,0 @@
-package check
-
-import (
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/semver"
-	"github.com/rs/zerolog/log"
-)
-
-func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
-	log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")
-
-	kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.Kube.Context)
-	if err != nil {
-		log.Error().Err(err).Msg("Can't initialize the client!")
-		return nil, nil, false
-	}
-	log.Info().Msg("Initialization of the client is passed.")
-
-	kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
-	if err != nil {
-		log.Error().Err(err).Msg("Can't query the Kubernetes API!")
-		return nil, nil, false
-	}
-	log.Info().Msg("Querying the Kubernetes API is passed.")
-
-	return kubernetesProvider, kubernetesVersion, true
-}
```
Deleted file (package check, 91 lines):

```diff
@@ -1,91 +0,0 @@
-package check
-
-import (
-	"context"
-	"embed"
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/rs/zerolog/log"
-	rbac "k8s.io/api/rbac/v1"
-	"k8s.io/client-go/kubernetes/scheme"
-)
-
-func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
-
-	var filePath string
-	if config.Config.IsNsRestrictedMode() {
-		filePath = "permissionFiles/permissions-ns-tap.yaml"
-	} else {
-		filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
-	}
-
-	data, err := embedFS.ReadFile(filePath)
-	if err != nil {
-		log.Error().Err(err).Msg("While checking Kubernetes permissions!")
-		return false
-	}
-
-	decode := scheme.Codecs.UniversalDeserializer().Decode
-	obj, _, err := decode(data, nil, nil)
-	if err != nil {
-		log.Error().Err(err).Msg("While checking Kubernetes permissions!")
-		return false
-	}
-
-	switch resource := obj.(type) {
-	case *rbac.Role:
-		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.SelfNamespace)
-	case *rbac.ClusterRole:
-		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
-	}
-
-	log.Error().Msg("While checking Kubernetes permissions! Resource of types 'Role' or 'ClusterRole' are not found in permission files.")
-	return false
-}
-
-func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
-	permissionsExist := true
-
-	for _, rule := range rules {
-		for _, group := range rule.APIGroups {
-			for _, resource := range rule.Resources {
-				for _, verb := range rule.Verbs {
-					exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
-					permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
-				}
-			}
-		}
-	}
-
-	return permissionsExist
-}
-
-func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
-	var groupAndNamespace string
-	if group != "" && namespace != "" {
-		groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
-	} else if group != "" {
-		groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
-	} else if namespace != "" {
-		groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
-	}
-
-	if err != nil {
-		log.Error().
-			Str("verb", verb).
-			Str("resource", resource).
-			Str("group-and-namespace", groupAndNamespace).
-			Err(err).
-			Msg("While checking Kubernetes permissions!")
-		return false
-	} else if !exist {
-		log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace))
-		return false
-	}
-
-	log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace))
-	return true
-}
```
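The nested loop over `APIGroups × Resources × Verbs` calling `CanI` is the programmatic form of `kubectl auth can-i`. A shell sketch of the same sweep; the verb/resource pairs are examples, not the actual contents of the embedded permission files:

```shell
# Mirror of checkRulesPermissions using kubectl; pairs are illustrative.
while read -r verb resource; do
  if kubectl auth can-i "$verb" "$resource" >/dev/null 2>&1; then
    echo "Can $verb $resource"
  else
    echo "Can't $verb $resource"
  fi
done <<'EOF'
list pods
watch pods
create services
EOF
```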
Deleted file (package check, 121 lines):

```diff
@@ -1,121 +0,0 @@
-package check
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/rs/zerolog/log"
-)
-
-func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "k8s-components").Msg("Checking:")
-
-	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.SelfNamespace)
-	allResourcesExist := checkResourceExist(config.Config.SelfNamespace, "namespace", exist, err)
-
-	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.SelfNamespace, kubernetes.ConfigMapName)
-	allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
-
-	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.SelfNamespace, kubernetes.ServiceAccountName)
-	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
-
-	if config.Config.IsNsRestrictedMode() {
-		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.SelfNamespace, kubernetes.RoleName)
-		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
-
-		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.SelfNamespace, kubernetes.RoleBindingName)
-		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
-	} else {
-		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
-		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
-
-		exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
-		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
-	}
-
-	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.HubServiceName)
-	allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
-
-	allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
-
-	return allResourcesExist
-}
-
-func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.SelfNamespace, kubernetes.HubPodName); err != nil {
-		log.Error().
-			Str("name", kubernetes.HubPodName).
-			Err(err).
-			Msg("While checking if pod is running!")
-		return false
-	} else if len(pods) == 0 {
-		log.Error().
-			Str("name", kubernetes.HubPodName).
-			Msg("Pod doesn't exist!")
-		return false
-	} else if !kubernetes.IsPodRunning(&pods[0]) {
-		log.Error().
-			Str("name", kubernetes.HubPodName).
-			Msg("Pod is not running!")
-		return false
-	}
-
-	log.Info().
-		Str("name", kubernetes.HubPodName).
-		Msg("Pod is running.")
-
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.SelfNamespace, kubernetes.WorkerPodName); err != nil {
-		log.Error().
-			Str("name", kubernetes.WorkerPodName).
-			Err(err).
-			Msg("While checking if pods are running!")
-		return false
-	} else {
-		workers := 0
-		notRunningWorkers := 0
-
-		for _, pod := range pods {
-			workers += 1
-			if !kubernetes.IsPodRunning(&pod) {
-				notRunningWorkers += 1
-			}
-		}
-
-		if notRunningWorkers > 0 {
-			log.Error().
-				Str("name", kubernetes.WorkerPodName).
-				Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
-			return false
-		}
-
-		log.Info().
-			Str("name", kubernetes.WorkerPodName).
-			Msg(fmt.Sprintf("All %d pods are running.", workers))
-		return true
-	}
-}
-
-func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
-	if err != nil {
-		log.Error().
-			Str("name", resourceName).
-			Str("type", resourceType).
-			Err(err).
-			Msg("Checking if resource exists!")
-		return false
-	} else if !exist {
-		log.Error().
-			Str("name", resourceName).
-			Str("type", resourceType).
-			Msg("Resource doesn't exist!")
-		return false
-	}
-
-	log.Info().
-		Str("name", resourceName).
-		Str("type", resourceType).
-		Msg("Resource exist.")
-	return true
-}
```
Deleted file (package check, 22 lines):

```diff
@@ -1,22 +0,0 @@
-package check
-
-import (
-	"fmt"
-
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/kubeshark/kubeshark/semver"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-)
-
-func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
-	log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")
-
-	if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
-		log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
-		return false
-	}
-
-	log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
-	return true
-}
```
Deleted file (package check, 40 lines):

```diff
@@ -1,40 +0,0 @@
-package check
-
-import (
-	"github.com/kubeshark/kubeshark/config"
-	"github.com/kubeshark/kubeshark/internal/connect"
-	"github.com/kubeshark/kubeshark/kubernetes"
-	"github.com/rs/zerolog/log"
-)
-
-func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
-	log.Info().Str("procedure", "server-connectivity").Msg("Checking:")
-
-	var connectedToHub, connectedToFront bool
-
-	if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
-		log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
-	} else {
-		connectedToHub = true
-		log.Info().Msg("Connected successfully to Hub using proxy.")
-	}
-
-	if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
-		log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
-	} else {
-		connectedToFront = true
-		log.Info().Msg("Connected successfully to Front using proxy.")
-	}
-
-	return connectedToHub && connectedToFront
-}
-
-func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
-	log.Info().Str("url", serverUrl).Msg("Connecting:")
-	connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
-	if err := connector.TestConnection(path); err != nil {
-		return err
-	}
-
-	return nil
-}
```
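ServerConnection simply issued HTTP requests through the local proxy: `/echo` on the Hub and the root path on the Front. With a proxy or port-forward running, the same probe works with curl; the port numbers below are placeholders standing in for `Tap.Proxy.Hub.SrcPort` and `Tap.Proxy.Front.SrcPort`:

```shell
# Hub health probe through the local proxy (port is a placeholder).
curl -fsS http://localhost:8898/echo >/dev/null && echo "Hub reachable"
# Front probe (port is a placeholder).
curl -fsS http://localhost:8899/ >/dev/null && echo "Front reachable"
```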
Deleted file (package cmd, 56 lines):

```diff
@@ -1,56 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"embed"
-	"fmt"
-	"os"
-
-	"github.com/kubeshark/kubeshark/cmd/check"
-	"github.com/kubeshark/kubeshark/misc"
-	"github.com/kubeshark/kubeshark/utils"
-	"github.com/rs/zerolog/log"
-)
-
-var (
-	//go:embed permissionFiles
-	embedFS embed.FS
-)
-
-func runCheck() {
-	log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel() // cancel will be called when this function exits
-
-	kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
-
-	if checkPassed {
-		checkPassed = check.KubernetesVersion(kubernetesVersion)
-	}
-
-	if checkPassed {
-		checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
-	}
-
-	if checkPassed {
-		checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
-	}
-	if checkPassed {
-		checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
-	}
-
-	if checkPassed {
-		checkPassed = check.ServerConnection(kubernetesProvider)
-	}
-
-	if checkPassed {
-		log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
-	} else {
-		log.Error().
-			Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)).
-			Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
-			Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software)))
-		os.Exit(1)
-	}
-}
```
23
cmd/clean.go
23
cmd/clean.go
@@ -3,7 +3,12 @@ package cmd
import (
	"fmt"

	"github.com/creasty/defaults"
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/kubernetes/helm"
	"github.com/kubeshark/kubeshark/misc"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

@@ -11,11 +16,27 @@ var cleanCmd = &cobra.Command{
	Use:   "clean",
	Short: fmt.Sprintf("Removes all %s resources", misc.Software),
	RunE: func(cmd *cobra.Command, args []string) error {
		performCleanCommand()
		resp, err := helm.NewHelm(
			config.Config.Tap.Release.Repo,
			config.Config.Tap.Release.Name,
			config.Config.Tap.Release.Namespace,
		).Uninstall()
		if err != nil {
			log.Error().Err(err).Send()
		} else {
			log.Info().Msgf("Uninstalled the Helm release: %s", resp.Release.Name)
		}
		return nil
	},
}

func init() {
	rootCmd.AddCommand(cleanCmd)

	defaultTapConfig := configStructs.TapConfig{}
	if err := defaults.Set(&defaultTapConfig); err != nil {
		log.Debug().Err(err).Send()
	}

	cleanCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
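helm.NewHelm(...).Uninstall() wraps the Helm SDK; the wrapper itself is not part of this diff. A minimal sketch of what an uninstall through helm.sh/helm/v3 looks like (the Init arguments, the "secret" storage driver, and the discarded logger are assumptions, not taken from the kubeshark wrapper):

	package helm

	import (
		"helm.sh/helm/v3/pkg/action"
		"helm.sh/helm/v3/pkg/cli"
		"helm.sh/helm/v3/pkg/release"
	)

	// Uninstall removes the named release from the given namespace.
	func Uninstall(name, namespace string) (*release.UninstallReleaseResponse, error) {
		settings := cli.New()
		cfg := new(action.Configuration)
		// "secret" is Helm's default storage driver; debug logging is discarded here.
		if err := cfg.Init(settings.RESTClientGetter(), namespace, "secret", func(string, ...interface{}) {}); err != nil {
			return nil, err
		}
		return action.NewUninstall(cfg).Run(name)
	}

The returned response carries resp.Release.Name, which matches how the RunE handler above logs the uninstalled release.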
@@ -1,14 +0,0 @@
package cmd

import (
	"github.com/kubeshark/kubeshark/config"
)

func performCleanCommand() {
	kubernetesProvider, err := getKubernetesProviderForCli()
	if err != nil {
		return
	}

	finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace)
}
@@ -14,52 +14,50 @@ import (
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/misc"
	"github.com/kubeshark/kubeshark/misc/fsUtils"
	"github.com/kubeshark/kubeshark/resources"
	"github.com/rs/zerolog/log"
)

func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, cancel context.CancelFunc, serviceName string, proxyPortLabel string, srcPort uint16, dstPort uint16, healthCheck string) {
	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.SelfNamespace, serviceName, cancel)
func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, serviceName string, podName string, proxyPortLabel string, srcPort uint16, dstPort uint16, healthCheck string) {
	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.Release.Namespace, serviceName)
	if err != nil {
		log.Error().
			Err(errormessage.FormatError(err)).
			Msg(fmt.Sprintf("Error occured while running k8s proxy. Try setting different port by using --%s", proxyPortLabel))
		cancel()
			Msg(fmt.Sprintf("Error occurred while running K8s proxy. Try setting different port using --%s", proxyPortLabel))
		return
	}

	connector := connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
	connector := connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
	if err := connector.TestConnection(healthCheck); err != nil {
		log.Error().Msg("Couldn't connect using proxy, stopping proxy and trying to create port-forward..")
		log.Warn().
			Str("service", serviceName).
			Msg("Couldn't connect using proxy, stopping proxy and trying to create port-forward...")
		if err := httpServer.Shutdown(ctx); err != nil {
			log.Error().
				Err(errormessage.FormatError(err)).
				Msg("Error occurred while stopping proxy.")
		}

		podRegex, _ := regexp.Compile(kubernetes.HubPodName)
		if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.SelfNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
		podRegex, _ := regexp.Compile(podName)
		if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.Release.Namespace, podRegex, srcPort, dstPort, ctx); err != nil {
			log.Error().
				Str("pod-regex", podRegex.String()).
				Err(errormessage.FormatError(err)).
				Msg(fmt.Sprintf("Error occured while running port forward. Try setting different port by using --%s", proxyPortLabel))
			cancel()
				Msg(fmt.Sprintf("Error occurred while running port forward. Try setting different port using --%s", proxyPortLabel))
			return
		}

		connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
		connector = connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
		if err := connector.TestConnection(healthCheck); err != nil {
			log.Error().
				Str("service", serviceName).
				Err(errormessage.FormatError(err)).
				Msg("Couldn't connect to service.")
			cancel()
			return
		}
	}
}

func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
func getKubernetesProviderForCli(silent bool, dontCheckVersion bool) (*kubernetes.Provider, error) {
	kubeConfigPath := config.Config.KubeConfigPath()
	kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
	if err != nil {
@@ -67,22 +65,26 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
		return nil, err
	}

	log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
	if !silent {
		log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
	}

	if err := kubernetesProvider.ValidateNotProxy(); err != nil {
		handleKubernetesProviderError(err)
		return nil, err
	}

	kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
	if err != nil {
		handleKubernetesProviderError(err)
		return nil, err
	}
	if !dontCheckVersion {
		kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
		if err != nil {
			handleKubernetesProviderError(err)
			return nil, err
		}

	if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
		handleKubernetesProviderError(err)
		return nil, err
		if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
			handleKubernetesProviderError(err)
			return nil, err
		}
	}

	return kubernetesProvider, nil
@@ -97,11 +99,10 @@ func handleKubernetesProviderError(err error) {
	}
}

func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string) {
func finishSelfExecution(kubernetesProvider *kubernetes.Provider) {
	removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
	defer cancel()
	dumpLogsIfNeeded(removalCtx, kubernetesProvider)
	resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace)
}

func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {
@@ -110,7 +111,7 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {
	}
	dotDir := misc.GetDotFolderPath()
	filePath := path.Join(dotDir, fmt.Sprintf("%s_logs_%s.zip", misc.Program, time.Now().Format("2006_01_02__15_04_05")))
	if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath); err != nil {
	if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath, config.Config.Logs.Grep); err != nil {
		log.Error().Err(err).Msg("Failed to dump logs.")
	}
}
@@ -2,6 +2,7 @@ package cmd

import (
	"fmt"
	"path"

	"github.com/creasty/defaults"
	"github.com/kubeshark/kubeshark/config"
@@ -16,27 +17,26 @@ var configCmd = &cobra.Command{
	Use:   "config",
	Short: fmt.Sprintf("Generate %s config with default values", misc.Software),
	RunE: func(cmd *cobra.Command, args []string) error {
		configWithDefaults, err := config.GetConfigWithDefaults()
		if err != nil {
			log.Error().Err(err).Msg("Failed generating config with defaults.")
			return nil
		}

		if config.Config.Config.Regenerate {
			if err := config.WriteConfig(configWithDefaults); err != nil {
			defaultConfig := config.CreateDefaultConfig()
			if err := defaults.Set(&defaultConfig); err != nil {
				log.Error().Err(err).Send()
				return nil
			}
			if err := config.WriteConfig(&defaultConfig); err != nil {
				log.Error().Err(err).Msg("Failed generating config with defaults.")
				return nil
			}

			log.Info().Str("config-path", config.Config.ConfigFilePath).Msg("Template file written to config path.")
			log.Info().Str("config-path", config.ConfigFilePath).Msg("Template file written to config path.")
		} else {
			template, err := utils.PrettyYaml(configWithDefaults)
			template, err := utils.PrettyYaml(config.Config)
			if err != nil {
				log.Error().Err(err).Msg("Failed converting config with defaults to YAML.")
				return nil
			}

			log.Debug().Str("template", template).Msg("Writing template config...")
			log.Debug().Str("template", template).Msg("Printing template config...")
			fmt.Printf("%v", template)
		}

@@ -52,5 +52,5 @@ func init() {
		log.Debug().Err(err).Send()
	}

	configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s or to chosen path using --%s", defaultConfig.ConfigFilePath, config.ConfigFilePathCommandName))
	configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s", path.Join(misc.GetDotFolderPath(), "config.yaml")))
}
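utils.PrettyYaml is called above but not shown in this diff. A plausible one-liner over gopkg.in/yaml.v3 (an assumption about the implementation, not the repo's actual code):

	package utils

	import "gopkg.in/yaml.v3"

	// PrettyYaml marshals any value into a YAML string.
	func PrettyYaml(v interface{}) (string, error) {
		data, err := yaml.Marshal(v)
		if err != nil {
			return "", err
		}
		return string(data), nil
	}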
156 cmd/console.go (new file)
@@ -0,0 +1,156 @@
package cmd

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/signal"
	"strings"
	"time"

	"github.com/creasty/defaults"
	"github.com/gorilla/websocket"
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

var consoleCmd = &cobra.Command{
	Use:   "console",
	Short: "Stream the scripting console logs into shell",
	RunE: func(cmd *cobra.Command, args []string) error {
		runConsole()
		return nil
	},
}

func init() {
	rootCmd.AddCommand(consoleCmd)

	defaultTapConfig := configStructs.TapConfig{}
	if err := defaults.Set(&defaultTapConfig); err != nil {
		log.Debug().Err(err).Send()
	}

	consoleCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for Kubeshark")
	consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for Kubeshark")
	consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

func runConsoleWithoutProxy() {
	log.Info().Msg("Starting scripting console ...")
	time.Sleep(5 * time.Second)
	hubUrl := kubernetes.GetHubUrl()
	for {

		// Attempt to connect to the Hub, retrying every 5 seconds on failure
		response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
		if err != nil || response.StatusCode != 200 {
			log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub."))
			time.Sleep(5 * time.Second)
			continue
		}

		interrupt := make(chan os.Signal, 1)
		signal.Notify(interrupt, os.Interrupt)

		log.Info().Str("host", config.Config.Tap.Proxy.Host).Str("url", hubUrl).Msg("Connecting to:")
		u := url.URL{
			Scheme: "ws",
			Host:   fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Front.Port),
			Path:   "/api/scripts/logs",
		}
		headers := http.Header{}
		headers.Set(utils.X_KUBESHARK_CAPTURE_HEADER_KEY, utils.X_KUBESHARK_CAPTURE_HEADER_IGNORE_VALUE)
		headers.Set("License-Key", config.Config.License)

		c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
		if err != nil {
			log.Error().Err(err).Msg("Websocket dial error, retrying in 5 seconds...")
			time.Sleep(5 * time.Second) // Delay before retrying
			continue
		}
		defer c.Close()

		done := make(chan struct{})

		go func() {
			defer close(done)
			for {
				_, message, err := c.ReadMessage()
				if err != nil {
					log.Error().Err(err).Msg("Error reading websocket message, reconnecting...")
					break // Break to reconnect
				}

				msg := string(message)
				if strings.Contains(msg, ":ERROR]") {
					msg = fmt.Sprintf(utils.Red, msg)
					fmt.Fprintln(os.Stderr, msg)
				} else {
					fmt.Fprintln(os.Stdout, msg)
				}
			}
		}()

		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()

		select {
		case <-done:
			log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Connection closed, reconnecting..."))
			time.Sleep(5 * time.Second) // Delay before reconnecting
			continue // Reconnect after error
		case <-interrupt:
			err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
			if err != nil {
				log.Error().Err(err).Send()
				continue
			}

			select {
			case <-done:
			case <-time.After(time.Second):
			}
			return
		}
	}
}

func runConsole() {
	go runConsoleWithoutProxy()

	// Create interrupt channel and setup signal handling once
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	done := make(chan struct{})

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-interrupt:
			// Handle interrupt and exit gracefully
			log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting..."))
			select {
			case <-done:
			case <-time.After(time.Second):
			}
			return

		case <-ticker.C:
			// Attempt to connect to the Hub every 5 seconds
			hubUrl := kubernetes.GetHubUrl()
			response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
			if err != nil || response.StatusCode != 200 {
				log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
				runProxy(false, true)
			}
		}
	}
}
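One thing worth noting in the loop above: `defer c.Close()` inside a `for` loop only runs when the whole function returns, so every reconnect iteration leaves its old websocket open. A sketch of the same reconnect loop that closes each connection explicitly before retrying (an illustrative rewrite, not the committed code):

	for {
		c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
		if err != nil {
			time.Sleep(5 * time.Second)
			continue
		}

		// Read until the connection breaks, then close it explicitly
		// so the next iteration does not leak the previous socket.
		for {
			_, message, err := c.ReadMessage()
			if err != nil {
				break
			}
			fmt.Println(string(message))
		}
		c.Close()
	}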
21 cmd/license.go (new file)
@@ -0,0 +1,21 @@
package cmd

import (
	"fmt"

	"github.com/kubeshark/kubeshark/config"
	"github.com/spf13/cobra"
)

var licenseCmd = &cobra.Command{
	Use:   "license",
	Short: "Print the loaded license string",
	RunE: func(cmd *cobra.Command, args []string) error {
		fmt.Println(config.Config.License)
		return nil
	},
}

func init() {
	rootCmd.AddCommand(licenseCmd)
}
@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
	Use:   "logs",
	Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
	RunE: func(cmd *cobra.Command, args []string) error {
		kubernetesProvider, err := getKubernetesProviderForCli()
		kubernetesProvider, err := getKubernetesProviderForCli(false, false)
		if err != nil {
			return nil
		}
@@ -30,7 +30,7 @@ var logsCmd = &cobra.Command{

		log.Debug().Str("logs-path", config.Config.Logs.FilePath()).Msg("Using this logs path...")

		if dumpLogsErr := fsUtils.DumpLogs(ctx, kubernetesProvider, config.Config.Logs.FilePath()); dumpLogsErr != nil {
		if dumpLogsErr := fsUtils.DumpLogs(ctx, kubernetesProvider, config.Config.Logs.FilePath(), config.Config.Logs.Grep); dumpLogsErr != nil {
			log.Error().Err(dumpLogsErr).Msg("Failed to dump logs.")
		}

@@ -47,4 +47,5 @@ func init() {
	}

	logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, fmt.Sprintf("Path for zip file (default current <pwd>\\%s_logs.zip)", misc.Program))
	logsCmd.Flags().StringP(configStructs.GrepLogsName, "g", defaultLogsConfig.Grep, "Regexp to grep the logs with")
}
101 cmd/pcapDump.go (new file)
@@ -0,0 +1,101 @@
package cmd

import (
	"errors"
	"path/filepath"

	"github.com/creasty/defaults"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

// pcapDumpCmd represents the consolidated pcapdump command
var pcapDumpCmd = &cobra.Command{
	Use:   "pcapdump",
	Short: "Store all captured traffic (including decrypted TLS) in a PCAP file.",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Retrieve the kubeconfig path from the flag
		kubeconfig, _ := cmd.Flags().GetString(configStructs.PcapKubeconfig)

		// If kubeconfig is not provided, use the default location
		if kubeconfig == "" {
			if home := homedir.HomeDir(); home != "" {
				kubeconfig = filepath.Join(home, ".kube", "config")
			} else {
				return errors.New("kubeconfig flag not provided and no home directory available for default config location")
			}
		}

		// Use the current context in kubeconfig
		config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			log.Error().Err(err).Msg("Error building kubeconfig")
			return err
		}

		clientset, err := kubernetes.NewForConfig(config)
		if err != nil {
			log.Error().Err(err).Msg("Error creating Kubernetes client")
			return err
		}

		// Copy PCAP files when the --enabled flag was not provided

		if !cmd.Flags().Changed(configStructs.PcapDumpEnabled) {
			destDir, _ := cmd.Flags().GetString(configStructs.PcapDest)
			log.Info().Msg("Copying PCAP files")
			err = copyPcapFiles(clientset, config, destDir)
			if err != nil {
				log.Error().Err(err).Msg("Error copying PCAP files")
				return err
			}
		} else {
			// Otherwise start or stop the dump according to --enabled

			enabled, err := cmd.Flags().GetBool(configStructs.PcapDumpEnabled)
			if err != nil {
				log.Error().Err(err).Msg("Error getting pcapdump enable flag")
				return err
			}
			timeInterval, _ := cmd.Flags().GetString(configStructs.PcapTimeInterval)
			maxTime, _ := cmd.Flags().GetString(configStructs.PcapMaxTime)
			maxSize, _ := cmd.Flags().GetString(configStructs.PcapMaxSize)
			err = startStopPcap(clientset, enabled, timeInterval, maxTime, maxSize)
			if err != nil {
				log.Error().Err(err).Msg("Error starting/stopping PCAP dump")
				return err
			}

			if enabled {
				log.Info().Msg("Pcapdump started successfully")
				return nil
			} else {
				log.Info().Msg("Pcapdump stopped successfully")
				return nil
			}

		}

		return nil
	},
}

func init() {
	rootCmd.AddCommand(pcapDumpCmd)

	defaultPcapDumpConfig := configStructs.PcapDumpConfig{}
	if err := defaults.Set(&defaultPcapDumpConfig); err != nil {
		log.Debug().Err(err).Send()
	}

	pcapDumpCmd.Flags().String(configStructs.PcapTimeInterval, defaultPcapDumpConfig.PcapTimeInterval, "Time interval for PCAP file rotation (used with --start)")
	pcapDumpCmd.Flags().String(configStructs.PcapMaxTime, defaultPcapDumpConfig.PcapMaxTime, "Maximum time for retaining old PCAP files (used with --start)")
	pcapDumpCmd.Flags().String(configStructs.PcapMaxSize, defaultPcapDumpConfig.PcapMaxSize, "Maximum size of PCAP files before deletion (used with --start)")
	pcapDumpCmd.Flags().String(configStructs.PcapDest, "", "Local destination path for copied PCAP files (cannot be used together with --enabled)")
	pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Path to the kubeconfig file (defaults to $HOME/.kube/config)")
}
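--dest and --enabled are documented as mutually exclusive, but the command only enforces that implicitly via `cmd.Flags().Changed`. Recent cobra versions can declare the constraint directly so conflicting invocations fail with a standard error; a sketch (assuming cobra >= v1.5, where MarkFlagsMutuallyExclusive was added):

	func init() {
		// Rejects invocations that set both flags before RunE is called.
		pcapDumpCmd.MarkFlagsMutuallyExclusive(configStructs.PcapDest, configStructs.PcapDumpEnabled)
	}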
370 cmd/pcapDumpRunner.go (new file)
@@ -0,0 +1,370 @@
package cmd

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/kubeshark/gopacket"
	"github.com/kubeshark/gopacket/layers"
	"github.com/kubeshark/gopacket/pcapgo"
	"github.com/rs/zerolog/log"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	clientk8s "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

const label = "app.kubeshark.co/app=worker"
const SELF_RESOURCES_PREFIX = "kubeshark-"
const SUFFIX_CONFIG_MAP = "config-map"

// NamespaceFiles represents the namespace and the files found in that namespace.
type NamespaceFiles struct {
	Namespace string   // The namespace in which the files were found
	SrcDir    string   // The source directory from which the files were listed
	Files     []string // List of files found in the namespace
}

// listWorkerPods fetches all worker pods from multiple namespaces
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]corev1.Pod, error) {
	var allPods []corev1.Pod
	labelSelector := label

	for _, namespace := range namespaces {
		// List all pods matching the label in the current namespace
		pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
			LabelSelector: labelSelector,
		})
		if err != nil {
			return nil, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err)
		}

		// Accumulate the pods
		allPods = append(allPods, pods.Items...)
	}

	return allPods, nil
}

// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, podName string, namespaces []string, configMapName, configMapKey string) ([]NamespaceFiles, error) {
	var namespaceFilesList []NamespaceFiles

	for _, namespace := range namespaces {
		// Attempt to get the ConfigMap in the current namespace
		configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{})
		if err != nil {
			continue
		}

		// Check if the source directory exists in the ConfigMap
		srcDir, ok := configMap.Data[configMapKey]
		if !ok || srcDir == "" {
			log.Error().Msgf("source directory not found in ConfigMap %s in namespace %s", configMapName, namespace)
			continue
		}

		// Attempt to get the pod in the current namespace
		pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
		if err != nil {
			log.Error().Err(err).Msgf("failed to get pod %s in namespace %s", podName, namespace)
			continue
		}

		nodeName := pod.Spec.NodeName
		srcFilePath := filepath.Join("data", nodeName, srcDir)

		cmd := []string{"ls", srcFilePath}
		req := clientset.CoreV1().RESTClient().Post().
			Resource("pods").
			Name(podName).
			Namespace(namespace).
			SubResource("exec").
			Param("container", "sniffer").
			Param("stdout", "true").
			Param("stderr", "true").
			Param("command", cmd[0]).
			Param("command", cmd[1])

		exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
		if err != nil {
			log.Error().Err(err).Msgf("failed to initialize executor for pod %s in namespace %s", podName, namespace)
			continue
		}

		var stdoutBuf bytes.Buffer
		var stderrBuf bytes.Buffer

		// Execute the command to list files
		err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
			Stdout: &stdoutBuf,
			Stderr: &stderrBuf,
		})
		if err != nil {
			log.Error().Err(err).Msgf("error listing files in pod %s in namespace %s: %s", podName, namespace, stderrBuf.String())
			continue
		}

		// Split the output (file names) into a list
		files := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
		if len(files) > 0 {
			// Append the NamespaceFiles struct to the list
			namespaceFilesList = append(namespaceFilesList, NamespaceFiles{
				Namespace: namespace,
				SrcDir:    srcDir,
				Files:     files,
			})
		}
	}

	if len(namespaceFilesList) == 0 {
		return nil, fmt.Errorf("no files found in pod %s across the provided namespaces", podName)
	}

	return namespaceFilesList, nil
}

// copyFileFromPod copies a single file from a pod to a local destination
func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, srcDir, srcFile, destFile string) error {
	// Get the pod to retrieve its node name
	pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get pod %s in namespace %s: %w", podName, namespace, err)
	}

	// Construct the complete path using /data, the node name, srcDir, and srcFile
	nodeName := pod.Spec.NodeName
	srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile)

	// Execute the `cat` command to read the file at the srcFilePath
	cmd := []string{"cat", srcFilePath}
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(podName).
		Namespace(namespace).
		SubResource("exec").
		Param("container", "sniffer").
		Param("stdout", "true").
		Param("stderr", "true").
		Param("command", cmd[0]).
		Param("command", cmd[1])

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", podName, namespace, err)
	}

	// Create the local file to write the content to
	outFile, err := os.Create(destFile)
	if err != nil {
		return fmt.Errorf("failed to create destination file: %w", err)
	}
	defer outFile.Close()

	// Capture stderr for error logging
	var stderrBuf bytes.Buffer

	// Stream the file content from the pod to the local file
	err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: outFile,
		Stderr: &stderrBuf,
	})
	if err != nil {
		return fmt.Errorf("error copying file from pod %s in namespace %s: %s", podName, namespace, stderrBuf.String())
	}

	return nil
}

func mergePCAPs(outputFile string, inputFiles []string) error {
	// Create the output file
	f, err := os.Create(outputFile)
	if err != nil {
		return err
	}
	defer f.Close()

	// Create a pcap writer for the output file
	writer := pcapgo.NewWriter(f)
	err = writer.WriteFileHeader(65536, layers.LinkTypeEthernet) // Snapshot length and LinkType
	if err != nil {
		return err
	}

	for _, inputFile := range inputFiles {
		log.Info().Msgf("Merging %s into %s", inputFile, outputFile)
		// Open each input file
		file, err := os.Open(inputFile)
		if err != nil {
			log.Error().Err(err).Msgf("Failed to open %v", inputFile)
			continue
		}
		defer file.Close()

		reader, err := pcapgo.NewReader(file)
		if err != nil {
			log.Error().Err(err).Msgf("Failed to create pcap reader for %v", file.Name())
			continue
		}

		// Create the packet source
		packetSource := gopacket.NewPacketSource(reader, layers.LinkTypeEthernet)

		for packet := range packetSource.Packets() {
			err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data())
			if err != nil {
				log.Error().Err(err).Msgf("Failed to write packet to %v", outputFile)
				continue
			}
		}
	}

	return nil
}

// setPcapConfigInKubernetes sets the PCAP config for all pods across multiple namespaces
func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clientset, podName string, namespaces []string, enabledPcap bool, timeInterval, maxTime, maxSize string) error {
	for _, namespace := range namespaces {
		// Load the existing ConfigMap in the current namespace
		configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, "kubeshark-config-map", metav1.GetOptions{})
		if err != nil {
			log.Error().Err(err).Msgf("failed to get ConfigMap in namespace %s", namespace)
			continue
		}

		// Update the values with user-provided input
		configMap.Data["PCAP_TIME_INTERVAL"] = timeInterval
		configMap.Data["PCAP_MAX_SIZE"] = maxSize
		configMap.Data["PCAP_MAX_TIME"] = maxTime
		configMap.Data["PCAP_DUMP_ENABLE"] = strconv.FormatBool(enabledPcap)

		// Apply the updated ConfigMap back to the cluster in the current namespace
		_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, configMap, metav1.UpdateOptions{})
		if err != nil {
			log.Error().Err(err).Msgf("failed to update ConfigMap in namespace %s", namespace)
			continue
		}
	}

	return nil
}

// startStopPcap starts or stops the PCAP capture across the worker pods
func startStopPcap(clientset *kubernetes.Clientset, pcapEnable bool, timeInterval, maxTime, maxSize string) error {
	kubernetesProvider, err := getKubernetesProviderForCli(false, false)
	if err != nil {
		log.Error().Err(err).Send()
		return err
	}

	targetNamespaces := kubernetesProvider.GetNamespaces()

	// List worker pods
	workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
	if err != nil {
		log.Error().Err(err).Msg("Error listing worker pods")
		return err
	}

	// Iterate over each pod to start the PCAP capture by updating the configuration in Kubernetes
	for _, pod := range workerPods {
		err := setPcapConfigInKubernetes(context.Background(), clientset, pod.Name, targetNamespaces, pcapEnable, timeInterval, maxTime, maxSize)
		if err != nil {
			log.Error().Err(err).Msgf("Error setting PCAP config for pod %s", pod.Name)
			continue
		}
	}
	return nil
}

// copyPcapFiles copies the PCAP files from the worker pods and merges them locally
func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string) error {
	kubernetesProvider, err := getKubernetesProviderForCli(false, false)
	if err != nil {
		log.Error().Err(err).Send()
		return err
	}

	targetNamespaces := kubernetesProvider.GetNamespaces()

	// List worker pods
	workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
	if err != nil {
		log.Error().Err(err).Msg("Error listing worker pods")
		return err
	}
	var currentFiles []string

	// Iterate over each pod to get the PCAP directory from config and copy files
	for _, pod := range workerPods {
		// Get the list of NamespaceFiles (files per namespace) and their source directories
		namespaceFiles, err := listFilesInPodDir(context.Background(), clientset, config, pod.Name, targetNamespaces, SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, "PCAP_SRC_DIR")
		if err != nil {
			log.Error().Err(err).Msgf("Error listing files in pod %s", pod.Name)
			continue
		}

		// Copy each file from the pod to the local destination for each namespace
		for _, nsFiles := range namespaceFiles {
			for _, file := range nsFiles.Files {
				destFile := filepath.Join(destDir, file)

				// Pass the correct namespace and related details to the function
				err = copyFileFromPod(context.Background(), clientset, config, pod.Name, nsFiles.Namespace, nsFiles.SrcDir, file, destFile)
				if err != nil {
					log.Error().Err(err).Msgf("Error copying file from pod %s in namespace %s", pod.Name, nsFiles.Namespace)
				} else {
					log.Info().Msgf("Copied %s from %s to %s", file, pod.Name, destFile)
				}

				currentFiles = append(currentFiles, destFile)
			}
		}
	}

	if len(currentFiles) == 0 {
		log.Error().Msgf("No files to merge")
		return nil
	}

	// Generate a temporary filename based on the first file
	tempMergedFile := currentFiles[0] + "_temp"

	// Merge the PCAPs into the temporary file
	err = mergePCAPs(tempMergedFile, currentFiles)
	if err != nil {
		log.Error().Err(err).Msgf("Error merging files")
		return err
	}

	// Remove the original files after merging
	for _, file := range currentFiles {
		err := os.Remove(file)
		if err != nil {
			log.Error().Err(err).Msgf("Error removing file %s", file)
		}
	}

	// Rename the temp file to the final name (removing "_temp")
	finalMergedFile := strings.TrimSuffix(tempMergedFile, "_temp")
	err = os.Rename(tempMergedFile, finalMergedFile)
	if err != nil {
		log.Error().Err(err).Msgf("Error renaming merged file %s", tempMergedFile)
		return err
	}

	log.Info().Msgf("Merged file created: %s", finalMergedFile)

	return nil
}
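A small, self-contained exercise of mergePCAPs above: write one synthetic frame into a PCAP file, then merge it into a combined output. This is an illustrative test sketch, not part of the commit; the synthetic zeroed frame is only there to give the reader something to merge.

	package cmd

	import (
		"os"
		"path/filepath"
		"testing"
		"time"

		"github.com/kubeshark/gopacket"
		"github.com/kubeshark/gopacket/layers"
		"github.com/kubeshark/gopacket/pcapgo"
	)

	// TestMergePCAPs writes one synthetic frame into a PCAP file, then
	// merges that file into a combined output via mergePCAPs.
	func TestMergePCAPs(t *testing.T) {
		dir := t.TempDir()
		src := filepath.Join(dir, "a.pcap")

		f, err := os.Create(src)
		if err != nil {
			t.Fatal(err)
		}
		w := pcapgo.NewWriter(f)
		if err := w.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil {
			t.Fatal(err)
		}
		frame := make([]byte, 60) // minimal zeroed Ethernet-sized frame
		if err := w.WritePacket(gopacket.CaptureInfo{
			Timestamp:     time.Now(),
			CaptureLength: len(frame),
			Length:        len(frame),
		}, frame); err != nil {
			t.Fatal(err)
		}
		f.Close()

		if err := mergePCAPs(filepath.Join(dir, "merged.pcap"), []string{src}); err != nil {
			t.Fatal(err)
		}
	}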
@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-clusterrole
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-runner-debug-clusterrole
  apiGroup: rbac.authorization.k8s.io
@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-clusterrole
rules:
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get", "create"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterroles"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterrolebindings"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["services"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-resolver-clusterrole
  apiGroup: rbac.authorization.k8s.io
@@ -1,40 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-clusterrole
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "watch", "create"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "create"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["create", "patch"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    verbs: ["get", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-runner-clusterrole
  apiGroup: rbac.authorization.k8s.io
@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-role
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-rolebindings
subjects:
  - kind: User
    name: user-with-restricted-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: kubeshark-runner-debug-role
  apiGroup: rbac.authorization.k8s.io
@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-role
rules:
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["roles"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["rolebindings"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["services"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-rolebindings
subjects:
  - kind: User
    name: user-with-restricted-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: kubeshark-resolver-role
  apiGroup: rbac.authorization.k8s.io
@@ -1,37 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-role
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "watch", "create"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "create", "delete"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["create", "patch", "delete"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    verbs: ["get", "create", "delete"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create", "delete"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-rolebindings
subjects:
  - kind: User
    name: user-with-restricted-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: kubeshark-runner-role
  apiGroup: rbac.authorization.k8s.io
32 cmd/pprof.go (new file)
@@ -0,0 +1,32 @@
package cmd

import (
	"github.com/creasty/defaults"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

var pprofCmd = &cobra.Command{
	Use:   "pprof",
	Short: "Select a Kubeshark container and open the pprof web UI in the browser",
	RunE: func(cmd *cobra.Command, args []string) error {
		runPprof()
		return nil
	},
}

func init() {
	rootCmd.AddCommand(pprofCmd)

	defaultTapConfig := configStructs.TapConfig{}
	if err := defaults.Set(&defaultTapConfig); err != nil {
		log.Debug().Err(err).Send()
	}

	pprofCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
	pprofCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
	pprofCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
	pprofCmd.Flags().Uint16(configStructs.PprofPortLabel, defaultTapConfig.Pprof.Port, "Provide a custom port for the pprof server")
	pprofCmd.Flags().String(configStructs.PprofViewLabel, defaultTapConfig.Pprof.View, "Change the default view of the pprof web interface")
}
176 cmd/pprofRunner.go (new file)
@@ -0,0 +1,176 @@
package cmd

import (
	"context"
	"fmt"

	"github.com/go-cmd/cmd"
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rivo/tview"
	"github.com/rs/zerolog/log"
	v1 "k8s.io/api/core/v1"
)

func runPprof() {
	runProxy(false, true)

	provider, err := getKubernetesProviderForCli(false, false)
	if err != nil {
		return
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	hubPods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{kubernetes.AppLabelKey: "hub"})
	if err != nil {
		log.Error().
			Err(err).
			Msg("Failed to list hub pods!")
		cancel()
		return
	}

	workerPods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{kubernetes.AppLabelKey: "worker"})
	if err != nil {
		log.Error().
			Err(err).
			Msg("Failed to list worker pods!")
		cancel()
		return
	}

	fullscreen := true

	app := tview.NewApplication()
	list := tview.NewList()

	var currentCmd *cmd.Cmd

	i := 48 // rune(48) is '0'; shortcut runes for the list items count up from there
	for _, pod := range hubPods {
		for _, container := range pod.Spec.Containers {
			log.Info().Str("pod", pod.Name).Str("container", container.Name).Send()
			homeUrl := fmt.Sprintf("%s/debug/pprof/", kubernetes.GetHubUrl())
			modal := buildNewModal(
				pod,
				container,
				homeUrl,
				app,
				list,
				fullscreen,
				currentCmd,
			)
			list.AddItem(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name), pod.Spec.NodeName, rune(i), func() {
				app.SetRoot(modal, fullscreen)
			})
			i++
		}
	}

	for _, pod := range workerPods {
		for _, container := range pod.Spec.Containers {
			log.Info().Str("pod", pod.Name).Str("container", container.Name).Send()
			homeUrl := fmt.Sprintf("%s/pprof/%s/%s/", kubernetes.GetHubUrl(), pod.Status.HostIP, container.Name)
			modal := buildNewModal(
				pod,
				container,
				homeUrl,
				app,
				list,
				fullscreen,
				currentCmd,
			)
			list.AddItem(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name), pod.Spec.NodeName, rune(i), func() {
				app.SetRoot(modal, fullscreen)
			})
			i++
		}
	}

	list.AddItem("Quit", "Press to exit", 'q', func() {
		if currentCmd != nil {
			err = currentCmd.Stop()
			if err != nil {
				log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
			}
		}
		app.Stop()
	})

	if err := app.SetRoot(list, fullscreen).EnableMouse(true).Run(); err != nil {
		panic(err)
	}
}

func buildNewModal(
	pod v1.Pod,
	container v1.Container,
	homeUrl string,
	app *tview.Application,
	list *tview.List,
	fullscreen bool,
	currentCmd *cmd.Cmd,
) *tview.Modal {
	return tview.NewModal().
		SetText(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name)).
		AddButtons([]string{
			"Open Debug Home Page",
			"Profile: CPU",
			"Profile: Memory",
			"Profile: Goroutine",
			"Cancel",
		}).
		SetDoneFunc(func(buttonIndex int, buttonLabel string) {
			var err error
			port := fmt.Sprintf(":%d", config.Config.Tap.Pprof.Port)
			view := fmt.Sprintf("http://localhost%s/ui/%s", port, config.Config.Tap.Pprof.View)

			switch buttonLabel {
			case "Open Debug Home Page":
				utils.OpenBrowser(homeUrl)
			case "Profile: CPU":
				if currentCmd != nil {
					err = currentCmd.Stop()
					if err != nil {
						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
					}
				}
				currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sprofile", homeUrl))
				currentCmd.Start()
				utils.OpenBrowser(view)
			case "Profile: Memory":
				if currentCmd != nil {
					err = currentCmd.Stop()
					if err != nil {
						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
					}
				}
				currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sheap", homeUrl))
				currentCmd.Start()
				utils.OpenBrowser(view)
			case "Profile: Goroutine":
				if currentCmd != nil {
					err = currentCmd.Stop()
					if err != nil {
						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
					}
				}
				currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sgoroutine", homeUrl))
				currentCmd.Start()
				utils.OpenBrowser(view)
			case "Cancel":
				if currentCmd != nil {
					err = currentCmd.Stop()
					if err != nil {
						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
					}
				}
				fallthrough
			default:
				app.SetRoot(list, fullscreen)
			}
		})
}
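A subtlety in buildNewModal: `currentCmd *cmd.Cmd` is passed by value, so the `currentCmd = cmd.NewCmd(...)` assignments inside SetDoneFunc update only the local copy; the `currentCmd` that the Quit handler in runPprof checks stays nil. A sketch of one way to share the handle between the modal callbacks and the Quit handler (illustrative, not the committed fix):

	// cmdHolder shares the currently running pprof process between
	// the modal callbacks and the list's Quit handler.
	type cmdHolder struct {
		current *cmd.Cmd
	}

	// restart stops any running process and starts `go <args...>` in its place.
	func (h *cmdHolder) restart(args ...string) {
		if h.current != nil {
			if err := h.current.Stop(); err != nil {
				log.Error().Err(err).Msg("Failed to stop process!")
			}
		}
		h.current = cmd.NewCmd("go", args...)
		h.current.Start()
	}

Passing a *cmdHolder into buildNewModal would let every callback see and replace the same process handle.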
10 cmd/proxy.go
@@ -9,9 +9,9 @@ import (

var proxyCmd = &cobra.Command{
	Use:   "proxy",
	Short: "Open the web UI (front-end) in the browser via proxy/port-forward.",
	Short: "Open the web UI (front-end) in the browser via proxy/port-forward",
	RunE: func(cmd *cobra.Command, args []string) error {
		runProxy()
		runProxy(true, false)
		return nil
	},
}
@@ -24,7 +24,7 @@ func init() {
		log.Debug().Err(err).Send()
	}

	proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward.")
	proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward.")
	proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward.")
	proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
	proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
	proxyCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
@@ -14,8 +14,8 @@ import (
	"github.com/rs/zerolog/log"
)

func runProxy() {
	kubernetesProvider, err := getKubernetesProviderForCli()
func runProxy(block bool, noBrowser bool) {
	kubernetesProvider, err := getKubernetesProviderForCli(false, false)
	if err != nil {
		return
	}
@@ -23,10 +23,10 @@ func runProxy() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.FrontServiceName)
	exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.FrontServiceName)
	if err != nil {
		log.Error().
			Str("service", misc.Program).
			Str("service", kubernetes.FrontServiceName).
			Err(err).
			Msg("Failed to find service!")
		cancel()
@@ -42,36 +42,66 @@ func runProxy() {
		return
	}

	url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
	exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.HubServiceName)
	if err != nil {
		log.Error().
			Str("service", kubernetes.HubServiceName).
			Err(err).
			Msg("Failed to find service!")
		cancel()
		return
	}

	response, err := http.Get(fmt.Sprintf("%s/", url))
	if !exists {
		log.Error().
			Str("service", kubernetes.HubServiceName).
			Str("command", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
			Msg("Service not found! You should run the command first:")
		cancel()
		return
	}

	var establishedProxy bool

	frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
	response, err := http.Get(fmt.Sprintf("%s/", frontUrl))
	if err == nil && response.StatusCode == 200 {
		log.Info().
			Str("service", kubernetes.FrontServiceName).
			Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
			Int("port", int(config.Config.Tap.Proxy.Front.Port)).
			Msg("Found a running service.")

		okToOpen(url)
		return
	}
	log.Info().Msg("Establishing connection to K8s cluster...")
	startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.FrontServiceName, configStructs.ProxyFrontPortLabel, config.Config.Tap.Proxy.Front.SrcPort, config.Config.Tap.Proxy.Front.DstPort, "")
		okToOpen("Kubeshark", frontUrl, noBrowser)
	} else {
		startProxyReportErrorIfAny(
			kubernetesProvider,
			ctx,
			kubernetes.FrontServiceName,
			kubernetes.FrontPodName,
			configStructs.ProxyFrontPortLabel,
			config.Config.Tap.Proxy.Front.Port,
			configStructs.ContainerPort,
			"",
		)
		connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)
		if err := connector.TestConnection(""); err != nil {
			log.Error().Msg(fmt.Sprintf(utils.Red, "Couldn't connect to Front."))
			return
		}

	connector := connect.NewConnector(url, connect.DefaultRetries, connect.DefaultTimeout)
	if err := connector.TestConnection(""); err != nil {
		log.Error().Msg(fmt.Sprintf(utils.Red, "Couldn't connect to Front."))
		return
		establishedProxy = true
		okToOpen("Kubeshark", frontUrl, noBrowser)
	}
	if establishedProxy && block {
		utils.WaitForTermination(ctx, cancel)
	}

	okToOpen(url)

	utils.WaitForTermination(ctx, cancel)
}

func okToOpen(url string) {
	log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
func okToOpen(name string, url string, noBrowser bool) {
	log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", name)))

	if !config.Config.HeadlessMode {
	if !config.Config.HeadlessMode && !noBrowser {
		utils.OpenBrowser(url)
	}
}
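utils.WaitForTermination is called above but not shown in this diff. A minimal sketch of such a helper, assuming it blocks until an interrupt/termination signal arrives or the shared context is cancelled:

	package utils

	import (
		"context"
		"os"
		"os/signal"
		"syscall"
	)

	// WaitForTermination blocks until the process is signalled or ctx is
	// cancelled, then cancels the shared context so proxies shut down.
	func WaitForTermination(ctx context.Context, cancel context.CancelFunc) {
		sigChan := make(chan os.Signal, 1)
		signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
		select {
		case <-sigChan:
			cancel()
		case <-ctx.Done():
		}
	}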
@@ -12,10 +12,10 @@ import (

var rootCmd = &cobra.Command{
	Use: "kubeshark",
	Short: fmt.Sprintf("%s: The API Traffic Viewer for Kubernetes", misc.Software),
	Long: fmt.Sprintf(`%s: The API Traffic Viewer for Kubernetes
	Short: fmt.Sprintf("%s: %s", misc.Software, misc.Description),
	Long: fmt.Sprintf(`%s: %s
An extensible Kubernetes-aware network sniffer and kernel tracer.
For more info: %s`, misc.Software, misc.Website),
For more info: %s`, misc.Software, misc.Description, misc.Website),
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		if err := config.InitConfig(cmd); err != nil {
			log.Fatal().Err(err).Send()
@@ -32,8 +32,7 @@ func init() {
	}

	rootCmd.PersistentFlags().StringSlice(config.SetCommandName, []string{}, fmt.Sprintf("Override values using --%s", config.SetCommandName))
	rootCmd.PersistentFlags().String(config.ConfigFilePathCommandName, defaultConfig.ConfigFilePath, fmt.Sprintf("Override config file path using --%s", config.ConfigFilePathCommandName))
	rootCmd.PersistentFlags().BoolP(config.DebugFlag, "d", false, "Enable debug mode.")
	rootCmd.PersistentFlags().BoolP(config.DebugFlag, "d", false, "Enable debug mode")
}

// Execute adds all child commands to the root command and sets flags appropriately.
374 cmd/scripts.go (new file)
@@ -0,0 +1,374 @@
|
||||
package cmd

import (
    "context"
    "encoding/json"
    "errors"
    "os"
    "os/signal"
    "strings"
    "sync"
    "time"

    "github.com/creasty/defaults"
    "github.com/fsnotify/fsnotify"
    "github.com/kubeshark/kubeshark/config"
    "github.com/kubeshark/kubeshark/config/configStructs"
    "github.com/kubeshark/kubeshark/kubernetes"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/rs/zerolog/log"
    "github.com/spf13/cobra"
    k8serrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
)

var scriptsCmd = &cobra.Command{
    Use:   "scripts",
    Short: "Watch the `scripting.source` and/or `scripting.sources` folders for changes and update the scripts",
    RunE: func(cmd *cobra.Command, args []string) error {
        runScripts()
        return nil
    },
}

func init() {
    rootCmd.AddCommand(scriptsCmd)

    defaultTapConfig := configStructs.TapConfig{}
    if err := defaults.Set(&defaultTapConfig); err != nil {
        log.Debug().Err(err).Send()
    }

    scriptsCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
    scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
    scriptsCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

func runScripts() {
    if config.Config.Scripting.Source == "" && len(config.Config.Scripting.Sources) == 0 {
        log.Error().Msg("Both `scripting.source` and `scripting.sources` fields are empty.")
        return
    }

    kubernetesProvider, err := getKubernetesProviderForCli(false, false)
    if err != nil {
        log.Error().Err(err).Send()
        return
    }

    var wg sync.WaitGroup

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    signalChan := make(chan os.Signal, 1)
    signal.Notify(signalChan, os.Interrupt)

    wg.Add(1)
    go func() {
        defer wg.Done()
        watchConfigMap(ctx, kubernetesProvider)
    }()

    wg.Add(1)
    go func() {
        defer wg.Done()
        watchScripts(ctx, kubernetesProvider, true)
    }()

    go func() {
        <-signalChan
        log.Debug().Msg("Received interrupt, stopping watchers.")
        cancel()
    }()

    wg.Wait()

}

func createScript(provider *kubernetes.Provider, script misc.ConfigMapScript) (index int64, err error) {
    const maxRetries = 5
    var scripts map[int64]misc.ConfigMapScript

    for i := 0; i < maxRetries; i++ {
        scripts, err = kubernetes.ConfigGetScripts(provider)
        if err != nil {
            return
        }
        script.Active = kubernetes.IsActiveScript(provider, script.Title)
        index = int64(len(scripts))
        if script.Title != "New Script" {
            for i, v := range scripts {
                if v.Title == script.Title {
                    index = int64(i)
                }
            }
        }
        scripts[index] = script

        log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script")
        var data []byte
        data, err = json.Marshal(scripts)
        if err != nil {
            return
        }

        _, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
        if err == nil {
            return index, nil
        }

        if k8serrors.IsConflict(err) {
            log.Warn().Err(err).Msg("Conflict detected, retrying update...")
            time.Sleep(500 * time.Millisecond)
            continue
        }

        return 0, err
    }

    log.Error().Msg("Max retries reached for creating script due to conflicts.")
    return 0, errors.New("max retries reached due to conflicts while creating script")
}

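createScript above hand-rolls optimistic-concurrency retries around the scripts ConfigMap: re-read, modify, write, and retry only on a 409 Conflict. For reference, a minimal sketch of the same read-modify-write cycle expressed with client-go's retry helper; upsertConfigMapKey and its parameters are hypothetical and not part of this diff.

    package scriptsync

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/util/retry"
    )

    // upsertConfigMapKey re-reads the ConfigMap on every attempt, so a
    // conflict never clobbers a concurrent writer's changes.
    func upsertConfigMapKey(clientset kubernetes.Interface, namespace, name, key, value string) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            if cm.Data == nil {
                cm.Data = map[string]string{}
            }
            cm.Data[key] = value
            _, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
            return err
        })
    }

retry.RetryOnConflict backs off between attempts for you, which is the library equivalent of the fixed 500 ms sleep in the loop above.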
func updateScript(provider *kubernetes.Provider, index int64, script misc.ConfigMapScript) (err error) {
    var scripts map[int64]misc.ConfigMapScript
    scripts, err = kubernetes.ConfigGetScripts(provider)
    if err != nil {
        return
    }
    script.Active = kubernetes.IsActiveScript(provider, script.Title)
    scripts[index] = script

    var data []byte
    data, err = json.Marshal(scripts)
    if err != nil {
        return
    }

    _, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
    if err != nil {
        return
    }

    return
}

func deleteScript(provider *kubernetes.Provider, index int64) (err error) {
    var scripts map[int64]misc.ConfigMapScript
    scripts, err = kubernetes.ConfigGetScripts(provider)
    if err != nil {
        return
    }
    err = kubernetes.DeleteActiveScriptByTitle(provider, scripts[index].Title)
    if err != nil {
        return
    }
    delete(scripts, index)

    var data []byte
    data, err = json.Marshal(scripts)
    if err != nil {
        return
    }

    _, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
    if err != nil {
        return
    }

    return
}

func watchScripts(ctx context.Context, provider *kubernetes.Provider, block bool) {
    files := make(map[string]int64)

    scripts, err := config.Config.Scripting.GetScripts()
    if err != nil {
        log.Error().Err(err).Send()
        return
    }

    for _, script := range scripts {
        index, err := createScript(provider, script.ConfigMap())
        if err != nil {
            log.Error().Err(err).Send()
            continue
        }

        files[script.Path] = index
    }

    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        log.Error().Err(err).Send()
        return
    }
    if block {
        defer watcher.Close()
    }

    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    signalChan := make(chan os.Signal, 1)
    signal.Notify(signalChan, os.Interrupt)

    go func() {
        <-signalChan
        log.Debug().Msg("Received interrupt, stopping script watch.")
        cancel()
        watcher.Close()
    }()

    if err := watcher.Add(config.Config.Scripting.Source); err != nil {
        log.Error().Err(err).Msg("Failed to add scripting source to watcher")
        return
    }

    go func() {
        for {
            select {
            case <-ctx.Done():
                log.Debug().Msg("Script watcher exiting gracefully.")
                return

            // watch for events
            case event := <-watcher.Events:
                if !strings.HasSuffix(event.Name, "js") {
                    log.Info().Str("file", event.Name).Msg("Ignoring file")
                    continue
                }
                switch event.Op {
                case fsnotify.Create:
                    script, err := misc.ReadScriptFile(event.Name)
                    if err != nil {
                        log.Error().Err(err).Send()
                        continue
                    }

                    index, err := createScript(provider, script.ConfigMap())
                    if err != nil {
                        log.Error().Err(err).Send()
                        continue
                    }

                    files[script.Path] = index

                case fsnotify.Write:
                    index := files[event.Name]
                    script, err := misc.ReadScriptFile(event.Name)
                    if err != nil {
                        log.Error().Err(err).Send()
                        continue
                    }

                    err = updateScript(provider, index, script.ConfigMap())
                    if err != nil {
                        log.Error().Err(err).Send()
                        continue
                    }

                case fsnotify.Rename:
                    index := files[event.Name]
                    err := deleteScript(provider, index)
                    if err != nil {
                        log.Error().Err(err).Send()
                        continue
                    }

                default:
                    // pass
                }

            case err, ok := <-watcher.Errors:
                if !ok {
                    log.Info().Msg("Watcher errors channel closed.")
                    return
                }
                log.Error().Err(err).Msg("Watcher error encountered")
            }
        }
    }()

    if err := watcher.Add(config.Config.Scripting.Source); err != nil {
        log.Error().Err(err).Send()
    }

    for _, source := range config.Config.Scripting.Sources {
        if err := watcher.Add(source); err != nil {
            log.Error().Err(err).Send()
        }
    }

    log.Info().Str("folder", config.Config.Scripting.Source).Interface("folders", config.Config.Scripting.Sources).Msg("Watching scripts against changes:")

    if block {
        <-ctx.Done()
    }
}

func watchConfigMap(ctx context.Context, provider *kubernetes.Provider) {
    clientset := provider.GetClientSet()
    configMapName := kubernetes.SELF_RESOURCES_PREFIX + kubernetes.SUFFIX_CONFIG_MAP

    for {
        select {
        case <-ctx.Done():
            log.Info().Msg("ConfigMap watcher exiting gracefully.")
            return

        default:
            watcher, err := clientset.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Watch(context.TODO(), metav1.ListOptions{
                FieldSelector: "metadata.name=" + configMapName,
            })
            if err != nil {
                log.Warn().Err(err).Msg("ConfigMap not found, retrying in 5 seconds...")
                time.Sleep(5 * time.Second)
                continue
            }

            for event := range watcher.ResultChan() {
                select {
                case <-ctx.Done():
                    log.Info().Msg("ConfigMap watcher loop exiting gracefully.")
                    watcher.Stop()
                    return

                default:
                    if event.Type == watch.Added {
                        log.Info().Msg("ConfigMap created or modified")
                        runScriptsSync(provider)
                    } else if event.Type == watch.Deleted {
                        log.Warn().Msg("ConfigMap deleted, waiting for recreation...")
                        watcher.Stop()
                        break
                    }
                }
            }

            time.Sleep(5 * time.Second)
        }
    }
}

func runScriptsSync(provider *kubernetes.Provider) {
    files := make(map[string]int64)

    scripts, err := config.Config.Scripting.GetScripts()
    if err != nil {
        log.Error().Err(err).Send()
        return
    }

    for _, script := range scripts {
        index, err := createScript(provider, script.ConfigMap())
        if err != nil {
            log.Error().Err(err).Send()
            continue
        }
        files[script.Path] = index
    }
    log.Info().Msg("Synchronized scripts with ConfigMap.")
}
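watchScripts pairs an initial sync with an fsnotify event loop keyed by file path: Create posts a new script, Write updates it by index, Rename deletes it. A self-contained sketch of that watch pattern, assuming a local ./scripts folder; everything here is illustrative rather than kubeshark API.

    package main

    import (
        "log"
        "strings"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        watcher, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer watcher.Close()

        if err := watcher.Add("./scripts"); err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case event, ok := <-watcher.Events:
                if !ok {
                    return
                }
                // Only JavaScript files are interesting, mirroring the suffix
                // check in watchScripts above.
                if !strings.HasSuffix(event.Name, ".js") {
                    continue
                }
                if event.Op&fsnotify.Write == fsnotify.Write {
                    log.Printf("modified: %s", event.Name)
                }
            case err, ok := <-watcher.Errors:
                if !ok {
                    return
                }
                log.Println("watch error:", err)
            }
        }
    }

One design note: fsnotify Ops are bit flags, so masking (as above) is more robust than the plain `switch event.Op` equality used in the diff, which can miss combined events.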
39 cmd/tap.go
@@ -2,21 +2,18 @@ package cmd

import (
"errors"
"fmt"

"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)

var tapCmd = &cobra.Command{
Use: "tap [POD REGEX]",
Short: "Capture the network traffic in your Kubernetes cluster.",
Long: "Capture the network traffic in your Kubernetes cluster.",
Short: "Capture the network traffic in your Kubernetes cluster",
RunE: func(cmd *cobra.Command, args []string) error {
tap()
return nil
@@ -44,17 +41,25 @@ func init() {
log.Debug().Err(err).Send()
}

tapCmd.Flags().StringP(configStructs.DockerRegistryLabel, "r", defaultTapConfig.Docker.Registry, "The Docker registry that's hosting the images.")
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled.")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward.")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward.")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward.")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector.")
tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces.")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit. (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them.")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes.", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS.")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries.")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode.")
tapCmd.Flags().StringP(configStructs.DockerRegistryLabel, "r", defaultTapConfig.Docker.Registry, "The Docker registry that's hosting the images")
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().StringSliceP(configStructs.ExcludedNamespacesLabel, "e", defaultTapConfig.ExcludedNamespaces, "Excluded namespaces")
tapCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().Bool(configStructs.PersistentStorageStaticLabel, defaultTapConfig.PersistentStorageStatic, "Persistent storage static provision")
tapCmd.Flags().String(configStructs.EfsFileSytemIdAndPathLabel, defaultTapConfig.EfsFileSytemIdAndPath, "EFS file system ID")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
tapCmd.Flags().Bool(configStructs.ResourceGuardEnabledLabel, defaultTapConfig.ResourceGuard.Enabled, "Enable/disable resource guard")
}
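The init function above derives every flag default from a defaults-tagged TapConfig, so the CLI flags, the config file, and --set overrides share one source of truth. A reduced sketch of that pattern with a hypothetical struct and flag names:

    package main

    import (
        "fmt"

        "github.com/creasty/defaults"
        "github.com/spf13/cobra"
    )

    type tapDefaults struct {
        StorageLimit string `default:"500Mi"`
        DryRun       bool   `default:"false"`
    }

    func main() {
        d := tapDefaults{}
        if err := defaults.Set(&d); err != nil {
            panic(err)
        }

        cmd := &cobra.Command{Use: "tap"}
        // Flag defaults come from the struct, not from literals repeated here.
        cmd.Flags().String("storage-limit", d.StorageLimit, "Override the default storage limit (per node)")
        cmd.Flags().Bool("dry-run", d.DryRun, "Preview matching pods without tapping them")

        fmt.Println(cmd.Flags().Lookup("storage-limit").DefValue) // 500Mi
    }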
Deleted file (353 lines):
@@ -1,353 +0,0 @@
package cmd

import (
    "bufio"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "os"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/client"
    "github.com/docker/go-connections/nat"
    "github.com/kubeshark/kubeshark/config"
    "github.com/kubeshark/kubeshark/docker"
    "github.com/kubeshark/kubeshark/internal/connect"
    "github.com/kubeshark/kubeshark/kubernetes"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/kubeshark/kubeshark/utils"
    "github.com/rs/zerolog/log"
    v1 "k8s.io/api/core/v1"
)

func logPullingImage(image string, reader io.ReadCloser) {
    scanner := bufio.NewScanner(reader)
    for scanner.Scan() {
        text := scanner.Text()
        var data map[string]interface{}
        if err := json.Unmarshal([]byte(text), &data); err != nil {
            log.Error().Err(err).Send()
            continue
        }

        var id string
        if val, ok := data["id"]; ok {
            id = val.(string)
        }

        var status string
        if val, ok := data["status"]; ok {
            status = val.(string)
        }

        var progress string
        if val, ok := data["progress"]; ok {
            progress = val.(string)
        }

        e := log.Info()
        if image != "" {
            e = e.Str("image", image)
        }

        if progress != "" {
            e = e.Str("progress", progress)
        }

        e.Msg(fmt.Sprintf("[%-12s] %-18s", id, status))
    }
}

func pullImages(ctx context.Context, cli *client.Client, imageFront string, imageHub string, imageWorker string) error {
    readerFront, err := cli.ImagePull(ctx, imageFront, types.ImagePullOptions{})
    if err != nil {
        return err
    }
    defer readerFront.Close()
    logPullingImage(imageFront, readerFront)

    readerHub, err := cli.ImagePull(ctx, imageHub, types.ImagePullOptions{})
    if err != nil {
        return err
    }
    defer readerHub.Close()
    logPullingImage(imageHub, readerHub)

    readerWorker, err := cli.ImagePull(ctx, imageWorker, types.ImagePullOptions{})
    if err != nil {
        return err
    }
    defer readerWorker.Close()
    logPullingImage(imageWorker, readerWorker)

    return nil
}

func cleanUpOldContainers(
    ctx context.Context,
    cli *client.Client,
    nameFront string,
    nameHub string,
    nameWorker string,
) error {
    containers, err := cli.ContainerList(ctx, types.ContainerListOptions{})
    if err != nil {
        return err
    }

    for _, container := range containers {
        f := fmt.Sprintf("/%s", nameFront)
        h := fmt.Sprintf("/%s", nameHub)
        w := fmt.Sprintf("/%s", nameWorker)
        if utils.Contains(container.Names, f) || utils.Contains(container.Names, h) || utils.Contains(container.Names, w) {
            err = cli.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true})
            if err != nil {
                return err
            }
        }
    }

    return nil
}

func createAndStartContainers(
    ctx context.Context,
    cli *client.Client,
    imageFront string,
    imageHub string,
    imageWorker string,
    tarReader io.Reader,
) (
    respFront container.ContainerCreateCreatedBody,
    respHub container.ContainerCreateCreatedBody,
    respWorker container.ContainerCreateCreatedBody,
    workerIPAddr string,
    err error,
) {
    log.Info().Msg("Creating containers...")

    nameFront := fmt.Sprintf("%s-front", misc.Program)
    nameHub := fmt.Sprintf("%s-hub", misc.Program)
    nameWorker := fmt.Sprintf("%s-worker", misc.Program)

    err = cleanUpOldContainers(ctx, cli, nameFront, nameHub, nameWorker)
    if err != nil {
        return
    }

    hostIP := "0.0.0.0"

    hostConfigFront := &container.HostConfig{
        PortBindings: nat.PortMap{
            nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
                {
                    HostIP: hostIP,
                    HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
                },
            },
        },
    }

    respFront, err = cli.ContainerCreate(ctx, &container.Config{
        Image: imageFront,
        Tty: false,
        Env: []string{
            "REACT_APP_DEFAULT_FILTER= ",
            "REACT_APP_HUB_HOST= ",
            fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
        },
    }, hostConfigFront, nil, nil, nameFront)
    if err != nil {
        return
    }

    hostConfigHub := &container.HostConfig{
        PortBindings: nat.PortMap{
            nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
                {
                    HostIP: hostIP,
                    HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
                },
            },
        },
    }

    cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
    if config.DebugMode {
        cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
    }

    respHub, err = cli.ContainerCreate(ctx, &container.Config{
        Image: imageHub,
        Cmd: cmdHub,
        Tty: false,
        ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
    }, hostConfigHub, nil, nil, nameHub)
    if err != nil {
        return
    }

    cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
    if config.DebugMode {
        cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
    }

    respWorker, err = cli.ContainerCreate(ctx, &container.Config{
        Image: imageWorker,
        Cmd: cmdWorker,
        Tty: false,
    }, nil, nil, nil, nameWorker)
    if err != nil {
        return
    }

    if err = cli.CopyToContainer(ctx, respWorker.ID, "/app/import", tarReader, types.CopyToContainerOptions{}); err != nil {
        return
    }

    log.Info().Msg("Starting containers...")

    if err = cli.ContainerStart(ctx, respFront.ID, types.ContainerStartOptions{}); err != nil {
        return
    }

    if err = cli.ContainerStart(ctx, respHub.ID, types.ContainerStartOptions{}); err != nil {
        return
    }

    if err = cli.ContainerStart(ctx, respWorker.ID, types.ContainerStartOptions{}); err != nil {
        return
    }

    var containerWorker types.ContainerJSON
    containerWorker, err = cli.ContainerInspect(ctx, respWorker.ID)
    if err != nil {
        return
    }

    workerIPAddr = containerWorker.NetworkSettings.IPAddress

    return
}

func stopAndRemoveContainers(
    ctx context.Context,
    cli *client.Client,
    respFront container.ContainerCreateCreatedBody,
    respHub container.ContainerCreateCreatedBody,
    respWorker container.ContainerCreateCreatedBody,
) (err error) {
    log.Warn().Msg("Stopping containers...")
    err = cli.ContainerStop(ctx, respFront.ID, nil)
    if err != nil {
        return
    }
    err = cli.ContainerStop(ctx, respHub.ID, nil)
    if err != nil {
        return
    }
    err = cli.ContainerStop(ctx, respWorker.ID, nil)
    if err != nil {
        return
    }

    log.Warn().Msg("Removing containers...")
    err = cli.ContainerRemove(ctx, respFront.ID, types.ContainerRemoveOptions{})
    if err != nil {
        return
    }
    err = cli.ContainerRemove(ctx, respHub.ID, types.ContainerRemoveOptions{})
    if err != nil {
        return
    }
    err = cli.ContainerRemove(ctx, respWorker.ID, types.ContainerRemoveOptions{})
    if err != nil {
        return
    }

    return
}

func pcap(tarPath string) {
    docker.SetRegistry(config.Config.Tap.Docker.Registry)
    docker.SetTag(config.Config.Tap.Docker.Tag)

    ctx := context.Background()
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        log.Error().Err(err).Send()
        return
    }
    defer cli.Close()

    imageFront := docker.GetFrontImage()
    imageHub := docker.GetHubImage()
    imageWorker := docker.GetWorkerImage()

    err = pullImages(ctx, cli, imageFront, imageHub, imageWorker)
    if err != nil {
        log.Error().Err(err).Send()
        return
    }

    tarFile, err := os.Open(tarPath)
    if err != nil {
        log.Error().Err(err).Send()
        return
    }
    defer tarFile.Close()
    tarReader := bufio.NewReader(tarFile)

    respFront, respHub, respWorker, workerIPAddr, err := createAndStartContainers(
        ctx,
        cli,
        imageFront,
        imageHub,
        imageWorker,
        tarReader,
    )
    if err != nil {
        log.Error().Err(err).Send()
        return
    }

    workerPod := &v1.Pod{
        Spec: v1.PodSpec{
            NodeName: "docker",
        },
        Status: v1.PodStatus{
            PodIP: workerIPAddr,
            Phase: v1.PodRunning,
            ContainerStatuses: []v1.ContainerStatus{
                {
                    Ready: true,
                },
            },
        },
    }

    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
    connector.PostWorkerPodToHub(workerPod)

    log.Info().
        Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
        Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))

    url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
    log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))

    if !config.Config.HeadlessMode {
        utils.OpenBrowser(url)
    }

    ctxC, cancel := context.WithCancel(context.Background())
    defer cancel()
    utils.WaitForTermination(ctxC, cancel)

    err = stopAndRemoveContainers(ctx, cli, respFront, respHub, respWorker)
    if err != nil {
        log.Error().Err(err).Send()
    }
}
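The deleted runner above drove the Docker SDK directly: pull images, clean up old containers, create, start, and finally stop and remove. For reference, a pared-down sketch of the same create/start/remove cycle for a single container, using the same SDK calls the removed file used; the image and container name are placeholders.

    package main

    import (
        "context"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
        "github.com/rs/zerolog/log"
    )

    func main() {
        ctx := context.Background()
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            log.Fatal().Err(err).Send()
        }
        defer cli.Close()

        // Create a throwaway container; the nils skip host, network, and
        // platform configuration.
        resp, err := cli.ContainerCreate(ctx, &container.Config{
            Image: "alpine",
            Cmd:   []string{"sleep", "60"},
        }, nil, nil, nil, "demo")
        if err != nil {
            log.Fatal().Err(err).Send()
        }

        if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
            log.Fatal().Err(err).Send()
        }

        // Tear down: force-remove also stops the container, mirroring the
        // cleanUpOldContainers behavior in the deleted file.
        if err := cli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true}); err != nil {
            log.Fatal().Err(err).Send()
        }
    }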
316 cmd/tapRunner.go
@@ -2,20 +2,19 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kubeshark/kubeshark/docker"
|
||||
"github.com/kubeshark/kubeshark/internal/connect"
|
||||
"github.com/kubeshark/kubeshark/kubernetes/helm"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/resources"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
|
||||
core "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
@@ -27,53 +26,49 @@ import (
|
||||
const cleanupTimeout = time.Minute
|
||||
|
||||
type tapState struct {
|
||||
startTime time.Time
|
||||
targetNamespaces []string
|
||||
selfServiceAccountExists bool
|
||||
startTime time.Time
|
||||
targetNamespaces []string
|
||||
}
|
||||
|
||||
var state tapState
|
||||
var connector *connect.Connector
|
||||
var hubPodReady bool
|
||||
var frontPodReady bool
|
||||
var proxyDone bool
|
||||
|
||||
type Readiness struct {
|
||||
Hub bool
|
||||
Front bool
|
||||
Proxy bool
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
var ready *Readiness
|
||||
|
||||
func tap() {
|
||||
ready = &Readiness{}
|
||||
state.startTime = time.Now()
|
||||
docker.SetRegistry(config.Config.Tap.Docker.Registry)
|
||||
docker.SetTag(config.Config.Tap.Docker.Tag)
|
||||
log.Info().Str("registry", docker.GetRegistry()).Str("tag", docker.GetTag()).Msg("Using Docker:")
|
||||
if config.Config.Tap.Pcap != "" {
|
||||
pcap(config.Config.Tap.Pcap)
|
||||
return
|
||||
}
|
||||
log.Info().Str("registry", config.Config.Tap.Docker.Registry).Str("tag", config.Config.Tap.Docker.Tag).Msg("Using Docker:")
|
||||
|
||||
log.Info().
|
||||
Str("limit", config.Config.Tap.StorageLimit).
|
||||
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP streams will be removed once the limit is reached.", misc.Software))
|
||||
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
|
||||
|
||||
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
|
||||
|
||||
kubernetesProvider, err := getKubernetesProviderForCli()
|
||||
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel() // cancel will be called when this function exits
|
||||
|
||||
state.targetNamespaces = getNamespaces(kubernetesProvider)
|
||||
state.targetNamespaces = kubernetesProvider.GetNamespaces()
|
||||
|
||||
if config.Config.IsNsRestrictedMode() {
|
||||
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.SelfNamespace) {
|
||||
log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, config.SelfNamespaceConfigName))
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Info().
|
||||
Bool("enabled", config.Config.Tap.Telemetry.Enabled).
|
||||
Str("notice", "Telemetry can be disabled by setting the flag: --telemetry-enabled=false").
|
||||
Msg("Telemetry")
|
||||
|
||||
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targetting pods in:")
|
||||
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targeting pods in:")
|
||||
|
||||
if err := printTargettedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
|
||||
if err := printTargetedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
|
||||
log.Error().Err(errormessage.FormatError(err)).Msg("Error listing pods!")
|
||||
}
|
||||
|
||||
@@ -82,30 +77,47 @@ func tap() {
|
||||
}
|
||||
|
||||
log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
|
||||
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil {
|
||||
var statusError *k8serrors.StatusError
|
||||
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
|
||||
log.Warn().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance", misc.Software, misc.Program, misc.Software))
|
||||
} else {
|
||||
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace)
|
||||
log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
|
||||
}
|
||||
|
||||
return
|
||||
rel, err := helm.NewHelm(
|
||||
config.Config.Tap.Release.Repo,
|
||||
config.Config.Tap.Release.Name,
|
||||
config.Config.Tap.Release.Namespace,
|
||||
).Install()
|
||||
if err != nil {
|
||||
if err.Error() != "cannot re-use a name that is still in use" {
|
||||
log.Error().Err(err).Send()
|
||||
os.Exit(1)
|
||||
}
|
||||
log.Info().Msg("Found an existing installation, skipping Helm install...")
|
||||
|
||||
updateConfig(kubernetesProvider)
|
||||
postFrontStarted(ctx, kubernetesProvider, cancel)
|
||||
} else {
|
||||
log.Info().Msgf("Installed the Helm release: %s", rel.Name)
|
||||
|
||||
go watchHubEvents(ctx, kubernetesProvider, cancel)
|
||||
go watchHubPod(ctx, kubernetesProvider, cancel)
|
||||
go watchFrontPod(ctx, kubernetesProvider, cancel)
|
||||
}
|
||||
|
||||
defer finishTapExecution(kubernetesProvider)
|
||||
|
||||
go watchHubEvents(ctx, kubernetesProvider, cancel)
|
||||
go watchHubPod(ctx, kubernetesProvider, cancel)
|
||||
go watchFrontPod(ctx, kubernetesProvider, cancel)
|
||||
|
||||
// block until exit signal or error
|
||||
utils.WaitForTermination(ctx, cancel)
|
||||
|
||||
if !config.Config.Tap.Ingress.Enabled {
|
||||
printProxyCommandSuggestion()
|
||||
}
|
||||
}
|
||||
|
||||
func printProxyCommandSuggestion() {
|
||||
log.Warn().
|
||||
Str("command", fmt.Sprintf("%s proxy", misc.Program)).
|
||||
Msg(fmt.Sprintf(utils.Yellow, "To re-establish a proxy/port-forward, run:"))
|
||||
}
|
||||
|
||||
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
|
||||
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace)
|
||||
finishSelfExecution(kubernetesProvider)
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -113,69 +125,20 @@ This function is a bit problematic as it might be detached from the actual pods
|
||||
The alternative would be to wait for Hub to be ready and then query it for the pods it listens to, this has
|
||||
the arguably worse drawback of taking a relatively very long time before the user sees which pods are targeted, if any.
|
||||
*/
|
||||
func printTargettedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
|
||||
func printTargetedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
|
||||
if matchingPods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, config.Config.Tap.PodRegex(), namespaces); err != nil {
|
||||
return err
|
||||
} else {
|
||||
if len(matchingPods) == 0 {
|
||||
printNoPodsFoundSuggestion(namespaces)
|
||||
}
|
||||
for _, targettedPod := range matchingPods {
|
||||
log.Info().Msg(fmt.Sprintf("New pod: %s", fmt.Sprintf(utils.Green, targettedPod.Name)))
|
||||
for _, targetedPod := range matchingPods {
|
||||
log.Info().Msg(fmt.Sprintf("Targeted pod: %s", fmt.Sprintf(utils.Green, targetedPod.Name)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func startWorkerSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error {
|
||||
workerSyncer, err := kubernetes.CreateAndStartWorkerSyncer(ctx, provider, kubernetes.WorkerSyncerConfig{
|
||||
TargetNamespaces: targetNamespaces,
|
||||
PodFilterRegex: *config.Config.Tap.PodRegex(),
|
||||
SelfNamespace: config.Config.SelfNamespace,
|
||||
WorkerResources: config.Config.Tap.Resources.Worker,
|
||||
ImagePullPolicy: config.Config.ImagePullPolicy(),
|
||||
SelfServiceAccountExists: state.selfServiceAccountExists,
|
||||
ServiceMesh: config.Config.Tap.ServiceMesh,
|
||||
Tls: config.Config.Tap.Tls,
|
||||
Debug: config.Config.Tap.Debug,
|
||||
}, startTime)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case syncerErr, ok := <-workerSyncer.ErrorOut:
|
||||
if !ok {
|
||||
log.Debug().Msg("workerSyncer err channel closed, ending listener loop")
|
||||
return
|
||||
}
|
||||
log.Error().Msg(getK8sTapManagerErrorText(syncerErr))
|
||||
cancel()
|
||||
case _, ok := <-workerSyncer.TapPodChangesOut:
|
||||
if !ok {
|
||||
log.Debug().Msg("workerSyncer pod changes channel closed, ending listener loop")
|
||||
return
|
||||
}
|
||||
go connector.PostTargettedPodsToHub(workerSyncer.CurrentlyTargettedPods)
|
||||
case pod, ok := <-workerSyncer.WorkerPodsChanges:
|
||||
if !ok {
|
||||
log.Debug().Msg("workerSyncer worker status changed channel closed, ending listener loop")
|
||||
return
|
||||
}
|
||||
go connector.PostWorkerPodToHub(pod)
|
||||
case <-ctx.Done():
|
||||
log.Debug().Msg("workerSyncer event listener loop exiting due to context done")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func printNoPodsFoundSuggestion(targetNamespaces []string) {
|
||||
var suggestionStr string
|
||||
if !utils.Contains(targetNamespaces, kubernetes.K8sAllNamespaces) {
|
||||
@@ -184,23 +147,10 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) {
|
||||
log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, %s will automatically target matching pods if any are created later%s", misc.Software, suggestionStr))
|
||||
}
|
||||
|
||||
func getK8sTapManagerErrorText(err kubernetes.K8sTapManagerError) string {
|
||||
switch err.TapManagerReason {
|
||||
case kubernetes.TapManagerPodListError:
|
||||
return fmt.Sprintf("Failed to update currently targetted pods: %v", err.OriginalError)
|
||||
case kubernetes.TapManagerPodWatchError:
|
||||
return fmt.Sprintf("Error occured in K8s pod watch: %v", err.OriginalError)
|
||||
case kubernetes.TapManagerWorkerUpdateError:
|
||||
return fmt.Sprintf("Error updating worker: %v", err.OriginalError)
|
||||
default:
|
||||
return fmt.Sprintf("Unknown error occured in K8s tap manager: %v", err.OriginalError)
|
||||
}
|
||||
}
|
||||
|
||||
func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
|
||||
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.SelfNamespace}, podWatchHelper)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
|
||||
isPodReady := false
|
||||
|
||||
timeAfter := time.After(120 * time.Second)
|
||||
@@ -214,9 +164,9 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
|
||||
|
||||
switch wEvent.Type {
|
||||
case kubernetes.EventAdded:
|
||||
log.Info().Str("pod", kubernetes.HubPodName).Msg("Added pod.")
|
||||
log.Info().Str("pod", kubernetes.HubPodName).Msg("Added:")
|
||||
case kubernetes.EventDeleted:
|
||||
log.Info().Str("pod", kubernetes.HubPodName).Msg("Removed pod.")
|
||||
log.Info().Str("pod", kubernetes.HubPodName).Msg("Removed:")
|
||||
cancel()
|
||||
return
|
||||
case kubernetes.EventModified:
|
||||
@@ -235,12 +185,23 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
|
||||
|
||||
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
|
||||
isPodReady = true
|
||||
hubPodReady = true
|
||||
postHubStarted(ctx, kubernetesProvider, cancel)
|
||||
|
||||
ready.Lock()
|
||||
ready.Hub = true
|
||||
ready.Unlock()
|
||||
log.Info().Str("pod", kubernetes.HubPodName).Msg("Ready.")
|
||||
}
|
||||
|
||||
ready.Lock()
|
||||
proxyDone := ready.Proxy
|
||||
hubPodReady := ready.Hub
|
||||
frontPodReady := ready.Front
|
||||
ready.Unlock()
|
||||
|
||||
if !proxyDone && hubPodReady && frontPodReady {
|
||||
proxyDone = true
|
||||
ready.Lock()
|
||||
ready.Proxy = true
|
||||
ready.Unlock()
|
||||
postFrontStarted(ctx, kubernetesProvider, cancel)
|
||||
}
|
||||
case kubernetes.EventBookmark:
|
||||
@@ -256,7 +217,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
|
||||
|
||||
log.Error().
|
||||
Str("pod", kubernetes.HubPodName).
|
||||
Str("namespace", config.Config.SelfNamespace).
|
||||
Str("namespace", config.Config.Tap.Release.Namespace).
|
||||
Err(err).
|
||||
Msg("Failed creating pod.")
|
||||
cancel()
|
||||
@@ -278,9 +239,9 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
|
||||
}
|
||||
|
||||
func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.FrontPodName))
|
||||
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.SelfNamespace}, podWatchHelper)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
|
||||
isPodReady := false
|
||||
|
||||
timeAfter := time.After(120 * time.Second)
|
||||
@@ -294,9 +255,9 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
|
||||
|
||||
switch wEvent.Type {
|
||||
case kubernetes.EventAdded:
|
||||
log.Info().Str("pod", kubernetes.FrontPodName).Msg("Added pod.")
|
||||
log.Info().Str("pod", kubernetes.FrontPodName).Msg("Added:")
|
||||
case kubernetes.EventDeleted:
|
||||
log.Info().Str("pod", kubernetes.FrontPodName).Msg("Removed pod.")
|
||||
log.Info().Str("pod", kubernetes.FrontPodName).Msg("Removed:")
|
||||
cancel()
|
||||
return
|
||||
case kubernetes.EventModified:
|
||||
@@ -315,11 +276,22 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
|
||||
|
||||
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
|
||||
isPodReady = true
|
||||
frontPodReady = true
|
||||
ready.Lock()
|
||||
ready.Front = true
|
||||
ready.Unlock()
|
||||
log.Info().Str("pod", kubernetes.FrontPodName).Msg("Ready.")
|
||||
}
|
||||
|
||||
ready.Lock()
|
||||
proxyDone := ready.Proxy
|
||||
hubPodReady := ready.Hub
|
||||
frontPodReady := ready.Front
|
||||
ready.Unlock()
|
||||
|
||||
if !proxyDone && hubPodReady && frontPodReady {
|
||||
proxyDone = true
|
||||
ready.Lock()
|
||||
ready.Proxy = true
|
||||
ready.Unlock()
|
||||
postFrontStarted(ctx, kubernetesProvider, cancel)
|
||||
}
|
||||
case kubernetes.EventBookmark:
|
||||
@@ -335,10 +307,9 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
|
||||
|
||||
log.Error().
|
||||
Str("pod", kubernetes.FrontPodName).
|
||||
Str("namespace", config.Config.SelfNamespace).
|
||||
Str("namespace", config.Config.Tap.Release.Namespace).
|
||||
Err(err).
|
||||
Msg("Failed creating pod.")
|
||||
cancel()
|
||||
|
||||
case <-timeAfter:
|
||||
if !isPodReady {
|
||||
@@ -359,7 +330,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
|
||||
func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
|
||||
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.SelfNamespace}, eventWatchHelper)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.Release.Namespace}, eventWatchHelper)
|
||||
for {
|
||||
select {
|
||||
case wEvent, ok := <-eventChan:
|
||||
@@ -425,39 +396,74 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
|
||||
}
|
||||
}
|
||||
|
||||
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.HubServiceName, configStructs.ProxyFrontPortLabel, config.Config.Tap.Proxy.Hub.SrcPort, config.Config.Tap.Proxy.Hub.DstPort, "/echo")
|
||||
|
||||
if err := startWorkerSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
|
||||
log.Error().Err(errormessage.FormatError(err)).Msg("Error starting worker syncer")
|
||||
cancel()
|
||||
}
|
||||
|
||||
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
|
||||
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
|
||||
}
|
||||
|
||||
func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.FrontServiceName, configStructs.ProxyHubPortLabel, config.Config.Tap.Proxy.Front.SrcPort, config.Config.Tap.Proxy.Front.DstPort, "")
|
||||
startProxyReportErrorIfAny(
|
||||
kubernetesProvider,
|
||||
ctx,
|
||||
kubernetes.FrontServiceName,
|
||||
kubernetes.FrontPodName,
|
||||
configStructs.ProxyFrontPortLabel,
|
||||
config.Config.Tap.Proxy.Front.Port,
|
||||
configStructs.ContainerPort,
|
||||
"",
|
||||
)
|
||||
|
||||
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
|
||||
var url string
|
||||
if config.Config.Tap.Ingress.Enabled {
|
||||
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
|
||||
} else {
|
||||
url = kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
|
||||
}
|
||||
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
|
||||
|
||||
if !config.Config.HeadlessMode {
|
||||
utils.OpenBrowser(url)
|
||||
}
|
||||
}
|
||||
|
||||
func getNamespaces(kubernetesProvider *kubernetes.Provider) []string {
|
||||
if config.Config.Tap.AllNamespaces {
|
||||
return []string{kubernetes.K8sAllNamespaces}
|
||||
} else if len(config.Config.Tap.Namespaces) > 0 {
|
||||
return utils.Unique(config.Config.Tap.Namespaces)
|
||||
} else {
|
||||
currentNamespace, err := kubernetesProvider.CurrentNamespace()
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("Error getting current namespace!")
|
||||
}
|
||||
return []string{currentNamespace}
|
||||
for !ready.Hub {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
|
||||
if (config.Config.Scripting.Source != "" || len(config.Config.Scripting.Sources) > 0) && config.Config.Scripting.WatchScripts {
|
||||
watchScripts(ctx, kubernetesProvider, false)
|
||||
}
|
||||
|
||||
if config.Config.Scripting.Console {
|
||||
go runConsoleWithoutProxy()
|
||||
}
|
||||
}
|
||||
|
||||
func updateConfig(kubernetesProvider *kubernetes.Provider) {
|
||||
_, _ = kubernetes.SetSecret(kubernetesProvider, kubernetes.SECRET_LICENSE, config.Config.License)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_POD_REGEX, config.Config.Tap.PodRegexStr)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_NAMESPACES, strings.Join(config.Config.Tap.Namespaces, ","))
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_EXCLUDED_NAMESPACES, strings.Join(config.Config.Tap.ExcludedNamespaces, ","))
|
||||
|
||||
data, err := json.Marshal(config.Config.Scripting.Env)
|
||||
if err != nil {
|
||||
log.Error().Str("config", kubernetes.CONFIG_SCRIPTING_ENV).Err(err).Send()
|
||||
return
|
||||
} else {
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_SCRIPTING_ENV, string(data))
|
||||
}
|
||||
|
||||
ingressEnabled := ""
|
||||
if config.Config.Tap.Ingress.Enabled {
|
||||
ingressEnabled = "true"
|
||||
}
|
||||
|
||||
authEnabled := ""
|
||||
if config.Config.Tap.Auth.Enabled {
|
||||
authEnabled = "true"
|
||||
}
|
||||
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_INGRESS_ENABLED, ingressEnabled)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_INGRESS_HOST, config.Config.Tap.Ingress.Host)
|
||||
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_PROXY_FRONT_PORT, fmt.Sprint(config.Config.Tap.Proxy.Front.Port))
|
||||
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_ENABLED, authEnabled)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_TYPE, config.Config.Tap.Auth.Type)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_SAML_IDP_METADATA_URL, config.Config.Tap.Auth.Saml.IdpMetadataUrl)
|
||||
}
|
||||
|
||||
@@ -5,18 +5,21 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/goccy/go-yaml"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/misc/version"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -28,16 +31,17 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
Config ConfigStruct
|
||||
DebugMode bool
|
||||
cmdName string
|
||||
Config ConfigStruct
|
||||
DebugMode bool
|
||||
cmdName string
|
||||
ConfigFilePath string
|
||||
)
|
||||
|
||||
func InitConfig(cmd *cobra.Command) error {
|
||||
var err error
|
||||
DebugMode, err = cmd.Flags().GetBool(DebugFlag)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg(fmt.Sprintf("Can't recieve '%s' flag", DebugFlag))
|
||||
log.Error().Err(err).Msg(fmt.Sprintf("Can't receive '%s' flag", DebugFlag))
|
||||
}
|
||||
|
||||
if DebugMode {
|
||||
@@ -48,21 +52,41 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
go version.CheckNewerVersion()
|
||||
if !utils.Contains([]string{
|
||||
"console",
|
||||
"pro",
|
||||
"manifests",
|
||||
"license",
|
||||
}, cmd.Use) {
|
||||
go version.CheckNewerVersion()
|
||||
}
|
||||
|
||||
Config = CreateDefaultConfig()
|
||||
Config.Tap.Debug = DebugMode
|
||||
cmdName = cmd.Name()
|
||||
if utils.Contains([]string{
|
||||
"clean",
|
||||
"console",
|
||||
"pro",
|
||||
"proxy",
|
||||
"scripts",
|
||||
"pprof",
|
||||
}, cmdName) {
|
||||
cmdName = "tap"
|
||||
}
|
||||
|
||||
if err := defaults.Set(&Config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configFilePathFlag := cmd.Flags().Lookup(ConfigFilePathCommandName)
|
||||
configFilePath := configFilePathFlag.Value.String()
|
||||
if err := loadConfigFile(configFilePath, &Config); err != nil {
|
||||
if configFilePathFlag.Changed || !os.IsNotExist(err) {
|
||||
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
|
||||
if err := loadConfigFile(&Config, utils.Contains([]string{
|
||||
"manifests",
|
||||
"license",
|
||||
}, cmd.Use)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("invalid config, %w\n"+
|
||||
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, configFilePath)
|
||||
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, ConfigFilePath)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,29 +116,51 @@ func WriteConfig(config *ConfigStruct) error {
|
||||
}
|
||||
|
||||
data := []byte(template)
|
||||
if err := os.WriteFile(Config.ConfigFilePath, data, 0644); err != nil {
|
||||
|
||||
if _, err := os.Stat(ConfigFilePath); os.IsNotExist(err) {
|
||||
err = os.MkdirAll(filepath.Dir(ConfigFilePath), 0700)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating directories, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.WriteFile(ConfigFilePath, data, 0644); err != nil {
|
||||
return fmt.Errorf("failed writing config, err: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfigFile(configFilePath string, config *ConfigStruct) error {
|
||||
reader, openErr := os.Open(configFilePath)
|
||||
if openErr != nil {
|
||||
return openErr
|
||||
func loadConfigFile(config *ConfigStruct, silent bool) error {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf, readErr := io.ReadAll(reader)
|
||||
if readErr != nil {
|
||||
return readErr
|
||||
cwdConfig := filepath.Join(cwd, fmt.Sprintf("%s.yaml", misc.Program))
|
||||
reader, err := os.Open(cwdConfig)
|
||||
if err != nil {
|
||||
reader, err = os.Open(ConfigFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
ConfigFilePath = cwdConfig
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
buf, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := yaml.Unmarshal(buf, config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info().Str("path", configFilePath).Msg("Found config file!")
|
||||
if !silent {
|
||||
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -123,9 +169,7 @@ func initFlag(f *pflag.Flag) {
|
||||
configElemValue := reflect.ValueOf(&Config).Elem()
|
||||
|
||||
var flagPath []string
|
||||
if !utils.Contains([]string{ConfigFilePathCommandName}, f.Name) {
|
||||
flagPath = append(flagPath, cmdName)
|
||||
}
|
||||
flagPath = append(flagPath, cmdName)
|
||||
|
||||
flagPath = append(flagPath, strings.Split(f.Name, "-")...)
|
||||
|
||||
@@ -180,7 +224,7 @@ func mergeSetFlag(configElemValue reflect.Value, setValues []string) error {
|
||||
}
|
||||
|
||||
if len(setErrors) > 0 {
|
||||
return fmt.Errorf(strings.Join(setErrors, "\n"))
|
||||
return errors.New(strings.Join(setErrors, "\n"))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -2,51 +2,129 @@ package config

import (
	"os"
	"path"
	"path/filepath"

	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/misc"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/util/homedir"
)

const (
	SelfNamespaceConfigName   = "selfnamespace"
	ConfigFilePathCommandName = "configpath"
	KubeConfigPathConfigName  = "kube-configpath"
	KubeConfigPathConfigName  = "kube-configPath"
)

func CreateDefaultConfig() ConfigStruct {
	return ConfigStruct{}
	return ConfigStruct{
		Tap: configStructs.TapConfig{
			NodeSelectorTerms: []v1.NodeSelectorTerm{
				{
					MatchExpressions: []v1.NodeSelectorRequirement{
						{
							Key:      "kubernetes.io/os",
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{"linux"},
						},
					},
				},
			},
			Capabilities: configStructs.CapabilitiesConfig{
				NetworkCapture: []string{
					// NET_RAW is required to listen to the network traffic
					"NET_RAW",
					// NET_ADMIN is required to listen to the network traffic
					"NET_ADMIN",
				},
				ServiceMeshCapture: []string{
					// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
					"SYS_ADMIN",
					// SYS_PTRACE is required to set netns to other process + to open libssl.so of other process
					"SYS_PTRACE",
					// DAC_OVERRIDE is required to read /proc/PID/environ
					"DAC_OVERRIDE",
				},
				EBPFCapture: []string{
					// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
					"SYS_ADMIN",
					// SYS_PTRACE is required to set netns to other process + to open libssl.so of other process
					"SYS_PTRACE",
					// SYS_RESOURCE is required to change rlimits for eBPF
					"SYS_RESOURCE",
					// IPC_LOCK is required for eBPF perf buffer allocations once the buffer size passes a threshold:
					// https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82
					"IPC_LOCK",
				},
			},
			Auth: configStructs.AuthConfig{
				Saml: configStructs.SamlConfig{
					RoleAttribute: "role",
					Roles: map[string]configStructs.Role{
						"admin": {
							Filter:                  "",
							CanDownloadPCAP:         true,
							CanUseScripting:         true,
							CanUpdateTargetedPods:   true,
							CanStopTrafficCapturing: true,
							ShowAdminConsoleLink:    true,
						},
					},
				},
			},
			EnabledDissectors: []string{
				"amqp",
				"dns",
				"http",
				"icmp",
				"kafka",
				"redis",
				"sctp",
				"syscall",
				// "tcp",
				// "udp",
				"ws",
				"tls",
			},
		},
	}
}

type KubeConfig struct {
	ConfigPathStr string `yaml:"configpath"`
	Context       string `yaml:"context"`
	ConfigPathStr string `yaml:"configPath" json:"configPath"`
	Context       string `yaml:"context" json:"context"`
}

type ManifestsConfig struct {
	Dump bool `yaml:"dump" json:"dump"`
}

type ConfigStruct struct {
	Tap            configStructs.TapConfig    `yaml:"tap"`
	Logs           configStructs.LogsConfig   `yaml:"logs"`
	Config         configStructs.ConfigConfig `yaml:"config,omitempty"`
	Kube           KubeConfig                 `yaml:"kube"`
	SelfNamespace  string                     `yaml:"selfnamespace" default:"kubeshark"`
	DumpLogs       bool                       `yaml:"dumplogs" default:"false"`
	ConfigFilePath string                     `yaml:"configpath,omitempty" readonly:""`
	HeadlessMode   bool                       `yaml:"headless" default:"false"`
}

func (config *ConfigStruct) SetDefaults() {
	config.ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
	Tap                       configStructs.TapConfig       `yaml:"tap" json:"tap"`
	Logs                      configStructs.LogsConfig      `yaml:"logs" json:"logs"`
	Config                    configStructs.ConfigConfig    `yaml:"config,omitempty" json:"config,omitempty"`
	PcapDump                  configStructs.PcapDumpConfig  `yaml:"pcapdump" json:"pcapdump"`
	Kube                      KubeConfig                    `yaml:"kube" json:"kube"`
	DumpLogs                  bool                          `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
	HeadlessMode              bool                          `yaml:"headless" json:"headless" default:"false"`
	License                   string                        `yaml:"license" json:"license" default:""`
	CloudLicenseEnabled       bool                          `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"`
	SupportChatEnabled        bool                          `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"true"`
	InternetConnectivity      bool                          `yaml:"internetConnectivity" json:"internetConnectivity" default:"true"`
	DissectorsUpdatingEnabled bool                          `yaml:"dissectorsUpdatingEnabled" json:"dissectorsUpdatingEnabled" default:"true"`
	Scripting                 configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
	Manifests                 ManifestsConfig               `yaml:"manifests,omitempty" json:"manifests,omitempty"`
	Timezone                  string                        `yaml:"timezone" json:"timezone"`
}

func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
	return v1.PullPolicy(config.Tap.Docker.ImagePullPolicy)
}

func (config *ConfigStruct) IsNsRestrictedMode() bool {
	return config.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace
func (config *ConfigStruct) ImagePullSecrets() []v1.LocalObjectReference {
	var ref []v1.LocalObjectReference
	for _, name := range config.Tap.Docker.ImagePullSecrets {
		ref = append(ref, v1.LocalObjectReference{Name: name})
	}

	return ref
}

func (config *ConfigStruct) KubeConfigPath() string {
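For reference, the new top-level fields added to `ConfigStruct` map to YAML keys like the following. This is a minimal sketch derived from the `yaml:"..."` tags above; the values shown are the declared defaults:

```yaml
license: ""
cloudLicenseEnabled: true
supportChatEnabled: true
internetConnectivity: true
dissectorsUpdatingEnabled: true
dumpLogs: false
headless: false
timezone: ""
```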
@@ -5,5 +5,5 @@ const (
)

type ConfigConfig struct {
	Regenerate bool `yaml:"regenerate,omitempty" default:"false" readonly:""`
	Regenerate bool `yaml:"regenerate,omitempty" json:"regenerate,omitempty" default:"false" readonly:""`
}
@@ -10,10 +10,12 @@ import (

const (
	FileLogsName = "file"
	GrepLogsName = "grep"
)

type LogsConfig struct {
	FileStr string `yaml:"file"`
	FileStr string `yaml:"file" json:"file"`
	Grep    string `yaml:"grep" json:"grep"`
}

func (config *LogsConfig) Validate() error {
config/configStructs/scriptingConfig.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package configStructs

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"

	"github.com/kubeshark/kubeshark/misc"
	"github.com/rs/zerolog/log"
)

type ScriptingConfig struct {
	Env          map[string]interface{} `yaml:"env" json:"env" default:"{}"`
	Source       string                 `yaml:"source" json:"source" default:""`
	Sources      []string               `yaml:"sources" json:"sources" default:"[]"`
	WatchScripts bool                   `yaml:"watchScripts" json:"watchScripts" default:"true"`
	Active       []string               `yaml:"active" json:"active" default:"[]"`
	Console      bool                   `yaml:"console" json:"console" default:"true"`
}

func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
	// Check if both Source and Sources are empty
	if config.Source == "" && len(config.Sources) == 0 {
		return nil, nil
	}

	var allFiles []struct {
		Source string
		File   fs.DirEntry
	}

	// Handle single Source directory
	if config.Source != "" {
		files, err := os.ReadDir(config.Source)
		if err != nil {
			return nil, fmt.Errorf("failed to read directory %s: %v", config.Source, err)
		}
		for _, file := range files {
			allFiles = append(allFiles, struct {
				Source string
				File   fs.DirEntry
			}{Source: config.Source, File: file})
		}
	}

	// Handle multiple Sources directories
	if len(config.Sources) > 0 {
		for _, source := range config.Sources {
			files, err := os.ReadDir(source)
			if err != nil {
				return nil, fmt.Errorf("failed to read directory %s: %v", source, err)
			}
			for _, file := range files {
				allFiles = append(allFiles, struct {
					Source string
					File   fs.DirEntry
				}{Source: source, File: file})
			}
		}
	}

	// Iterate over all collected files
	for _, f := range allFiles {
		if f.File.IsDir() {
			continue
		}

		// Construct the full path based on the relevant source directory
		path := filepath.Join(f.Source, f.File.Name())
		if !strings.HasSuffix(f.File.Name(), ".js") { // Use file name suffix for skipping non-JS files
			log.Info().Str("path", path).Msg("Skipping non-JS file")
			continue
		}

		// Read the script file
		var script *misc.Script
		script, err = misc.ReadScriptFile(path)
		if err != nil {
			return nil, fmt.Errorf("failed to read script file %s: %v", path, err)
		}

		// Append the valid script to the scripts slice
		scripts = append(scripts, script)

		log.Debug().Str("path", path).Msg("Found script:")
	}

	// Return the collected scripts and nil error if successful
	return scripts, nil
}
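Given the struct tags above, a corresponding `scripting` section in the Kubeshark config might look like the following. This is a minimal sketch assuming the YAML keys map one-to-one to the `yaml:"..."` tags; the directory paths are hypothetical:

```yaml
scripting:
  env: {}
  source: /home/user/kubeshark-scripts   # single directory scanned for *.js files
  sources:                               # additional directories, scanned the same way
    - /home/user/more-scripts
  watchScripts: true                     # reload scripts when files change
  console: true
```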
@@ -4,74 +4,248 @@ import (
	"fmt"
	"regexp"

	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
	v1 "k8s.io/api/core/v1"
	networking "k8s.io/api/networking/v1"
)

const (
	DockerRegistryLabel = "docker-registry"
	DockerTagLabel      = "docker-tag"
	ProxyFrontPortLabel = "proxy-front-port"
	ProxyHubPortLabel   = "proxy-hub-port"
	ProxyHostLabel      = "proxy-host"
	NamespacesLabel     = "namespaces"
	AllNamespacesLabel  = "allnamespaces"
	StorageLimitLabel   = "storagelimit"
	DryRunLabel         = "dryrun"
	PcapLabel           = "pcap"
	ServiceMeshLabel    = "servicemesh"
	TlsLabel            = "tls"
	DebugLabel          = "debug"
	DockerRegistryLabel          = "docker-registry"
	DockerTagLabel               = "docker-tag"
	DockerImagePullPolicy        = "docker-imagePullPolicy"
	DockerImagePullSecrets       = "docker-imagePullSecrets"
	ProxyFrontPortLabel          = "proxy-front-port"
	ProxyHubPortLabel            = "proxy-hub-port"
	ProxyHostLabel               = "proxy-host"
	NamespacesLabel              = "namespaces"
	ExcludedNamespacesLabel      = "excludedNamespaces"
	ReleaseNamespaceLabel        = "release-namespace"
	PersistentStorageLabel       = "persistentStorage"
	PersistentStorageStaticLabel = "persistentStorageStatic"
	EfsFileSytemIdAndPathLabel   = "efsFileSytemIdAndPath"
	StorageLimitLabel            = "storageLimit"
	StorageClassLabel            = "storageClass"
	DryRunLabel                  = "dryRun"
	PcapLabel                    = "pcap"
	ServiceMeshLabel             = "serviceMesh"
	TlsLabel                     = "tls"
	IgnoreTaintedLabel           = "ignoreTainted"
	IngressEnabledLabel          = "ingress-enabled"
	TelemetryEnabledLabel        = "telemetry-enabled"
	ResourceGuardEnabledLabel    = "resource-guard-enabled"
	PprofPortLabel               = "pprof-port"
	PprofViewLabel               = "pprof-view"
	DebugLabel                   = "debug"
	ContainerPort                = 8080
	ContainerPortStr             = "8080"
	PcapDest                     = "dest"
	PcapMaxSize                  = "maxSize"
	PcapMaxTime                  = "maxTime"
	PcapTimeInterval             = "timeInterval"
	PcapKubeconfig               = "kubeconfig"
	PcapDumpEnabled              = "enabled"
)

type ResourceLimitsHub struct {
	CPU    string `yaml:"cpu" json:"cpu" default:"0"`
	Memory string `yaml:"memory" json:"memory" default:"5Gi"`
}

type ResourceLimitsWorker struct {
	CPU    string `yaml:"cpu" json:"cpu" default:"0"`
	Memory string `yaml:"memory" json:"memory" default:"3Gi"`
}

type ResourceRequests struct {
	CPU    string `yaml:"cpu" json:"cpu" default:"50m"`
	Memory string `yaml:"memory" json:"memory" default:"50Mi"`
}

type ResourceRequirementsHub struct {
	Limits   ResourceLimitsHub `yaml:"limits" json:"limits"`
	Requests ResourceRequests  `yaml:"requests" json:"requests"`
}

type ResourceRequirementsWorker struct {
	Limits   ResourceLimitsHub `yaml:"limits" json:"limits"`
	Requests ResourceRequests  `yaml:"requests" json:"requests"`
}

type WorkerConfig struct {
	SrcPort uint16 `yaml:"src-port" default:"8897"`
	DstPort uint16 `yaml:"dst-port" default:"8897"`
	SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"30001"`
}

type HubConfig struct {
	SrcPort uint16 `yaml:"src-port" default:"8898"`
	DstPort uint16 `yaml:"dst-port" default:"8898"`
	SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"8898"`
}

type FrontConfig struct {
	SrcPort uint16 `yaml:"src-port" default:"8899"`
	DstPort uint16 `yaml:"dst-port" default:"80"`
	Port    uint16 `yaml:"port" json:"port" default:"8899"`
}

type ProxyConfig struct {
	Worker WorkerConfig `yaml:"worker"`
	Hub    HubConfig    `yaml:"hub"`
	Front  FrontConfig  `yaml:"front"`
	Host   string       `yaml:"host" default:"127.0.0.1"`
	Worker WorkerConfig `yaml:"worker" json:"worker"`
	Hub    HubConfig    `yaml:"hub" json:"hub"`
	Front  FrontConfig  `yaml:"front" json:"front"`
	Host   string       `yaml:"host" json:"host" default:"127.0.0.1"`
}

type OverrideImageConfig struct {
	Worker string `yaml:"worker" json:"worker"`
	Hub    string `yaml:"hub" json:"hub"`
	Front  string `yaml:"front" json:"front"`
}

type OverrideTagConfig struct {
	Worker string `yaml:"worker" json:"worker"`
	Hub    string `yaml:"hub" json:"hub"`
	Front  string `yaml:"front" json:"front"`
}

type DockerConfig struct {
	Registry        string `yaml:"registry" default:"docker.io/kubeshark"`
	Tag             string `yaml:"tag" default:"latest"`
	ImagePullPolicy string `yaml:"imagepullpolicy" default:"Always"`
	Registry         string              `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
	Tag              string              `yaml:"tag" json:"tag" default:""`
	TagLocked        bool                `yaml:"tagLocked" json:"tagLocked" default:"true"`
	ImagePullPolicy  string              `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
	ImagePullSecrets []string            `yaml:"imagePullSecrets" json:"imagePullSecrets"`
	OverrideImage    OverrideImageConfig `yaml:"overrideImage" json:"overrideImage"`
	OverrideTag      OverrideTagConfig   `yaml:"overrideTag" json:"overrideTag"`
}

type ResourcesConfig struct {
	Worker kubernetes.Resources `yaml:"worker"`
	Hub    kubernetes.Resources `yaml:"hub"`
	Hub     ResourceRequirementsHub    `yaml:"hub" json:"hub"`
	Sniffer ResourceRequirementsWorker `yaml:"sniffer" json:"sniffer"`
	Tracer  ResourceRequirementsWorker `yaml:"tracer" json:"tracer"`
}

type Role struct {
	Filter                  string `yaml:"filter" json:"filter" default:""`
	CanDownloadPCAP         bool   `yaml:"canDownloadPCAP" json:"canDownloadPCAP" default:"false"`
	CanUseScripting         bool   `yaml:"canUseScripting" json:"canUseScripting" default:"false"`
	CanUpdateTargetedPods   bool   `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
	CanStopTrafficCapturing bool   `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"`
	ShowAdminConsoleLink    bool   `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
}

type SamlConfig struct {
	IdpMetadataUrl string          `yaml:"idpMetadataUrl" json:"idpMetadataUrl"`
	X509crt        string          `yaml:"x509crt" json:"x509crt"`
	X509key        string          `yaml:"x509key" json:"x509key"`
	RoleAttribute  string          `yaml:"roleAttribute" json:"roleAttribute"`
	Roles          map[string]Role `yaml:"roles" json:"roles"`
}

type AuthConfig struct {
	Enabled bool       `yaml:"enabled" json:"enabled" default:"false"`
	Type    string     `yaml:"type" json:"type" default:"saml"`
	Saml    SamlConfig `yaml:"saml" json:"saml"`
}

type IngressConfig struct {
	Enabled     bool                    `yaml:"enabled" json:"enabled" default:"false"`
	ClassName   string                  `yaml:"className" json:"className" default:""`
	Host        string                  `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
	TLS         []networking.IngressTLS `yaml:"tls" json:"tls" default:"[]"`
	Annotations map[string]string       `yaml:"annotations" json:"annotations" default:"{}"`
}

type ReleaseConfig struct {
	Repo      string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.co"`
	Name      string `yaml:"name" json:"name" default:"kubeshark"`
	Namespace string `yaml:"namespace" json:"namespace" default:"default"`
}

type TelemetryConfig struct {
	Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
}

type ResourceGuardConfig struct {
	Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
}

type SentryConfig struct {
	Enabled     bool   `yaml:"enabled" json:"enabled" default:"false"`
	Environment string `yaml:"environment" json:"environment" default:"production"`
}

type CapabilitiesConfig struct {
	NetworkCapture     []string `yaml:"networkCapture" json:"networkCapture" default:"[]"`
	ServiceMeshCapture []string `yaml:"serviceMeshCapture" json:"serviceMeshCapture" default:"[]"`
	EBPFCapture        []string `yaml:"ebpfCapture" json:"ebpfCapture" default:"[]"`
}

type MetricsConfig struct {
	Port uint16 `yaml:"port" json:"port" default:"49100"`
}

type PprofConfig struct {
	Enabled bool   `yaml:"enabled" json:"enabled" default:"false"`
	Port    uint16 `yaml:"port" json:"port" default:"8000"`
	View    string `yaml:"view" json:"view" default:"flamegraph"`
}

type MiscConfig struct {
	JsonTTL                     string `yaml:"jsonTTL" json:"jsonTTL" default:"5m"`
	PcapTTL                     string `yaml:"pcapTTL" json:"pcapTTL" default:"10s"`
	PcapErrorTTL                string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"60s"`
	TrafficSampleRate           int    `yaml:"trafficSampleRate" json:"trafficSampleRate" default:"100"`
	TcpStreamChannelTimeoutMs   int    `yaml:"tcpStreamChannelTimeoutMs" json:"tcpStreamChannelTimeoutMs" default:"10000"`
	TcpStreamChannelTimeoutShow bool   `yaml:"tcpStreamChannelTimeoutShow" json:"tcpStreamChannelTimeoutShow" default:"false"`
	ResolutionStrategy          string `yaml:"resolutionStrategy" json:"resolutionStrategy" default:"auto"`
	DuplicateTimeframe          string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
	DetectDuplicates            bool   `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
	StaleTimeoutSeconds         int    `yaml:"staleTimeoutSeconds" json:"staleTimeoutSeconds" default:"30"`
}

type PcapDumpConfig struct {
	PcapDumpEnabled  bool   `yaml:"enabled" json:"enabled" default:"true"`
	PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
	PcapMaxTime      string `yaml:"maxTime" json:"maxTime" default:"1h"`
	PcapMaxSize      string `yaml:"maxSize" json:"maxSize" default:"500MB"`
	PcapSrcDir       string `yaml:"pcapSrcDir" json:"pcapSrcDir" default:"pcapdump"`
}

type TapConfig struct {
	Docker        DockerConfig    `yaml:"docker"`
	Proxy         ProxyConfig     `yaml:"proxy"`
	PodRegexStr   string          `yaml:"regex" default:".*"`
	Namespaces    []string        `yaml:"namespaces"`
	AllNamespaces bool            `yaml:"allnamespaces" default:"false"`
	StorageLimit  string          `yaml:"storagelimit" default:"200MB"`
	DryRun        bool            `yaml:"dryrun" default:"false"`
	Pcap          string          `yaml:"pcap" default:""`
	Resources     ResourcesConfig `yaml:"resources"`
	ServiceMesh   bool            `yaml:"servicemesh" default:"true"`
	Tls           bool            `yaml:"tls" default:"true"`
	PacketCapture string          `yaml:"packetcapture" default:"libpcap"`
	Debug         bool            `yaml:"debug" default:"false"`
	Docker                       DockerConfig          `yaml:"docker" json:"docker"`
	Proxy                        ProxyConfig           `yaml:"proxy" json:"proxy"`
	PodRegexStr                  string                `yaml:"regex" json:"regex" default:".*"`
	Namespaces                   []string              `yaml:"namespaces" json:"namespaces" default:"[]"`
	ExcludedNamespaces           []string              `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"`
	BpfOverride                  string                `yaml:"bpfOverride" json:"bpfOverride" default:""`
	Stopped                      bool                  `yaml:"stopped" json:"stopped" default:"false"`
	Release                      ReleaseConfig         `yaml:"release" json:"release"`
	PersistentStorage            bool                  `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
	PersistentStorageStatic      bool                  `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"`
	EfsFileSytemIdAndPath        string                `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
	StorageLimit                 string                `yaml:"storageLimit" json:"storageLimit" default:"5000Mi"`
	StorageClass                 string                `yaml:"storageClass" json:"storageClass" default:"standard"`
	DryRun                       bool                  `yaml:"dryRun" json:"dryRun" default:"false"`
	Resources                    ResourcesConfig       `yaml:"resources" json:"resources"`
	ServiceMesh                  bool                  `yaml:"serviceMesh" json:"serviceMesh" default:"true"`
	Tls                          bool                  `yaml:"tls" json:"tls" default:"true"`
	DisableTlsLog                bool                  `yaml:"disableTlsLog" json:"disableTlsLog" default:"true"`
	PacketCapture                string                `yaml:"packetCapture" json:"packetCapture" default:"best"`
	IgnoreTainted                bool                  `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"`
	Labels                       map[string]string     `yaml:"labels" json:"labels" default:"{}"`
	Annotations                  map[string]string     `yaml:"annotations" json:"annotations" default:"{}"`
	NodeSelectorTerms            []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"[]"`
	Auth                         AuthConfig            `yaml:"auth" json:"auth"`
	Ingress                      IngressConfig         `yaml:"ingress" json:"ingress"`
	IPv6                         bool                  `yaml:"ipv6" json:"ipv6" default:"true"`
	Debug                        bool                  `yaml:"debug" json:"debug" default:"false"`
	Telemetry                    TelemetryConfig       `yaml:"telemetry" json:"telemetry"`
	ResourceGuard                ResourceGuardConfig   `yaml:"resourceGuard" json:"resourceGuard"`
	Sentry                       SentryConfig          `yaml:"sentry" json:"sentry"`
	DefaultFilter                string                `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !tcp and !udp and !icmp"`
	ScriptingDisabled            bool                  `yaml:"scriptingDisabled" json:"scriptingDisabled" default:"false"`
	TargetedPodsUpdateDisabled   bool                  `yaml:"targetedPodsUpdateDisabled" json:"targetedPodsUpdateDisabled" default:"false"`
	PresetFiltersChangingEnabled bool                  `yaml:"presetFiltersChangingEnabled" json:"presetFiltersChangingEnabled" default:"true"`
	RecordingDisabled            bool                  `yaml:"recordingDisabled" json:"recordingDisabled" default:"false"`
	StopTrafficCapturingDisabled bool                  `yaml:"stopTrafficCapturingDisabled" json:"stopTrafficCapturingDisabled" default:"false"`
	Capabilities                 CapabilitiesConfig    `yaml:"capabilities" json:"capabilities"`
	GlobalFilter                 string                `yaml:"globalFilter" json:"globalFilter" default:""`
	EnabledDissectors            []string              `yaml:"enabledDissectors" json:"enabledDissectors"`
	Metrics                      MetricsConfig         `yaml:"metrics" json:"metrics"`
	Pprof                        PprofConfig           `yaml:"pprof" json:"pprof"`
	Misc                         MiscConfig            `yaml:"misc" json:"misc"`
}

func (config *TapConfig) PodRegex() *regexp.Regexp {

@@ -79,24 +253,11 @@ func (config *TapConfig) PodRegex() *regexp.Regexp {
	return podRegex
}

func (config *TapConfig) StorageLimitBytes() int64 {
	storageLimitBytes, err := utils.HumanReadableToBytes(config.StorageLimit)
	if err != nil {
		log.Fatal().Err(err).Send()
	}
	return storageLimitBytes
}

func (config *TapConfig) Validate() error {
	_, compileErr := regexp.Compile(config.PodRegexStr)
	if compileErr != nil {
		return fmt.Errorf("%s is not a valid regex %s", config.PodRegexStr, compileErr)
	}

	_, parseHumanDataSizeErr := utils.HumanReadableToBytes(config.StorageLimit)
	if parseHumanDataSizeErr != nil {
		return fmt.Errorf("Could not parse --%s value %s", StorageLimitLabel, config.StorageLimit)
	}

	return nil
}
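The `default:"..."` struct tags above are consistent with the `github.com/creasty/defaults` package listed in the `go.mod` further down. As a minimal sketch (an assumption about the wiring, not the repository's actual code path), this is how such tags get populated:

```go
package main

import (
	"fmt"

	"github.com/creasty/defaults" // listed in go.mod; assumed to back the `default:"..."` tags
)

type HubConfig struct {
	SrvPort uint16 `yaml:"srvPort" default:"8898"`
}

func main() {
	var cfg HubConfig
	// defaults.Set fills zero-valued fields from their `default` tags.
	if err := defaults.Set(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {SrvPort:8898}
}
```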
@@ -1,46 +0,0 @@
package docker

import "fmt"

const (
	hub    = "hub"
	worker = "worker"
	front  = "front"
)

var (
	registry = "docker.io/kubeshark"
	tag      = "latest"
)

func GetRegistry() string {
	return registry
}

func SetRegistry(value string) {
	registry = value
}

func GetTag() string {
	return tag
}

func SetTag(value string) {
	tag = value
}

func getImage(image string) string {
	return fmt.Sprintf("%s/%s:%s", registry, image, tag)
}

func GetHubImage() string {
	return getImage(hub)
}

func GetWorkerImage() string {
	return getImage(worker)
}

func GetFrontImage() string {
	return getImage(front)
}
@@ -6,6 +6,7 @@ import (
	regexpsyntax "regexp/syntax"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/misc"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"

@@ -18,12 +19,12 @@ func FormatError(err error) error {
	if k8serrors.IsForbidden(err) {
		errorNew = fmt.Errorf("insufficient permissions: %w. "+
			"supply the required permission or control %s's access to namespaces by setting %s "+
			"in the config file or setting the targetted namespace with --%s %s=<NAMEPSACE>",
			"in the config file or setting the targeted namespace with --%s %s=<NAMESPACE>",
			err,
			misc.Software,
			config.SelfNamespaceConfigName,
			configStructs.ReleaseNamespaceLabel,
			config.SetCommandName,
			config.SelfNamespaceConfigName)
			configStructs.ReleaseNamespaceLabel)
	} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
		errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
	} else {
go.mod (181 lines)
@@ -1,106 +1,161 @@
module github.com/kubeshark/kubeshark

go 1.17
go 1.21.1

require (
	github.com/creasty/defaults v1.5.2
	github.com/docker/docker v20.10.22+incompatible
	github.com/docker/go-connections v0.4.0
	github.com/docker/go-units v0.4.0
	github.com/fsnotify/fsnotify v1.6.0
	github.com/go-cmd/cmd v1.4.3
	github.com/goccy/go-yaml v1.11.2
	github.com/google/go-github/v37 v37.0.0
	github.com/kubeshark/base v0.5.0
	github.com/gorilla/websocket v1.4.2
	github.com/kubeshark/gopacket v1.1.39
	github.com/pkg/errors v0.9.1
	github.com/rivo/tview v0.0.0-20240818110301-fd649dbf1223
	github.com/robertkrimen/otto v0.2.1
	github.com/rs/zerolog v1.28.0
	github.com/spf13/cobra v1.3.0
	github.com/spf13/cobra v1.7.0
	github.com/spf13/pflag v1.0.5
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.23.3
	k8s.io/apimachinery v0.23.3
	k8s.io/client-go v0.23.3
	k8s.io/kubectl v0.23.3
	github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e
	helm.sh/helm/v3 v3.12.0
	k8s.io/api v0.28.3
	k8s.io/apimachinery v0.28.3
	k8s.io/client-go v0.28.3
	k8s.io/kubectl v0.28.3
)

require (
	cloud.google.com/go/compute v1.2.0 // indirect
	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
	github.com/Azure/go-autorest/autorest v0.11.24 // indirect
	github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
	github.com/Azure/go-autorest/logger v0.2.1 // indirect
	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
	github.com/BurntSushi/toml v1.2.1 // indirect
	github.com/MakeNowJust/heredoc v1.0.0 // indirect
	github.com/Microsoft/go-winio v0.6.0 // indirect
	github.com/PuerkitoBio/purell v1.1.1 // indirect
	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
	github.com/Masterminds/goutils v1.1.1 // indirect
	github.com/Masterminds/semver/v3 v3.2.0 // indirect
	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
	github.com/Masterminds/squirrel v1.5.3 // indirect
	github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/chai2010/gettext-go v1.0.2 // indirect
	github.com/containerd/containerd v1.7.0 // indirect
	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/docker/distribution v2.7.1+incompatible // indirect
	github.com/docker/cli v20.10.21+incompatible // indirect
	github.com/docker/distribution v2.8.2+incompatible // indirect
	github.com/docker/docker v20.10.24+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.7.0 // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-metrics v0.0.1 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
	github.com/fvbommel/sortorder v1.0.2 // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/gdamore/encoding v1.0.0 // indirect
	github.com/gdamore/tcell/v2 v2.7.1 // indirect
	github.com/go-errors/errors v1.4.2 // indirect
	github.com/go-logr/logr v1.2.3 // indirect
	github.com/go-openapi/jsonpointer v0.19.5 // indirect
	github.com/go-openapi/jsonreference v0.19.6 // indirect
	github.com/go-openapi/swag v0.21.1 // indirect
	github.com/go-gorp/gorp/v3 v3.0.5 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.3 // indirect
	github.com/go-playground/validator/v10 v10.14.0 // indirect
	github.com/gobwas/glob v0.2.3 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/btree v1.0.1 // indirect
	github.com/google/go-cmp v0.5.7 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/googleapis/gnostic v0.5.5 // indirect
	github.com/gorilla/mux v1.8.0 // indirect
	github.com/gosuri/uitable v0.0.4 // indirect
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
	github.com/imdario/mergo v0.3.12 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/huandu/xstrings v1.4.0 // indirect
	github.com/imdario/mergo v0.3.13 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jmoiron/sqlx v1.3.5 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.16.0 // indirect
	github.com/kubeshark/tracerproto v1.0.0 // indirect
	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
	github.com/lib/pq v1.10.7 // indirect
	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mitchellh/copystructure v1.2.0 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/locker v1.0.1 // indirect
	github.com/moby/spdystream v0.2.0 // indirect
	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
	github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.0.2 // indirect
	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/russross/blackfriday v1.6.0 // indirect
	github.com/sirupsen/logrus v1.7.0 // indirect
	github.com/stretchr/testify v1.8.1 // indirect
	github.com/xlab/treeprint v1.1.0 // indirect
	go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
	golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab // indirect
	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
	golang.org/x/net v0.2.0 // indirect
	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
	golang.org/x/sys v0.2.0 // indirect
	golang.org/x/term v0.2.0 // indirect
	golang.org/x/text v0.4.0 // indirect
	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
	golang.org/x/tools v0.1.12 // indirect
	github.com/prometheus/client_golang v1.16.0 // indirect
	github.com/prometheus/client_model v0.4.0 // indirect
	github.com/prometheus/common v0.44.0 // indirect
	github.com/prometheus/procfs v0.10.1 // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	github.com/rubenv/sql-migrate v1.3.1 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/shopspring/decimal v1.3.1 // indirect
	github.com/sirupsen/logrus v1.9.0 // indirect
	github.com/spf13/cast v1.5.0 // indirect
	github.com/stretchr/testify v1.8.3 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	github.com/xlab/treeprint v1.2.0 // indirect
	go.opentelemetry.io/otel v1.14.0 // indirect
	go.opentelemetry.io/otel/trace v1.14.0 // indirect
	go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
	golang.org/x/crypto v0.14.0 // indirect
	golang.org/x/mod v0.10.0 // indirect
	golang.org/x/net v0.17.0 // indirect
	golang.org/x/oauth2 v0.8.0 // indirect
	golang.org/x/sync v0.2.0 // indirect
	golang.org/x/sys v0.17.0 // indirect
	golang.org/x/term v0.17.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.27.1 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
	google.golang.org/grpc v1.54.0 // indirect
	google.golang.org/protobuf v1.30.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/sourcemap.v1 v1.0.5 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	k8s.io/cli-runtime v0.23.3 // indirect
	k8s.io/component-base v0.23.3 // indirect
	k8s.io/klog/v2 v2.40.1 // indirect
	k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
	k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
	sigs.k8s.io/kustomize/api v0.11.1 // indirect
	sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.27.1 // indirect
	k8s.io/apiserver v0.27.1 // indirect
	k8s.io/cli-runtime v0.28.3 // indirect
	k8s.io/component-base v0.28.3 // indirect
	k8s.io/klog/v2 v2.100.1 // indirect
	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
	k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
	oras.land/oras-go v1.2.2 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
	sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
	sigs.k8s.io/yaml v1.3.0 // indirect
)
helm-chart/Chart.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
apiVersion: v2
name: kubeshark
version: "52.3.90"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
  - kubeshark
  - packet capture
  - traffic capture
  - traffic analyzer
  - network sniffer
  - observability
  - devops
  - microservice
  - forensics
  - api
kubeVersion: '>= 1.16.0-0'
maintainers:
  - email: info@kubeshark.co
    name: Kubeshark
    url: https://kubeshark.co
sources:
  - https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
icon: https://raw.githubusercontent.com/kubeshark/assets/master/logo/vector/logo.svg
helm-chart/LICENSE (new file, 191 lines)
@@ -0,0 +1,191 @@

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2022 Kubeshark

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
helm-chart/README.md (new file, 279 lines)
@@ -0,0 +1,279 @@
# Helm Chart of Kubeshark

## Official

Add the Helm repo for Kubeshark:

```shell
helm repo add kubeshark https://helm.kubeshark.co
```

Then install Kubeshark:

```shell
helm install kubeshark kubeshark/kubeshark
```

## Local

Clone the repo:

```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/helm-chart
```

In case you want to clone a specific tag of the repo (e.g. `v52.3.59`):

```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1 --branch <tag>
cd kubeshark/helm-chart
```
> See the list of available tags here: https://github.com/kubeshark/kubeshark/tags

Render the templates:

```shell
helm template .
```

Install Kubeshark:

```shell
helm install kubeshark .
```

Uninstall Kubeshark:

```shell
helm uninstall kubeshark
```

## Port-forward

Start the port forwarding:

```shell
kubectl port-forward service/kubeshark-front 8899:80
```

Visit [localhost:8899](http://localhost:8899).

You can also use `kubeshark proxy` for a more stable port-forward connection.

## Add a License Key

When a license is required, pass it at install time:

```shell
--set license=YOUR_LICENSE_GOES_HERE
```

Get your license from Kubeshark's [Admin Console](https://console.kubeshark.co/).
## Installing with Ingress (EKS) enabled

```shell
helm install kubeshark kubeshark/kubeshark -f values.yaml
```

with `values.yaml` set as follows:

```yaml
tap:
  ingress:
    enabled: true
    className: "alb"
    host: ks.example.com
    tls: []
    annotations:
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:7..8:certificate/b...65c
      alb.ingress.kubernetes.io/target-type: ip
      alb.ingress.kubernetes.io/scheme: internet-facing
```
## Disabling IPv6

Not all clusters have IPv6 enabled, in which case it has to be disabled as follows:

```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.ipv6=false
```
## Prometheus Metrics

Please refer to the [metrics](./metrics.md) documentation for details.
## Override Tag, Tags, Images

In addition to using a private registry, you can further override the global image tag, individual image tags, and individual image names.

Example of overriding image names (nested under `tap:`, matching the `tap.docker.*` parameters in the table below):

```yaml
tap:
  docker:
    overrideImage:
      worker: docker.io/kubeshark/worker:v52.3.87
      front: docker.io/kubeshark/front:v52.3.87
      hub: docker.io/kubeshark/hub:v52.3.87
```
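Similarly, `tap.docker.overrideTag` (see the table below) can pin a tag per image while keeping the default image names. A sketch, reusing the tag from the example above:

```yaml
tap:
  docker:
    overrideTag:
      worker: v52.3.87
      front: v52.3.87
      hub: v52.3.87
```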
## Configuration
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
|
||||
| `tap.docker.registry` | Docker registry to pull from | `docker.io/kubeshark` |
|
||||
| `tap.docker.tag` | Tag of the Docker images | `latest` |
|
||||
| `tap.docker.tagLocked` | Lock the Docker image tags to prevent automatic upgrades to the latest branch image version. | `true` |
|
||||
| `tap.docker.tagLocked` | If `false` - use latest minor tag | `true` |
|
||||
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
|
||||
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
|
||||
| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
|
||||
| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
|
||||
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
|
||||
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `30001` |
|
||||
| `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` |
|
||||
| `tap.proxy.host` | Change to 0.0.0.0 top open up to the world. | `127.0.0.1` |
|
||||
| `tap.regex` | Target (process traffic from) pods that match regex | `.*` |
|
||||
| `tap.namespaces` | Target pods in namespaces | `[]` |
|
||||
| `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` |
|
||||
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
|
||||
| `tap.stopped` | Set to `false` to have traffic processing start automatically. When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically control via the dashboard. | `false` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.co` |
|
||||
| `tap.release.name` | Helm release name | `kubeshark` |
|
||||
| `tap.release.namespace` | Helm release namespace | `default` |
|
||||
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
|
||||
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
|
||||
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | `""` |
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `500Mi` |
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
| `tap.dryRun` | Preview all pods matching the regex, without tapping them | `false` |
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `""` (no limit) |
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `3Gi` |
| `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` |
| `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` |
| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `""` (no limit) |
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `3Gi` |
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
| `tap.tls` | Capture encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `true` |
| `tap.ignoreTainted` | Whether to ignore tainted nodes | `false` |
| `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` |
| `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` |
| `tap.nodeSelectorTerms` | Node selector terms | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
| `tap.auth.enabled` | Enable authentication | `false` |
| `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` |
| `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` |
| `tap.auth.approvedDomains` | List of approved email domains for authentication | `[]` |
| `tap.auth.saml.idpMetadataUrl` | SAML IDP metadata URL <br/>(effective if `tap.auth.type = saml`) | `""` |
| `tap.auth.saml.x509crt` | Contents of a self-signed X.509 `.crt` file <br/>(effective if `tap.auth.type = saml`) | `""` |
| `tap.auth.saml.x509key` | Contents of a self-signed X.509 `.key` file <br/>(effective if `tap.auth.type = saml`) | `""` |
| `tap.auth.saml.roleAttribute` | SAML attribute name corresponding to the user's authorization role <br/>(effective if `tap.auth.type = saml`) | `role` |
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` |
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
| `tap.ingress.className` | Ingress class name | `""` |
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
| `tap.ingress.tls` | `Ingress` TLS configuration | `[]` |
| `tap.ingress.annotations` | `Ingress` annotations | `{}` |
| `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
| `tap.debug` | Enable debug mode | `false` |
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
| `tap.resourceGuard.enabled` | Enable the resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default it filters out noisy protocols such as DNS, TCP, UDP and ICMP; change this value, or edit the filter in the Dashboard, to change that behavior. | `"!dns and !tcp and !udp and !icmp"` |
| `tap.globalFilter` | Prepended to any KFL filter; can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` redacts the matching field, while `!dns` hides all DNS traffic. | `""` |
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
| `tap.enabledDissectors` | Array of strings listing the enabled protocol dissectors. Remove or comment out redundant protocols (e.g., `dns`). | The default list excludes: `dns` and `tcp` |
| `logs.file` | Logs dump path | `""` |
| `pcapdump.enabled` | Enable recording of all captured traffic. Whatever Kubeshark captures, subject to the pod targeting rules, is stored in PCAP files ready to be viewed by standard tools | `true` |
| `pcapdump.maxTime` | The time window into the past that is retained; older traffic is discarded | `2h` |
| `pcapdump.maxSize` | The maximum storage size the PCAP files may consume; the oldest files are discarded once this limit is exceeded | `500MB` |
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
| `kube.context` | Kubernetes context to use for the deployment | `""` |
| `dumpLogs` | Enable dumping of logs | `false` |
| `headless` | Enable running in headless mode | `false` |
| `license` | License key for the Pro/Enterprise edition | `""` |
| `scripting.env` | Environment variables for the scripting | `{}` |
| `scripting.source` | Source directory of the scripts | `""` |
| `scripting.watchScripts` | Enable watch mode for the scripts in the source directory | `true` |
| `timezone` | IANA time zone applied to time shown in the front-end | `""` (local time zone applies) |
| `supportChatEnabled` | Enable the real-time support chat channel based on Intercom | `true` |
| `internetConnectivity` | When set to `false`, turns off API requests that are dependent on Internet connectivity, such as `telemetry` and `online-support` | `true` |
| `dissectorsUpdatingEnabled` | Turns off the UI for enabling/disabling dissectors | `true` |
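
Any of these values can be overridden at install time without editing files; a couple of representative flags (standard Helm `--set` syntax; the keys are the ones documented above):

```shell
helm install kubeshark kubeshark/kubeshark \
  --set-string 'tap.defaultFilter=http' \
  --set tap.telemetry.enabled=false \
  --set 'tap.enabledDissectors={http,dns,icmp}'
```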
## Installing with SAML enabled

### Prerequisites

##### 1. Generate X.509 certificate & key (TL;DR: https://ubuntu.com/server/docs/security-certificates)

**Example:**

```shell
openssl genrsa -out mykey.key 2048
openssl req -new -key mykey.key -out mycsr.csr
openssl x509 -signkey mykey.key -in mycsr.csr -req -days 365 -out mycert.crt
```
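
To sanity-check the generated pair before wiring it into the chart, you can print the certificate and confirm the key matches it (a quick check using only the `openssl` CLI; the file names follow the example above):

```shell
# Inspect the certificate's subject and validity window
openssl x509 -in mycert.crt -noout -subject -dates

# The two modulus hashes should be identical if the key and cert match
openssl x509 -in mycert.crt -noout -modulus | openssl md5
openssl rsa  -in mykey.key  -noout -modulus | openssl md5
```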

**What you get:**
- `mycert.crt` - use it for `tap.auth.saml.x509crt`
- `mykey.key` - use it for `tap.auth.saml.x509key`

##### 2. Prepare your SAML IDP

Set up your SAML IDP of choice (Google, Auth0, your custom IDP, etc.)

During setup, the IDP provider will typically ask you to enter:
- Metadata URL
- ACS URL (Assertion Consumer Service URL, aka Callback URL)
- SLO URL (Single Logout URL)

Correspondingly, for the default Kubeshark setup, you will enter:
- [http://localhost:8899/saml/metadata](http://localhost:8899/saml/metadata)
- [http://localhost:8899/saml/acs](http://localhost:8899/saml/acs)
- [http://localhost:8899/saml/slo](http://localhost:8899/saml/slo)

Otherwise, if you have `tap.ingress.enabled == true`, change the protocol & domain accordingly (shown here with an example domain):
- [https://kubeshark.example.com/saml/metadata](https://kubeshark.example.com/saml/metadata)
- [https://kubeshark.example.com/saml/acs](https://kubeshark.example.com/saml/acs)
- [https://kubeshark.example.com/saml/slo](https://kubeshark.example.com/saml/slo)

Install the chart, passing your values file:

```shell
helm install kubeshark kubeshark/kubeshark -f values.yaml
```

with a `values.yaml` along these lines:

```yaml
tap:
  auth:
    enabled: true
    type: saml
    saml:
      idpMetadataUrl: "https://ti..th0.com/samlp/metadata/MpWiDCM..qdnDG"
      x509crt: |
        -----BEGIN CERTIFICATE-----
        MIIDlTCCAn0CFFRUzMh+dZvp+FvWd4gRaiBVN8EvMA0GCSqGSIb3DQEBCwUAMIGG
        MSQwIgYJKoZIhvcNAQkBFhV3ZWJtYXN0ZXJAZXhhbXBsZS5jb20wHhcNMjMxMjI4
        ........<redacted: please, generate your own X.509 cert>........
        ZMzM7YscqZwoVhTOhrD4/5nIfOD/hTWG/MBe2Um1V1IYF8aVEllotTKTgsF6ZblA
        miCOgl6lIlZy
        -----END CERTIFICATE-----
      x509key: |
        -----BEGIN PRIVATE KEY-----
        MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDlgDFKsRHj+mok
        euOF0IpwToOEpQGtafB75ytv3psD/tQAzEIug+rkDriVvsfcvafj0qcaTeYvnCoz
        ........<redacted: please, generate your own X.509 key>.........
        sUpBCu0E3nRJM/QB2ui5KhNR7uvPSL+kSsaEq19/mXqsL+mRi9aqy2wMEvUSU/kt
        UaV5sbRtTzYLxpOSQyi8CEFA+A==
        -----END PRIVATE KEY-----
```
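
If Kubeshark is already deployed, the same values can be applied in place; a minimal sketch, assuming the release is named `kubeshark`:

```shell
helm upgrade kubeshark kubeshark/kubeshark -f values.yaml

# Restart the port-forward and open the UI; you should be
# redirected to your IDP's login page.
kubectl port-forward service/kubeshark-front 8899:80
```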

55  helm-chart/metrics.md  Normal file
@@ -0,0 +1,55 @@
# Metrics

Kubeshark provides metrics from its `worker` components. They can be useful for monitoring and debugging purposes.

## Configuration

By default, Kubeshark uses port `49100` to expose metrics via the `kubeshark-worker-metrics` service.

In case you use the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) community Helm chart, an additional scrape configuration for the Kubeshark worker metrics endpoint can be added with these values:

```yaml
prometheus:
  enabled: true
  prometheusSpec:
    additionalScrapeConfigs: |
      - job_name: 'kubeshark-worker-metrics'
        kubernetes_sd_configs:
          - role: endpoints
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_name]
            target_label: pod
          - source_labels: [__meta_kubernetes_pod_node_name]
            target_label: node
          - source_labels: [__meta_kubernetes_endpoint_port_name]
            action: keep
            regex: ^metrics$
          - source_labels: [__address__, __meta_kubernetes_endpoint_port_number]
            action: replace
            regex: ([^:]+)(?::\d+)?
            replacement: $1:49100
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
```
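
To verify the endpoint is actually serving data, you can hit one worker's metrics port directly (a quick check; assumes the chart was installed into the `default` namespace with the default metrics port):

```shell
# Forward the worker metrics service locally, then fetch the Prometheus exposition
kubectl port-forward -n default service/kubeshark-worker-metrics 49100:49100 &
curl -s http://localhost:49100/metrics | grep ^kubeshark_
```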

## Available metrics

| Name | Type | Description |
| --- | --- | --- |
| kubeshark_received_packets_total | Counter | Total number of packets received |
| kubeshark_dropped_packets_total | Counter | Total number of packets dropped |
| kubeshark_processed_bytes_total | Counter | Total number of bytes processed |
| kubeshark_tcp_packets_total | Counter | Total number of TCP packets |
| kubeshark_dns_packets_total | Counter | Total number of DNS packets |
| kubeshark_icmp_packets_total | Counter | Total number of ICMP packets |
| kubeshark_reassembled_tcp_payloads_total | Counter | Total number of reassembled TCP payloads |
| kubeshark_matched_pairs_total | Counter | Total number of matched pairs |
| kubeshark_dropped_tcp_streams_total | Counter | Total number of dropped TCP streams |
| kubeshark_live_tcp_streams | Gauge | Number of live TCP streams |
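
Once Prometheus scrapes these series, drop rates can be derived from the counters; a sketch querying the HTTP API, assuming Prometheus is reachable at `localhost:9090`:

```shell
# Packets dropped per second over the last 5 minutes, per worker pod
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=rate(kubeshark_dropped_packets_total[5m])'
```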

## Ready-to-use Dashboard

You can import a ready-to-use dashboard from [Grafana's Dashboards Portal](https://grafana.com/grafana/dashboards/21332-kubeshark-dashboard-v3-4/).

12  helm-chart/templates/01-service-account.yaml  Normal file
@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: {{ include "kubeshark.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}

74  helm-chart/templates/02-cluster-role.yaml  Normal file
@@ -0,0 +1,74 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-cluster-role-{{ .Release.Namespace }}
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - ""
      - extensions
      - apps
    resources:
      - nodes
      - pods
      - services
      - endpoints
      - persistentvolumeclaims
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
    resourceNames:
      - kube-system
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-self-config-role
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - ""
      - v1
    resourceNames:
      - kubeshark-secret
      - kubeshark-config-map
    resources:
      - secrets
      - configmaps
    verbs:
      - get
      - watch
      - list
      - update
      - patch

40  helm-chart/templates/03-cluster-role-binding.yaml  Normal file
@@ -0,0 +1,40 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-cluster-role-binding-{{ .Release.Namespace }}
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubeshark-cluster-role-{{ .Release.Namespace }}
subjects:
  - kind: ServiceAccount
    name: {{ include "kubeshark.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-self-config-role-binding
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubeshark-self-config-role
subjects:
  - kind: ServiceAccount
    name: {{ include "kubeshark.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}

114  helm-chart/templates/04-hub-deployment.yaml  Normal file
@@ -0,0 +1,114 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubeshark.co/app: hub
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: {{ include "kubeshark.name" . }}-hub
  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1  # Set the desired number of replicas
  selector:
    matchLabels:
      app.kubeshark.co/app: hub
      {{- include "kubeshark.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        app.kubeshark.co/app: hub
        {{- include "kubeshark.labels" . | nindent 8 }}
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
      containers:
        - name: hub
          command:
            - ./hub
            - -port
            - "8080"
            {{- if .Values.tap.debug }}
            - -debug
            {{- end }}
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SENTRY_ENABLED
              value: '{{ (include "sentry.enabled" .) }}'
            - name: SENTRY_ENVIRONMENT
              value: '{{ .Values.tap.sentry.environment }}'
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
            - name: PROFILING_ENABLED
              value: '{{ .Values.tap.pprof.enabled }}'
          {{- if .Values.tap.docker.overrideImage.hub }}
          image: '{{ .Values.tap.docker.overrideImage.hub }}'
          {{- else if .Values.tap.docker.overrideTag.hub }}
          image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}'
          {{ else }}
          image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
          {{- end }}
          imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
          {{- if .Values.tap.docker.imagePullSecrets }}
          imagePullSecrets:
            {{- range .Values.tap.docker.imagePullSecrets }}
            - name: {{ . }}
            {{- end }}
          {{- end }}
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
          resources:
            limits:
              {{ if ne (toString .Values.tap.resources.hub.limits.cpu) "0" }}
              cpu: {{ .Values.tap.resources.hub.limits.cpu }}
              {{ end }}
              {{ if ne (toString .Values.tap.resources.hub.limits.memory) "0" }}
              memory: {{ .Values.tap.resources.hub.limits.memory }}
              {{ end }}
            requests:
              {{ if ne (toString .Values.tap.resources.hub.requests.cpu) "0" }}
              cpu: {{ .Values.tap.resources.hub.requests.cpu }}
              {{ end }}
              {{ if ne (toString .Values.tap.resources.hub.requests.memory) "0" }}
              memory: {{ .Values.tap.resources.hub.requests.memory }}
              {{ end }}
          volumeMounts:
            - name: saml-x509-volume
              mountPath: "/etc/saml/x509"
              readOnly: true
      volumes:
        - name: saml-x509-volume
          projected:
            sources:
              - secret:
                  name: kubeshark-saml-x509-crt-secret
                  items:
                    - key: AUTH_SAML_X509_CRT
                      path: kubeshark.crt
              - secret:
                  name: kubeshark-saml-x509-key-secret
                  items:
                    - key: AUTH_SAML_X509_KEY
                      path: kubeshark.key
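
The hub mounts the SAML material only through the projected volume above; if SAML login misbehaves, a quick way to confirm the files were materialized is to read them back from the running pod (a sketch using plain `kubectl`; assumes the default hub labels):

```shell
# Find the hub pod and list the projected SAML certificate and key
HUB_POD=$(kubectl get pods -l app.kubeshark.co/app=hub -o name | head -n1)
kubectl exec "$HUB_POD" -- ls -l /etc/saml/x509
```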

21  helm-chart/templates/05-hub-service.yaml  Normal file
@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubeshark.co/app: hub
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-hub
  namespace: {{ .Release.Namespace }}
spec:
  ports:
    - name: kubeshark-hub
      port: 80
      targetPort: 8080
  selector:
    app.kubeshark.co/app: hub
  type: ClusterIP

116  helm-chart/templates/06-front-deployment.yaml  Normal file
@@ -0,0 +1,116 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubeshark.co/app: front
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: {{ include "kubeshark.name" . }}-front
  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1  # Set the desired number of replicas
  selector:
    matchLabels:
      app.kubeshark.co/app: front
      {{- include "kubeshark.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        app.kubeshark.co/app: front
        {{- include "kubeshark.labels" . | nindent 8 }}
    spec:
      containers:
        - env:
            - name: REACT_APP_AUTH_ENABLED
              value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
                      "false"
                      {{- else -}}
                      {{ .Values.cloudLicenseEnabled | ternary "true" .Values.tap.auth.enabled }}
                      {{- end }}'
            - name: REACT_APP_AUTH_TYPE
              value: '{{ not (eq .Values.tap.auth.type "") | ternary (.Values.cloudLicenseEnabled | ternary "oidc" .Values.tap.auth.type) " " }}'
            - name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
              value: '{{ not (eq .Values.tap.auth.saml.idpMetadataUrl "") | ternary .Values.tap.auth.saml.idpMetadataUrl " " }}'
            - name: REACT_APP_TIMEZONE
              value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
            - name: REACT_APP_SCRIPTING_DISABLED
              value: '{{ .Values.tap.scriptingDisabled }}'
            - name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
              value: '{{ .Values.tap.targetedPodsUpdateDisabled }}'
            - name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
              value: '{{ .Values.tap.presetFiltersChangingEnabled }}'
            - name: REACT_APP_BPF_OVERRIDE_DISABLED
              value: '{{ eq .Values.tap.packetCapture "ebpf" | ternary "true" "false" }}'
            - name: REACT_APP_RECORDING_DISABLED
              value: '{{ .Values.tap.recordingDisabled }}'
            - name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
              value: '{{- if and .Values.tap.stopTrafficCapturingDisabled .Values.tap.stopped -}}
                      false
                      {{- else -}}
                      {{ .Values.tap.stopTrafficCapturingDisabled | ternary "true" "false" }}
                      {{- end -}}'
            - name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
              value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
                      "false"
                      {{- else -}}
                      {{ .Values.cloudLicenseEnabled }}
                      {{- end }}'
            - name: REACT_APP_SUPPORT_CHAT_ENABLED
              value: '{{ and .Values.supportChatEnabled .Values.internetConnectivity | ternary "true" "false" }}'
            - name: REACT_APP_DISSECTORS_UPDATING_ENABLED
              value: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}'
            - name: REACT_APP_SENTRY_ENABLED
              value: '{{ (include "sentry.enabled" .) }}'
            - name: REACT_APP_SENTRY_ENVIRONMENT
              value: '{{ .Values.tap.sentry.environment }}'
          {{- if .Values.tap.docker.overrideImage.front }}
          image: '{{ .Values.tap.docker.overrideImage.front }}'
          {{- else if .Values.tap.docker.overrideTag.front }}
          image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.overrideTag.front }}'
          {{ else }}
          image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
          {{- end }}
          imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
          {{- if .Values.tap.docker.imagePullSecrets }}
          imagePullSecrets:
            {{- range .Values.tap.docker.imagePullSecrets }}
            - name: {{ . }}
            {{- end }}
          {{- end }}
          name: kubeshark-front
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 750m
              memory: 1Gi
            requests:
              cpu: 50m
              memory: 50Mi
          volumeMounts:
            - name: nginx-config
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
              readOnly: true
      volumes:
        - name: nginx-config
          configMap:
            name: kubeshark-nginx-config-map
      dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}

20  helm-chart/templates/07-front-service.yaml  Normal file
@@ -0,0 +1,20 @@
---
apiVersion: v1
kind: Service
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-front
  namespace: {{ .Release.Namespace }}
spec:
  ports:
    - name: kubeshark-front
      port: 80
      targetPort: 8080
  selector:
    app.kubeshark.co/app: front
  type: ClusterIP

43  helm-chart/templates/08-persistent-volume-claim.yaml  Normal file
@@ -0,0 +1,43 @@
---
{{- if .Values.tap.persistentStorageStatic }}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kubeshark-persistent-volume
  namespace: {{ .Release.Namespace }}
spec:
  capacity:
    storage: {{ .Values.tap.storageLimit }}
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: {{ .Values.tap.storageClass }}
  {{- if .Values.tap.efsFileSytemIdAndPath }}
  csi:
    driver: efs.csi.aws.com
    volumeHandle: {{ .Values.tap.efsFileSytemIdAndPath }}
  {{ end }}
---
{{ end }}
{{- if .Values.tap.persistentStorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-persistent-volume-claim
  namespace: {{ .Release.Namespace }}
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: {{ .Values.tap.storageLimit }}
  storageClassName: {{ .Values.tap.storageClass }}
status: {}
{{- end }}

281  helm-chart/templates/09-worker-daemon-set.yaml  Normal file
@@ -0,0 +1,281 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubeshark.co/app: worker
    sidecar.istio.io/inject: "false"
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-worker-daemon-set
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      app.kubeshark.co/app: worker
      {{- include "kubeshark.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        app.kubeshark.co/app: worker
        {{- include "kubeshark.labels" . | nindent 8 }}
      name: kubeshark-worker-daemon-set
      namespace: kubeshark
    spec:
      containers:
        - command:
            - ./worker
            - -i
            - any
            - -port
            - '{{ .Values.tap.proxy.worker.srvPort }}'
            - -metrics-port
            - '{{ .Values.tap.metrics.port }}'
            - -packet-capture
            - '{{ .Values.tap.packetCapture }}'
            {{- if .Values.tap.tls }}
            - -unixsocket
            {{- end }}
            {{- if .Values.tap.serviceMesh }}
            - -servicemesh
            {{- end }}
            - -procfs
            - /hostproc
            {{- if ne .Values.tap.packetCapture "ebpf" }}
            - -disable-ebpf
            {{- end }}
            {{- if .Values.tap.resourceGuard.enabled }}
            - -enable-resource-guard
            {{- end }}
            - -resolution-strategy
            - '{{ .Values.tap.misc.resolutionStrategy }}'
            - -staletimeout
            - '{{ .Values.tap.misc.staleTimeoutSeconds }}'
            {{- if .Values.tap.debug }}
            - -debug
            {{- end }}
          {{- if .Values.tap.docker.overrideImage.worker }}
          image: '{{ .Values.tap.docker.overrideImage.worker }}'
          {{- else if .Values.tap.docker.overrideTag.worker }}
          image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
          {{ else }}
          image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
          {{- end }}
          imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
          {{- if .Values.tap.docker.imagePullSecrets }}
          imagePullSecrets:
            {{- range .Values.tap.docker.imagePullSecrets }}
            - name: {{ . }}
            {{- end }}
          {{- end }}
          name: sniffer
          ports:
            - containerPort: {{ .Values.tap.metrics.port }}
              protocol: TCP
              name: metrics
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: TCP_STREAM_CHANNEL_TIMEOUT_MS
              value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutMs }}'
            - name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
              value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutShow }}'
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
            - name: PROFILING_ENABLED
              value: '{{ .Values.tap.pprof.enabled }}'
            - name: SENTRY_ENABLED
              value: '{{ (include "sentry.enabled" .) }}'
            - name: SENTRY_ENVIRONMENT
              value: '{{ .Values.tap.sentry.environment }}'
          resources:
            limits:
              {{ if ne (toString .Values.tap.resources.sniffer.limits.cpu) "0" }}
              cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
              {{ end }}
              {{ if ne (toString .Values.tap.resources.sniffer.limits.memory) "0" }}
              memory: {{ .Values.tap.resources.sniffer.limits.memory }}
              {{ end }}
            requests:
              {{ if ne (toString .Values.tap.resources.sniffer.requests.cpu) "0" }}
              cpu: {{ .Values.tap.resources.sniffer.requests.cpu }}
              {{ end }}
              {{ if ne (toString .Values.tap.resources.sniffer.requests.memory) "0" }}
              memory: {{ .Values.tap.resources.sniffer.requests.memory }}
              {{ end }}
          securityContext:
            capabilities:
              add:
                {{- range .Values.tap.capabilities.networkCapture }}
                {{ print "- " . }}
                {{- end }}
                {{- if .Values.tap.serviceMesh }}
                {{- range .Values.tap.capabilities.serviceMeshCapture }}
                {{ print "- " . }}
                {{- end }}
                {{- end }}
              drop:
                - ALL
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 5
            tcpSocket:
              port: {{ .Values.tap.proxy.worker.srvPort }}
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 5
            tcpSocket:
              port: {{ .Values.tap.proxy.worker.srvPort }}
          volumeMounts:
            - mountPath: /hostproc
              name: proc
              readOnly: true
            - mountPath: /sys
              name: sys
              readOnly: true
            - mountPath: /app/data
              name: data
        {{- if .Values.tap.tls }}
        - command:
            - ./tracer
            - -procfs
            - /hostproc
            {{- if ne .Values.tap.packetCapture "ebpf" }}
            - -disable-ebpf
            {{- end }}
            {{- if .Values.tap.debug }}
            - -debug
            {{- end }}
            {{- if .Values.tap.disableTlsLog }}
            - -disable-tls-log
            {{- end }}
            {{- if .Values.tap.pprof.enabled }}
            - -port
            - '{{ add .Values.tap.proxy.worker.srvPort 1 }}'
            {{- end }}
          {{- if .Values.tap.docker.overrideTag.worker }}
          image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
          {{ else }}
          image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
          {{- end }}
          imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
          {{- if .Values.tap.docker.imagePullSecrets }}
          imagePullSecrets:
            {{- range .Values.tap.docker.imagePullSecrets }}
            - name: {{ . }}
            {{- end }}
          {{- end }}
          name: tracer
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: PROFILING_ENABLED
              value: '{{ .Values.tap.pprof.enabled }}'
            - name: SENTRY_ENABLED
              value: '{{ (include "sentry.enabled" .) }}'
            - name: SENTRY_ENVIRONMENT
              value: '{{ .Values.tap.sentry.environment }}'
          resources:
            limits:
              {{ if ne (toString .Values.tap.resources.tracer.limits.cpu) "0" }}
              cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
              {{ end }}
              {{ if ne (toString .Values.tap.resources.tracer.limits.memory) "0" }}
              memory: {{ .Values.tap.resources.tracer.limits.memory }}
              {{ end }}
            requests:
              {{ if ne (toString .Values.tap.resources.tracer.requests.cpu) "0" }}
              cpu: {{ .Values.tap.resources.tracer.requests.cpu }}
              {{ end }}
              {{ if ne (toString .Values.tap.resources.tracer.requests.memory) "0" }}
              memory: {{ .Values.tap.resources.tracer.requests.memory }}
              {{ end }}
          securityContext:
            capabilities:
              add:
                {{- range .Values.tap.capabilities.ebpfCapture }}
                {{ print "- " . }}
                {{- end }}
                {{- range .Values.tap.capabilities.networkCapture }}
                {{ print "- " . }}
                {{- end }}
              drop:
                - ALL
          volumeMounts:
            - mountPath: /hostproc
              name: proc
              readOnly: true
            - mountPath: /sys
              name: sys
              readOnly: true
            - mountPath: /app/data
              name: data
            - mountPath: /etc/os-release
              name: os-release
              readOnly: true
            - mountPath: /hostroot
              mountPropagation: HostToContainer
              name: root
              readOnly: true
        {{- end }}
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
      terminationGracePeriodSeconds: 0
      tolerations:
        - effect: NoExecute
          operator: Exists
        {{- if not .Values.tap.ignoreTainted }}
        - effect: NoSchedule
          operator: Exists
        {{- end }}
      {{- if gt (len .Values.tap.nodeSelectorTerms) 0}}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              {{- toYaml .Values.tap.nodeSelectorTerms | nindent 12 }}
      {{- end }}
      volumes:
        - hostPath:
            path: /proc
          name: proc
        - hostPath:
            path: /sys
          name: sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - hostPath:
            path: /etc/os-release
          name: os-release
        - hostPath:
            path: /
          name: root
        - name: data
          {{- if .Values.tap.persistentStorage }}
          persistentVolumeClaim:
            claimName: kubeshark-persistent-volume-claim
          {{- else }}
          emptyDir:
            sizeLimit: {{ .Values.tap.storageLimit }}
          {{- end }}
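
Because the workers run as a DaemonSet on the host network, a quick way to confirm one sniffer landed on every targeted node is to compare worker pods to Linux nodes (plain `kubectl`; assumes the default labels above):

```shell
kubectl get pods -l app.kubeshark.co/app=worker -o wide
kubectl get nodes -l kubernetes.io/os=linux
```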

39  helm-chart/templates/10-ingress.yaml  Normal file
@@ -0,0 +1,39 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.org/websocket-services: "kubeshark-front"
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
    {{- if .Values.tap.ingress.annotations }}
    {{- toYaml .Values.tap.ingress.annotations | nindent 4 }}
    {{- end }}
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  name: kubeshark-ingress
  namespace: {{ .Release.Namespace }}
spec:
  {{- if .Values.tap.ingress.className }}
  ingressClassName: {{ .Values.tap.ingress.className }}
  {{- end }}
  rules:
    - host: {{ .Values.tap.ingress.host }}
      http:
        paths:
          - backend:
              service:
                name: kubeshark-front
                port:
                  number: 80
            path: /
            pathType: Prefix
  {{- if .Values.tap.ingress.tls }}
  tls:
    {{- toYaml .Values.tap.ingress.tls | nindent 2 }}
  {{- end }}
status:
  loadBalancer: {}
{{- end }}
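
Enabling this template needs only a handful of values; a minimal sketch using `--set` flags (the class and host names here are examples, not defaults):

```shell
helm upgrade --install kubeshark kubeshark/kubeshark \
  --set tap.ingress.enabled=true \
  --set tap.ingress.className=nginx \
  --set tap.ingress.host=kubeshark.example.com
```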

61  helm-chart/templates/11-nginx-config-map.yaml  Normal file
@@ -0,0 +1,61 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubeshark-nginx-config-map
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
data:
  default.conf: |
    server {
      listen 8080;
      {{- if .Values.tap.ipv6 }}
      listen [::]:8080;
      {{- end }}
      access_log /dev/stdout;
      error_log /dev/stdout;

      client_body_buffer_size 64k;
      client_header_buffer_size 32k;
      large_client_header_buffers 8 64k;

      location /api {
        rewrite ^/api(.*)$ $1 break;
        proxy_pass http://kubeshark-hub;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Host $http_host;
        proxy_set_header Upgrade websocket;
        proxy_set_header Connection Upgrade;
        proxy_set_header Authorization $http_authorization;
        proxy_pass_header Authorization;
        proxy_connect_timeout 4s;
        proxy_read_timeout 120s;
        proxy_send_timeout 12s;
        proxy_pass_request_headers on;
      }

      location /saml {
        rewrite ^/saml(.*)$ /saml$1 break;
        proxy_pass http://kubeshark-hub;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Host $http_host;
        proxy_connect_timeout 4s;
        proxy_read_timeout 120s;
        proxy_send_timeout 12s;
        proxy_pass_request_headers on;
      }

      location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
        try_files $uri $uri/ /index.html;
        expires -1;
        add_header Cache-Control no-cache;
      }
      error_page 500 502 503 504 /50x.html;
      location = /50x.html {
        root /usr/share/nginx/html;
      }
    }

59  helm-chart/templates/12-config-map.yaml  Normal file
@@ -0,0 +1,59 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubeshark-config-map
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubeshark.co/app: hub
    {{- include "kubeshark.labels" . | nindent 4 }}
data:
  POD_REGEX: '{{ .Values.tap.regex }}'
  NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
  EXCLUDED_NAMESPACES: '{{ gt (len .Values.tap.excludedNamespaces) 0 | ternary (join "," .Values.tap.excludedNamespaces) "" }}'
  BPF_OVERRIDE: '{{ .Values.tap.bpfOverride }}'
  STOPPED: '{{ .Values.tap.stopped | ternary "true" "false" }}'
  SCRIPTING_SCRIPTS: '{}'
  SCRIPTING_ACTIVE_SCRIPTS: '{{ gt (len .Values.scripting.active) 0 | ternary (join "," .Values.scripting.active) "" }}'
  INGRESS_ENABLED: '{{ .Values.tap.ingress.enabled }}'
  INGRESS_HOST: '{{ .Values.tap.ingress.host }}'
  PROXY_FRONT_PORT: '{{ .Values.tap.proxy.front.port }}'
  AUTH_ENABLED: '{{- if and .Values.cloudLicenseEnabled (not (empty .Values.license)) -}}
                 "false"
                 {{- else -}}
                 {{ .Values.cloudLicenseEnabled | ternary "true" (.Values.tap.auth.enabled | ternary "true" "") }}
                 {{- end }}'
  AUTH_TYPE: '{{ .Values.cloudLicenseEnabled | ternary "oidc" (.Values.tap.auth.type) }}'
  AUTH_SAML_IDP_METADATA_URL: '{{ .Values.tap.auth.saml.idpMetadataUrl }}'
  AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}'
  AUTH_SAML_ROLES: '{{ .Values.tap.auth.saml.roles | toJson }}'
  TELEMETRY_DISABLED: '{{ not .Values.internetConnectivity | ternary "true" (not .Values.tap.telemetry.enabled | ternary "true" "false") }}'
  SCRIPTING_DISABLED: '{{ .Values.tap.scriptingDisabled | ternary "true" "" }}'
  TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.targetedPodsUpdateDisabled | ternary "true" "" }}'
  PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.presetFiltersChangingEnabled | ternary "true" "" }}'
  RECORDING_DISABLED: '{{ .Values.tap.recordingDisabled | ternary "true" "" }}'
  STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.stopTrafficCapturingDisabled .Values.tap.stopped -}}
                                    false
                                    {{- else -}}
                                    {{ .Values.tap.stopTrafficCapturingDisabled | ternary "true" "false" }}
                                    {{- end }}'
  GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }}
  DEFAULT_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.defaultFilter | quote }}
  TRAFFIC_SAMPLE_RATE: '{{ .Values.tap.misc.trafficSampleRate }}'
  JSON_TTL: '{{ .Values.tap.misc.jsonTTL }}'
  PCAP_TTL: '{{ .Values.tap.misc.pcapTTL }}'
  PCAP_ERROR_TTL: '{{ .Values.tap.misc.pcapErrorTTL }}'
  TIMEZONE: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
  CLOUD_LICENSE_ENABLED: '{{- if and .Values.cloudLicenseEnabled (not (empty .Values.license)) -}}
                          false
                          {{- else -}}
                          {{ .Values.cloudLicenseEnabled }}
                          {{- end }}'
  DUPLICATE_TIMEFRAME: '{{ .Values.tap.misc.duplicateTimeframe }}'
  ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
  DISSECTORS_UPDATING_ENABLED: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}'
  DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
  PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}'
  PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}'
  PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}'
  PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}'
  PCAP_SRC_DIR: '{{ .Values.pcapdump.pcapSrcDir }}'

41  helm-chart/templates/13-secret.yaml  Normal file
@@ -0,0 +1,41 @@
kind: Secret
apiVersion: v1
metadata:
  name: kubeshark-secret
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubeshark.co/app: hub
    {{- include "kubeshark.labels" . | nindent 4 }}
stringData:
  LICENSE: '{{ .Values.license }}'
  SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'

---

kind: Secret
apiVersion: v1
metadata:
  name: kubeshark-saml-x509-crt-secret
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubeshark.co/app: hub
    {{- include "kubeshark.labels" . | nindent 4 }}
stringData:
  AUTH_SAML_X509_CRT: |
    {{ .Values.tap.auth.saml.x509crt | nindent 4 }}

---

kind: Secret
apiVersion: v1
metadata:
  name: kubeshark-saml-x509-key-secret
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubeshark.co/app: hub
    {{- include "kubeshark.labels" . | nindent 4 }}
stringData:
  AUTH_SAML_X509_KEY: |
    {{ .Values.tap.auth.saml.x509key | nindent 4 }}

---

@@ -0,0 +1,53 @@
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-scc
priority: 10
allowPrivilegedContainer: true
allowHostDirVolumePlugin: true
allowHostNetwork: true
allowHostPorts: true
allowHostPID: true
allowHostIPC: true
readOnlyRootFilesystem: false
requiredDropCapabilities:
  - MKNOD
allowedCapabilities:
  - NET_RAW
  - NET_ADMIN
  - SYS_ADMIN
  - SYS_PTRACE
  - DAC_OVERRIDE
  - SYS_RESOURCE
  - SYS_MODULE
  - IPC_LOCK
runAsUser:
  type: RunAsAny
fsGroup:
  type: MustRunAs
seLinuxContext:
  type: RunAsAny
supplementalGroups:
  type: RunAsAny
seccompProfiles:
  - '*'
volumes:
  - configMap
  - downwardAPI
  - emptyDir
  - persistentVolumeClaim
  - secret
  - hostPath
  - projected
  - ephemeral
users:
  - system:serviceaccount:{{ .Release.Namespace }}:kubeshark-service-account
{{- end }}

23  helm-chart/templates/15-worker-service-metrics.yaml  Normal file
@@ -0,0 +1,23 @@
---
kind: Service
apiVersion: v1
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '{{ .Values.tap.metrics.port }}'
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-worker-metrics
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    app.kubeshark.co/app: worker
    {{- include "kubeshark.labels" . | nindent 4 }}
  ports:
    - name: metrics
      protocol: TCP
      port: {{ .Values.tap.metrics.port }}
      targetPort: {{ .Values.tap.metrics.port }}

76  helm-chart/templates/16-network-policies.yaml  Normal file
@@ -0,0 +1,76 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-hub-network-policy
  namespace: {{ .Release.Namespace }}
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: hub
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - ports:
        - protocol: TCP
          port: 8080
  egress:
    - {}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-front-network-policy
  namespace: {{ .Release.Namespace }}
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: front
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - ports:
        - protocol: TCP
          port: 8080
  egress:
    - {}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-worker-network-policy
  namespace: {{ .Release.Namespace }}
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: worker
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - ports:
        - protocol: TCP
          port: {{ .Values.tap.proxy.worker.srvPort }}
        - protocol: TCP
          port: {{ .Values.tap.metrics.port }}
  egress:
    - {}
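
After installing, you can confirm all three policies were created with plain `kubectl` (a quick check; assumes the resource names above):

```shell
kubectl get networkpolicy | grep kubeshark
```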

39  helm-chart/templates/NOTES.txt  Normal file
@@ -0,0 +1,39 @@
Thank you for installing {{ title .Chart.Name }}.

Registry: {{ .Values.tap.docker.registry }}
Tag: {{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}

{{- if .Values.tap.docker.overrideTag.worker }}
Overridden worker tag: {{ .Values.tap.docker.overrideTag.worker }}
{{ end }}
{{- if .Values.tap.docker.overrideTag.hub }}
Overridden hub tag: {{ .Values.tap.docker.overrideTag.hub }}
{{ end }}
{{- if .Values.tap.docker.overrideTag.front }}
Overridden front tag: {{ .Values.tap.docker.overrideTag.front }}
{{ end }}

Your deployment has been successful. The release is named `{{ .Release.Name }}` and it has been deployed in the `{{ .Release.Namespace }}` namespace.

{{- if .Values.tap.telemetry.enabled }}
Notice: Telemetry is enabled. Kubeshark will collect anonymous usage statistics.
{{ end }}

{{- if .Values.tap.ingress.enabled }}

You can now access the application through the following URL:
	http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}

{{- else }}
To access the application, follow these steps:

1. Perform port forwarding with the following command:

	kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-front 8899:80

2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
	http://0.0.0.0:8899

{{ end }}

88  helm-chart/templates/_helpers.tpl  Normal file
@@ -0,0 +1,88 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubeshark.name" -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubeshark.fullname" -}}
{{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubeshark.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "kubeshark.labels" -}}
helm.sh/chart: {{ include "kubeshark.chart" . }}
{{ include "kubeshark.selectorLabels" . }}
app.kubernetes.io/version: {{ .Chart.Version | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.tap.labels }}
{{ toYaml .Values.tap.labels }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "kubeshark.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubeshark.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "kubeshark.serviceAccountName" -}}
{{- printf "%s-service-account" .Release.Name }}
{{- end }}

{{/*
Escape double quotes in a string
*/}}
{{- define "kubeshark.escapeDoubleQuotes" -}}
{{- regexReplaceAll "\"" . "\\\"" -}}
{{- end -}}

{{/*
Define debug docker tag suffix
*/}}
{{- define "kubeshark.dockerTagDebugVersion" -}}
{{- .Values.tap.pprof.enabled | ternary "-debug" "" }}
{{- end -}}

{{/*
Create docker tag default version
*/}}
{{- define "kubeshark.defaultVersion" -}}
{{- $defaultVersion := (printf "v%s" .Chart.Version) -}}
{{- if not .Values.tap.docker.tagLocked }}
{{- $defaultVersion = regexReplaceAll "^([^.]+\\.[^.]+).*" $defaultVersion "$1" -}}
{{- end }}
{{- $defaultVersion }}
{{- end -}}

{{/*
Set sentry based on internet connectivity and telemetry
*/}}
{{- define "sentry.enabled" -}}
{{- $sentryEnabledVal := .Values.tap.sentry.enabled -}}
{{- if not .Values.internetConnectivity -}}
{{- $sentryEnabledVal = false -}}
{{- else if not .Values.tap.telemetry.enabled -}}
{{- $sentryEnabledVal = false -}}
{{- end -}}
{{- $sentryEnabledVal -}}
{{- end -}}
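
To see what these helpers actually render to for your values, a local dry-run is enough (standard Helm; no cluster needed):

```shell
# Render the chart and show the computed common labels
helm template kubeshark kubeshark/kubeshark | grep -A4 'helm.sh/chart:'
```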

180  helm-chart/values.yaml  Normal file
@@ -0,0 +1,180 @@
# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md
tap:
  docker:
    registry: docker.io/kubeshark
    tag: ""
    tagLocked: true
    imagePullPolicy: Always
    imagePullSecrets: []
    overrideImage:
      worker: ""
      hub: ""
      front: ""
    overrideTag:
      worker: ""
      hub: ""
      front: ""
  proxy:
    worker:
      srvPort: 30001
    hub:
      srvPort: 8898
    front:
      port: 8899
    host: 127.0.0.1
  regex: .*
  namespaces: []
  excludedNamespaces: []
  bpfOverride: ""
  stopped: false
  release:
    repo: https://helm.kubeshark.co
    name: kubeshark
    namespace: default
  persistentStorage: false
  persistentStorageStatic: false
  efsFileSytemIdAndPath: ""
  storageLimit: 5000Mi
  storageClass: standard
  dryRun: false
  resources:
    hub:
      limits:
        cpu: "0"
        memory: 5Gi
      requests:
        cpu: 50m
        memory: 50Mi
    sniffer:
      limits:
        cpu: "0"
        memory: 5Gi
      requests:
        cpu: 50m
        memory: 50Mi
    tracer:
      limits:
        cpu: "0"
        memory: 5Gi
      requests:
        cpu: 50m
        memory: 50Mi
  serviceMesh: true
  tls: true
  disableTlsLog: true
  packetCapture: best
  ignoreTainted: false
  labels: {}
  annotations: {}
  nodeSelectorTerms:
    - matchExpressions:
        - key: kubernetes.io/os
          operator: In
          values:
            - linux
  auth:
    enabled: false
    type: saml
    saml:
      idpMetadataUrl: ""
      x509crt: ""
      x509key: ""
      roleAttribute: role
      roles:
        admin:
          filter: ""
          canDownloadPCAP: true
          canUseScripting: true
          canUpdateTargetedPods: true
          canStopTrafficCapturing: true
          showAdminConsoleLink: true
  ingress:
    enabled: false
    className: ""
    host: ks.svc.cluster.local
    tls: []
    annotations: {}
  ipv6: true
  debug: false
  telemetry:
    enabled: true
  resourceGuard:
    enabled: false
  sentry:
    enabled: false
    environment: production
  defaultFilter: "!dns and !tcp and !udp and !icmp"
  scriptingDisabled: false
  targetedPodsUpdateDisabled: false
  presetFiltersChangingEnabled: true
  recordingDisabled: false
  stopTrafficCapturingDisabled: false
  capabilities:
    networkCapture:
      - NET_RAW
      - NET_ADMIN
    serviceMeshCapture:
      - SYS_ADMIN
      - SYS_PTRACE
      - DAC_OVERRIDE
    ebpfCapture:
      - SYS_ADMIN
      - SYS_PTRACE
      - SYS_RESOURCE
      - IPC_LOCK
  globalFilter: ""
  enabledDissectors:
    - amqp
    - dns
    - http
    - icmp
    - kafka
    - redis
    - sctp
    - syscall
    - ws
    - tls
  metrics:
    port: 49100
  pprof:
    enabled: false
    port: 8000
    view: flamegraph
  misc:
    jsonTTL: 5m
    pcapTTL: 10s
    pcapErrorTTL: 60s
    trafficSampleRate: 100
    tcpStreamChannelTimeoutMs: 10000
    tcpStreamChannelTimeoutShow: false
    resolutionStrategy: auto
    duplicateTimeframe: 200ms
    detectDuplicates: false
    staleTimeoutSeconds: 30
logs:
  file: ""
  grep: ""
pcapdump:
  enabled: true
  timeInterval: 1m
  maxTime: 1h
  maxSize: 500MB
  pcapSrcDir: pcapdump
kube:
  configPath: ""
  context: ""
dumpLogs: false
headless: false
license: ""
cloudLicenseEnabled: true
supportChatEnabled: true
internetConnectivity: true
dissectorsUpdatingEnabled: true
scripting:
  env: {}
  source: ""
  sources: []
  watchScripts: true
  active: []
  console: true
timezone: ""
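
To start from these defaults, dump them to a local file, edit, and pass the file back in (the plain Helm workflow; the repo URL matches `tap.release.repo` above):

```shell
helm repo add kubeshark https://helm.kubeshark.co
helm show values kubeshark/kubeshark > values.yaml
# edit values.yaml, then:
helm install kubeshark kubeshark/kubeshark -f values.yaml
```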

94  install.sh  Normal file
@@ -0,0 +1,94 @@
#!/bin/sh

EXE_NAME=kubeshark
ALIAS_NAME=ks
PROG_NAME=Kubeshark
INSTALL_PATH=/usr/local/bin/$EXE_NAME
ALIAS_PATH=/usr/local/bin/$ALIAS_NAME
REPO=https://github.com/kubeshark/kubeshark
OS=$(echo $(uname -s) | tr '[:upper:]' '[:lower:]')
ARCH=$(echo $(uname -m) | tr '[:upper:]' '[:lower:]')
SUPPORTED_PAIRS="linux_amd64 linux_arm64 darwin_amd64 darwin_arm64"

ESC="\033["
F_DEFAULT=39
F_RED=31
F_GREEN=32
F_YELLOW=33
B_DEFAULT=49
B_RED=41
B_BLUE=44
B_LIGHT_BLUE=104

if [ "$ARCH" = "x86_64" ]; then
    ARCH="amd64"
fi

if [ "$ARCH" = "aarch64" ]; then
    ARCH="arm64"
fi

echo $SUPPORTED_PAIRS | grep -w -q "${OS}_${ARCH}"

if [ $? != 0 ] ; then
    echo "\n${ESC}${F_RED}m🛑 Unsupported OS \"$OS\" or architecture \"$ARCH\". Failed to install $PROG_NAME.${ESC}${F_DEFAULT}m"
    echo "${ESC}${B_RED}mPlease report 🐛 to $REPO/issues${ESC}${F_DEFAULT}m"
    exit 1
fi

# Check for Homebrew and kubeshark installation
if command -v brew >/dev/null; then
    if brew list kubeshark &>/dev/null; then
        echo "📦 Found $PROG_NAME instance installed with Homebrew"
        echo "${ESC}${F_GREEN}m⬇️ Removing before installation with script${ESC}${F_DEFAULT}m"
        brew uninstall kubeshark
    fi
fi

echo "\n🦈 ${ESC}${F_DEFAULT};${B_BLUE}m Started to download $PROG_NAME ${ESC}${B_DEFAULT};${F_DEFAULT}m"

if curl -# --fail -Lo $EXE_NAME ${REPO}/releases/latest/download/${EXE_NAME}_${OS}_${ARCH} ; then
    chmod +x $PWD/$EXE_NAME
    echo "\n${ESC}${F_GREEN}m⬇️ $PROG_NAME is downloaded into $PWD/$EXE_NAME${ESC}${F_DEFAULT}m"
else
    echo "\n${ESC}${F_RED}m🛑 Couldn't download ${REPO}/releases/latest/download/${EXE_NAME}_${OS}_${ARCH}\n\
⚠️ Check your internet connection.\n\
⚠️ Make sure 'curl' command is available.\n\
⚠️ Make sure there is no directory named '${EXE_NAME}' in ${PWD}\n\
${ESC}${F_DEFAULT}m"
    echo "${ESC}${B_RED}mPlease report 🐛 to $REPO/issues${ESC}${F_DEFAULT}m"
    exit 1
fi

use_cmd=$EXE_NAME
printf "Do you want to install system-wide? Requires sudo 😇 (y/N)? "
old_stty_cfg=$(stty -g)
stty raw -echo ; answer=$(head -c 1) ; stty $old_stty_cfg
if echo "$answer" | grep -iq "^y" ;then
    echo "$answer"
    sudo mv ./$EXE_NAME $INSTALL_PATH || exit 1
    echo "${ESC}${F_GREEN}m$PROG_NAME is installed into $INSTALL_PATH${ESC}${F_DEFAULT}m\n"

    ls $ALIAS_PATH >> /dev/null 2>&1
    if [ $? != 0 ] ; then
        printf "Do you want to add 'ks' alias for Kubeshark? (y/N)? "
        old_stty_cfg=$(stty -g)
        stty raw -echo ; answer=$(head -c 1) ; stty $old_stty_cfg
        if echo "$answer" | grep -iq "^y" ; then
            echo "$answer"
            sudo ln -s $INSTALL_PATH $ALIAS_PATH

            use_cmd=$ALIAS_NAME
        else
            echo "$answer"
        fi
    else
        use_cmd=$ALIAS_NAME
    fi
else
    echo "$answer"
    use_cmd="./$EXE_NAME"
fi

echo "${ESC}${F_GREEN}m✅ You can use the ${ESC}${F_DEFAULT};${B_LIGHT_BLUE}m $use_cmd ${ESC}${B_DEFAULT};${F_GREEN}m command now.${ESC}${F_DEFAULT}m"
echo "\n${ESC}${F_YELLOW}mPlease give us a star 🌟 on ${ESC}${F_DEFAULT}m$REPO${ESC}${F_YELLOW}m if you ❤️ $PROG_NAME!${ESC}${F_DEFAULT}m"
|
||||
@@ -4,15 +4,16 @@ import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"time"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/utils"

	"github.com/rs/zerolog/log"
	core "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
)

@@ -24,6 +25,7 @@ type Connector struct {

const DefaultRetries = 3
const DefaultTimeout = 2 * time.Second
const DefaultSleep = 1 * time.Second

func NewConnector(url string, retries int, timeout time.Duration) *Connector {
	return &Connector{

@@ -45,7 +47,7 @@ func (connector *Connector) TestConnection(path string) error {
			break
		}
		retriesLeft -= 1
		time.Sleep(time.Second)
		time.Sleep(5 * DefaultSleep)
	}

	if retriesLeft == 0 {

@@ -64,7 +66,6 @@ func (connector *Connector) isReachable(path string) (bool, error) {
}

func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
	// TODO: This request is responsible for proxy_server.go:147] Error while proxying request: context canceled log
	postWorkerUrl := fmt.Sprintf("%s/pods/worker", connector.url)

	if podMarshalled, err := json.Marshal(pod); err != nil {

@@ -72,68 +73,85 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
	} else {
		ok := false
		for !ok {
			if _, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client); err != nil {
			var resp *http.Response
			if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
				if _, ok := err.(*url.Error); ok {
					break
				}
				log.Debug().Err(err).Msg("Failed sending the Worker pod to Hub:")
				log.Warn().Err(err).Msg("Failed sending the Worker pod to Hub. Retrying...")
			} else {
				ok = true
				log.Debug().Interface("worker-pod", pod).Msg("Reported worker pod to Hub:")
				connector.PostStorageLimitToHub(config.Config.Tap.StorageLimitBytes())
				return
			}
			time.Sleep(time.Second)
			time.Sleep(DefaultSleep)
		}
	}
}

type postStorageLimit struct {
	Limit int64 `json:"limit"`
type postLicenseRequest struct {
	License string `json:"license"`
}

func (connector *Connector) PostStorageLimitToHub(limit int64) {
	payload := &postStorageLimit{
		Limit: limit,
func (connector *Connector) PostLicense(license string) {
	postLicenseUrl := fmt.Sprintf("%s/license", connector.url)

	payload := postLicenseRequest{
		License: license,
	}
	postStorageLimitUrl := fmt.Sprintf("%s/pcaps/set-storage-limit", connector.url)

	if payloadMarshalled, err := json.Marshal(payload); err != nil {
		log.Error().Err(err).Msg("Failed to marshal the storage limit:")
		log.Error().Err(err).Msg("Failed to marshal the payload:")
	} else {
		ok := false
		for !ok {
			if _, err = utils.Post(postStorageLimitUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil {
			var resp *http.Response
			if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
				if _, ok := err.(*url.Error); ok {
					break
				}
				log.Debug().Err(err).Msg("Failed sending the storage limit to Hub:")
				log.Warn().Err(err).Msg("Failed sending the license to Hub. Retrying...")
			} else {
				ok = true
				log.Debug().Int("limit", int(limit)).Msg("Reported storage limit to Hub:")
				log.Debug().Str("license", license).Msg("Reported license to Hub:")
				return
			}
			time.Sleep(time.Second)
			time.Sleep(DefaultSleep)
		}
	}
}

func (connector *Connector) PostTargettedPodsToHub(pods []core.Pod) {
	postTargettedUrl := fmt.Sprintf("%s/pods/targetted", connector.url)
func (connector *Connector) PostPcapsMerge(out *os.File) {
	postEnvUrl := fmt.Sprintf("%s/pcaps/merge", connector.url)

	if podsMarshalled, err := json.Marshal(pods); err != nil {
		log.Error().Err(err).Msg("Failed to marshal the targetted pods:")
	if envMarshalled, err := json.Marshal(map[string]string{"query": ""}); err != nil {
		log.Error().Err(err).Msg("Failed to marshal the env:")
	} else {
		ok := false
		for !ok {
			if _, err = utils.Post(postTargettedUrl, "application/json", bytes.NewBuffer(podsMarshalled), connector.client); err != nil {
			var resp *http.Response
			if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
				if _, ok := err.(*url.Error); ok {
					break
				}
				log.Debug().Err(err).Msg("Failed sending the targetted pods to Hub:")
				log.Warn().Err(err).Msg("Failed exported PCAP download. Retrying...")
			} else {
				ok = true
				log.Debug().Int("pod-count", len(pods)).Msg("Reported targetted pods to Hub:")
				defer resp.Body.Close()

				// Check server response
				if resp.StatusCode != http.StatusOK {
					log.Error().Str("status", resp.Status).Err(err).Msg("Failed exported PCAP download.")
					return
				}

				// Write the body to file
				_, err = io.Copy(out, resp.Body)
				if err != nil {
					log.Error().Err(err).Msg("Failed writing PCAP export:")
					return
				}
				log.Info().Str("path", out.Name()).Msg("Downloaded exported PCAP:")
				return
			}
			time.Sleep(time.Second)
			time.Sleep(DefaultSleep)
		}
	}
}
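Every Post* method in this diff follows one retry shape: marshal the payload once, POST until the Hub answers 200 OK, sleep a fixed interval between attempts, and treat a *url.Error (unreachable Hub) as fatal. The sketch below restates that shape with plain net/http instead of the repo's utils.Post helper (whose full signature, beyond the license argument visible above, is not shown here); the package and function names are illustrative:

package connectorsketch // illustrative name, not part of the repo

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/url"
	"time"
)

// postWithRetry marshals payload and POSTs it to endpoint until a 200 OK
// arrives, backing off by a fixed sleep between attempts. A *url.Error
// aborts the loop, mirroring how the connector code above gives up when
// the Hub is unreachable.
func postWithRetry(client *http.Client, endpoint string, payload interface{}, sleep time.Duration) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	for {
		resp, err := client.Post(endpoint, "application/json", bytes.NewBuffer(body))
		if err != nil {
			if _, fatal := err.(*url.Error); fatal {
				return err // mirror the connector: a url.Error breaks the loop
			}
			time.Sleep(sleep)
			continue
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			return nil
		}
		time.Sleep(sleep)
	}
}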
13  kubectl.sh  Executable file
@@ -0,0 +1,13 @@
#!/bin/bash

# Useful kubectl commands for Kubeshark development

# This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
if [[ "$1" = "view-all-resources" ]] ; then
    kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -o yaml | code -
fi

# This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
if [[ "$1" = "view-kubeshark-resources" ]] ; then
    kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -n kubeshark -o yaml | code -
fi
139  kubernetes/config.go  Normal file
@@ -0,0 +1,139 @@
package kubernetes

import (
	"context"
	"encoding/json"
	"slices"
	"strings"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/misc"
	"github.com/rs/zerolog/log"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	SUFFIX_SECRET = "secret"
	SUFFIX_CONFIG_MAP = "config-map"
	SECRET_LICENSE = "LICENSE"
	CONFIG_POD_REGEX = "POD_REGEX"
	CONFIG_NAMESPACES = "NAMESPACES"
	CONFIG_EXCLUDED_NAMESPACES = "EXCLUDED_NAMESPACES"
	CONFIG_SCRIPTING_ENV = "SCRIPTING_ENV"
	CONFIG_INGRESS_ENABLED = "INGRESS_ENABLED"
	CONFIG_INGRESS_HOST = "INGRESS_HOST"
	CONFIG_PROXY_FRONT_PORT = "PROXY_FRONT_PORT"
	CONFIG_AUTH_ENABLED = "AUTH_ENABLED"
	CONFIG_AUTH_TYPE = "AUTH_TYPE"
	CONFIG_AUTH_SAML_IDP_METADATA_URL = "AUTH_SAML_IDP_METADATA_URL"
	CONFIG_SCRIPTING_SCRIPTS = "SCRIPTING_SCRIPTS"
	CONFIG_SCRIPTING_ACTIVE_SCRIPTS = "SCRIPTING_ACTIVE_SCRIPTS"
	CONFIG_PCAP_DUMP_ENABLE = "PCAP_DUMP_ENABLE"
	CONFIG_TIME_INTERVAL = "TIME_INTERVAL"
	CONFIG_MAX_TIME = "MAX_TIME"
	CONFIG_MAX_SIZE = "MAX_SIZE"
)

func SetSecret(provider *Provider, key string, value string) (updated bool, err error) {
	var secret *v1.Secret
	secret, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_SECRET, metav1.GetOptions{})
	if err != nil {
		return
	}

	if secret.StringData[key] != value {
		updated = true
	}
	secret.Data[key] = []byte(value)

	_, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
	if err == nil {
		if updated {
			log.Info().Str("secret", key).Str("value", value).Msg("Updated:")
		}
	} else {
		log.Error().Str("secret", key).Err(err).Send()
	}
	return
}

func GetConfig(provider *Provider, key string) (value string, err error) {
	var configMap *v1.ConfigMap
	configMap, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, metav1.GetOptions{})
	if err != nil {
		return
	}

	value = configMap.Data[key]
	return
}

func SetConfig(provider *Provider, key string, value string) (updated bool, err error) {
	var configMap *v1.ConfigMap
	configMap, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, metav1.GetOptions{})
	if err != nil {
		return
	}

	if configMap.Data[key] != value {
		updated = true
	}
	configMap.Data[key] = value

	_, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
	if err == nil {
		if updated {
			log.Info().
				Str("config", key).
				Str("value", func() string {
					if len(value) > 10 {
						return value[:10]
					}
					return value
				}()).
				Int("length", len(value)).
				Msg("Updated. Printing only 10 first characters of value:")
		}
	} else {
		log.Error().Str("config", key).Err(err).Send()
	}
	return
}

func ConfigGetScripts(provider *Provider) (scripts map[int64]misc.ConfigMapScript, err error) {
	var data string
	data, err = GetConfig(provider, CONFIG_SCRIPTING_SCRIPTS)
	if err != nil {
		return
	}

	err = json.Unmarshal([]byte(data), &scripts)
	return
}

func IsActiveScript(provider *Provider, title string) bool {
	configActiveScripts, err := GetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS)
	if err != nil {
		return false
	}
	return strings.Contains(configActiveScripts, title)
}

func DeleteActiveScriptByTitle(provider *Provider, title string) (err error) {
	configActiveScripts, err := GetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS)
	if err != nil {
		return err
	}
	activeScripts := strings.Split(configActiveScripts, ",")

	idx := slices.Index(activeScripts, title)
	if idx != -1 {
		activeScripts = slices.Delete(activeScripts, idx, idx+1)
		_, err = SetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS, strings.Join(activeScripts, ","))
		if err != nil {
			return err
		}
	}
	return nil
}
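These helpers read and write the release's ConfigMap and Secret in place. A short hedged usage fragment, composing only functions and constants defined in the file above; it assumes an initialized *kubernetes.Provider (from NewProvider, shown later in this diff) and would live in a package importing github.com/kubeshark/kubeshark/kubernetes:

// illustrative fragment: flip the PCAP-dump flag through the helpers above
func disablePcapDump(provider *kubernetes.Provider) error {
	current, err := kubernetes.GetConfig(provider, kubernetes.CONFIG_PCAP_DUMP_ENABLE)
	if err != nil {
		return err
	}
	if current != "false" {
		// SetConfig reports whether the stored value actually changed
		if _, err := kubernetes.SetConfig(provider, kubernetes.CONFIG_PCAP_DUMP_ENABLE, "false"); err != nil {
			return err
		}
	}
	return nil
}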
@@ -1,25 +1,12 @@
package kubernetes

const (
	SelfResourcesPrefix = "kubeshark-"
	FrontPodName = SelfResourcesPrefix + "front"
	SELF_RESOURCES_PREFIX = "kubeshark-"
	FrontPodName = SELF_RESOURCES_PREFIX + "front"
	FrontServiceName = FrontPodName
	HubPodName = SelfResourcesPrefix + "hub"
	HubPodName = SELF_RESOURCES_PREFIX + "hub"
	HubServiceName = HubPodName
	ClusterRoleBindingName = SelfResourcesPrefix + "cluster-role-binding"
	ClusterRoleName = SelfResourcesPrefix + "cluster-role"
	K8sAllNamespaces = ""
	RoleBindingName = SelfResourcesPrefix + "role-binding"
	RoleName = SelfResourcesPrefix + "role"
	ServiceAccountName = SelfResourcesPrefix + "service-account"
	WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set"
	WorkerPodName = SelfResourcesPrefix + "worker"
	ConfigMapName = SelfResourcesPrefix + "config"
	MinKubernetesServerVersion = "1.16.0"
)

const (
	LabelPrefixApp = "app.kubernetes.io/"
	LabelManagedBy = LabelPrefixApp + "managed-by"
	LabelCreatedBy = LabelPrefixApp + "created-by"
	AppLabelKey = "app.kubeshark.co/app"
)
192  kubernetes/cp.go  Normal file
@@ -0,0 +1,192 @@
package kubernetes

import (
	"archive/tar"
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/rs/zerolog/log"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/remotecommand"
)

func CopyFromPod(ctx context.Context, provider *Provider, pod v1.Pod, srcPath string, dstPath string) error {
	const containerName = "sniffer"
	cmdArr := []string{"tar", "cf", "-", srcPath}
	req := provider.clientSet.CoreV1().RESTClient().
		Post().
		Namespace(pod.Namespace).
		Resource("pods").
		Name(pod.Name).
		SubResource("exec").
		VersionedParams(&v1.PodExecOptions{
			Container: containerName,
			Command:   cmdArr,
			Stdin:     true,
			Stdout:    true,
			Stderr:    true,
			TTY:       false,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(&provider.clientConfig, "POST", req.URL())
	if err != nil {
		return err
	}

	reader, outStream := io.Pipe()
	errReader, errStream := io.Pipe()
	go logErrors(errReader, pod)
	go func() {
		defer outStream.Close()
		err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
			Stdin:  os.Stdin,
			Stdout: outStream,
			Stderr: errStream,
			Tty:    false,
		})
		if err != nil {
			log.Error().Err(err).Str("pod", pod.Name).Msg("SPDYExecutor:")
		}
	}()

	prefix := getPrefix(srcPath)
	prefix = path.Clean(prefix)
	prefix = stripPathShortcuts(prefix)
	dstPath = path.Join(dstPath, path.Base(prefix))
	err = untarAll(reader, dstPath, prefix)
	// fo(reader)
	return err
}

// func fo(fi io.Reader) {
// 	fo, err := os.Create("output.tar")
// 	if err != nil {
// 		panic(err)
// 	}

// 	// make a buffer to keep chunks that are read
// 	buf := make([]byte, 1024)
// 	for {
// 		// read a chunk
// 		n, err := fi.Read(buf)
// 		if err != nil && err != io.EOF {
// 			panic(err)
// 		}
// 		if n == 0 {
// 			break
// 		}

// 		// write a chunk
// 		if _, err := fo.Write(buf[:n]); err != nil {
// 			panic(err)
// 		}
// 	}
// }

func logErrors(reader io.Reader, pod v1.Pod) {
	r := bufio.NewReader(reader)
	for {
		msg, _, err := r.ReadLine()
		log.Warn().Str("pod", pod.Name).Str("msg", string(msg)).Msg("SPDYExecutor:")
		if err != nil {
			if err != io.EOF {
				log.Error().Err(err).Send()
			}
			return
		}
	}
}

func untarAll(reader io.Reader, destDir, prefix string) error {
	tarReader := tar.NewReader(reader)
	for {
		header, err := tarReader.Next()
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}

		if !strings.HasPrefix(header.Name, prefix) {
			return fmt.Errorf("tar contents corrupted")
		}

		mode := header.FileInfo().Mode()
		destFileName := filepath.Join(destDir, header.Name[len(prefix):])

		baseName := filepath.Dir(destFileName)
		if err := os.MkdirAll(baseName, 0755); err != nil {
			return err
		}
		if header.FileInfo().IsDir() {
			if err := os.MkdirAll(destFileName, 0755); err != nil {
				return err
			}
			continue
		}

		evaledPath, err := filepath.EvalSymlinks(baseName)
		if err != nil {
			return err
		}

		if mode&os.ModeSymlink != 0 {
			linkname := header.Linkname

			if !filepath.IsAbs(linkname) {
				_ = filepath.Join(evaledPath, linkname)
			}

			if err := os.Symlink(linkname, destFileName); err != nil {
				return err
			}
		} else {
			outFile, err := os.Create(destFileName)
			if err != nil {
				return err
			}
			defer outFile.Close()
			if _, err := io.Copy(outFile, tarReader); err != nil {
				return err
			}
			if err := outFile.Close(); err != nil {
				return err
			}
		}
	}

	return nil
}

func getPrefix(file string) string {
	return strings.TrimLeft(file, "/")
}

func stripPathShortcuts(p string) string {
	newPath := p
	trimmed := strings.TrimPrefix(newPath, "../")

	for trimmed != newPath {
		newPath = trimmed
		trimmed = strings.TrimPrefix(newPath, "../")
	}

	// trim leftover {".", ".."}
	if newPath == "." || newPath == ".." {
		newPath = ""
	}

	if len(newPath) > 0 && string(newPath[0]) == "/" {
		return newPath[1:]
	}

	return newPath
}
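CopyFromPod streams a `tar cf - <src>` out of the pod's "sniffer" container over the exec subresource and untars it locally. A hedged usage fragment; the in-pod source path and destination here are assumptions for illustration, not values taken from this diff:

// illustrative fragment: export a worker pod's pcapdump directory locally
// via the tar-over-exec helper above
func exportPcaps(ctx context.Context, provider *kubernetes.Provider, pod v1.Pod) error {
	// srcPath is whatever directory the worker writes PCAPs to (assumed here)
	return kubernetes.CopyFromPod(ctx, provider, pod, "/app/data/pcapdump", "./pcaps")
}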
186  kubernetes/helm/helm.go  Normal file
@@ -0,0 +1,186 @@
package helm

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/misc"
	"github.com/pkg/errors"
	"github.com/rs/zerolog/log"
	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
	"helm.sh/helm/v3/pkg/downloader"
	"helm.sh/helm/v3/pkg/getter"
	"helm.sh/helm/v3/pkg/kube"
	"helm.sh/helm/v3/pkg/registry"
	"helm.sh/helm/v3/pkg/release"
	"helm.sh/helm/v3/pkg/repo"
)

const ENV_HELM_DRIVER = "HELM_DRIVER"

var settings = cli.New()

type Helm struct {
	repo             string
	releaseName      string
	releaseNamespace string
}

func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
	return &Helm{
		repo:             repo,
		releaseName:      releaseName,
		releaseNamespace: releaseNamespace,
	}
}

func parseOCIRef(chartRef string) (string, string, error) {
	refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
	caps := refTagRegexp.FindStringSubmatch(chartRef)
	if len(caps) != 4 {
		return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
	}
	chartRef = caps[1]
	tag := caps[3]

	return chartRef, tag, nil
}

func (h *Helm) Install() (rel *release.Release, err error) {
	kubeConfigPath := config.Config.KubeConfigPath()
	actionConfig := new(action.Configuration)
	if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
		log.Info().Msgf(format, v...)
	}); err != nil {
		return
	}

	client := action.NewInstall(actionConfig)
	client.Namespace = h.releaseNamespace
	client.ReleaseName = h.releaseName

	chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
	if chartPath == "" {
		var chartURL string
		chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
		if err != nil {
			return
		}

		var cp string
		cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
		if err != nil {
			return
		}

		m := &downloader.Manager{
			Out:              os.Stdout,
			ChartPath:        cp,
			Keyring:          client.ChartPathOptions.Keyring,
			SkipUpdate:       false,
			Getters:          getter.All(settings),
			RepositoryConfig: settings.RepositoryConfig,
			RepositoryCache:  settings.RepositoryCache,
			Debug:            settings.Debug,
		}

		dl := downloader.ChartDownloader{
			Out:              m.Out,
			Verify:           m.Verify,
			Keyring:          m.Keyring,
			RepositoryConfig: m.RepositoryConfig,
			RepositoryCache:  m.RepositoryCache,
			RegistryClient:   m.RegistryClient,
			Getters:          m.Getters,
			Options: []getter.Option{
				getter.WithInsecureSkipVerifyTLS(false),
			},
		}

		repoPath := filepath.Dir(m.ChartPath)
		err = os.MkdirAll(repoPath, os.ModePerm)
		if err != nil {
			return
		}

		version := ""
		if registry.IsOCI(chartURL) {
			chartURL, version, err = parseOCIRef(chartURL)
			if err != nil {
				return
			}
			dl.Options = append(dl.Options,
				getter.WithRegistryClient(m.RegistryClient),
				getter.WithTagName(version))
		}

		log.Info().
			Str("url", chartURL).
			Str("repo-path", repoPath).
			Msg("Downloading Helm chart:")

		if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
			return
		}

		chartPath = m.ChartPath
	}
	var chart *chart.Chart
	chart, err = loader.Load(chartPath)
	if err != nil {
		return
	}

	log.Info().
		Str("release", chart.Metadata.Name).
		Str("version", chart.Metadata.Version).
		Strs("source", chart.Metadata.Sources).
		Str("kube-version", chart.Metadata.KubeVersion).
		Msg("Installing using Helm:")

	var configMarshalled []byte
	configMarshalled, err = json.Marshal(config.Config)
	if err != nil {
		return
	}

	var configUnmarshalled map[string]interface{}
	err = json.Unmarshal(configMarshalled, &configUnmarshalled)
	if err != nil {
		return
	}

	rel, err = client.Run(chart, configUnmarshalled)
	if err != nil {
		return
	}

	return
}

func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
	kubeConfigPath := config.Config.KubeConfigPath()
	actionConfig := new(action.Configuration)
	if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
		log.Info().Msgf(format, v...)
	}); err != nil {
		return
	}

	client := action.NewUninstall(actionConfig)

	resp, err = client.Run(h.releaseName)
	if err != nil {
		return
	}

	return
}
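A hedged usage fragment for the wrapper above: the chart repository URL and release/namespace names below are assumptions for illustration, not values taken from this diff:

// illustrative fragment: install the release through the wrapper above,
// then tear it down with the matching Uninstall
func deploy() error {
	h := helm.NewHelm("https://helm.kubeshark.co", "kubeshark", "kubeshark") // repo URL assumed
	if _, err := h.Install(); err != nil {
		return err
	}
	// later: _, err := h.Uninstall()
	return nil
}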
@@ -1,38 +1,30 @@
package kubernetes

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"net/url"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/kubeshark/kubeshark/docker"
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/misc"
	"github.com/kubeshark/kubeshark/semver"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
	auth "k8s.io/api/authorization/v1"
	"github.com/tanqiangyes/grep-go/reader"
	core "k8s.io/api/core/v1"
	rbac "k8s.io/api/rbac/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/apimachinery/pkg/watch"
	applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
	applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
	applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	watchtools "k8s.io/client-go/tools/watch"
)

type Provider struct {

@@ -43,14 +35,6 @@ type Provider struct {
	createdBy string
}

const (
	fieldManagerName = "kubeshark-manager"
	procfsVolumeName = "proc"
	procfsMountPath = "/hostproc"
	sysfsVolumeName = "sys"
	sysfsMountPath = "/sys"
)

func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
	kubernetesConfig := loadKubernetesConfiguration(kubeConfigPath, contextName)
	restClientConfig, err := kubernetesConfig.ClientConfig()

@@ -89,368 +73,11 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
	}, nil
}

//NewProviderInCluster Used in another repo that calls this function
func NewProviderInCluster() (*Provider, error) {
	restClientConfig, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	clientSet, err := getClientSet(restClientConfig)
	if err != nil {
		return nil, err
	}

	return &Provider{
		clientSet:        clientSet,
		kubernetesConfig: nil, // not relevant in cluster
		clientConfig:     *restClientConfig,
		managedBy:        misc.Program,
		createdBy:        misc.Program,
	}, nil
}

func (provider *Provider) CurrentNamespace() (string, error) {
	if provider.kubernetesConfig == nil {
		return "", errors.New("kubernetesConfig is nil, The CLI will not work with in-cluster kubernetes config, use a kubeconfig file when initializing the Provider")
	}
	ns, _, err := provider.kubernetesConfig.Namespace()
	return ns, err
}

func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name string) error {
	fieldSelector := fmt.Sprintf("metadata.name=%s", name)
	var limit int64 = 1
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			options.Limit = limit
			return provider.clientSet.CoreV1().Namespaces().List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			options.Limit = limit
			return provider.clientSet.CoreV1().Namespaces().Watch(ctx, options)
		},
	}

	var preconditionFunc watchtools.PreconditionFunc = func(store cache.Store) (bool, error) {
		_, exists, err := store.Get(&core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}})
		if err != nil {
			return false, err
		}
		if exists {
			return false, nil
		}
		return true, nil
	}

	conditionFunc := func(e watch.Event) (bool, error) {
		if e.Type == watch.Deleted {
			return true, nil
		}
		return false, nil
	}

	obj := &core.Namespace{}
	_, err := watchtools.UntilWithSync(ctx, lw, obj, preconditionFunc, conditionFunc)

	return err
}

func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*core.Namespace, error) {
	namespaceSpec := &core.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				LabelManagedBy: provider.managedBy,
				LabelCreatedBy: provider.createdBy,
			},
		},
	}
	return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
}

type PodOptions struct {
	Namespace          string
	PodName            string
	PodImage           string
	ServiceAccountName string
	Resources          Resources
	ImagePullPolicy    core.PullPolicy
	Debug              bool
}

func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
	configMapVolume := &core.ConfigMapVolumeSource{}
	configMapVolume.Name = ConfigMapName

	cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
	if err != nil {
		return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
	}
	memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
	if err != nil {
		return nil, fmt.Errorf("invalid memory limit for %s container", opts.PodName)
	}
	cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
	if err != nil {
		return nil, fmt.Errorf("invalid cpu request for %s container", opts.PodName)
	}
	memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
	if err != nil {
		return nil, fmt.Errorf("invalid memory request for %s container", opts.PodName)
	}

	command := []string{
		"./hub",
	}

	if opts.Debug {
		command = append(command, "-debug")
	}

	containers := []core.Container{
		{
			Name:            opts.PodName,
			Image:           opts.PodImage,
			ImagePullPolicy: opts.ImagePullPolicy,
			Command:         command,
			Resources: core.ResourceRequirements{
				Limits: core.ResourceList{
					"cpu":    cpuLimit,
					"memory": memLimit,
				},
				Requests: core.ResourceList{
					"cpu":    cpuRequests,
					"memory": memRequests,
				},
			},
		},
	}

	pod := &core.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: opts.PodName,
			Labels: map[string]string{
				"app":          opts.PodName,
				LabelManagedBy: provider.managedBy,
				LabelCreatedBy: provider.createdBy,
			},
		},
		Spec: core.PodSpec{
			Containers:                    containers,
			DNSPolicy:                     core.DNSClusterFirstWithHostNet,
			TerminationGracePeriodSeconds: new(int64),
			Tolerations: []core.Toleration{
				{
					Operator: core.TolerationOpExists,
					Effect:   core.TaintEffectNoExecute,
				},
				{
					Operator: core.TolerationOpExists,
					Effect:   core.TaintEffectNoSchedule,
				},
			},
		},
	}

	//define the service account only when it exists to prevent pod crash
	if opts.ServiceAccountName != "" {
		pod.Spec.ServiceAccountName = opts.ServiceAccountName
	}
	return pod, nil
}

func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
	configMapVolume := &core.ConfigMapVolumeSource{}
	configMapVolume.Name = ConfigMapName

	cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
	if err != nil {
		return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
	}
	memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
	if err != nil {
		return nil, fmt.Errorf("invalid memory limit for %s container", opts.PodName)
	}
	cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
	if err != nil {
		return nil, fmt.Errorf("invalid cpu request for %s container", opts.PodName)
	}
	memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
	if err != nil {
		return nil, fmt.Errorf("invalid memory request for %s container", opts.PodName)
	}

	volumeMounts := []core.VolumeMount{}
	volumes := []core.Volume{}

	containers := []core.Container{
		{
			Name:            opts.PodName,
			Image:           docker.GetFrontImage(),
			ImagePullPolicy: opts.ImagePullPolicy,
			VolumeMounts:    volumeMounts,
			ReadinessProbe: &core.Probe{
				FailureThreshold: 3,
				ProbeHandler: core.ProbeHandler{
					TCPSocket: &core.TCPSocketAction{
						Port: intstr.Parse("80"),
					},
				},
				PeriodSeconds:    1,
				SuccessThreshold: 1,
				TimeoutSeconds:   1,
			},
			Resources: core.ResourceRequirements{
				Limits: core.ResourceList{
					"cpu":    cpuLimit,
					"memory": memLimit,
				},
				Requests: core.ResourceList{
					"cpu":    cpuRequests,
					"memory": memRequests,
				},
			},
			Env: []core.EnvVar{
				{
					Name:  "REACT_APP_DEFAULT_FILTER",
					Value: "timestamp >= now()",
				},
				{
					Name:  "REACT_APP_HUB_HOST",
					Value: " ",
				},
				{
					Name:  "REACT_APP_HUB_PORT",
					Value: hubPort,
				},
			},
		},
	}

	pod := &core.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: opts.PodName,
			Labels: map[string]string{
				"app":          opts.PodName,
				LabelManagedBy: provider.managedBy,
				LabelCreatedBy: provider.createdBy,
			},
		},
		Spec: core.PodSpec{
			Containers:                    containers,
			Volumes:                       volumes,
			DNSPolicy:                     core.DNSClusterFirstWithHostNet,
			TerminationGracePeriodSeconds: new(int64),
			Tolerations: []core.Toleration{
				{
					Operator: core.TolerationOpExists,
					Effect:   core.TaintEffectNoExecute,
				},
				{
					Operator: core.TolerationOpExists,
					Effect:   core.TaintEffectNoSchedule,
				},
			},
		},
	}

	//define the service account only when it exists to prevent pod crash
	if opts.ServiceAccountName != "" {
		pod.Spec.ServiceAccountName = opts.ServiceAccountName
	}
	return pod, nil
}

func (provider *Provider) CreatePod(ctx context.Context, namespace string, podSpec *core.Pod) (*core.Pod, error) {
	return provider.clientSet.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
}

func (provider *Provider) CreateService(ctx context.Context, namespace string, serviceName string, appLabelValue string, targetPort int, port int32) (*core.Service, error) {
	service := core.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: serviceName,
			Labels: map[string]string{
				LabelManagedBy: provider.managedBy,
				LabelCreatedBy: provider.createdBy,
			},
		},
		Spec: core.ServiceSpec{
			Ports: []core.ServicePort{
				{
					Name:       serviceName,
					TargetPort: intstr.FromInt(targetPort),
					Port:       port,
				},
			},
			Type:     core.ServiceTypeClusterIP,
			Selector: map[string]string{"app": appLabelValue},
		},
	}
	return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
}

func (provider *Provider) CanI(ctx context.Context, namespace string, resource string, verb string, group string) (bool, error) {
	selfSubjectAccessReview := &auth.SelfSubjectAccessReview{
		Spec: auth.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &auth.ResourceAttributes{
				Namespace: namespace,
				Resource:  resource,
				Verb:      verb,
				Group:     group,
			},
		},
	}

	response, err := provider.clientSet.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, selfSubjectAccessReview, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}

	return response.Status.Allowed, nil
}
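CanI wraps a SelfSubjectAccessReview, so the CLI can check a permission before attempting the operation. A hedged fragment showing how such a pre-flight composes with the method above; the namespace, verb, resource, and group values are assumptions for illustration:

// illustrative fragment: RBAC pre-flight before applying the worker DaemonSet
func canCreateDaemonSets(ctx context.Context, provider *kubernetes.Provider) (bool, error) {
	// "can I create daemonsets.apps in the kubeshark namespace?"
	return provider.CanI(ctx, "kubeshark", "daemonsets", "create", "apps")
}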
|
||||
func (provider *Provider) DoesNamespaceExist(ctx context.Context, name string) (bool, error) {
|
||||
namespaceResource, err := provider.clientSet.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(namespaceResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesConfigMapExist(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
configMapResource, err := provider.clientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(configMapResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesServiceAccountExist(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
serviceAccountResource, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(serviceAccountResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesServiceExist(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
serviceResource, err := provider.clientSet.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(serviceResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesClusterRoleExist(ctx context.Context, name string) (bool, error) {
|
||||
clusterRoleResource, err := provider.clientSet.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(clusterRoleResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesClusterRoleBindingExist(ctx context.Context, name string) (bool, error) {
|
||||
clusterRoleBindingResource, err := provider.clientSet.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(clusterRoleBindingResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesRoleExist(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
roleResource, err := provider.clientSet.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(roleResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) DoesRoleBindingExist(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
roleBindingResource, err := provider.clientSet.RbacV1().RoleBindings(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
return provider.doesResourceExist(roleBindingResource, err)
|
||||
}
|
||||
|
||||
func (provider *Provider) doesResourceExist(resource interface{}, err error) (bool, error) {
|
||||
// Getting NotFound error is the expected behavior when a resource does not exist.
|
||||
if k8serrors.IsNotFound(err) {
|
||||
@@ -464,431 +91,6 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
|
||||
return resource != nil, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error {
|
||||
serviceAccount := &core.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Labels: map[string]string{
|
||||
fmt.Sprintf("%s-cli-version", misc.Program): version,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
},
|
||||
},
|
||||
}
|
||||
clusterRole := &rbac.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterRoleName,
|
||||
Labels: map[string]string{
|
||||
fmt.Sprintf("%s-cli-version", misc.Program): version,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
},
|
||||
},
|
||||
Rules: []rbac.PolicyRule{
|
||||
{
|
||||
APIGroups: []string{"", "extensions", "apps"},
|
||||
Resources: resources,
|
||||
Verbs: []string{"list", "get", "watch"},
|
||||
},
|
||||
},
|
||||
}
|
||||
clusterRoleBinding := &rbac.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterRoleBindingName,
|
||||
Labels: map[string]string{
|
||||
fmt.Sprintf("%s-cli-version", misc.Program): version,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
},
|
||||
},
|
||||
RoleRef: rbac.RoleRef{
|
||||
Name: clusterRoleName,
|
||||
Kind: "ClusterRole",
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
Subjects: []rbac.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: serviceAccountName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
|
||||
if err != nil && !k8serrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
_, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
|
||||
if err != nil && !k8serrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
_, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
|
||||
if err != nil && !k8serrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error {
|
||||
serviceAccount := &core.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Labels: map[string]string{
|
||||
fmt.Sprintf("%s-cli-version", misc.Program): version,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
},
|
||||
},
|
||||
}
|
||||
role := &rbac.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleName,
|
||||
Labels: map[string]string{
|
||||
fmt.Sprintf("%s-cli-version", misc.Program): version,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
},
|
||||
},
|
||||
Rules: []rbac.PolicyRule{
|
||||
{
|
||||
APIGroups: []string{"", "extensions", "apps"},
|
||||
Resources: []string{"pods", "services", "endpoints"},
|
||||
Verbs: []string{"list", "get", "watch"},
|
||||
},
|
||||
},
|
||||
}
|
||||
roleBinding := &rbac.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleBindingName,
|
||||
Labels: map[string]string{
|
||||
fmt.Sprintf("%s-cli-version", misc.Program): version,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
},
|
||||
},
|
||||
RoleRef: rbac.RoleRef{
|
||||
Name: roleName,
|
||||
Kind: "Role",
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
Subjects: []rbac.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: serviceAccountName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
|
||||
if err != nil && !k8serrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
_, err = provider.clientSet.RbacV1().Roles(namespace).Create(ctx, role, metav1.CreateOptions{})
|
||||
if err != nil && !k8serrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
_, err = provider.clientSet.RbacV1().RoleBindings(namespace).Create(ctx, roleBinding, metav1.CreateOptions{})
|
||||
if err != nil && !k8serrors.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveNamespace(ctx context.Context, name string) error {
|
||||
err := provider.clientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveClusterRole(ctx context.Context, name string) error {
|
||||
err := provider.clientSet.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveClusterRoleBinding(ctx context.Context, name string) error {
|
||||
err := provider.clientSet.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveRoleBinding(ctx context.Context, namespace string, name string) error {
|
||||
err := provider.clientSet.RbacV1().RoleBindings(namespace).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveRole(ctx context.Context, namespace string, name string) error {
|
||||
err := provider.clientSet.RbacV1().Roles(namespace).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveServiceAccount(ctx context.Context, namespace string, name string) error {
|
||||
err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Delete(ctx, name, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemovePod(ctx context.Context, namespace string, podName string) error {
|
||||
err := provider.clientSet.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveConfigMap(ctx context.Context, namespace string, configMapName string) error {
|
||||
err := provider.clientSet.CoreV1().ConfigMaps(namespace).Delete(ctx, configMapName, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveService(ctx context.Context, namespace string, serviceName string) error {
|
||||
err := provider.clientSet.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveDaemonSet(ctx context.Context, namespace string, daemonSetName string) error {
|
||||
err := provider.clientSet.AppsV1().DaemonSets(namespace).Delete(ctx, daemonSetName, metav1.DeleteOptions{})
|
||||
return provider.handleRemovalError(err)
|
||||
}
|
||||
|
||||
func (provider *Provider) handleRemovalError(err error) error {
|
||||
// Ignore NotFound - There is nothing to delete.
|
||||
// Ignore Forbidden - Assume that a user could not have created the resource in the first place.
|
||||
if k8serrors.IsNotFound(err) || k8serrors.IsForbidden(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (provider *Provider) ApplyWorkerDaemonSet(
|
||||
ctx context.Context,
|
||||
namespace string,
|
||||
daemonSetName string,
|
||||
podImage string,
|
||||
workerPodName string,
|
||||
nodeNames []string,
|
||||
serviceAccountName string,
|
||||
resources Resources,
|
||||
imagePullPolicy core.PullPolicy,
|
||||
serviceMesh bool,
|
||||
tls bool,
|
||||
debug bool,
|
||||
) error {
|
||||
log.Debug().
|
||||
Int("node-count", len(nodeNames)).
|
||||
Str("namespace", namespace).
|
||||
Str("daemonset-name", daemonSetName).
|
||||
Str("image", podImage).
|
||||
Str("pod", workerPodName).
|
||||
Msg("Applying worker DaemonSets.")
|
||||
|
||||
if len(nodeNames) == 0 {
|
||||
return fmt.Errorf("DaemonSet %s must target at least 1 pod", daemonSetName)
|
||||
}
|
||||
|
||||
command := []string{"./worker", "-i", "any", "-port", "8897"}
|
||||
|
||||
if debug {
|
||||
command = append(command, "-debug")
|
||||
}
|
||||
|
||||
if serviceMesh {
|
||||
command = append(command, "-servicemesh")
|
||||
}
|
||||
|
||||
if tls {
|
||||
command = append(command, "-tls")
|
||||
}
|
||||
|
||||
if serviceMesh || tls {
|
||||
command = append(command, "-procfs", procfsMountPath)
|
||||
}
|
||||
|
||||
workerContainer := applyconfcore.Container()
|
||||
workerContainer.WithName(workerPodName)
|
||||
workerContainer.WithImage(podImage)
|
||||
workerContainer.WithImagePullPolicy(imagePullPolicy)
|
||||
|
||||
caps := applyconfcore.Capabilities().WithDrop("ALL")
|
||||
|
||||
caps = caps.WithAdd("NET_RAW").WithAdd("NET_ADMIN") // to listen to traffic using libpcap
|
||||
|
||||
if serviceMesh || tls {
|
||||
caps = caps.WithAdd("SYS_ADMIN") // to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
|
||||
caps = caps.WithAdd("SYS_PTRACE") // to set netns to other process + to open libssl.so of other process
|
||||
|
||||
if serviceMesh {
|
||||
caps = caps.WithAdd("DAC_OVERRIDE") // to read /proc/PID/environ
|
||||
}
|
||||
|
||||
if tls {
|
||||
caps = caps.WithAdd("SYS_RESOURCE") // to change rlimits for eBPF
|
||||
}
|
||||
}
|
||||
|
||||
workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
|
||||
|
||||
workerContainer.WithCommand(command...)
|
||||
|
||||
if debug {
|
||||
workerContainer.WithEnv(
|
||||
applyconfcore.EnvVar().WithName("MEMORY_PROFILING_ENABLED").WithValue("true"),
|
||||
applyconfcore.EnvVar().WithName("MEMORY_PROFILING_INTERVAL_SECONDS").WithValue("10"),
|
||||
applyconfcore.EnvVar().WithName("MEMORY_USAGE_INTERVAL_MILLISECONDS").WithValue("500"),
|
||||
)
|
||||
}
|
||||
|
||||
cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid cpu limit for %s container", workerPodName)
|
||||
}
|
||||
memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid memory limit for %s container", workerPodName)
|
||||
}
|
||||
cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid cpu request for %s container", workerPodName)
|
||||
}
|
||||
memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid memory request for %s container", workerPodName)
|
||||
}
|
||||
workerResourceLimits := core.ResourceList{
|
||||
"cpu": cpuLimit,
|
||||
"memory": memLimit,
|
||||
}
|
||||
workerResourceRequests := core.ResourceList{
|
||||
"cpu": cpuRequests,
|
||||
"memory": memRequests,
|
||||
}
|
||||
workerResources := applyconfcore.ResourceRequirements().WithRequests(workerResourceRequests).WithLimits(workerResourceLimits)
|
||||
workerContainer.WithResources(workerResources)
|
||||
|
||||
matchFields := make([]*applyconfcore.NodeSelectorTermApplyConfiguration, 0)
|
||||
for _, nodeName := range nodeNames {
|
||||
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
|
||||
nodeSelectorRequirement.WithKey("metadata.name")
|
||||
nodeSelectorRequirement.WithOperator(core.NodeSelectorOpIn)
|
||||
nodeSelectorRequirement.WithValues(nodeName)
|
||||
|
||||
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
|
||||
nodeSelectorTerm.WithMatchFields(nodeSelectorRequirement)
|
||||
matchFields = append(matchFields, nodeSelectorTerm)
|
||||
}
|
||||
|
||||
nodeSelector := applyconfcore.NodeSelector()
|
||||
nodeSelector.WithNodeSelectorTerms(matchFields...)
|
||||
nodeAffinity := applyconfcore.NodeAffinity()
|
||||
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
|
||||
affinity := applyconfcore.Affinity()
|
||||
affinity.WithNodeAffinity(nodeAffinity)
|
||||
|
||||
noExecuteToleration := applyconfcore.Toleration()
|
||||
noExecuteToleration.WithOperator(core.TolerationOpExists)
|
||||
noExecuteToleration.WithEffect(core.TaintEffectNoExecute)
|
||||
noScheduleToleration := applyconfcore.Toleration()
|
||||
noScheduleToleration.WithOperator(core.TolerationOpExists)
|
||||
noScheduleToleration.WithEffect(core.TaintEffectNoSchedule)
|
||||
|
||||
// Host procfs is needed inside the container because we need access to
|
||||
// the network namespaces of processes on the machine.
|
||||
//
|
||||
procfsVolume := applyconfcore.Volume()
|
||||
procfsVolume.WithName(procfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))
|
||||
procfsVolumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
|
||||
workerContainer.WithVolumeMounts(procfsVolumeMount)
|
||||
|
||||
// We need access to /sys in order to install certain eBPF tracepoints
|
||||
//
|
||||
sysfsVolume := applyconfcore.Volume()
|
||||
sysfsVolume.WithName(sysfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/sys"))
|
||||
sysfsVolumeMount := applyconfcore.VolumeMount().WithName(sysfsVolumeName).WithMountPath(sysfsMountPath).WithReadOnly(true)
|
||||
workerContainer.WithVolumeMounts(sysfsVolumeMount)
|
||||
|
||||
podSpec := applyconfcore.PodSpec()
|
||||
podSpec.WithHostNetwork(true)
|
||||
podSpec.WithDNSPolicy(core.DNSClusterFirstWithHostNet)
|
||||
podSpec.WithTerminationGracePeriodSeconds(0)
|
||||
if serviceAccountName != "" {
|
||||
podSpec.WithServiceAccountName(serviceAccountName)
|
||||
}
|
||||
podSpec.WithContainers(workerContainer)
|
||||
podSpec.WithAffinity(affinity)
|
||||
podSpec.WithTolerations(noExecuteToleration, noScheduleToleration)
|
||||
podSpec.WithVolumes(procfsVolume, sysfsVolume)
|
||||
|
||||
podTemplate := applyconfcore.PodTemplateSpec()
|
||||
podTemplate.WithLabels(map[string]string{
|
||||
"app": workerPodName,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
})
|
||||
podTemplate.WithSpec(podSpec)
|
||||
|
||||
labelSelector := applyconfmeta.LabelSelector()
|
||||
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
|
||||
|
||||
applyOptions := metav1.ApplyOptions{
|
||||
Force: true,
|
||||
FieldManager: fieldManagerName,
|
||||
}
|
||||
|
||||
daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
|
||||
daemonSet.
|
||||
WithLabels(map[string]string{
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
}).
|
||||
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
|
||||
|
||||
_, err = provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
|
||||
return err
|
||||
}
|
||||

func (provider *Provider) ResetWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string) error {
    workerContainer := applyconfcore.Container()
    workerContainer.WithName(workerPodName)
    workerContainer.WithImage(podImage)

    // Require a label that no node carries (note the "-non-existing-label"
    // key), so the DaemonSet matches zero nodes and schedules zero worker
    // pods; this is what resetting the DaemonSet means here.
    nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
    nodeSelectorRequirement.WithKey(fmt.Sprintf("%s-non-existing-label", misc.Program))
    nodeSelectorRequirement.WithOperator(core.NodeSelectorOpExists)
    nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
    nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
    nodeSelector := applyconfcore.NodeSelector()
    nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
    nodeAffinity := applyconfcore.NodeAffinity()
    nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
    affinity := applyconfcore.Affinity()
    affinity.WithNodeAffinity(nodeAffinity)

    podSpec := applyconfcore.PodSpec()
    podSpec.WithContainers(workerContainer)
    podSpec.WithAffinity(affinity)

    podTemplate := applyconfcore.PodTemplateSpec()
    podTemplate.WithLabels(map[string]string{
        "app":          workerPodName,
        LabelManagedBy: provider.managedBy,
        LabelCreatedBy: provider.createdBy,
    })
    podTemplate.WithSpec(podSpec)

    labelSelector := applyconfmeta.LabelSelector()
    labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})

    applyOptions := metav1.ApplyOptions{
        Force:        true,
        FieldManager: fieldManagerName,
    }

    daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
    daemonSet.
        WithLabels(map[string]string{
            LabelManagedBy: provider.managedBy,
            LabelCreatedBy: provider.createdBy,
        }).
        WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))

    _, err := provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
    return err
}

func (provider *Provider) listPodsImpl(ctx context.Context, regex *regexp.Regexp, namespaces []string, listOptions metav1.ListOptions) ([]core.Pod, error) {
    var pods []core.Pod
    for _, namespace := range namespaces {
@@ -913,10 +115,6 @@ func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *r
    return provider.listPodsImpl(ctx, regex, namespaces, metav1.ListOptions{})
}

func (provider *Provider) GetPod(ctx context.Context, namespaces string, podName string) (*core.Pod, error) {
    return provider.clientSet.CoreV1().Pods(namespaces).Get(ctx, podName, metav1.GetOptions{})
}

func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
    pods, err := provider.ListAllPodsMatchingRegex(ctx, regex, namespaces)
    if err != nil {
@@ -932,8 +130,14 @@ func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, r
    return matchingPods, nil
}

func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labelName string) ([]core.Pod, error) {
    pods, err := provider.clientSet.CoreV1().Pods(namespaces).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", labelName)})
func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labels map[string]string) ([]core.Pod, error) {
    pods, err := provider.clientSet.CoreV1().Pods(namespaces).List(ctx, metav1.ListOptions{
        LabelSelector: metav1.FormatLabelSelector(
            &metav1.LabelSelector{
                MatchLabels: labels,
            },
        ),
    })
    if err != nil {
        return nil, err
    }
@@ -941,16 +145,7 @@ func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces str
    return pods.Items, err
}
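The new `ListPodsByAppLabel` builds its selector with `metav1.FormatLabelSelector` instead of hand-formatting an `app=%s` string. A quick standalone illustration of what that call produces (the label key and value here are just example data):

```go
package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    selector := metav1.FormatLabelSelector(&metav1.LabelSelector{
        MatchLabels: map[string]string{"app.kubeshark.co/app": "front"},
    })
    // Prints: app.kubeshark.co/app=front
    // Multiple MatchLabels entries are joined with commas, which is what
    // the Kubernetes list API expects in its LabelSelector field.
    fmt.Println(selector)
}
```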

func (provider *Provider) ListAllNamespaces(ctx context.Context) ([]core.Namespace, error) {
    namespaces, err := provider.clientSet.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, err
    }

    return namespaces.Items, err
}

func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podName string, containerName string, grep string) (string, error) {
    podLogOpts := core.PodLogOptions{Container: containerName}
    req := provider.clientSet.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts)
    podLogs, err := req.Stream(ctx)
@@ -962,8 +157,26 @@ func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podN
    if _, err = io.Copy(buf, podLogs); err != nil {
        return "", fmt.Errorf("error copy information from podLogs to buf, ns: %s, pod: %s, %w", namespace, podName, err)
    }
    str := buf.String()
    return str, nil

    if grep != "" {
        finder, err := reader.NewFinder(grep, true, true)
        if err != nil {
            panic(err)
        }

        read, err := reader.NewStdReader(bufio.NewReader(buf), []reader.Finder{finder})
        if err != nil {
            panic(err)
        }
        read.Run()
        result := read.Result()[0]

        log.Info().Str("namespace", namespace).Str("pod", podName).Str("container", containerName).Int("lines", len(result.Lines)).Str("grep", grep).Send()
        return strings.Join(result.MatchString, "\n"), nil
    } else {
        log.Info().Str("namespace", namespace).Str("pod", podName).Str("container", containerName).Send()
        return buf.String(), nil
    }
}
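One observation on the grep branch above: it panics if `reader.NewFinder` or `reader.NewStdReader` fails, which would take down the whole CLI on a malformed pattern. A gentler variant under the same `reader` API as used in the commit (a sketch of the branch only, not what the commit actually does) would propagate the error instead:

```go
// Drop-in variant of the grep branch inside GetPodLogs: return errors
// rather than panicking, keeping the function's (string, error) contract.
if grep != "" {
    finder, err := reader.NewFinder(grep, true, true)
    if err != nil {
        return "", fmt.Errorf("invalid grep pattern %q: %w", grep, err)
    }

    read, err := reader.NewStdReader(bufio.NewReader(buf), []reader.Finder{finder})
    if err != nil {
        return "", fmt.Errorf("creating log reader: %w", err)
    }
    read.Run()
    result := read.Result()[0] // note: still assumes at least one result
    return strings.Join(result.MatchString, "\n"), nil
}
```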

func (provider *Provider) GetNamespaceEvents(ctx context.Context, namespace string) (string, error) {
@@ -975,41 +188,6 @@ func (provider *Provider) GetNamespaceEvents(ctx context.Context, namespace stri
    return eventList.String(), nil
}

func (provider *Provider) ListManagedServiceAccounts(ctx context.Context, namespace string) (*core.ServiceAccountList, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
    }
    return provider.clientSet.CoreV1().ServiceAccounts(namespace).List(ctx, listOptions)
}

func (provider *Provider) ListManagedClusterRoles(ctx context.Context) (*rbac.ClusterRoleList, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
    }
    return provider.clientSet.RbacV1().ClusterRoles().List(ctx, listOptions)
}

func (provider *Provider) ListManagedClusterRoleBindings(ctx context.Context) (*rbac.ClusterRoleBindingList, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
    }
    return provider.clientSet.RbacV1().ClusterRoleBindings().List(ctx, listOptions)
}

func (provider *Provider) ListManagedRoles(ctx context.Context, namespace string) (*rbac.RoleList, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
    }
    return provider.clientSet.RbacV1().Roles(namespace).List(ctx, listOptions)
}

func (provider *Provider) ListManagedRoleBindings(ctx context.Context, namespace string) (*rbac.RoleBindingList, error) {
    listOptions := metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
    }
    return provider.clientSet.RbacV1().RoleBindings(namespace).List(ctx, listOptions)
}

// ValidateNotProxy: we added this after a customer tried to run kubeshark from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now we are blocking the option to run kubeshark through a proxy to the k8s server.
@@ -1049,6 +227,30 @@ func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
    return &serverVersionSemVer, nil
}

func (provider *Provider) GetNamespaces() (namespaces []string) {
    if len(config.Config.Tap.Namespaces) > 0 {
        namespaces = utils.Unique(config.Config.Tap.Namespaces)
    } else {
        namespaceList, err := provider.clientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            log.Error().Err(err).Send()
            return
        }

        for _, ns := range namespaceList.Items {
            namespaces = append(namespaces, ns.Name)
        }
    }

    namespaces = utils.Diff(namespaces, config.Config.Tap.ExcludedNamespaces)

    return
}
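Assuming `utils.Unique` deduplicates a slice and `utils.Diff` returns the elements of its first argument that are absent from its second (the reading their use above implies; neither function is shown in this diff), the excluded-namespace filtering behaves like this standalone sketch:

```go
package main

import "fmt"

// diff mirrors the assumed semantics of utils.Diff: keep every element
// of a that does not appear in b.
func diff(a, b []string) (out []string) {
    exclude := make(map[string]struct{}, len(b))
    for _, s := range b {
        exclude[s] = struct{}{}
    }
    for _, s := range a {
        if _, ok := exclude[s]; !ok {
            out = append(out, s)
        }
    }
    return
}

func main() {
    all := []string{"default", "kube-system", "sock-shop"}
    excluded := []string{"kube-system"}
    fmt.Println(diff(all, excluded)) // [default sock-shop]
}
```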

func (provider *Provider) GetClientSet() *kubernetes.Clientset {
    return provider.clientSet
}

func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
    clientSet, err := kubernetes.NewForConfig(config)
    if err != nil {
@@ -11,6 +11,7 @@ import (
    "strings"
    "time"

    "github.com/kubeshark/kubeshark/config"
    "github.com/rs/zerolog/log"
    "k8s.io/apimachinery/pkg/util/httpstream"
    "k8s.io/client-go/tools/portforward"
@@ -21,8 +22,9 @@ import (
const k8sProxyApiPrefix = "/"
const selfServicePort = 80

func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, selfNamespace string, selfServiceName string, cancel context.CancelFunc) (*http.Server, error) {
func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, selfNamespace string, selfServiceName string) (*http.Server, error) {
    log.Info().
        Str("proxy-host", proxyHost).
        Str("namespace", selfNamespace).
        Str("service", selfServiceName).
        Int("src-port", int(srcPort)).
@@ -55,7 +57,7 @@ func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16,
    go func() {
        if err := server.Serve(l); err != nil && err != http.ErrServerClosed {
            log.Error().Err(err).Msg("While creating proxy!")
            cancel()
            return
        }
    }()
@@ -66,8 +68,12 @@ func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string)
    return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort)
}

func GetLocalhostOnPort(port uint16) string {
    return fmt.Sprintf("http://localhost:%d", port)
func GetProxyOnPort(port uint16) string {
    return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
}

func GetHubUrl() string {
    return fmt.Sprintf("%s/api", GetProxyOnPort(config.Config.Tap.Proxy.Front.Port))
}
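`GetProxyOnPort` now derives the host from config instead of hardcoding `localhost`. Under the defaults suggested elsewhere in this diff (front port 8899; `127.0.0.1` is an assumed proxy host, since the config default is not shown here), `GetHubUrl` resolves as in this standalone sketch:

```go
package main

import "fmt"

// getProxyOnPort mirrors the function above with the config lookup
// replaced by explicit arguments; the host and port below are assumed
// defaults, not values read from the real config package.
func getProxyOnPort(host string, port uint16) string {
    return fmt.Sprintf("http://%s:%d", host, port)
}

func main() {
    hubUrl := fmt.Sprintf("%s/api", getProxyOnPort("127.0.0.1", 8899))
    fmt.Println(hubUrl) // http://127.0.0.1:8899/api
}
```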

func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {
@@ -99,8 +105,8 @@ func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace st
    })
}

func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context, cancel context.CancelFunc) (*portforward.PortForwarder, error) {
    pods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, podRegex, []string{namespace})
func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context) (*portforward.PortForwarder, error) {
    pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{AppLabelKey: "front"})
    if err != nil {
        return nil, err
    } else if len(pods) == 0 {
@@ -132,7 +138,8 @@ func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *re
    go func() {
        if err = forwarder.ForwardPorts(); err != nil {
            log.Error().Err(err).Msg("While Kubernetes port-forwarding!")
            cancel()
            log.Info().Str("command", fmt.Sprintf("kubectl port-forward -n %s service/kubeshark-front 8899:80", config.Config.Tap.Release.Namespace)).Msg("Please try running:")
            return
        }
    }()
@@ -1,8 +0,0 @@
package kubernetes

type Resources struct {
    CpuLimit       string `yaml:"cpu-limit" default:"750m"`
    MemoryLimit    string `yaml:"memory-limit" default:"1Gi"`
    CpuRequests    string `yaml:"cpu-requests" default:"50m"`
    MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
}
@@ -1,95 +0,0 @@
package kubernetes

import (
    "regexp"

    "github.com/kubeshark/base/pkg/models"
    core "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func GetNodeHostToTargettedPodsMap(targettedPods []core.Pod) models.NodeToPodsMap {
    nodeToTargettedPodsMap := make(models.NodeToPodsMap)
    for _, pod := range targettedPods {
        minimizedPod := getMinimizedPod(pod)

        existingList := nodeToTargettedPodsMap[pod.Spec.NodeName]
        if existingList == nil {
            nodeToTargettedPodsMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
        } else {
            nodeToTargettedPodsMap[pod.Spec.NodeName] = append(nodeToTargettedPodsMap[pod.Spec.NodeName], minimizedPod)
        }
    }
    return nodeToTargettedPodsMap
}

func getMinimizedPod(fullPod core.Pod) core.Pod {
    return core.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      fullPod.Name,
            Namespace: fullPod.Namespace,
        },
        Status: core.PodStatus{
            PodIP:             fullPod.Status.PodIP,
            ContainerStatuses: getMinimizedContainerStatuses(fullPod),
        },
    }
}

func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
    result := make([]core.ContainerStatus, len(fullPod.Status.ContainerStatuses))

    for i, container := range fullPod.Status.ContainerStatuses {
        result[i] = core.ContainerStatus{
            ContainerID: container.ContainerID,
        }
    }

    return result
}

func excludeSelfPods(pods []core.Pod) []core.Pod {
    selfPrefixRegex := regexp.MustCompile("^" + SelfResourcesPrefix)

    nonSelfPods := make([]core.Pod, 0)
    for _, pod := range pods {
        if !selfPrefixRegex.MatchString(pod.Name) {
            nonSelfPods = append(nonSelfPods, pod)
        }
    }

    return nonSelfPods
}

func getPodArrayDiff(oldPods []core.Pod, newPods []core.Pod) (added []core.Pod, removed []core.Pod) {
    added = getMissingPods(newPods, oldPods)
    removed = getMissingPods(oldPods, newPods)

    return added, removed
}

// returns pods present in pods1 array and missing in pods2 array
func getMissingPods(pods1 []core.Pod, pods2 []core.Pod) []core.Pod {
    missingPods := make([]core.Pod, 0)
    for _, pod1 := range pods1 {
        var found = false
        for _, pod2 := range pods2 {
            if pod1.UID == pod2.UID {
                found = true
                break
            }
        }
        if !found {
            missingPods = append(missingPods, pod1)
        }
    }
    return missingPods
}

func GetPodInfosForPods(pods []core.Pod) []*models.PodInfo {
    podInfos := make([]*models.PodInfo, 0)
    for _, pod := range pods {
        podInfos = append(podInfos, &models.PodInfo{Name: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName})
    }
    return podInfos
}
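Since `getPodArrayDiff` keys on pod UID, a small worked example of its contract helps: pods present in the new slice but not the old are "added", and the reverse are "removed". The sketch below uses hypothetical pods and is written as if it lived in this (now deleted) package so it can call the helpers directly:

```go
package kubernetes

import (
    "fmt"

    core "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

func examplePodArrayDiff() {
    mk := func(name, uid string) core.Pod {
        return core.Pod{ObjectMeta: metav1.ObjectMeta{Name: name, UID: types.UID(uid)}}
    }
    oldPods := []core.Pod{mk("a", "1"), mk("b", "2")}
    newPods := []core.Pod{mk("b", "2"), mk("c", "3")}

    // "b" appears in both slices with the same UID, so it is neither
    // added nor removed; "c" is new and "a" is gone.
    added, removed := getPodArrayDiff(oldPods, newPods)
    fmt.Println(added[0].Name, removed[0].Name) // c a
}
```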
@@ -1,389 +0,0 @@
package kubernetes

import (
    "context"
    "fmt"
    "regexp"
    "time"

    "github.com/kubeshark/base/pkg/models"
    "github.com/kubeshark/kubeshark/debounce"
    "github.com/kubeshark/kubeshark/docker"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/kubeshark/kubeshark/utils"
    "github.com/rs/zerolog/log"
    v1 "k8s.io/api/core/v1"
)

const updateWorkersDelay = 5 * time.Second

type TargettedPodChangeEvent struct {
    Added   []v1.Pod
    Removed []v1.Pod
}

// WorkerSyncer uses a k8s pod watch to update Worker daemonsets when targeted pods are removed or created
type WorkerSyncer struct {
    startTime              time.Time
    context                context.Context
    CurrentlyTargettedPods []v1.Pod
    config                 WorkerSyncerConfig
    kubernetesProvider     *Provider
    TapPodChangesOut       chan TargettedPodChangeEvent
    WorkerPodsChanges      chan *v1.Pod
    ErrorOut               chan K8sTapManagerError
    nodeToTargettedPodMap  models.NodeToPodsMap
    targettedNodes         []string
}

type WorkerSyncerConfig struct {
    TargetNamespaces         []string
    PodFilterRegex           regexp.Regexp
    SelfNamespace            string
    WorkerResources          Resources
    ImagePullPolicy          v1.PullPolicy
    SelfServiceAccountExists bool
    ServiceMesh              bool
    Tls                      bool
    Debug                    bool
}

func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) {
    syncer := &WorkerSyncer{
        startTime:              startTime.Truncate(time.Second), // Round down because k8s CreationTimestamp is given in 1 sec resolution.
        context:                ctx,
        CurrentlyTargettedPods: make([]v1.Pod, 0),
        config:                 config,
        kubernetesProvider:     kubernetesProvider,
        TapPodChangesOut:       make(chan TargettedPodChangeEvent, 100),
        WorkerPodsChanges:      make(chan *v1.Pod, 100),
        ErrorOut:               make(chan K8sTapManagerError, 100),
    }

    if err, _ := syncer.updateCurrentlyTargettedPods(); err != nil {
        return nil, err
    }

    if err := syncer.updateWorkers(); err != nil {
        return nil, err
    }

    go syncer.watchPodsForTargetting()
    go syncer.watchWorkerEvents()
    go syncer.watchWorkerPods()
    return syncer, nil
}
func (workerSyncer *WorkerSyncer) watchWorkerPods() {
    selfResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
    podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, selfResourceRegex)
    eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, []string{workerSyncer.config.SelfNamespace}, podWatchHelper)

    for {
        select {
        case wEvent, ok := <-eventChan:
            if !ok {
                eventChan = nil
                continue
            }

            pod, err := wEvent.ToPod()
            if err != nil {
                log.Error().Str("pod", WorkerPodName).Err(err).Msg(fmt.Sprintf("While parsing %s resource!", misc.Software))
                continue
            }

            log.Debug().
                Str("pod", pod.Name).
                Str("node", pod.Spec.NodeName).
                Interface("phase", pod.Status.Phase).
                Msg("Watching pod events...")
            if pod.Spec.NodeName != "" {
                workerSyncer.WorkerPodsChanges <- pod
            }

        case err, ok := <-errorChan:
            if !ok {
                errorChan = nil
                continue
            }
            log.Error().Str("pod", WorkerPodName).Err(err).Msg("While watching pod!")

        case <-workerSyncer.context.Done():
            log.Debug().
                Str("pod", WorkerPodName).
                Msg("Watching pod, context done.")
            return
        }
    }
}

func (workerSyncer *WorkerSyncer) watchWorkerEvents() {
    selfResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
    eventWatchHelper := NewEventWatchHelper(workerSyncer.kubernetesProvider, selfResourceRegex, "pod")
    eventChan, errorChan := FilteredWatch(workerSyncer.context, eventWatchHelper, []string{workerSyncer.config.SelfNamespace}, eventWatchHelper)

    for {
        select {
        case wEvent, ok := <-eventChan:
            if !ok {
                eventChan = nil
                continue
            }

            event, err := wEvent.ToEvent()
            if err != nil {
                log.Error().
                    Str("pod", WorkerPodName).
                    Err(err).
                    Msg("Parsing resource event.")
                continue
            }

            log.Debug().
                Str("pod", WorkerPodName).
                Str("event", event.Name).
                Time("time", event.CreationTimestamp.Time).
                Str("name", event.Regarding.Name).
                Str("kind", event.Regarding.Kind).
                Str("reason", event.Reason).
                Str("note", event.Note).
                Msg("Watching events.")

            pod, err1 := workerSyncer.kubernetesProvider.GetPod(workerSyncer.context, workerSyncer.config.SelfNamespace, event.Regarding.Name)
            if err1 != nil {
                log.Error().Str("name", event.Regarding.Name).Msg("Couldn't get pod")
                continue
            }

            workerSyncer.WorkerPodsChanges <- pod

        case err, ok := <-errorChan:
            if !ok {
                errorChan = nil
                continue
            }

            log.Error().
                Str("pod", WorkerPodName).
                Err(err).
                Msg("While watching events.")

        case <-workerSyncer.context.Done():
            log.Debug().
                Str("pod", WorkerPodName).
                Msg("Watching pod events, context done.")
            return
        }
    }
}

func (workerSyncer *WorkerSyncer) watchPodsForTargetting() {
    podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, &workerSyncer.config.PodFilterRegex)
    eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, workerSyncer.config.TargetNamespaces, podWatchHelper)

    handleChangeInPods := func() {
        err, changeFound := workerSyncer.updateCurrentlyTargettedPods()
        if err != nil {
            workerSyncer.ErrorOut <- K8sTapManagerError{
                OriginalError:    err,
                TapManagerReason: TapManagerPodListError,
            }
        }

        if !changeFound {
            log.Debug().Msg("Nothing changed. Updating workers is not needed.")
            return
        }
        if err := workerSyncer.updateWorkers(); err != nil {
            workerSyncer.ErrorOut <- K8sTapManagerError{
                OriginalError:    err,
                TapManagerReason: TapManagerWorkerUpdateError,
            }
        }
    }
    restartWorkersDebouncer := debounce.NewDebouncer(updateWorkersDelay, handleChangeInPods)

    for {
        select {
        case wEvent, ok := <-eventChan:
            if !ok {
                eventChan = nil
                continue
            }

            pod, err := wEvent.ToPod()
            if err != nil {
                workerSyncer.handleErrorInWatchLoop(err, restartWorkersDebouncer)
                continue
            }

            switch wEvent.Type {
            case EventAdded:
                log.Debug().
                    Str("pod", pod.Name).
                    Str("namespace", pod.Namespace).
                    Msg("Added matching pod.")
                if err := restartWorkersDebouncer.SetOn(); err != nil {
                    log.Error().
                        Str("pod", pod.Name).
                        Str("namespace", pod.Namespace).
                        Err(err).
                        Msg("While restarting workers!")
                }
            case EventDeleted:
                log.Debug().
                    Str("pod", pod.Name).
                    Str("namespace", pod.Namespace).
                    Msg("Removed matching pod.")
                if err := restartWorkersDebouncer.SetOn(); err != nil {
                    log.Error().
                        Str("pod", pod.Name).
                        Str("namespace", pod.Namespace).
                        Err(err).
                        Msg("While restarting workers!")
                }
            case EventModified:
                log.Debug().
                    Str("pod", pod.Name).
                    Str("namespace", pod.Namespace).
                    Str("ip", pod.Status.PodIP).
                    Interface("phase", pod.Status.Phase).
                    Msg("Modified matching pod.")

                // Act only if the modified pod has already obtained an IP address.
                // After filtering for IPs, on a normal pod restart this includes the following events:
                // - Pod deletion
                // - Pod reaches start state
                // - Pod reaches ready state
                // Ready/unready transitions might also trigger this event.
                if pod.Status.PodIP != "" {
                    if err := restartWorkersDebouncer.SetOn(); err != nil {
                        log.Error().
                            Str("pod", pod.Name).
                            Str("namespace", pod.Namespace).
                            Err(err).
                            Msg("While restarting workers!")
                    }
                }
            case EventBookmark:
                break
            case EventError:
                break
            }
        case err, ok := <-errorChan:
            if !ok {
                errorChan = nil
                continue
            }

            workerSyncer.handleErrorInWatchLoop(err, restartWorkersDebouncer)
            continue

        case <-workerSyncer.context.Done():
            log.Debug().Msg("Watching pods, context done. Stopping \"restart workers debouncer\"")
            restartWorkersDebouncer.Cancel()
            // TODO: Does this also perform cleanup?
            return
        }
    }
}
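The watch loop above funnels pod churn through `restartWorkersDebouncer`, so a burst of add/delete/modify events within `updateWorkersDelay` triggers one worker update rather than many. The kubeshark `debounce` package itself is not shown in this diff; the standalone sketch below only approximates the semantics its use implies (repeated `SetOn` calls within the delay window collapse into a single callback), and drops the error return of the real `SetOn` for brevity:

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

// debouncer: many SetOn() calls within the delay window collapse into a
// single invocation of f. A sketch, not the actual kubeshark/debounce
// implementation.
type debouncer struct {
    mu      sync.Mutex
    delay   time.Duration
    f       func()
    pending *time.Timer
}

func (d *debouncer) SetOn() {
    d.mu.Lock()
    defer d.mu.Unlock()
    if d.pending == nil {
        d.pending = time.AfterFunc(d.delay, func() {
            d.mu.Lock()
            d.pending = nil
            d.mu.Unlock()
            d.f()
        })
    }
}

func main() {
    d := &debouncer{delay: 100 * time.Millisecond, f: func() { fmt.Println("update workers") }}
    for i := 0; i < 5; i++ {
        d.SetOn() // five pod events arrive in a burst...
    }
    time.Sleep(200 * time.Millisecond) // ...yet "update workers" prints once
}
```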
func (workerSyncer *WorkerSyncer) handleErrorInWatchLoop(err error, restartWorkersDebouncer *debounce.Debouncer) {
    log.Error().Err(err).Msg("While watching pods, got an error! Stopping \"restart workers debouncer\"")
    restartWorkersDebouncer.Cancel()
    workerSyncer.ErrorOut <- K8sTapManagerError{
        OriginalError:    err,
        TapManagerReason: TapManagerPodWatchError,
    }
}

func (workerSyncer *WorkerSyncer) updateCurrentlyTargettedPods() (err error, changesFound bool) {
    if matchingPods, err := workerSyncer.kubernetesProvider.ListAllRunningPodsMatchingRegex(workerSyncer.context, &workerSyncer.config.PodFilterRegex, workerSyncer.config.TargetNamespaces); err != nil {
        return err, false
    } else {
        podsToTarget := excludeSelfPods(matchingPods)
        addedPods, removedPods := getPodArrayDiff(workerSyncer.CurrentlyTargettedPods, podsToTarget)
        for _, addedPod := range addedPods {
            log.Info().Str("pod", addedPod.Name).Msg("Currently targetting:")
        }
        for _, removedPod := range removedPods {
            log.Info().Str("pod", removedPod.Name).Msg("Pod is no longer running. Targetting is stopped.")
        }
        if len(addedPods) > 0 || len(removedPods) > 0 {
            workerSyncer.CurrentlyTargettedPods = podsToTarget
            workerSyncer.nodeToTargettedPodMap = GetNodeHostToTargettedPodsMap(workerSyncer.CurrentlyTargettedPods)
            workerSyncer.TapPodChangesOut <- TargettedPodChangeEvent{
                Added:   addedPods,
                Removed: removedPods,
            }
            return nil, true
        }
        return nil, false
    }
}

func (workerSyncer *WorkerSyncer) updateWorkers() error {
    nodesToTarget := make([]string, len(workerSyncer.nodeToTargettedPodMap))
    i := 0
    for node := range workerSyncer.nodeToTargettedPodMap {
        nodesToTarget[i] = node
        i++
    }

    if utils.EqualStringSlices(nodesToTarget, workerSyncer.targettedNodes) {
        log.Debug().Msg("Skipping apply, DaemonSet is up to date")
        return nil
    }

    log.Debug().Strs("nodes", nodesToTarget).Msg("Updating DaemonSet to run on nodes.")

    image := docker.GetWorkerImage()

    if len(workerSyncer.nodeToTargettedPodMap) > 0 {
        var serviceAccountName string
        if workerSyncer.config.SelfServiceAccountExists {
            serviceAccountName = ServiceAccountName
        } else {
            serviceAccountName = ""
        }

        nodeNames := make([]string, 0, len(workerSyncer.nodeToTargettedPodMap))
        for nodeName := range workerSyncer.nodeToTargettedPodMap {
            nodeNames = append(nodeNames, nodeName)
        }

        if err := workerSyncer.kubernetesProvider.ApplyWorkerDaemonSet(
            workerSyncer.context,
            workerSyncer.config.SelfNamespace,
            WorkerDaemonSetName,
            image,
            WorkerPodName,
            nodeNames,
            serviceAccountName,
            workerSyncer.config.WorkerResources,
            workerSyncer.config.ImagePullPolicy,
            workerSyncer.config.ServiceMesh,
            workerSyncer.config.Tls,
            workerSyncer.config.Debug); err != nil {
            return err
        }

        log.Debug().Int("worker-count", len(workerSyncer.nodeToTargettedPodMap)).Msg("Successfully created workers.")
    } else {
        if err := workerSyncer.kubernetesProvider.ResetWorkerDaemonSet(
            workerSyncer.context,
            workerSyncer.config.SelfNamespace,
            WorkerDaemonSetName,
            image,
            WorkerPodName); err != nil {
            return err
        }

        log.Debug().Msg("Successfully resetted Worker DaemonSet")
    }

    workerSyncer.targettedNodes = nodesToTarget

    return nil
}
manifests/README.md (new file, 34 lines)
@@ -0,0 +1,34 @@
# Manifests

## Apply

Clone the repo:

```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/manifests
```

To apply the manifests, run:

```shell
kubectl apply -f .
```

To clean up:

```shell
kubectl delete namespace kubeshark
kubectl delete clusterrolebinding kubeshark-cluster-role-binding
kubectl delete clusterrole kubeshark-cluster-role
```

## Accessing

Set up port forwarding:

```shell
kubectl port-forward service/kubeshark-front 8899:80
```

Visit [localhost:8899](http://localhost:8899)
manifests/complete.yaml (new file, 891 lines)
@@ -0,0 +1,891 @@
---
# Source: kubeshark/templates/16-network-policies.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-hub-network-policy
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: hub
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - ports:
        - protocol: TCP
          port: 8080
  egress:
    - {}
---
# Source: kubeshark/templates/16-network-policies.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-front-network-policy
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: front
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - ports:
        - protocol: TCP
          port: 8080
  egress:
    - {}
---
# Source: kubeshark/templates/16-network-policies.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-worker-network-policy
  namespace: default
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: worker
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - ports:
        - protocol: TCP
          port: 30001
        - protocol: TCP
          port: 49100
  egress:
    - {}
---
# Source: kubeshark/templates/01-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-service-account
  namespace: default
---
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
  name: kubeshark-secret
  namespace: default
  labels:
    app.kubeshark.co/app: hub
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
stringData:
  LICENSE: ''
  SCRIPTING_ENV: '{}'
---
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
  name: kubeshark-saml-x509-crt-secret
  namespace: default
  labels:
    app.kubeshark.co/app: hub
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
stringData:
  AUTH_SAML_X509_CRT: |
---
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
  name: kubeshark-saml-x509-key-secret
  namespace: default
  labels:
    app.kubeshark.co/app: hub
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
stringData:
  AUTH_SAML_X509_KEY: |
---
# Source: kubeshark/templates/11-nginx-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubeshark-nginx-config-map
  namespace: default
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
data:
  default.conf: |
    server {
        listen 8080;
        listen [::]:8080;
        access_log /dev/stdout;
        error_log /dev/stdout;

        client_body_buffer_size 64k;
        client_header_buffer_size 32k;
        large_client_header_buffers 8 64k;

        location /api {
            rewrite ^/api(.*)$ $1 break;
            proxy_pass http://kubeshark-hub;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header Host $http_host;
            proxy_set_header Upgrade websocket;
            proxy_set_header Connection Upgrade;
            proxy_set_header Authorization $http_authorization;
            proxy_pass_header Authorization;
            proxy_connect_timeout 4s;
            proxy_read_timeout 120s;
            proxy_send_timeout 12s;
            proxy_pass_request_headers on;
        }

        location /saml {
            rewrite ^/saml(.*)$ /saml$1 break;
            proxy_pass http://kubeshark-hub;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header Host $http_host;
            proxy_connect_timeout 4s;
            proxy_read_timeout 120s;
            proxy_send_timeout 12s;
            proxy_pass_request_headers on;
        }

        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
            try_files $uri $uri/ /index.html;
            expires -1;
            add_header Cache-Control no-cache;
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root /usr/share/nginx/html;
        }
    }
---
# Source: kubeshark/templates/12-config-map.yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubeshark-config-map
  namespace: default
  labels:
    app.kubeshark.co/app: hub
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
data:
  POD_REGEX: '.*'
  NAMESPACES: ''
  EXCLUDED_NAMESPACES: ''
  BPF_OVERRIDE: ''
  STOPPED: 'false'
  SCRIPTING_SCRIPTS: '{}'
  SCRIPTING_ACTIVE_SCRIPTS: ''
  INGRESS_ENABLED: 'false'
  INGRESS_HOST: 'ks.svc.cluster.local'
  PROXY_FRONT_PORT: '8899'
  AUTH_ENABLED: 'true'
  AUTH_TYPE: 'oidc'
  AUTH_SAML_IDP_METADATA_URL: ''
  AUTH_SAML_ROLE_ATTRIBUTE: 'role'
  AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","showAdminConsoleLink":true}}'
  TELEMETRY_DISABLED: 'false'
  SCRIPTING_DISABLED: ''
  TARGETED_PODS_UPDATE_DISABLED: ''
  PRESET_FILTERS_CHANGING_ENABLED: 'true'
  RECORDING_DISABLED: ''
  STOP_TRAFFIC_CAPTURING_DISABLED: 'false'
  GLOBAL_FILTER: ""
  DEFAULT_FILTER: "!dns and !tcp and !udp and !icmp"
  TRAFFIC_SAMPLE_RATE: '100'
  JSON_TTL: '5m'
  PCAP_TTL: '10s'
  PCAP_ERROR_TTL: '60s'
  TIMEZONE: ' '
  CLOUD_LICENSE_ENABLED: 'true'
  DUPLICATE_TIMEFRAME: '200ms'
  ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,ws,tls'
  DISSECTORS_UPDATING_ENABLED: 'true'
  DETECT_DUPLICATES: 'false'
  PCAP_DUMP_ENABLE: 'true'
  PCAP_TIME_INTERVAL: '1m'
  PCAP_MAX_TIME: '1h'
  PCAP_MAX_SIZE: '500MB'
  PCAP_SRC_DIR: 'pcapdump'
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-cluster-role-default
  namespace: default
rules:
  - apiGroups:
      - ""
      - extensions
      - apps
    resources:
      - nodes
      - pods
      - services
      - endpoints
      - persistentvolumeclaims
    verbs:
      - list
      - get
      - watch
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
    resourceNames:
      - kube-system
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
      - create
      - update
      - delete
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-cluster-role-binding-default
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubeshark-cluster-role-default
subjects:
  - kind: ServiceAccount
    name: kubeshark-service-account
    namespace: default
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-self-config-role
  namespace: default
rules:
  - apiGroups:
      - ""
      - v1
    resourceNames:
      - kubeshark-secret
      - kubeshark-config-map
    resources:
      - secrets
      - configmaps
    verbs:
      - get
      - watch
      - list
      - update
      - patch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-self-config-role-binding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubeshark-self-config-role
subjects:
  - kind: ServiceAccount
    name: kubeshark-service-account
    namespace: default
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubeshark.co/app: hub
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-hub
  namespace: default
spec:
  ports:
    - name: kubeshark-hub
      port: 80
      targetPort: 8080
  selector:
    app.kubeshark.co/app: hub
  type: ClusterIP
---
# Source: kubeshark/templates/07-front-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-front
  namespace: default
spec:
  ports:
    - name: kubeshark-front
      port: 80
      targetPort: 8080
  selector:
    app.kubeshark.co/app: front
  type: ClusterIP
---
# Source: kubeshark/templates/15-worker-service-metrics.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '49100'
  name: kubeshark-worker-metrics
  namespace: default
spec:
  selector:
    app.kubeshark.co/app: worker
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  ports:
    - name: metrics
      protocol: TCP
      port: 49100
      targetPort: 49100
---
# Source: kubeshark/templates/09-worker-daemon-set.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubeshark.co/app: worker
    sidecar.istio.io/inject: "false"
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-worker-daemon-set
  namespace: default
spec:
  selector:
    matchLabels:
      app.kubeshark.co/app: worker
      app.kubernetes.io/name: kubeshark
      app.kubernetes.io/instance: kubeshark
  template:
    metadata:
      labels:
        app.kubeshark.co/app: worker
        helm.sh/chart: kubeshark-52.3.90
        app.kubernetes.io/name: kubeshark
        app.kubernetes.io/instance: kubeshark
        app.kubernetes.io/version: "52.3.90"
        app.kubernetes.io/managed-by: Helm
      name: kubeshark-worker-daemon-set
      namespace: kubeshark
    spec:
      containers:
        - command:
            - ./worker
            - -i
            - any
            - -port
            - '30001'
            - -metrics-port
            - '49100'
            - -packet-capture
            - 'best'
            - -unixsocket
            - -servicemesh
            - -procfs
            - /hostproc
            - -disable-ebpf
            - -resolution-strategy
            - 'auto'
            - -staletimeout
            - '30'
          image: 'docker.io/kubeshark/worker:v52.3.90'
          imagePullPolicy: Always
          name: sniffer
          ports:
            - containerPort: 49100
              protocol: TCP
              name: metrics
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: TCP_STREAM_CHANNEL_TIMEOUT_MS
              value: '10000'
            - name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
              value: 'false'
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
            - name: PROFILING_ENABLED
              value: 'false'
            - name: SENTRY_ENABLED
              value: 'false'
            - name: SENTRY_ENVIRONMENT
              value: 'production'
          resources:
            limits:
              memory: 5Gi
            requests:
              cpu: 50m
              memory: 50Mi
          securityContext:
            capabilities:
              add:
                - NET_RAW
                - NET_ADMIN
                - SYS_ADMIN
                - SYS_PTRACE
                - DAC_OVERRIDE
              drop:
                - ALL
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 5
            tcpSocket:
              port: 30001
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 5
            tcpSocket:
              port: 30001
          volumeMounts:
            - mountPath: /hostproc
              name: proc
              readOnly: true
            - mountPath: /sys
              name: sys
              readOnly: true
            - mountPath: /app/data
              name: data
        - command:
            - ./tracer
            - -procfs
            - /hostproc
            - -disable-ebpf
            - -disable-tls-log
          image: 'docker.io/kubeshark/worker:v52.3.90'
          imagePullPolicy: Always
          name: tracer
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: PROFILING_ENABLED
              value: 'false'
            - name: SENTRY_ENABLED
              value: 'false'
            - name: SENTRY_ENVIRONMENT
              value: 'production'
          resources:
            limits:
              memory: 5Gi
            requests:
              cpu: 50m
              memory: 50Mi
          securityContext:
            capabilities:
              add:
                - SYS_ADMIN
                - SYS_PTRACE
                - SYS_RESOURCE
                - IPC_LOCK
                - NET_RAW
                - NET_ADMIN
              drop:
                - ALL
          volumeMounts:
            - mountPath: /hostproc
              name: proc
              readOnly: true
            - mountPath: /sys
              name: sys
              readOnly: true
            - mountPath: /app/data
              name: data
            - mountPath: /etc/os-release
              name: os-release
              readOnly: true
            - mountPath: /hostroot
              mountPropagation: HostToContainer
              name: root
              readOnly: true
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: kubeshark-service-account
      terminationGracePeriodSeconds: 0
      tolerations:
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      volumes:
        - hostPath:
            path: /proc
          name: proc
        - hostPath:
            path: /sys
          name: sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - hostPath:
            path: /etc/os-release
          name: os-release
        - hostPath:
            path: /
          name: root
        - name: data
          emptyDir:
            sizeLimit: 5000Mi
---
# Source: kubeshark/templates/04-hub-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubeshark.co/app: hub
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-hub
  namespace: default
spec:
  replicas: 1 # Set the desired number of replicas
  selector:
    matchLabels:
      app.kubeshark.co/app: hub
      app.kubernetes.io/name: kubeshark
      app.kubernetes.io/instance: kubeshark
  template:
    metadata:
      labels:
        app.kubeshark.co/app: hub
        helm.sh/chart: kubeshark-52.3.90
        app.kubernetes.io/name: kubeshark
        app.kubernetes.io/instance: kubeshark
        app.kubernetes.io/version: "52.3.90"
        app.kubernetes.io/managed-by: Helm
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: kubeshark-service-account
      containers:
        - name: hub
          command:
            - ./hub
            - -port
            - "8080"
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SENTRY_ENABLED
              value: 'false'
            - name: SENTRY_ENVIRONMENT
              value: 'production'
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
            - name: PROFILING_ENABLED
              value: 'false'
          image: 'docker.io/kubeshark/hub:v52.3.90'
          imagePullPolicy: Always
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
          resources:
            limits:
              memory: 5Gi
            requests:
              cpu: 50m
              memory: 50Mi
          volumeMounts:
            - name: saml-x509-volume
              mountPath: "/etc/saml/x509"
              readOnly: true
      volumes:
        - name: saml-x509-volume
          projected:
            sources:
              - secret:
                  name: kubeshark-saml-x509-crt-secret
                  items:
                    - key: AUTH_SAML_X509_CRT
                      path: kubeshark.crt
              - secret:
                  name: kubeshark-saml-x509-key-secret
                  items:
                    - key: AUTH_SAML_X509_KEY
                      path: kubeshark.key
---
# Source: kubeshark/templates/06-front-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubeshark.co/app: front
    helm.sh/chart: kubeshark-52.3.90
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/instance: kubeshark
    app.kubernetes.io/version: "52.3.90"
    app.kubernetes.io/managed-by: Helm
  annotations:
  name: kubeshark-front
  namespace: default
spec:
  replicas: 1 # Set the desired number of replicas
  selector:
    matchLabels:
      app.kubeshark.co/app: front
      app.kubernetes.io/name: kubeshark
      app.kubernetes.io/instance: kubeshark
  template:
    metadata:
      labels:
        app.kubeshark.co/app: front
        helm.sh/chart: kubeshark-52.3.90
        app.kubernetes.io/name: kubeshark
        app.kubernetes.io/instance: kubeshark
        app.kubernetes.io/version: "52.3.90"
        app.kubernetes.io/managed-by: Helm
    spec:
      containers:
        - env:
            - name: REACT_APP_AUTH_ENABLED
              value: 'true'
            - name: REACT_APP_AUTH_TYPE
              value: 'oidc'
            - name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
              value: ' '
            - name: REACT_APP_TIMEZONE
              value: ' '
            - name: REACT_APP_SCRIPTING_DISABLED
              value: 'false'
            - name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
              value: 'false'
            - name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
              value: 'true'
            - name: REACT_APP_BPF_OVERRIDE_DISABLED
              value: 'false'
            - name: REACT_APP_RECORDING_DISABLED
              value: 'false'
            - name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
              value: 'false'
            - name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
              value: 'true'
            - name: REACT_APP_SUPPORT_CHAT_ENABLED
              value: 'true'
            - name: REACT_APP_DISSECTORS_UPDATING_ENABLED
              value: 'true'
            - name: REACT_APP_SENTRY_ENABLED
              value: 'false'
            - name: REACT_APP_SENTRY_ENVIRONMENT
              value: 'production'
          image: 'docker.io/kubeshark/front:v52.3.90'
          imagePullPolicy: Always
          name: kubeshark-front
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 3
            tcpSocket:
              port: 8080
            timeoutSeconds: 1
          resources:
            limits:
              cpu: 750m
              memory: 1Gi
            requests:
              cpu: 50m
              memory: 50Mi
          volumeMounts:
            - name: nginx-config
              mountPath: /etc/nginx/conf.d/default.conf
              subPath: default.conf
              readOnly: true
      volumes:
        - name: nginx-config
          configMap:
            name: kubeshark-nginx-config-map
      dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: kubeshark-service-account
manifests/prometheus/kube_prometheus_stack.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
grafana:
  additionalDataSources: []
prometheus:
  prometheusSpec:
    scrapeInterval: 10s
    evaluationInterval: 30s
    additionalScrapeConfigs: |
      - job_name: 'kubeshark-worker-metrics'
        kubernetes_sd_configs:
          - role: endpoints
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_name]
            target_label: pod
          - source_labels: [__meta_kubernetes_pod_node_name]
            target_label: node
          - source_labels: [__meta_kubernetes_endpoint_port_name]
            action: keep
            regex: ^metrics$
          - source_labels: [__address__, __meta_kubernetes_endpoint_port_number]
            action: replace
            regex: ([^:]+)(?::\d+)?
            replacement: $1:49100
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
manifests/tls/certificate.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: kubeshark-tls
  namespace: default
spec:
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  secretName: cert-kubeshark
  dnsNames:
    - ks.svc.cluster.local
manifests/tls/cluster-issuer.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: info@kubeshark.co
    privateKeySecretRef:
      name: letsencrypt-prod-key
    solvers:
      - http01:
          ingress:
            class: kubeshark-ingress-class
manifests/tls/run.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/bash

__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"

helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
  cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --create-namespace \
  --version v1.9.1

kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml
@@ -9,9 +9,11 @@ import (
 var (
 	Software       = "Kubeshark"
 	Program        = "kubeshark"
 	Description    = "The API Traffic Analyzer for Kubernetes"
 	Website        = "https://kubeshark.co"
-	Ver            = "0.0"
-	Branch         = "develop"
+	Email          = "info@kubeshark.co"
+	Ver            = "0.0.0"
+	Branch         = "master"
 	GitCommitHash  = "" // this var is overridden using ldflags in makefile when building
 	BuildTimestamp = "" // this var is overridden using ldflags in makefile when building
+	RBACVersion    = "v1"
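GitCommitHash and BuildTimestamp stay empty in source and are injected at link time. A stand-alone sketch of that ldflags mechanism (the package and build command here are illustrative, not the repo's actual Makefile):

// Building this sketch with:
//   go build -ldflags "-X main.GitCommitHash=$(git rev-parse HEAD)"
// replaces the empty default below at link time.
package main

import "fmt"

var GitCommitHash = "" // overridden via -X at build time

func main() {
	if GitCommitHash == "" {
		fmt.Println("development build (no commit hash injected)")
		return
	}
	fmt.Println("built from commit", GitCommitHash)
}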
misc/fsUtils/globUtils.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package fsUtils

import (
	"fmt"
	"os"
	"path/filepath"
)

func RemoveFilesByExtension(dirPath string, ext string) error {
	files, err := filepath.Glob(filepath.Join(dirPath, fmt.Sprintf("/*.%s", ext)))
	if err != nil {
		return err
	}

	for _, f := range files {
		if err := os.Remove(f); err != nil {
			return err
		}
	}

	return nil
}
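A hypothetical caller of RemoveFilesByExtension; note the helper globs only the top level of dirPath and does not recurse into subdirectories. The import path is assumed from the file's location in the repo:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/kubeshark/kubeshark/misc/fsUtils" // import path assumed from repo layout
)

func main() {
	// Clear all *.log files from a scratch directory.
	dir := filepath.Join(os.TempDir(), "kubeshark-demo")
	if err := fsUtils.RemoveFilesByExtension(dir, "log"); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}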
@@ -13,15 +13,15 @@ import (
 	"github.com/rs/zerolog/log"
 )
 
-func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
-	podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix)
-	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.SelfNamespace})
+func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string, grep string) error {
+	podExactRegex := regexp.MustCompile("^" + kubernetes.SELF_RESOURCES_PREFIX)
+	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.Release.Namespace})
 	if err != nil {
 		return err
 	}
 
 	if len(pods) == 0 {
-		return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.SelfNamespace)
+		return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.Release.Namespace)
 	}
 
 	newZipFile, err := os.Create(filePath)
@@ -34,7 +34,7 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
 
 	for _, pod := range pods {
 		for _, container := range pod.Spec.Containers {
-			logs, err := provider.GetPodLogs(ctx, pod.Namespace, pod.Name, container.Name)
+			logs, err := provider.GetPodLogs(ctx, pod.Namespace, pod.Name, container.Name, grep)
 			if err != nil {
 				log.Error().Err(err).Msg("Failed to get logs!")
 				continue
@@ -60,23 +60,23 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
 		}
 	}
 
-	events, err := provider.GetNamespaceEvents(ctx, config.Config.SelfNamespace)
+	events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.Release.Namespace)
 	if err != nil {
 		log.Error().Err(err).Msg("Failed to get k8b events!")
 	} else {
-		log.Debug().Str("namespace", config.Config.SelfNamespace).Msg("Successfully read events.")
+		log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully read events.")
 	}
 
-	if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.SelfNamespace)); err != nil {
+	if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.Release.Namespace)); err != nil {
 		log.Error().Err(err).Msg("Failed write logs!")
 	} else {
-		log.Debug().Str("namespace", config.Config.SelfNamespace).Msg("Successfully added events.")
+		log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully added events.")
 	}
 
-	if err := AddFileToZip(zipWriter, config.Config.ConfigFilePath); err != nil {
+	if err := AddFileToZip(zipWriter, config.ConfigFilePath); err != nil {
 		log.Error().Err(err).Msg("Failed write file!")
 	} else {
-		log.Debug().Str("file-path", config.Config.ConfigFilePath).Msg("Successfully added file.")
+		log.Debug().Str("file-path", config.ConfigFilePath).Msg("Successfully added file.")
 	}
 
 	log.Info().Str("path", filePath).Msg("You can find the ZIP file with all logs at:")
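The new grep parameter is threaded through to GetPodLogs, whose implementation is not part of this hunk. A minimal sketch of one plausible interpretation, substring filtering per log line (the helper name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// filterLines is a hypothetical stand-in for what a grep parameter could do:
// keep only log lines containing the pattern, or everything when it is empty.
func filterLines(logs string, grep string) string {
	if grep == "" {
		return logs
	}
	var kept []string
	for _, line := range strings.Split(logs, "\n") {
		if strings.Contains(line, grep) {
			kept = append(kept, line)
		}
	}
	return strings.Join(kept, "\n")
}

func main() {
	logs := "INF worker started\nERR dissection failed\nINF flushed"
	fmt.Println(filterLines(logs, "ERR")) // -> "ERR dissection failed"
}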
misc/scripting.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package misc

import (
	"os"
	"path/filepath"

	"github.com/robertkrimen/otto/ast"
	"github.com/robertkrimen/otto/file"
	"github.com/robertkrimen/otto/parser"
)

type Script struct {
	Path   string `json:"path"`
	Title  string `json:"title"`
	Code   string `json:"code"`
	Active bool   `json:"active"`
}

type ConfigMapScript struct {
	Title  string `json:"title"`
	Code   string `json:"code"`
	Active bool   `json:"active"`
}

func (s *Script) ConfigMap() ConfigMapScript {
	return ConfigMapScript{
		Title:  s.Title,
		Code:   s.Code,
		Active: s.Active,
	}
}

func ReadScriptFile(path string) (script *Script, err error) {
	filename := filepath.Base(path)
	var body []byte
	body, err = os.ReadFile(path)
	if err != nil {
		return
	}
	content := string(body)

	var program *ast.Program
	program, err = parser.ParseFile(nil, filename, content, parser.StoreComments)
	if err != nil {
		return
	}

	var title string
	var titleIsSet bool
	code := content

	var idx0 file.Idx
	for node, comments := range program.Comments {
		if (titleIsSet && node.Idx0() > idx0) || len(comments) == 0 {
			continue
		}

		idx0 = node.Idx0()
		title = comments[0].Text
		titleIsSet = true
	}

	script = &Script{
		Path:   path,
		Title:  title,
		Code:   code,
		Active: false,
	}

	return
}
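A hypothetical use of ReadScriptFile: the text of the script's earliest comment becomes Title and the whole source becomes Code. The import path is assumed from the repo layout:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/kubeshark/kubeshark/misc" // assumed import path
)

func main() {
	// Write a small script whose leading comment should become the title.
	path := filepath.Join(os.TempDir(), "demo.js")
	src := "// My first hook\nfunction onItemCaptured(data) { console.log(data); }\n"
	if err := os.WriteFile(path, []byte(src), 0644); err != nil {
		fmt.Println(err)
		return
	}
	script, err := misc.ReadScriptFile(path)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(script.Title)  // the first comment's text, e.g. "My first hook"
	fmt.Println(script.Active) // false by default
}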
@@ -16,7 +16,7 @@ import (
 )
 
 func CheckNewerVersion() {
-	if os.Getenv("KUBESHARK_DISABLE_VERSION_CHECK") != "" {
+	if os.Getenv(fmt.Sprintf("%s_DISABLE_VERSION_CHECK", strings.ToUpper(misc.Program))) != "" {
 		return
 	}
 
@@ -44,7 +44,7 @@ func CheckNewerVersion() {
 	} else {
 		downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website)
 	}
-	msg := fmt.Sprintf("There is a new release! %v -> %v run:", misc.Ver, latestVersion)
+	msg := fmt.Sprintf("There is a new release! %v -> %v Please upgrade to the latest release, as new releases are not always backward compatible. Run:", misc.Ver, latestVersion)
 	log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg))
 }
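With misc.Program set to "kubeshark", the derived opt-out variable is still KUBESHARK_DISABLE_VERSION_CHECK, so existing setups keep working; the change only removes the hard-coded name. The derivation in isolation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	program := "kubeshark" // misc.Program
	envVar := fmt.Sprintf("%s_DISABLE_VERSION_CHECK", strings.ToUpper(program))
	fmt.Println(envVar) // KUBESHARK_DISABLE_VERSION_CHECK
}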
Some files were not shown because too many files have changed in this diff.