mirror of
https://github.com/falcosecurity/falco.git
synced 2026-03-20 11:42:06 +00:00
Compare commits
452 Commits
agent/0.47
...
agent/0.76
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
19db7890b3 | ||
|
|
cef147708a | ||
|
|
1c9f86bdd8 | ||
|
|
db0d913acc | ||
|
|
e0458cba67 | ||
|
|
af564f17a6 | ||
|
|
cd2b210fe3 | ||
|
|
d6d975e28c | ||
|
|
5ac3e7d074 | ||
|
|
60af4166de | ||
|
|
d321666ee5 | ||
|
|
7169dd9cf0 | ||
|
|
15ed651da9 | ||
|
|
7441052b9a | ||
|
|
69ede8a785 | ||
|
|
8dd34205a8 | ||
|
|
f379e97124 | ||
|
|
109f86cd85 | ||
|
|
e51fbd6569 | ||
|
|
060bf78ed8 | ||
|
|
a2a4cbf586 | ||
|
|
b4bd11bf70 | ||
|
|
d5869599f7 | ||
|
|
b0bc00224c | ||
|
|
2f4b39ae6f | ||
|
|
326fb2998a | ||
|
|
e3ef7a2ed4 | ||
|
|
43f7ee00fb | ||
|
|
8bcd0e8f05 | ||
|
|
85f51cf38c | ||
|
|
2467766f07 | ||
|
|
2cbff6ff70 | ||
|
|
e02135f9f0 | ||
|
|
c1de3dfe7a | ||
|
|
27df0ad29b | ||
|
|
e7c2068267 | ||
|
|
ffed7ef63c | ||
|
|
fe283dcd76 | ||
|
|
4a0ec07235 | ||
|
|
fdebfb5b6c | ||
|
|
0b775fa722 | ||
|
|
33faa911d7 | ||
|
|
24fb84df60 | ||
|
|
7550683862 | ||
|
|
5755e79fe9 | ||
|
|
dfbe450eeb | ||
|
|
0867245b73 | ||
|
|
82377348ce | ||
|
|
fdb2312bcf | ||
|
|
fbb5451fd9 | ||
|
|
83c309a6c0 | ||
|
|
6bcf397a17 | ||
|
|
9ceb11a7c8 | ||
|
|
e4443bea8e | ||
|
|
15e2d0bf7e | ||
|
|
480ba4e0f8 | ||
|
|
6aae17600f | ||
|
|
e9e0177901 | ||
|
|
01459fb49a | ||
|
|
d36df62d1e | ||
|
|
36d775100e | ||
|
|
0020b05624 | ||
|
|
3edfc6ba8e | ||
|
|
9ed1ff5f26 | ||
|
|
664d8fbc1d | ||
|
|
6078d4bd43 | ||
|
|
53776b0ec6 | ||
|
|
2eda3432e9 | ||
|
|
56e07f53f2 | ||
|
|
09d570d985 | ||
|
|
87fd4aba70 | ||
|
|
332e3ad874 | ||
|
|
5127d51732 | ||
|
|
d8fdaa0d88 | ||
|
|
b993683b96 | ||
|
|
b8027b5e54 | ||
|
|
d57b3fe3cf | ||
|
|
dd3a7df346 | ||
|
|
ba1c8e4506 | ||
|
|
ccea09b089 | ||
|
|
9ec26795c5 | ||
|
|
5844030bcb | ||
|
|
eeae04ac67 | ||
|
|
e5bd58ab91 | ||
|
|
2fa867e8d0 | ||
|
|
55b9408c7d | ||
|
|
31482c2a18 | ||
|
|
5b65fe11f1 | ||
|
|
5d21936f60 | ||
|
|
5f688d89e4 | ||
|
|
2bda0f7ed5 | ||
|
|
9b35e06db8 | ||
|
|
60d609b8ec | ||
|
|
38f1d20ab2 | ||
|
|
5230b22876 | ||
|
|
1676333d7b | ||
|
|
4a8ac8d164 | ||
|
|
e1044629cb | ||
|
|
080305c7a0 | ||
|
|
26d5ea0123 | ||
|
|
53ca4349f9 | ||
|
|
0fcd01f98d | ||
|
|
1b591dc4f3 | ||
|
|
43b773e9b2 | ||
|
|
0d88c3020d | ||
|
|
33a28cc173 | ||
|
|
a68d2ad769 | ||
|
|
a921012a6c | ||
|
|
08afb75009 | ||
|
|
823c105f54 | ||
|
|
bde8d67330 | ||
|
|
9504d420f0 | ||
|
|
4f5ab79c69 | ||
|
|
6540a856fa | ||
|
|
c3c171c7e5 | ||
|
|
011cb2f030 | ||
|
|
59ab40d457 | ||
|
|
cf5397f701 | ||
|
|
cff8ca428a | ||
|
|
d9cb1e2b27 | ||
|
|
96992d7ac3 | ||
|
|
a22099c8c3 | ||
|
|
0e009fc89a | ||
|
|
1a41eeada7 | ||
|
|
fefb8ba614 | ||
|
|
2bc9d35d37 | ||
|
|
09748fcbb3 | ||
|
|
a0e88417fc | ||
|
|
e44ce9a8d3 | ||
|
|
c4c5d2f585 | ||
|
|
340ee2ece7 | ||
|
|
00dd3c47c0 | ||
|
|
7c8a85158a | ||
|
|
d0650688d5 | ||
|
|
425196f974 | ||
|
|
70d6e8de2f | ||
|
|
6dfdadf527 | ||
|
|
606af16f27 | ||
|
|
3b5f959de9 | ||
|
|
a4d3d4d731 | ||
|
|
276ab9139f | ||
|
|
ee02571889 | ||
|
|
6aa2373acd | ||
|
|
b0cf038e1d | ||
|
|
548790c663 | ||
|
|
151d1e67c5 | ||
|
|
68cca84ba6 | ||
|
|
46f993fa40 | ||
|
|
42167e53cc | ||
|
|
4e7fcf3f88 | ||
|
|
64a014c356 | ||
|
|
ac82dd4b54 | ||
|
|
70e49161b1 | ||
|
|
1cdacc1494 | ||
|
|
ca9e1ebfef | ||
|
|
6be38a3237 | ||
|
|
bf1f2cb2fd | ||
|
|
ac70325522 | ||
|
|
608d4e234f | ||
|
|
d21fb408d4 | ||
|
|
aaa294abd1 | ||
|
|
8e46db05c6 | ||
|
|
4efda9cb97 | ||
|
|
57c1b33562 | ||
|
|
75a44a67f9 | ||
|
|
fbfd540ad2 | ||
|
|
e88c9ec8e3 | ||
|
|
3202704950 | ||
|
|
689c02666f | ||
|
|
12de2e4119 | ||
|
|
cb7dab61e8 | ||
|
|
9791881444 | ||
|
|
84b3543cc0 | ||
|
|
71fee6753b | ||
|
|
7ff2f66437 | ||
|
|
1f008d6c39 | ||
|
|
dc44655ec2 | ||
|
|
ef9e045a40 | ||
|
|
0ec46feef2 | ||
|
|
2ebe9e06a8 | ||
|
|
33974c6912 | ||
|
|
9883656882 | ||
|
|
d5a107b15f | ||
|
|
b208008be1 | ||
|
|
6397c3a556 | ||
|
|
1221399ac5 | ||
|
|
de3ca31b15 | ||
|
|
463ade2b1d | ||
|
|
1c645862e1 | ||
|
|
f123313389 | ||
|
|
1753d16962 | ||
|
|
61f738826c | ||
|
|
7ae765bfc9 | ||
|
|
f6b3068259 | ||
|
|
e1293a7eca | ||
|
|
02645e7a2e | ||
|
|
c8c0a97f64 | ||
|
|
d96cf4c369 | ||
|
|
e2be47e3c2 | ||
|
|
ee2c668746 | ||
|
|
09e1caf4bb | ||
|
|
68d29fc906 | ||
|
|
7ac49a2f99 | ||
|
|
e6006e3787 | ||
|
|
5d856ef97a | ||
|
|
3b486fb6c6 | ||
|
|
daedcf172f | ||
|
|
414a4aaba7 | ||
|
|
5382aa4e3b | ||
|
|
3a60caa9ed | ||
|
|
7a31c59fe4 | ||
|
|
8167510694 | ||
|
|
e92ca7574e | ||
|
|
ae73f75d81 | ||
|
|
1635d08df0 | ||
|
|
5420d0e3a0 | ||
|
|
72014f3522 | ||
|
|
aed1897cf1 | ||
|
|
1e33358742 | ||
|
|
dca7686e47 | ||
|
|
5c09ef2c3f | ||
|
|
8641f3c958 | ||
|
|
283c6eea99 | ||
|
|
aa073586f1 | ||
|
|
498d083980 | ||
|
|
c41bcbd240 | ||
|
|
c7d61305cc | ||
|
|
ab3da5dfcf | ||
|
|
95bb96e6ec | ||
|
|
1666d03afc | ||
|
|
a38f7f181b | ||
|
|
2d0963e97c | ||
|
|
fbdeb26e99 | ||
|
|
7e4d9f5b51 | ||
|
|
5bb94c81ed | ||
|
|
30ebfd4bcc | ||
|
|
64145ba961 | ||
|
|
598cbbe5e7 | ||
|
|
6fd7f0d628 | ||
|
|
240a8ffffa | ||
|
|
d1265ff520 | ||
|
|
0bc2d4f162 | ||
|
|
2c189d6a60 | ||
|
|
ebed9f8dfd | ||
|
|
ed2586eafb | ||
|
|
9d6fe878e1 | ||
|
|
de520a60fb | ||
|
|
d6fe29b47d | ||
|
|
5c1aa8dc44 | ||
|
|
8d57d18959 | ||
|
|
a71cbcd7ee | ||
|
|
3349decd22 | ||
|
|
eecc92736b | ||
|
|
f1b44da90c | ||
|
|
42e50356cf | ||
|
|
9e7ce4d36f | ||
|
|
2991ea423a | ||
|
|
481582ca09 | ||
|
|
38f488bfda | ||
|
|
42a3dd1ea3 | ||
|
|
b8743385e8 | ||
|
|
87caa55b17 | ||
|
|
646aed5b8b | ||
|
|
6bfff60fc3 | ||
|
|
99d6bccc81 | ||
|
|
6ebbbd47d8 | ||
|
|
69ebcdd8e9 | ||
|
|
74c97489bd | ||
|
|
5bafa198c6 | ||
|
|
edce729bd9 | ||
|
|
f426c4292d | ||
|
|
277d8ab887 | ||
|
|
697d718739 | ||
|
|
307a484425 | ||
|
|
c5a964e651 | ||
|
|
612fbb00d9 | ||
|
|
e88612a1af | ||
|
|
a86e3fc748 | ||
|
|
e97056569f | ||
|
|
f92f74eaa8 | ||
|
|
0e163b892f | ||
|
|
4d148ce28f | ||
|
|
974d864b3b | ||
|
|
a3c83e7f6e | ||
|
|
dafc4c2b88 | ||
|
|
c066be3905 | ||
|
|
f5ce6752be | ||
|
|
060db62644 | ||
|
|
1ad91c05f5 | ||
|
|
76876bc3ae | ||
|
|
e183de3b89 | ||
|
|
87a6c74290 | ||
|
|
d42d0e2dd1 | ||
|
|
718113f7bd | ||
|
|
955e1d78b1 | ||
|
|
135b4d9975 | ||
|
|
0cabeddf49 | ||
|
|
6127ca6e41 | ||
|
|
a2a707f771 | ||
|
|
3c2051176e | ||
|
|
73fbbdb577 | ||
|
|
52b006e875 | ||
|
|
f72182d9af | ||
|
|
8d58589c39 | ||
|
|
ec5adfe892 | ||
|
|
a25166b7ac | ||
|
|
18900089f3 | ||
|
|
490a3fef00 | ||
|
|
5e8dc8bce4 | ||
|
|
d29742a617 | ||
|
|
353defe362 | ||
|
|
6b9620084f | ||
|
|
537565d27a | ||
|
|
b2529f1108 | ||
|
|
561c388dab | ||
|
|
db469c6514 | ||
|
|
fb36af12cf | ||
|
|
7d711dbb32 | ||
|
|
58357d3bf9 | ||
|
|
8b98a61bcc | ||
|
|
f70a7aef6f | ||
|
|
c12ab700ec | ||
|
|
38f562ea89 | ||
|
|
f1aadef054 | ||
|
|
800a3f1ea1 | ||
|
|
1c21b3bc8a | ||
|
|
185729d5d6 | ||
|
|
0a69fc0c85 | ||
|
|
88faa7c1e7 | ||
|
|
a0a6914b6a | ||
|
|
31464de885 | ||
|
|
df08a80a12 | ||
|
|
8a1f62c610 | ||
|
|
1e205db8aa | ||
|
|
9b308d2793 | ||
|
|
3d5789a297 | ||
|
|
b9d0857362 | ||
|
|
1afbaba632 | ||
|
|
e0a5034a43 | ||
|
|
6356490b1c | ||
|
|
511d0997da | ||
|
|
6f9f1e4792 | ||
|
|
a99f09da96 | ||
|
|
c09b6390a3 | ||
|
|
3f2814259a | ||
|
|
b04bccd1a7 | ||
|
|
e21fecf0ef | ||
|
|
ceafeca87e | ||
|
|
9285aa59c1 | ||
|
|
1e0ddba11a | ||
|
|
34e17cb951 | ||
|
|
bc83ac18a0 | ||
|
|
10d0c8f982 | ||
|
|
8f53bcbb05 | ||
|
|
7286b50f4d | ||
|
|
4c60b7c1d2 | ||
|
|
85480f32d6 | ||
|
|
4139370df5 | ||
|
|
b6d1101cb6 | ||
|
|
43d53bb09e | ||
|
|
af3a708251 | ||
|
|
f4bb49f1f5 | ||
|
|
362a6b7b9a | ||
|
|
77a5429cae | ||
|
|
9ecdf30314 | ||
|
|
7c419b6d6b | ||
|
|
767f2d5bb4 | ||
|
|
3cbf641ded | ||
|
|
4ab72d0391 | ||
|
|
9e933ce5ba | ||
|
|
c3c6ec67f7 | ||
|
|
9062459669 | ||
|
|
94cef1b541 | ||
|
|
dd6b4fd7c0 | ||
|
|
c6953e810b | ||
|
|
104c99c42e | ||
|
|
f2bfa584e4 | ||
|
|
6f54a752a2 | ||
|
|
6c04f53d24 | ||
|
|
db67034338 | ||
|
|
2a2dcaf25d | ||
|
|
e6aefef4eb | ||
|
|
7db8e0921c | ||
|
|
ea97325708 | ||
|
|
3840622984 | ||
|
|
0ee32178b7 | ||
|
|
8b116c2ad1 | ||
|
|
37388c56ff | ||
|
|
0d46fcf819 | ||
|
|
c6c074ef60 | ||
|
|
14c9d05f9f | ||
|
|
882c6c94ea | ||
|
|
349372d733 | ||
|
|
858a69bb2c | ||
|
|
1d0c9b1714 | ||
|
|
8aa9c21d11 | ||
|
|
64ecd157fd | ||
|
|
2bad529d33 | ||
|
|
09a9ab4f85 | ||
|
|
39e9043ac7 | ||
|
|
f4abec4639 | ||
|
|
bed5ab4f0c | ||
|
|
4f645c49e1 | ||
|
|
54b30bc248 | ||
|
|
b509c4f0c8 | ||
|
|
af8d6c9d10 | ||
|
|
ef08478bb7 | ||
|
|
a616301bd9 | ||
|
|
8e2a3ef5c3 | ||
|
|
47bd6af69a | ||
|
|
d1d0dbdbde | ||
|
|
94fcc5399e | ||
|
|
da61134463 | ||
|
|
4189bb72da | ||
|
|
d2d6118b9b | ||
|
|
4915fdfc3a | ||
|
|
b855066dcb | ||
|
|
ae7f5eb631 | ||
|
|
5f9f5c47d1 | ||
|
|
c6b433c2df | ||
|
|
29cc8ee571 | ||
|
|
c66b6402d8 | ||
|
|
2e5ed34357 | ||
|
|
3e1117d746 | ||
|
|
7fddaf2499 | ||
|
|
28e9478dbb | ||
|
|
ae0ba57306 | ||
|
|
a0b26def13 | ||
|
|
4fc2870c59 | ||
|
|
2fad859600 | ||
|
|
bef628dc05 | ||
|
|
1db2339ece | ||
|
|
f68fba103e | ||
|
|
897df28036 | ||
|
|
6ab0139532 | ||
|
|
24c21307d0 | ||
|
|
da77df142f | ||
|
|
81a145fd4f | ||
|
|
fa4c2948bf | ||
|
|
e49c3e68e7 | ||
|
|
f64148999a | ||
|
|
30b1f23b17 | ||
|
|
20d81523a1 | ||
|
|
c140b23678 | ||
|
|
3fbcb35e91 | ||
|
|
f547dc97ab | ||
|
|
917d66e9e8 | ||
|
|
73e52e1e91 | ||
|
|
318286f8c4 | ||
|
|
0c44711e76 | ||
|
|
b6f08cc403 | ||
|
|
8d181e9051 |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,6 +1,7 @@
|
||||
/build*
|
||||
*~
|
||||
test/falco_test.pyc
|
||||
*.pyc
|
||||
|
||||
test/falco_tests.yaml
|
||||
test/traces-negative
|
||||
test/traces-positive
|
||||
@@ -12,7 +13,7 @@ test/results*.json.*
|
||||
userspace/falco/lua/re.lua
|
||||
userspace/falco/lua/lpeg.so
|
||||
|
||||
docker/event-generator/event-generator
|
||||
docker/event-generator/event_generator
|
||||
docker/event-generator/mysqld
|
||||
docker/event-generator/httpd
|
||||
docker/event-generator/sha1sum
|
||||
|
||||
23
.travis.yml
23
.travis.yml
@@ -2,6 +2,9 @@ language: c
|
||||
env:
|
||||
- BUILD_TYPE=Debug
|
||||
- BUILD_TYPE=Release
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
- sudo apt-get update
|
||||
@@ -9,33 +12,29 @@ install:
|
||||
- sudo apt-get --force-yes install g++-4.8
|
||||
- sudo apt-get install rpm linux-headers-$(uname -r)
|
||||
- git clone https://github.com/draios/sysdig.git ../sysdig
|
||||
- sudo apt-get install -y python-pip libvirt-dev jq
|
||||
- sudo apt-get install -y python-pip libvirt-dev jq dkms
|
||||
- cd ..
|
||||
- curl -Lo avocado-36.0-tar.gz https://github.com/avocado-framework/avocado/archive/36.0lts.tar.gz
|
||||
- tar -zxvf avocado-36.0-tar.gz
|
||||
- cd avocado-36.0lts
|
||||
- sudo pip install -r requirements-travis.txt
|
||||
- sudo -H pip install -r requirements.txt
|
||||
- sudo python setup.py install
|
||||
- cd ../falco
|
||||
before_script:
|
||||
- export KERNELDIR=/lib/modules/$(ls /lib/modules | sort | head -1)/build
|
||||
- export KERNELDIR=/lib/modules/$(uname -r)/build
|
||||
script:
|
||||
- set -e
|
||||
- export CC="gcc-4.8"
|
||||
- export CXX="g++-4.8"
|
||||
- wget https://s3.amazonaws.com/download.draios.com/dependencies/cmake-3.3.2.tar.gz
|
||||
- tar -xzf cmake-3.3.2.tar.gz
|
||||
- cd cmake-3.3.2
|
||||
- ./bootstrap --prefix=/usr
|
||||
- make
|
||||
- sudo make install
|
||||
- cd ..
|
||||
- mkdir build
|
||||
- cd build
|
||||
- cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE
|
||||
- cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DDRAIOS_DEBUG_FLAGS="-D_DEBUG -DNDEBUG"
|
||||
- make VERBOSE=1
|
||||
- make package
|
||||
- cd ..
|
||||
- cp falco*.deb ../docker/local
|
||||
- cd ../docker/local
|
||||
- docker build -t sysdig/falco:test .
|
||||
- cd ../..
|
||||
- sudo test/run_regression_tests.sh $TRAVIS_BRANCH
|
||||
notifications:
|
||||
webhooks:
|
||||
|
||||
162
CHANGELOG.md
162
CHANGELOG.md
@@ -2,6 +2,168 @@
|
||||
|
||||
This file documents all notable changes to Falco. The release numbering uses [semantic versioning](http://semver.org).
|
||||
|
||||
## v0.8.1
|
||||
|
||||
Released 2017-10-10
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Fix packaging to specify correct built-in config file [[#288](https://github.com/draios/falco/pull/288)]
|
||||
|
||||
## v0.8.0
|
||||
|
||||
Released 2017-10-10
|
||||
|
||||
**Important**: the location for falco's configuration file has moved from `/etc/falco.yaml` to `/etc/falco/falco.yaml`. The default rules file has moved from `/etc/falco_rules.yaml` to `/etc/falco/falco_rules.yaml`. In addition, 0.8.0 has added a _local_ rules file to `/etc/falco/falco_rules.local.yaml`. See [the documentation](https://github.com/draios/falco/wiki/Falco-Default-and-Local-Rules-Files) for more details.
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add the ability to append one list to another list by setting an `append: true` attribute. [[#264](https://github.com/draios/falco/pull/264)]
|
||||
* Add the ability to append one macro/rule to another list by setting an `append: true` attribute. [[#277](https://github.com/draios/falco/pull/277)]
|
||||
* Ensure that falco rules/config files are preserved across package upgrades/removes if modified. [[#278](https://github.com/draios/falco/pull/278)]
|
||||
* Add the notion of a "local" rules file that should contain modifications to the default falco rules file. [[#278](https://github.com/draios/falco/pull/278)]
|
||||
* When using json output, separately include the individual templated fields in the json object. [[#282](https://github.com/draios/falco/pull/282)]
|
||||
* Add the ability to keep a file/program pipe handle open across rule notifications. [[#283](https://github.com/draios/falco/pull/283)]
|
||||
* New argument `-V` validates rules file and immediately exits. [[#286](https://github.com/draios/falco/pull/286)]
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Minor updates to falco example programs [[#248](https://github.com/draios/falco/pull/248)] [[#275](https://github.com/draios/falco/pull/275)]
|
||||
* Also validate macros at rule parse time. [[#257](https://github.com/draios/falco/pull/257)]
|
||||
* Minor README typo fixes [[#276](https://github.com/draios/falco/pull/276)]
|
||||
* Add a government CLA (contributor license agreement). [[#263](https://github.com/draios/falco/pull/263)]
|
||||
* Add ability to only run rules with a priority >= some threshold [[#281](https://github.com/draios/falco/pull/281)]
|
||||
* Add ability to make output channels unbuffered [[#285](https://github.com/draios/falco/pull/285)]
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Fix installation of falco on OSX [[#252](https://github.com/draios/falco/pull/252)]
|
||||
* Fix a bug that caused the trailing whitespace of a quoted string to be accidentally removed [[#254](https://github.com/draios/falco/pull/254)]
|
||||
* When multiple sets of kernel headers are installed, find the one for the running kernel [[#260](https://github.com/draios/falco/pull/260)]
|
||||
* Allow pathnames in rule/macro conditions to contain '.' characters [[#262](https://github.com/draios/falco/pull/262)]
|
||||
* Fix a bug where a list named "foo" would be substituted even if it were a substring of a longer word like "my_foo" [[#258](https://github.com/draios/falco/pull/258)]
|
||||
* Remove extra trailing newlines from rule output strings [[#265](https://github.com/draios/falco/pull/265)]
|
||||
* Improve build pathnames to avoid relative paths when possible [[#284](https://github.com/draios/falco/pull/284)]
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* Significant changes to default ruleset to address FPs. These changes resulted from hundreds of hours of use in actual customer environments. [[#247](https://github.com/draios/falco/pull/247)] [[#259](https://github.com/draios/falco/pull/259)]
|
||||
* Add official gitlab EE docker image to list of known shell spawning images. Thanks @dkerwin! [[#270](https://github.com/draios/falco/pull/270)]
|
||||
* Add keepalived to list of shell spawning binaries. Thanks @dkerwin! [[#269](https://github.com/draios/falco/pull/269)]
|
||||
|
||||
## v0.7.0
|
||||
|
||||
Released 2017-05-30
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Update the priorities of falco rules to use a wider range of priorities rather than just ERROR/WARNING. More info on the use of priorities in the ruleset can be found [here](https://github.com/draios/falco/wiki/Falco-Rules#rule-priorities). [[#244](https://github.com/draios/falco/pull/244)]
|
||||
|
||||
### Minor Changes
|
||||
|
||||
None.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Fix typos in various markdown files. Thanks @sublimino! [[#241](https://github.com/draios/falco/pull/241)]
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* Add gitlab-mon as a gitlab binary, which allows it to run shells, etc. Thanks @dkerwin! [[#237](https://github.com/draios/falco/pull/237)]
|
||||
* A new rule "Terminal shell in container" that looks for shells spawned in a container with an attached terminal. [[#242](https://github.com/draios/falco/pull/242)]
|
||||
* Fix some FPs related to the sysdig monitor agent. [[#243](https://github.com/draios/falco/pull/243)]
|
||||
* Fix some FPs related to stating containers combined with missed events [[#243](https://github.com/draios/falco/pull/243)]
|
||||
|
||||
## v0.6.1
|
||||
|
||||
Released 2017-05-15
|
||||
|
||||
### Major Changes
|
||||
|
||||
None
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* Small changes to token bucket used to throttle falco events [[#234](https://github.com/draios/falco/pull/234)] [[#235](https://github.com/draios/falco/pull/235)] [[#236](https://github.com/draios/falco/pull/236)] [[#238](https://github.com/draios/falco/pull/238)]
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Update the falco driver to work with kernel 4.11 [[#829](https://github.com/draios/sysdig/pull/829)]
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* Don't allow apache2 to spawn shells in containers [[#231](https://github.com/draios/falco/issues/231)] [[#232](https://github.com/draios/falco/pull/232)]
|
||||
|
||||
## v0.6.0
|
||||
|
||||
Released 2017-03-29
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Add the notion of tagged falco rules. Full documentation for this feature is available on the [wiki](https://github.com/draios/falco/wiki/Falco-Rules#rule-tags). [[#58](https://github.com/draios/falco/issues/58)] [[#59](https://github.com/draios/falco/issues/59)] [[#60](https://github.com/draios/falco/issues/60)] [[#206](https://github.com/draios/falco/pull/206)]
|
||||
* Falco now has its own dedicated kernel module. Previously, it would depend on sysdig being installed and would use sysdig's `sysdig-probe` kernel module. This ensures you can upgrade sysdig and falco without kernel driver compatibility problems. More details on the kernel module and its installation are on the [wiki](https://github.com/draios/falco/wiki/Falco-Kernel-Module). [[#215](https://github.com/draios/falco/issues/215)] [[#223](https://github.com/draios/falco/issues/223)] [[#224](https://github.com/draios/falco/pull/224)]
|
||||
* When providing multiple rules files by specifying `-r` multiple times, make sure that you can override rules/lists/macros. Previously, a list/macro/rule specified in an earlier file could not be overridden in a later file. [[#176](https://github.com/draios/falco/issues/176)] [[#177](https://github.com/draios/falco/pull/177)]
|
||||
* Add example k8s yaml files that show how to run falco as a k8s DaemonSet, and how to run falco-event-generator as a deployment running on one node. [[#222](https://github.com/draios/falco/pull/222)] [[#225](https://github.com/draios/falco/issues/225)] [[#226](https://github.com/draios/falco/pull/226)]
|
||||
* Update third party libraries to address security vulnerabilities. [[#182](https://github.com/draios/falco/pull/182)]
|
||||
* Falco can now be built on OSX. Like sysdig, on OSX it is limited to reading existing trace files. [[#210](https://github.com/draios/falco/pull/210)]
|
||||
|
||||
### Minor Changes
|
||||
* Several changes to [falco-event-generator](https://github.com/draios/falco/wiki/Generating-Sample-Events) to improve usability. [[#205](https://github.com/draios/falco/pull/205)]
|
||||
* Switch to a formatter cache provided by sysdig code instead of using our own. [[#212](https://github.com/draios/falco/pull/212)]
|
||||
* Add automated tests that use locally-built docker images. [[#188](https://github.com/draios/falco/issues/188)]
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Make sure output strings are not truncated when a given %field expression has a NULL value. [[#180](https://github.com/draios/falco/issues/180)] [[#181](https://github.com/draios/falco/pull/181)]
|
||||
* Allow ASSERTs when running travisci tests. [[#199](https://github.com/draios/falco/pull/199)]
|
||||
* Fix make dependencies for lyaml. [[#204](https://github.com/draios/falco/pull/204)] [[#130](https://github.com/draios/falco/issues/130)]
|
||||
* (This was a change in sysdig, but affected falco). Prevent hangs when traversing malformed parent thread state. [[#208](https://github.com/draios/falco/issues/208)]
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* Add confd as a program that can write files below /etc and fleetctl as a program that can spawn shells. [[#175](https://github.com/draios/falco/pull/175)]
|
||||
* Add [exechealthz](https://github.com/kubernetes/contrib/tree/master/exec-healthz), a k8s liveness checking utility, to the list of shell spawners. [[#190](https://github.com/draios/falco/pull/190)]
|
||||
* Eliminate FPs related to weekly ubuntu cron jobs. [[#192](https://github.com/draios/falco/pull/192)]
|
||||
* Allow shells spawned by ansible, and eliminate FPs when managing machines via ansible. [[#193](https://github.com/draios/falco/pull/193)] [[#196](https://github.com/draios/falco/pull/196)] [[#202](https://github.com/draios/falco/pull/202)]
|
||||
* Eliminate FPs related to use of other security products. Thanks to @juju4 for the useful rule updates. [[#200](https://github.com/draios/falco/pull/200)]
|
||||
* Add additional possible locations for denyhosts, add [PM2](http://pm2.keymetrics.io/) as a shell spawner. [[#202](https://github.com/draios/falco/pull/202)]
|
||||
* Add flanneld as a privileged container, improve grouping for the "x running y" macros, allow denyhosts to spawn shells. [[#207](https://github.com/draios/falco/pull/207)]
|
||||
* Handle systemd changing its name to "(systemd)", add sv (part of [runit](http://smarden.org/runit/)) as a program that can write below /etc, allow writing to all `/dev/tty*` files. [[#209](https://github.com/draios/falco/pull/209)]
|
||||
* Add erl_child_setup as a shell spawner. Thanks to @dkerwin for the useful rule updates. [[#218](https://github.com/draios/falco/pull/218)] [[#221](https://github.com/draios/falco/pull/221)]
|
||||
* Add support for gitlab omnibus containers/pods. Thanks to @dkerwin for the useful rule updates. [[#220](https://github.com/draios/falco/pull/220)]
|
||||
|
||||
## v0.5.0
|
||||
|
||||
Released 2016-12-22
|
||||
|
||||
Starting with this release, we're adding a new section "Rule Changes" devoted to changes to the default ruleset `falco_rules.yaml`.
|
||||
|
||||
### Major Changes
|
||||
|
||||
* Cache event formatting objects so they are not re-created for every falco notification. This can result in significant speedups when the ruleset results in lots of notifications. [[#158](https://github.com/draios/falco/pull/158)]
|
||||
* Falco notifications are now throttled by a token bucket, preventing a flood of notifications when many events match a rule. Controlled by the `outputs, rate` and `outputs, max_burst` options. [[#161](https://github.com/draios/falco/pull/161)]
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* When run from a container, you can provide the environment variable `SYSDIG_SKIP_LOAD` to skip the process of building/loading the kernel module. Thanks @carlsverre for the fix. [[#145](https://github.com/draios/falco/pull/145)]
|
||||
* Fully implement `USE_BUNDLED_DEPS` within CMakeFiles so you can build with external third-party libraries. [[#147](https://github.com/draios/falco/pull/147)]
|
||||
* Improve error messages that result when trying to load a rule with a malformed `output:` attribute [[#150](https://github.com/draios/falco/pull/150)] [[#151](https://github.com/draios/falco/pull/151)]
|
||||
* Add the ability to write event capture statistics to a file via the `-s <statsfile>` option. [[#155](https://github.com/draios/falco/pull/155)]
|
||||
* New configuration option `log_level` controls the verbosity of falco's logging. [[#160](https://github.com/draios/falco/pull/160)]
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Improve compatibility with Sysdig Cloud Agent build [[#148](https://github.com/draios/falco/pull/148)]
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* Add DNF as non-alerting for RPM and package management. Thanks @djcross for the fix. [[#153](https://github.com/draios/falco/pull/153)]
|
||||
* Make `google_containers/kube-proxy` a trusted image, affecting the File Open by Privileged Container/Sensitive Mount by Container rules. [[#159](https://github.com/draios/falco/pull/159)]
|
||||
* Add fail2ban-server as a program that can spawn shells. Thanks @jcoetzee for the fix. [[#168](https://github.com/draios/falco/pull/168)]
|
||||
* Add systemd as a program that can access sensitive files. Thanks @jcoetzee for the fix. [[#169](https://github.com/draios/falco/pull/169)]
|
||||
* Add apt/apt-get as programs that can spawn shells. Thanks @jcoetzee for the fix. [[#170](https://github.com/draios/falco/pull/170)]
|
||||
|
||||
## v0.4.0
|
||||
|
||||
Released 2016-10-25
|
||||
|
||||
109
CMakeLists.txt
109
CMakeLists.txt
@@ -7,14 +7,16 @@ if(NOT DEFINED FALCO_VERSION)
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED FALCO_ETC_DIR)
|
||||
set(FALCO_ETC_DIR "/etc")
|
||||
set(FALCO_ETC_DIR "/etc/falco")
|
||||
endif()
|
||||
|
||||
if(NOT CMAKE_BUILD_TYPE)
|
||||
SET(CMAKE_BUILD_TYPE Release)
|
||||
endif()
|
||||
|
||||
set(DRAIOS_DEBUG_FLAGS "-D_DEBUG")
|
||||
if(NOT DRAIOS_DEBUG_FLAGS)
|
||||
set(DRAIOS_DEBUG_FLAGS "-D_DEBUG")
|
||||
endif()
|
||||
|
||||
set(CMAKE_C_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS "-Wall -ggdb --std=c++0x ${DRAIOS_FEATURE_FLAGS}")
|
||||
@@ -27,7 +29,9 @@ set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fno-strict-aliasing -DNDEBUG")
|
||||
|
||||
add_definitions(-DPLATFORM_NAME="${CMAKE_SYSTEM_NAME}")
|
||||
add_definitions(-DK8S_DISABLE_THREAD)
|
||||
add_definitions(-DHAS_CAPTURE)
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
add_definitions(-DHAS_CAPTURE)
|
||||
endif()
|
||||
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
set(KBUILD_FLAGS "${DRAIOS_DEBUG_FLAGS} ${DRAIOS_FEATURE_FLAGS}")
|
||||
@@ -37,13 +41,19 @@ endif()
|
||||
|
||||
set(PACKAGE_NAME "falco")
|
||||
set(PROBE_VERSION "${FALCO_VERSION}")
|
||||
set(PROBE_NAME "sysdig-probe")
|
||||
set(PROBE_DEVICE_NAME "sysdig")
|
||||
set(CMAKE_INSTALL_PREFIX /usr)
|
||||
set(PROBE_NAME "falco-probe")
|
||||
set(PROBE_DEVICE_NAME "falco")
|
||||
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
|
||||
set(CMAKE_INSTALL_PREFIX /usr CACHE PATH "Default install path" FORCE)
|
||||
endif()
|
||||
|
||||
set(CMD_MAKE make)
|
||||
|
||||
set(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig")
|
||||
# make luaJIT work on OS X
|
||||
if(APPLE)
|
||||
set(CMAKE_EXE_LINKER_FLAGS "-pagezero_size 10000 -image_base 100000000")
|
||||
endif()
|
||||
|
||||
include(ExternalProject)
|
||||
|
||||
@@ -51,7 +61,7 @@ option(USE_BUNDLED_DEPS "Enable bundled dependencies instead of using the system
|
||||
|
||||
#
|
||||
# zlib
|
||||
|
||||
#
|
||||
option(USE_BUNDLED_ZLIB "Enable building of the bundled zlib" ${USE_BUNDLED_DEPS})
|
||||
|
||||
if(NOT USE_BUNDLED_ZLIB)
|
||||
@@ -68,7 +78,7 @@ else()
|
||||
set(ZLIB_INCLUDE "${ZLIB_SRC}")
|
||||
set(ZLIB_LIB "${ZLIB_SRC}/libz.a")
|
||||
ExternalProject_Add(zlib
|
||||
URL "http://download.draios.com/dependencies/zlib-1.2.8.tar.gz"
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/zlib-1.2.8.tar.gz"
|
||||
URL_MD5 "44d667c142d7cda120332623eab69f40"
|
||||
CONFIGURE_COMMAND "./configure"
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
@@ -94,11 +104,12 @@ else()
|
||||
set(JQ_INCLUDE "${JQ_SRC}")
|
||||
set(JQ_LIB "${JQ_SRC}/.libs/libjq.a")
|
||||
ExternalProject_Add(jq
|
||||
URL "http://download.draios.com/dependencies/jq-1.5.tar.gz"
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/jq-1.5.tar.gz"
|
||||
URL_MD5 "0933532b086bd8b6a41c1b162b1731f9"
|
||||
CONFIGURE_COMMAND ./configure --disable-maintainer-mode --enable-all-static --disable-dependency-tracking
|
||||
BUILD_COMMAND ${CMD_MAKE} LDFLAGS=-all-static
|
||||
BUILD_IN_SOURCE 1
|
||||
PATCH_COMMAND wget -O jq-1.5-fix-tokenadd.patch https://github.com/stedolan/jq/commit/8eb1367ca44e772963e704a700ef72ae2e12babd.patch && patch -i jq-1.5-fix-tokenadd.patch
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
@@ -123,7 +134,7 @@ else()
|
||||
set(CURSES_LIBRARIES "${CURSES_BUNDLE_DIR}/lib/libncurses.a")
|
||||
message(STATUS "Using bundled ncurses in '${CURSES_BUNDLE_DIR}'")
|
||||
ExternalProject_Add(ncurses
|
||||
URL "http://download.draios.com/dependencies/ncurses-6.0-20150725.tgz"
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/ncurses-6.0-20150725.tgz"
|
||||
URL_MD5 "32b8913312e738d707ae68da439ca1f4"
|
||||
CONFIGURE_COMMAND ./configure --without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs --without-tests --with-terminfo-dirs=/etc/terminfo:/lib/terminfo:/usr/share/terminfo
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
@@ -150,7 +161,7 @@ else()
|
||||
set(B64_INCLUDE "${B64_SRC}/include")
|
||||
set(B64_LIB "${B64_SRC}/src/libb64.a")
|
||||
ExternalProject_Add(b64
|
||||
URL "http://download.draios.com/dependencies/libb64-1.2.src.zip"
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/libb64-1.2.src.zip"
|
||||
URL_MD5 "a609809408327117e2c643bed91b76c5"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
@@ -204,8 +215,8 @@ else()
|
||||
message(STATUS "Using bundled openssl in '${OPENSSL_BUNDLE_DIR}'")
|
||||
|
||||
ExternalProject_Add(openssl
|
||||
URL "http://download.draios.com/dependencies/openssl-1.0.2d.tar.gz"
|
||||
URL_MD5 "38dd619b2e77cbac69b99f52a053d25a"
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/openssl-1.0.2j.tar.gz"
|
||||
URL_MD5 "96322138f0b69e61b7212bc53d5e912b"
|
||||
CONFIGURE_COMMAND ./config shared --prefix=${OPENSSL_INSTALL_DIR}
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
@@ -235,9 +246,9 @@ else()
|
||||
|
||||
ExternalProject_Add(curl
|
||||
DEPENDS openssl
|
||||
URL "http://download.draios.com/dependencies/curl-7.45.0.tar.bz2"
|
||||
URL_MD5 "62c1a352b28558f25ba6209214beadc8"
|
||||
CONFIGURE_COMMAND ./configure ${CURL_SSL_OPTION} --disable-shared --enable-optimize --disable-curldebug --disable-rt --enable-http --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-sspi --disable-ntlm-wb --disable-tls-srp --without-winssl --without-darwinssl --without-polarssl --without-cyassl --without-nss --without-axtls --without-ca-path --without-ca-bundle --without-libmetalink --without-librtmp --without-winidn --without-libidn --without-nghttp2 --without-libssh2
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/curl-7.56.0.tar.bz2"
|
||||
URL_MD5 "e0caf257103e0c77cee5be7e9ac66ca4"
|
||||
CONFIGURE_COMMAND ./configure ${CURL_SSL_OPTION} --disable-shared --enable-optimize --disable-curldebug --disable-rt --enable-http --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-sspi --disable-ntlm-wb --disable-tls-srp --without-winssl --without-darwinssl --without-polarssl --without-cyassl --without-nss --without-axtls --without-ca-path --without-ca-bundle --without-libmetalink --without-librtmp --without-winidn --without-libidn --without-nghttp2 --without-libssh2 --disable-threaded-resolver
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND "")
|
||||
@@ -269,7 +280,7 @@ else()
|
||||
set(LUAJIT_INCLUDE "${LUAJIT_SRC}")
|
||||
set(LUAJIT_LIB "${LUAJIT_SRC}/libluajit.a")
|
||||
ExternalProject_Add(luajit
|
||||
URL "http://download.draios.com/dependencies/LuaJIT-2.0.3.tar.gz"
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/LuaJIT-2.0.3.tar.gz"
|
||||
URL_MD5 "f14e9104be513913810cd59c8c658dc0"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
@@ -292,14 +303,19 @@ if(NOT USE_BUNDLED_LPEG)
|
||||
else()
|
||||
set(LPEG_SRC "${PROJECT_BINARY_DIR}/lpeg-prefix/src/lpeg")
|
||||
set(LPEG_LIB "${PROJECT_BINARY_DIR}/lpeg-prefix/src/lpeg/build/lpeg.a")
|
||||
message(STATUS "Using bundled lpeg in '${LPEG_SRC}'")
|
||||
set(LPEG_DEPENDENCIES "")
|
||||
if(USE_BUNDLED_LUAJIT)
|
||||
list(APPEND LPEG_DEPENDENCIES "luajit")
|
||||
endif()
|
||||
ExternalProject_Add(lpeg
|
||||
DEPENDS luajit
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/lpeg-1.0.0.tar.gz"
|
||||
URL_MD5 "0aec64ccd13996202ad0c099e2877ece"
|
||||
BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} "${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh" "${LPEG_SRC}/build"
|
||||
BUILD_IN_SOURCE 1
|
||||
DEPENDS ${LPEG_DEPENDENCIES}
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/lpeg-1.0.0.tar.gz"
|
||||
URL_MD5 "0aec64ccd13996202ad0c099e2877ece"
|
||||
BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} "${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh" "${LPEG_SRC}/build"
|
||||
BUILD_IN_SOURCE 1
|
||||
CONFIGURE_COMMAND ""
|
||||
INSTALL_COMMAND "")
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
#
|
||||
@@ -318,15 +334,23 @@ if(NOT USE_BUNDLED_LIBYAML)
|
||||
message(FATAL_ERROR "Couldn't find system libyaml")
|
||||
endif()
|
||||
else()
|
||||
find_path(AUTORECONF_BIN NAMES autoreconf)
|
||||
if(AUTORECONF_BIN)
|
||||
message(STATUS "Found autoreconf: ${AUTORECONF_BIN}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system autoreconf. Please install autoreconf before continuing or use system libyaml")
|
||||
endif()
|
||||
|
||||
set(LIBYAML_SRC "${PROJECT_BINARY_DIR}/libyaml-prefix/src/libyaml/src")
|
||||
set(LIBYAML_LIB "${LIBYAML_SRC}/.libs/libyaml.a")
|
||||
message(STATUS "Using bundled libyaml in '${LIBYAML_SRC}'")
|
||||
ExternalProject_Add(libyaml
|
||||
URL "http://download.draios.com/dependencies/libyaml-0.1.4.tar.gz"
|
||||
URL_MD5 "4a4bced818da0b9ae7fc8ebc690792a7"
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/libyaml-0.1.4.tar.gz"
|
||||
URL_MD5 "4a4bced818da0b9ae7fc8ebc690792a7"
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
CONFIGURE_COMMAND ./bootstrap && ./configure
|
||||
INSTALL_COMMAND "")
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
#
|
||||
@@ -347,8 +371,17 @@ if(NOT USE_BUNDLED_LYAML)
|
||||
else()
|
||||
set(LYAML_SRC "${PROJECT_BINARY_DIR}/lyaml-prefix/src/lyaml/ext/yaml")
|
||||
set(LYAML_LIB "${LYAML_SRC}/.libs/yaml.a")
|
||||
message(STATUS "Using bundled lyaml in '${LYAML_SRC}'")
|
||||
set(LYAML_DEPENDENCIES "")
|
||||
if(USE_BUNDLED_LUAJIT)
|
||||
list(APPEND LYAML_DEPENDENCIES "luajit")
|
||||
endif()
|
||||
if(USE_BUNDLED_LIBYAML)
|
||||
list(APPEND LYAML_DEPENDENCIES "libyaml")
|
||||
endif()
|
||||
ExternalProject_Add(lyaml
|
||||
URL "http://download.draios.com/dependencies/lyaml-release-v6.0.tar.gz"
|
||||
DEPENDS ${LYAML_DEPENDENCIES}
|
||||
URL "https://s3.amazonaws.com/download.draios.com/dependencies/lyaml-release-v6.0.tar.gz"
|
||||
URL_MD5 "dc3494689a0dce7cf44e7a99c72b1f30"
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
@@ -359,13 +392,19 @@ endif()
|
||||
install(FILES falco.yaml
|
||||
DESTINATION "${FALCO_ETC_DIR}")
|
||||
|
||||
add_subdirectory("${SYSDIG_DIR}/driver" "${PROJECT_BINARY_DIR}/driver")
|
||||
add_subdirectory(rules)
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
add_subdirectory("${SYSDIG_DIR}/driver" "${PROJECT_BINARY_DIR}/driver")
|
||||
endif()
|
||||
add_subdirectory("${SYSDIG_DIR}/userspace/libscap" "${PROJECT_BINARY_DIR}/userspace/libscap")
|
||||
add_subdirectory("${SYSDIG_DIR}/userspace/libsinsp" "${PROJECT_BINARY_DIR}/userspace/libsinsp")
|
||||
|
||||
add_subdirectory(scripts)
|
||||
set(FALCO_SINSP_LIBRARY sinsp)
|
||||
set(FALCO_SHARE_DIR ${CMAKE_INSTALL_PREFIX}/share/falco)
|
||||
set(FALCO_SHARE_DIR share/falco)
|
||||
set(FALCO_ABSOLUTE_SHARE_DIR "${CMAKE_INSTALL_PREFIX}/${FALCO_SHARE_DIR}")
|
||||
set(FALCO_BIN_DIR bin)
|
||||
add_subdirectory(scripts)
|
||||
add_subdirectory(userspace/engine)
|
||||
add_subdirectory(userspace/falco)
|
||||
|
||||
@@ -385,12 +424,12 @@ set(CPACK_GENERATOR DEB RPM TGZ)
|
||||
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Sysdig <support@sysdig.com>")
|
||||
set(CPACK_DEBIAN_PACKAGE_SECTION "utils")
|
||||
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "http://www.sysdig.org")
|
||||
set(CPACK_DEBIAN_PACKAGE_DEPENDS "sysdig")
|
||||
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/scripts/debian/postinst;${PROJECT_SOURCE_DIR}/scripts/debian/prerm;${PROJECT_SOURCE_DIR}/scripts/debian/postrm")
|
||||
set(CPACK_DEBIAN_PACKAGE_DEPENDS "dkms (>= 2.1.0.0)")
|
||||
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_BINARY_DIR}/scripts/debian/postinst;${CMAKE_BINARY_DIR}/scripts/debian/prerm;${PROJECT_SOURCE_DIR}/scripts/debian/postrm;${PROJECT_SOURCE_DIR}/cpack/debian/conffiles")
|
||||
|
||||
set(CPACK_RPM_PACKAGE_LICENSE "GPLv2")
|
||||
set(CPACK_RPM_PACKAGE_URL "http://www.sysdig.org")
|
||||
set(CPACK_RPM_PACKAGE_REQUIRES "sysdig")
|
||||
set(CPACK_RPM_PACKAGE_REQUIRES "dkms, gcc, make, kernel-devel, perl")
|
||||
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/scripts/rpm/postinstall")
|
||||
set(CPACK_RPM_PRE_UNINSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/scripts/rpm/preuninstall")
|
||||
set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/scripts/rpm/postuninstall")
|
||||
|
||||
38
README.md
38
README.md
@@ -1,8 +1,8 @@
|
||||
# Sysdig Falco
|
||||
|
||||
####Latest release
|
||||
#### Latest release
|
||||
|
||||
**v0.4.0**
|
||||
**v0.8.1**
|
||||
Read the [change log](https://github.com/draios/falco/blob/dev/CHANGELOG.md)
|
||||
|
||||
Dev Branch: [](https://travis-ci.org/draios/falco)<br />
|
||||
@@ -22,16 +22,20 @@ Falco can detect and alert on any behavior that involves making Linux system cal
|
||||
- A non-device file is written to `/dev`
|
||||
- A standard system binary (like `ls`) makes an outbound network connection
|
||||
|
||||
#### How Falco Compares to Other Security Tools like SELinux, Auditd, etc.
|
||||
|
||||
One of the questions we often get when we talk about Sysdig Falco is “How does it compare to other tools like SELinux, AppArmor, Auditd, etc. that also have security policies?”. We wrote a [blog post](https://sysdig.com/blog/selinux-seccomp-falco-technical-discussion/) comparing Falco to other tools.
|
||||
|
||||
|
||||
Documentation
|
||||
---
|
||||
[Visit the wiki] (https://github.com/draios/falco/wiki) for full documentation on falco.
|
||||
[Visit the wiki](https://github.com/draios/falco/wiki) for full documentation on falco.
|
||||
|
||||
Join the Community
|
||||
---
|
||||
* Contact the [official mailing list] (https://groups.google.com/forum/#!forum/falco) for support and to talk with other users.
|
||||
* Follow us on [Twitter] (https://twitter.com/sysdig) for general falco and sysdig news.
|
||||
* This is our [blog] (https://sysdig.com/blog/), where you can find the latest [falco](https://sysdig.com/blog/tag/falco/) posts.
|
||||
* Join our [Public Slack](https://sysdig.slack.com) channel for sysdig and falco announcements and discussions.
|
||||
* Follow us on [Twitter](https://twitter.com/sysdig) for general falco and sysdig news.
|
||||
* This is our [blog](https://sysdig.com/blog/), where you can find the latest [falco](https://sysdig.com/blog/tag/falco/) posts.
|
||||
* Join our [Public Slack](https://slack.sysdig.com) channel for sysdig and falco announcements and discussions.
|
||||
|
||||
License Terms
|
||||
---
|
||||
@@ -39,7 +43,7 @@ Falco is licensed to you under the [GPL 2.0](./COPYING) open source license.
|
||||
|
||||
Contributor License Agreements
|
||||
---
|
||||
###Background
|
||||
### Background
|
||||
As we did for sysdig, we are formalizing the way that we accept contributions of code from the contributing community. We must now ask that contributions to falco be provided subject to the terms and conditions of a [Contributor License Agreement (CLA)](./cla). The CLA comes in two forms, applicable to contributions by individuals, or by legal entities such as corporations and their employees. We recognize that entering into a CLA with us involves real consideration on your part, and we’ve tried to make this process as clear and simple as possible.
|
||||
|
||||
We’ve modeled our CLA off of industry standards, such as [the CLA used by Kubernetes](https://github.com/kubernetes/kubernetes/blob/master/CONTRIBUTING.md). Note that this agreement is not a transfer of copyright ownership, this simply is a license agreement for contributions, intended to clarify the intellectual property license granted with contributions from any person or entity. It is for your protection as a contributor as well as the protection of falco; it does not change your rights to use your own contributions for any other purpose.
|
||||
@@ -52,19 +56,31 @@ Contributor License Agreements
|
||||
|
||||
As always, we are grateful for your past and present contributions to falco.
|
||||
|
||||
###What do I need to do in order to contribute code?
|
||||
### What do I need to do in order to contribute code?
|
||||
|
||||
**Individual contributions**: Individuals who wish to make contributions must review the [Individual Contributor License Agreement](./cla/falco_contributor_agreement.txt) and indicate agreement by adding the following line to every GIT commit message:
|
||||
|
||||
```
|
||||
falco-CLA-1.0-signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
```
|
||||
|
||||
Use your real name; pseudonyms or anonymous contributions are not allowed.
|
||||
|
||||
**Corporate contributions**: Employees of corporations, members of LLCs or LLPs, or others acting on behalf of a contributing entity, must review the [Corporate Contributor License Agreement](./cla/falco_corp_contributor_agreement.txt), must be an authorized representative of the contributing entity, and indicate agreement to it on behalf of the contributing entity by adding the following lines to every GIT commit message:
|
||||
|
||||
```
|
||||
falco-CLA-1.0-contributing-entity: Full Legal Name of Entity
|
||||
falco-CLA-1.0-signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
falco-CLA-1.0-contributing-entity: Full Legal Name of Entity
|
||||
falco-CLA-1.0-signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
```
|
||||
|
||||
Use a real name of a natural person who is an authorized representative of the contributing entity; pseudonyms or anonymous contributions are not allowed.
|
||||
|
||||
**Government contributions**: Employees or officers of the United States Government, must review the [Government Contributor License Agreement](https://github.com/draios/falco/blob/dev/cla/falco_govt_contributor_agreement.txt), must be an authorized representative of the contributing entity, and indicate agreement to it on behalf of the contributing entity by adding the following lines to every GIT commit message:
|
||||
|
||||
```
|
||||
falco-CLA-1.0-contributing-govt-entity: Full Legal Name of Entity
|
||||
falco-CLA-1.0-signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
This file is a work of authorship of an employee or officer of the United States Government and is not subject to copyright in the United States under 17 USC 105.
|
||||
```
|
||||
|
||||
Use a real name of a natural person who is an authorized representative of the contributing entity; pseudonyms or anonymous contributions are not allowed.
|
||||
|
||||
33
cla/falco_govt_contributor_agreement.txt
Normal file
33
cla/falco_govt_contributor_agreement.txt
Normal file
@@ -0,0 +1,33 @@
|
||||
DRAIOS, INC. <20> OPEN SOURCE CONTRIBUTION AGREEMENT FOR UNITED STATES GOVERNMENT CONTRIBUTING ENTITIES (<28>Agreement<6E>)
|
||||
|
||||
Draios, Inc. (<28>Draios<6F> or <20>Sysdig<69>) welcomes the work of others on our open source software projects. To contribute code back to our repositories, we require a contributing entity that is a United States Government agency to complete, and agree to, the Government Contributor Agreement (GCA) set forth here, by and through a designated authorized representative. This agreement clarifies the ability for us to use and incorporate the contributions of a government contributing entity in our projects and products. After agreeing to these terms, a contributing entity may contribute to our projects. To indicate the agreement of the contributing entity, an authorized representative shall follow the procedure set forth below under TO AGREE, after reading this Agreement. A <20>contributing entity<74> means any agency or unit of the United States government. We provide a separate CLA for individual contributors.
|
||||
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions that are submitted to Draios/Sysdig.
|
||||
|
||||
1. Definitions. "You" (or "Your") shall mean the contributing entity that has authored or otherwise has the right to contribute the Contribution, and that is making this Agreement with Draios/Sysdig. "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to Draios/Sysdig for inclusion in, or documentation of, any of the products owned or managed by Draios/Sysdig (the "Work"). For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to Draios/Sysdig or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, Draios/Sysdig for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
|
||||
|
||||
2. Contributions Not Subject to Copyright. Each Contribution is a work authored by the United States Government or an employee or officer thereof and is not subject to copyright under 17 U.S.C. 105.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of this Agreement, You hereby grant to Draios/Sysdig and to recipients of software distributed by Draios/Sysdig a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims that You have the right to license and that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity other than Draios/Sysdig institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. You represent to Draios/Sysdig that You own or have the right to contribute Your Contributions to Draios/Sysdig, and that You are legally entitled to grant the license set forth above.
|
||||
|
||||
5. You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which You are personally aware and which are associated with any part of Your Contributions. You represent that Your sign-off indicating assent to this Agreement includes the real name of a natural person who is an authorized representative of You, and not a pseudonym, and that You are not attempting or making an anonymous Contribution.
|
||||
|
||||
6. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions to Draios/Sysdig on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON- INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
7. If You wish to submit work that is not Your original creation, You may submit it to Draios/Sysdig separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which You are aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]".
|
||||
|
||||
8. You agree to notify Draios/Sysdig of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.
|
||||
|
||||
9. You understand and agree that this project and Your Contribution are public and that a record of the contribution, including all personal information that You submit with it, including the sign-off of Your authorized representative, may be stored by Draios/Sysdig indefinitely and may be redistributed to others. You understand and agree that Draios/Sysdig has no obligation to use any Contribution in any Draios/Sysdig project or product, and Draios/Sysdig may decline to accept Your Contributions or Draios/Sysdig may remove Your Contributions from Draios/Sysdig projects or products at any time without notice. You understand and agree that Draios/Sysdig is not and will not pay You any form of compensation, in currency, equity or otherwise, in exchange for Your Contributions or for Your assent to this Agreement. You understand and agree that You are independent of Draios/Sysdig and You are not, by entering into this Agreement or providing Your Contributions, becoming employed, hired as an independent contractor, or forming any other relationship with Draios/Sysdig relating to employment, compensation or ownership or involving any fiduciary obligation.
|
||||
|
||||
TO AGREE:
|
||||
Add the following lines to every GIT commit message:
|
||||
|
||||
falco-CLA-1.0-contributing-govt-entity: Full Legal Name of Entity
|
||||
falco-CLA-1.0-signed-off-by: Joe Smith joe.smith@email.com
|
||||
This file is a work of authorship of an employee or officer of the United States Government and is not subject to copyright in the United States under 17 USC 105.
|
||||
|
||||
Use a real name of a natural person who is an authorized representative of the contributing entity; pseudonyms or anonymous contributions are not allowed.
|
||||
|
||||
4
cpack/debian/conffiles
Normal file
4
cpack/debian/conffiles
Normal file
@@ -0,0 +1,4 @@
|
||||
/etc/falco/falco.yaml
|
||||
/etc/falco/falco_rules.yaml
|
||||
/etc/falco/application_rules.yaml
|
||||
/etc/falco/falco_rules.local.yaml
|
||||
@@ -26,10 +26,10 @@ RUN echo "deb http://httpredir.debian.org/debian jessie main" > /etc/apt/sources
|
||||
gcc-5 \
|
||||
gcc-4.9 && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Terribly terrible hacks: since our base Debian image ships with GCC 5.0 which breaks older kernels,
|
||||
# revert the default to gcc-4.9. Also, since some customers use some very old distributions whose kernel
|
||||
# makefile is hardcoded for gcc-4.6 or so (e.g. Debian Wheezy), we pretend to have gcc 4.6/4.7 by symlinking
|
||||
# it to 4.9
|
||||
# Since our base Debian image ships with GCC 5.0 which breaks older kernels, revert the
|
||||
# default to gcc-4.9. Also, since some customers use some very old distributions whose kernel
|
||||
# makefile is hardcoded for gcc-4.6 or so (e.g. Debian Wheezy), we pretend to have gcc 4.6/4.7
|
||||
# by symlinking it to 4.9
|
||||
|
||||
RUN rm -rf /usr/bin/gcc \
|
||||
&& ln -s /usr/bin/gcc-4.9 /usr/bin/gcc \
|
||||
|
||||
@@ -11,7 +11,7 @@ if [[ -z "${SYSDIG_SKIP_LOAD}" ]]; then
|
||||
ln -s $SYSDIG_HOST_ROOT/usr/src/$i /usr/src/$i
|
||||
done
|
||||
|
||||
/usr/bin/sysdig-probe-loader
|
||||
/usr/bin/falco-probe-loader
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
|
||||
@@ -50,6 +50,8 @@ void usage(char *program)
|
||||
printf(" then read a sensitive file\n");
|
||||
printf(" write_rpm_database Write to files below /var/lib/rpm\n");
|
||||
printf(" spawn_shell Run a shell (bash)\n");
|
||||
printf(" Used by spawn_shell_under_httpd below\n");
|
||||
printf(" spawn_shell_under_httpd Run a shell (bash) under a httpd process\n");
|
||||
printf(" db_program_spawn_process As a database program, try to spawn\n");
|
||||
printf(" another program\n");
|
||||
printf(" modify_binary_dirs Modify a file below /bin\n");
|
||||
@@ -64,7 +66,7 @@ void usage(char *program)
|
||||
printf(" non_sudo_setuid Setuid as a non-root user\n");
|
||||
printf(" create_files_below_dev Create files below /dev\n");
|
||||
printf(" exec_ls execve() the program ls\n");
|
||||
printf(" (used by user_mgmt_binaries below)\n");
|
||||
printf(" (used by user_mgmt_binaries, db_program_spawn_process)\n");
|
||||
printf(" user_mgmt_binaries Become the program \"vipw\", which triggers\n");
|
||||
printf(" rules related to user management programs\n");
|
||||
printf(" exfiltration Read /etc/shadow and send it via udp to a\n");
|
||||
@@ -97,6 +99,8 @@ void exfiltration()
|
||||
|
||||
shadow.open("/etc/shadow");
|
||||
|
||||
printf("Reading /etc/shadow and sending to 10.5.2.6:8197...\n");
|
||||
|
||||
if(!shadow.is_open())
|
||||
{
|
||||
fprintf(stderr, "Could not open /etc/shadow for reading: %s", strerror(errno));
|
||||
@@ -219,7 +223,7 @@ void write_rpm_database() {
|
||||
}
|
||||
|
||||
void spawn_shell() {
|
||||
printf("Spawning a shell using system()...\n");
|
||||
printf("Spawning a shell to run \"ls > /dev/null\" using system()...\n");
|
||||
int rc;
|
||||
|
||||
if ((rc = system("ls > /dev/null")) != 0)
|
||||
@@ -228,9 +232,14 @@ void spawn_shell() {
|
||||
}
|
||||
}
|
||||
|
||||
void spawn_shell_under_httpd() {
|
||||
printf("Becoming the program \"httpd\" and then spawning a shell\n");
|
||||
respawn("./httpd", "spawn_shell", "0");
|
||||
}
|
||||
|
||||
void db_program_spawn_process() {
|
||||
printf("Becoming the program \"mysql\" and then spawning a shell\n");
|
||||
respawn("./mysqld", "spawn_shell", "0");
|
||||
printf("Becoming the program \"mysql\" and then running ls\n");
|
||||
respawn("./mysqld", "exec_ls", "0");
|
||||
}
|
||||
|
||||
void modify_binary_dirs() {
|
||||
@@ -259,6 +268,7 @@ void mkdir_binary_dirs() {
|
||||
|
||||
void change_thread_namespace() {
|
||||
printf("Calling setns() to change namespaces...\n");
|
||||
printf("NOTE: does not result in a falco notification in containers, unless container run with --privileged or --security-opt seccomp=unconfined\n");
|
||||
// It doesn't matter that the arguments to setns are
|
||||
// bogus. It's the attempt to call it that will trigger the
|
||||
// rule.
|
||||
@@ -268,6 +278,7 @@ void change_thread_namespace() {
|
||||
void system_user_interactive() {
|
||||
pid_t child;
|
||||
|
||||
printf("Forking a child that becomes user=daemon and then tries to run /bin/login...\n");
|
||||
// Fork a child and do everything in the child.
|
||||
if ((child = fork()) == 0)
|
||||
{
|
||||
@@ -313,6 +324,8 @@ void system_procs_network_activity() {
|
||||
void non_sudo_setuid() {
|
||||
pid_t child;
|
||||
|
||||
printf("Forking a child that becomes \"daemon\" user and then \"root\"...\n");
|
||||
|
||||
// Fork a child and do everything in the child.
|
||||
if ((child = fork()) == 0)
|
||||
{
|
||||
@@ -354,6 +367,7 @@ map<string, action_t> defined_actions = {{"write_binary_dir", write_binary_dir},
|
||||
{"read_sensitive_file_after_startup", read_sensitive_file_after_startup},
|
||||
{"write_rpm_database", write_rpm_database},
|
||||
{"spawn_shell", spawn_shell},
|
||||
{"spawn_shell_under_httpd", spawn_shell_under_httpd},
|
||||
{"db_program_spawn_process", db_program_spawn_process},
|
||||
{"modify_binary_dirs", modify_binary_dirs},
|
||||
{"mkdir_binary_dirs", mkdir_binary_dirs},
|
||||
@@ -367,6 +381,9 @@ map<string, action_t> defined_actions = {{"write_binary_dir", write_binary_dir},
|
||||
{"user_mgmt_binaries", user_mgmt_binaries},
|
||||
{"exfiltration", exfiltration}};
|
||||
|
||||
// Some actions don't directly result in suspicious behavior. These
|
||||
// actions are excluded from the ones run with -a all.
|
||||
set<string> exclude_from_all_actions = {"spawn_shell", "exec_ls", "network_activity"};
|
||||
|
||||
void create_symlinks(const char *program)
|
||||
{
|
||||
@@ -394,9 +411,9 @@ void run_actions(map<string, action_t> &actions, int interval, bool once)
|
||||
{
|
||||
for (auto action : actions)
|
||||
{
|
||||
sleep(interval);
|
||||
printf("***Action %s\n", action.first.c_str());
|
||||
action.second();
|
||||
sleep(interval);
|
||||
}
|
||||
if(once)
|
||||
{
|
||||
@@ -428,7 +445,7 @@ int main(int argc, char **argv)
|
||||
// Parse the args
|
||||
//
|
||||
while((op = getopt_long(argc, argv,
|
||||
"ha:i:l:",
|
||||
"ha:i:l:o",
|
||||
long_options, &long_index)) != -1)
|
||||
{
|
||||
switch(op)
|
||||
@@ -437,12 +454,16 @@ int main(int argc, char **argv)
|
||||
usage(argv[0]);
|
||||
exit(1);
|
||||
case 'a':
|
||||
if((it = defined_actions.find(optarg)) == defined_actions.end())
|
||||
// "all" is already implied
|
||||
if (strcmp(optarg, "all") != 0)
|
||||
{
|
||||
fprintf(stderr, "No action with name \"%s\" known, exiting.\n", optarg);
|
||||
exit(1);
|
||||
if((it = defined_actions.find(optarg)) == defined_actions.end())
|
||||
{
|
||||
fprintf(stderr, "No action with name \"%s\" known, exiting.\n", optarg);
|
||||
exit(1);
|
||||
}
|
||||
actions.insert(*it);
|
||||
}
|
||||
actions.insert(*it);
|
||||
break;
|
||||
case 'i':
|
||||
interval = atoi(optarg);
|
||||
@@ -482,7 +503,13 @@ int main(int argc, char **argv)
|
||||
|
||||
if(actions.size() == 0)
|
||||
{
|
||||
actions = defined_actions;
|
||||
for(auto &act : defined_actions)
|
||||
{
|
||||
if(exclude_from_all_actions.find(act.first) == exclude_from_all_actions.end())
|
||||
{
|
||||
actions.insert(act);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
setvbuf(stdout, NULL, _IONBF, 0);
|
||||
|
||||
50
docker/local/Dockerfile
Normal file
50
docker/local/Dockerfile
Normal file
@@ -0,0 +1,50 @@
|
||||
FROM debian:unstable
|
||||
|
||||
MAINTAINER Sysdig <support@sysdig.com>
|
||||
|
||||
ENV FALCO_VERSION 0.1.1dev
|
||||
|
||||
LABEL RUN="docker run -i -t -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro --name NAME IMAGE"
|
||||
|
||||
ENV SYSDIG_HOST_ROOT /host
|
||||
|
||||
ENV HOME /root
|
||||
|
||||
RUN cp /etc/skel/.bashrc /root && cp /etc/skel/.profile /root
|
||||
|
||||
ADD http://download.draios.com/apt-draios-priority /etc/apt/preferences.d/
|
||||
|
||||
RUN echo "deb http://httpredir.debian.org/debian jessie main" > /etc/apt/sources.list.d/jessie.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
bash-completion \
|
||||
curl \
|
||||
jq \
|
||||
gnupg2 \
|
||||
ca-certificates \
|
||||
gcc \
|
||||
gcc-5 \
|
||||
gcc-4.9 \
|
||||
dkms && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Since our base Debian image ships with GCC 5.0 which breaks older kernels, revert the
|
||||
# default to gcc-4.9. Also, since some customers use some very old distributions whose kernel
|
||||
# makefile is hardcoded for gcc-4.6 or so (e.g. Debian Wheezy), we pretend to have gcc 4.6/4.7
|
||||
# by symlinking it to 4.9
|
||||
|
||||
RUN rm -rf /usr/bin/gcc \
|
||||
&& ln -s /usr/bin/gcc-4.9 /usr/bin/gcc \
|
||||
&& ln -s /usr/bin/gcc-4.9 /usr/bin/gcc-4.8 \
|
||||
&& ln -s /usr/bin/gcc-4.9 /usr/bin/gcc-4.7 \
|
||||
&& ln -s /usr/bin/gcc-4.9 /usr/bin/gcc-4.6
|
||||
|
||||
RUN ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
|
||||
ADD falco-${FALCO_VERSION}-x86_64.deb /
|
||||
RUN dpkg -i /falco-${FALCO_VERSION}-x86_64.deb
|
||||
|
||||
COPY ./docker-entrypoint.sh /
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
CMD ["/usr/bin/falco"]
|
||||
17
docker/local/docker-entrypoint.sh
Executable file
17
docker/local/docker-entrypoint.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
#set -e
|
||||
|
||||
# Set the SYSDIG_SKIP_LOAD variable to skip loading the sysdig kernel module
|
||||
|
||||
if [[ -z "${SYSDIG_SKIP_LOAD}" ]]; then
|
||||
echo "* Setting up /usr/src links from host"
|
||||
|
||||
for i in $(ls $SYSDIG_HOST_ROOT/usr/src)
|
||||
do
|
||||
ln -s $SYSDIG_HOST_ROOT/usr/src/$i /usr/src/$i
|
||||
done
|
||||
|
||||
/usr/bin/falco-probe-loader
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
@@ -26,10 +26,10 @@ RUN echo "deb http://httpredir.debian.org/debian jessie main" > /etc/apt/sources
|
||||
gcc-5 \
|
||||
gcc-4.9 && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Terribly terrible hacks: since our base Debian image ships with GCC 5.0 which breaks older kernels,
|
||||
# revert the default to gcc-4.9. Also, since some customers use some very old distributions whose kernel
|
||||
# makefile is hardcoded for gcc-4.6 or so (e.g. Debian Wheezy), we pretend to have gcc 4.6/4.7 by symlinking
|
||||
# it to 4.9
|
||||
# Since our base Debian image ships with GCC 5.0 which breaks older kernels, revert the
|
||||
# default to gcc-4.9. Also, since some customers use some very old distributions whose kernel
|
||||
# makefile is hardcoded for gcc-4.6 or so (e.g. Debian Wheezy), we pretend to have gcc 4.6/4.7
|
||||
# by symlinking it to 4.9
|
||||
|
||||
RUN rm -rf /usr/bin/gcc \
|
||||
&& ln -s /usr/bin/gcc-4.9 /usr/bin/gcc \
|
||||
|
||||
@@ -11,7 +11,7 @@ if [[ -z "${SYSDIG_SKIP_LOAD}" ]]; then
|
||||
ln -s $SYSDIG_HOST_ROOT/usr/src/$i /usr/src/$i
|
||||
done
|
||||
|
||||
/usr/bin/sysdig-probe-loader
|
||||
/usr/bin/falco-probe-loader
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
|
||||
92
examples/k8s-using-daemonset/README.md
Normal file
92
examples/k8s-using-daemonset/README.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Example Kubernetes Daemon Sets for Sysdig Falco
|
||||
|
||||
This directory gives you the required YAML files to stand up Sysdig Falco on Kubernetes as a Daemon Set. This will result in a Falco Pod being deployed to each node, and thus the ability to monitor any running containers for abnormal behavior.
|
||||
|
||||
The two options are provided to deploy a Daemon Set:
|
||||
- `k8s-with-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes with RBAC enabled.
|
||||
- `k8s-without-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes without RBAC enabled.
|
||||
|
||||
Also provided:
|
||||
- `falco-event-generator-deployment.yaml` - A Kubernetes Deployment to generate sample events. This is useful for testing, but note it will generate a large number of events.
|
||||
|
||||
## Deploying to Kubernetes with RBAC enabled
|
||||
|
||||
Since v1.8 RBAC has been available in Kubernetes, and running with RBAC enabled is considered the best practice. The `k8s-with-rbac` directory provides the YAML to create a Service Account for Falco, as well as the ClusterRoles and bindings to grant the appropriate permissions to the Service Account.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl create -f k8s-with-rbac/falco-account.yaml
|
||||
serviceaccount "falco-account" created
|
||||
clusterrole "falco-cluster-role" created
|
||||
clusterrolebinding "falco-cluster-role-binding" created
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
|
||||
The Daemon Set also relies on a Kubernetes ConfigMap to store the Falco configuration and make the configuration available to the Falco Pods. This allows you to manage custom configuration without rebuilding and redeploying the underlying Pods. In order to create the ConfigMap you'll need to first need to copy the required configuration from their location in this GitHub repo to the `k8s-with-rbac/falco-config/` directory. Any modification of the configuration should be performed on these copies rather than the original files.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ cp ../../falco.yaml k8s-with-rbac/falco-config/
|
||||
k8s-using-daemonset$ cp ../../rules/falco_rules.* k8s-with-rbac/falco-config/
|
||||
```
|
||||
|
||||
If you want to send Falco alerts to a Slack channel, you'll want to modify the `falco.yaml` file to point to your Slack webhook. For more information on getting a webhook URL for your Slack team, refer to the [Slack documentation](https://api.slack.com/incoming-webhooks). Add the below to the bottom of the `falco.yaml` config file you just copied to enable Slack messages.
|
||||
|
||||
```
|
||||
program_output:
|
||||
enabled: true
|
||||
keep_alive: false
|
||||
program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/see_your_slack_team/apps_settings_for/a_webhook_url"
|
||||
```
|
||||
|
||||
You will also need to enable JSON output. Find the `json_output: false` setting in the `falco.yaml` file and change it to read `json_output: true`. Any custom rules for your environment can be added to into the `falco_rules.local.yaml` file and they will be picked up by Falco at start time. You can now create the ConfigMap in Kubernetes.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl create configmap falco-config --from-file=k8s-with-rbac/falco-config
|
||||
configmap "falco-config" created
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
|
||||
Now that we have the requirements for our Daemon Set in place, we can create our Daemon Set.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl create -f k8s-with-rbac/falco-daemonset-configmap.yaml
|
||||
daemonset "falco" created
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
|
||||
|
||||
## Deploying to Kubernetes without RBAC enabled
|
||||
|
||||
If you are running Kubernetes with Legacy Authorization enabled, you can use `kubectl` to deploy the Daemon Set provided in the `k8s-without-rbac` directory. The example provides the ability to post messages to a Slack channel via a webhook. For more information on getting a webhook URL for your Slack team, refer to the [Slack documentation](https://api.slack.com/incoming-webhooks). Modify the [`args`](https://github.com/draios/falco/blob/dev/examples/k8s-using-daemonset/falco-daemonset.yaml#L21) passed to the Falco container to point to the appropriate URL for your webhook.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl create -f k8s-without-rbac/falco-daemonset.yaml
|
||||
```
|
||||
|
||||
|
||||
## Verifying the installation
|
||||
|
||||
In order to test that Falco is working correctly, you can launch a shell in a Pod. You should see a message in your Slack channel (if configured), or in the logs of the Falco pod.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
falco-74htl 1/1 Running 0 13h
|
||||
falco-fqz2m 1/1 Running 0 13h
|
||||
falco-sgjfx 1/1 Running 0 13h
|
||||
k8s-using-daemonset$ kubectl exec -it falco-74htl bash
|
||||
root@falco-74htl:/# exit
|
||||
k8s-using-daemonset$ kubectl logs falco-74htl
|
||||
{"output":"17:48:58.590038385: Notice A shell was spawned in a container with an attached terminal (user=root k8s.pod=falco-74htl container=a98c2aa8e670 shell=bash parent=<NA> cmdline=bash terminal=34816)","priority":"Notice","rule":"Terminal shell in container","time":"2017-12-20T17:48:58.590038385Z", "output_fields": {"container.id":"a98c2aa8e670","evt.time":1513792138590038385,"k8s.pod.name":"falco-74htl","proc.cmdline":"bash ","proc.name":"bash","proc.pname":null,"proc.tty":34816,"user.name":"root"}}
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
|
||||
Alternatively, you can deploy the [Falco Event Generator](https://github.com/draios/falco/wiki/Generating-Sample-Events) deployement to have events automatically generated. Please note that this Deployment will generate a large number of events.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl create -f falco-event-generator-deployment.yaml \
|
||||
&& sleep 1 \
|
||||
&& kubectl delete -f falco-event-generator-deployment.yaml
|
||||
deployment "falco-event-generator-deployment" created
|
||||
deployment "falco-event-generator-deployment" deleted
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
@@ -0,0 +1,17 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: falco-event-generator-deployment
|
||||
labels:
|
||||
name: falco-event-generator-deployment
|
||||
app: demo
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: falco-event-generator
|
||||
spec:
|
||||
containers:
|
||||
- name: falco-event-generator
|
||||
image: sysdig/falco-event-generator:latest
|
||||
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: falco-account
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: falco-cluster-role
|
||||
rules:
|
||||
- apiGroups: ["extensions",""]
|
||||
resources: ["nodes","namespaces","pods","replicationcontrollers","services","events","configmaps"]
|
||||
verbs: ["get","list","watch"]
|
||||
- nonResourceURLs: ["/healthz", "/healthz/*"]
|
||||
verbs: ["get"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: falco-cluster-role-binding
|
||||
namespace: default
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: falco-account
|
||||
namespace: default
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: falco-cluster-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -0,0 +1,65 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: falco
|
||||
labels:
|
||||
name: falco-daemonset
|
||||
app: demo
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: falco
|
||||
app: demo
|
||||
role: security
|
||||
spec:
|
||||
serviceAccount: falco-account
|
||||
containers:
|
||||
- name: falco
|
||||
image: sysdig/falco:latest
|
||||
securityContext:
|
||||
privileged: true
|
||||
args: [ "/usr/bin/falco", "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token", "-k", "https://kubernetes", "-pk"]
|
||||
volumeMounts:
|
||||
- mountPath: /host/var/run/docker.sock
|
||||
name: docker-socket
|
||||
readOnly: true
|
||||
- mountPath: /host/dev
|
||||
name: dev-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/proc
|
||||
name: proc-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/boot
|
||||
name: boot-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/lib/modules
|
||||
name: lib-modules
|
||||
readOnly: true
|
||||
- mountPath: /host/usr
|
||||
name: usr-fs
|
||||
readOnly: true
|
||||
- mountPath: /etc/falco
|
||||
name: falco-config
|
||||
volumes:
|
||||
- name: docker-socket
|
||||
hostPath:
|
||||
path: /var/run/docker.sock
|
||||
- name: dev-fs
|
||||
hostPath:
|
||||
path: /dev
|
||||
- name: proc-fs
|
||||
hostPath:
|
||||
path: /proc
|
||||
- name: boot-fs
|
||||
hostPath:
|
||||
path: /boot
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: usr-fs
|
||||
hostPath:
|
||||
path: /usr
|
||||
- name: falco-config
|
||||
configMap:
|
||||
name: falco-config
|
||||
@@ -0,0 +1,59 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: falco
|
||||
labels:
|
||||
name: falco-daemonset
|
||||
app: demo
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: falco
|
||||
app: demo
|
||||
role: security
|
||||
spec:
|
||||
containers:
|
||||
- name: falco
|
||||
image: sysdig/falco:latest
|
||||
securityContext:
|
||||
privileged: true
|
||||
args: [ "/usr/bin/falco", "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token", "-k", "https://kubernetes", "-pk", "-o", "json_output=true", "-o", "program_output.enabled=true", "-o", "program_output.program=jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/see_your_slack_team/apps_settings_for/a_webhook_url"]
|
||||
volumeMounts:
|
||||
- mountPath: /host/var/run/docker.sock
|
||||
name: docker-socket
|
||||
readOnly: true
|
||||
- mountPath: /host/dev
|
||||
name: dev-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/proc
|
||||
name: proc-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/boot
|
||||
name: boot-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/lib/modules
|
||||
name: lib-modules
|
||||
readOnly: true
|
||||
- mountPath: /host/usr
|
||||
name: usr-fs
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: docker-socket
|
||||
hostPath:
|
||||
path: /var/run/docker.sock
|
||||
- name: dev-fs
|
||||
hostPath:
|
||||
path: /dev
|
||||
- name: proc-fs
|
||||
hostPath:
|
||||
path: /proc
|
||||
- name: boot-fs
|
||||
hostPath:
|
||||
path: /boot
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: usr-fs
|
||||
hostPath:
|
||||
path: /usr
|
||||
@@ -1,11 +1,9 @@
|
||||
# Owned by software vendor, serving install-software.sh.
|
||||
express_server:
|
||||
container_name: express_server
|
||||
image: node:latest
|
||||
working_dir: /usr/src/app
|
||||
command: bash -c "npm install && node server.js"
|
||||
command: bash -c "apt-get -y update && apt-get -y install runit && npm install && runsv /usr/src/app"
|
||||
ports:
|
||||
- "8080:8080"
|
||||
- "8181:8181"
|
||||
volumes:
|
||||
- ${PWD}:/usr/src/app
|
||||
|
||||
@@ -20,5 +18,4 @@ falco:
|
||||
- /boot:/host/boot:ro
|
||||
- /lib/modules:/host/lib/modules:ro
|
||||
- /usr:/host/usr:ro
|
||||
- ${PWD}/../../rules/falco_rules.yaml:/etc/falco_rules.yaml
|
||||
tty: true
|
||||
|
||||
2
examples/nodejs-bad-rest-api/run
Executable file
2
examples/nodejs-bad-rest-api/run
Executable file
@@ -0,0 +1,2 @@
|
||||
#!/bin/sh
|
||||
node server.js
|
||||
@@ -2,19 +2,19 @@ var express = require('express'); // call express
|
||||
var app = express(); // define our app using express
|
||||
var child_process = require('child_process');
|
||||
|
||||
var port = process.env.PORT || 8080; // set our port
|
||||
var port = process.env.PORT || 8181; // set our port
|
||||
|
||||
// ROUTES FOR OUR API
|
||||
// =============================================================================
|
||||
var router = express.Router(); // get an instance of the express Router
|
||||
|
||||
// test route to make sure everything is working (accessed at GET http://localhost:8080/api)
|
||||
// test route to make sure everything is working (accessed at GET http://localhost:8181/api)
|
||||
router.get('/', function(req, res) {
|
||||
res.json({ message: 'API available'});
|
||||
});
|
||||
|
||||
router.get('/exec/:cmd', function(req, res) {
|
||||
var ret = child_process.spawnSync(req.params.cmd);
|
||||
var ret = child_process.spawnSync(req.params.cmd, { shell: true});
|
||||
res.send(ret.stdout);
|
||||
});
|
||||
|
||||
|
||||
58
falco.yaml
58
falco.yaml
@@ -1,5 +1,15 @@
|
||||
# File containing Falco rules, loaded at startup.
|
||||
rules_file: /etc/falco_rules.yaml
|
||||
# File(s) containing Falco rules, loaded at startup.
|
||||
#
|
||||
# falco_rules.yaml ships with the falco package and is overridden with
|
||||
# every new software version. falco_rules.local.yaml is only created
|
||||
# if it doesn't exist. If you want to customize the set of rules, add
|
||||
# your customizations to falco_rules.local.yaml.
|
||||
#
|
||||
# The files will be read in the order presented here, so make sure if
|
||||
# you have overrides they appear in later files.
|
||||
rules_file:
|
||||
- /etc/falco/falco_rules.yaml
|
||||
- /etc/falco/falco_rules.local.yaml
|
||||
|
||||
# Whether to output events in json or text
|
||||
json_output: false
|
||||
@@ -9,6 +19,37 @@ json_output: false
|
||||
log_stderr: true
|
||||
log_syslog: true
|
||||
|
||||
# Minimum log level to include in logs. Note: these levels are
|
||||
# separate from the priority field of rules. This refers only to the
|
||||
# log level of falco's internal logging. Can be one of "emergency",
|
||||
# "alert", "critical", "error", "warning", "notice", "info", "debug".
|
||||
log_level: info
|
||||
|
||||
# Minimum rule priority level to load and run. All rules having a
|
||||
# priority more severe than this level will be loaded/run. Can be one
|
||||
# of "emergency", "alert", "critical", "error", "warning", "notice",
|
||||
# "info", "debug".
|
||||
priority: debug
|
||||
|
||||
# Whether or not output to any of the output channels below is
|
||||
# buffered. Defaults to true
|
||||
buffered_outputs: true
|
||||
|
||||
# A throttling mechanism implemented as a token bucket limits the
|
||||
# rate of falco notifications. This throttling is controlled by the following configuration
|
||||
# options:
|
||||
# - rate: the number of tokens (i.e. right to send a notification)
|
||||
# gained per second. Defaults to 1.
|
||||
# - max_burst: the maximum number of tokens outstanding. Defaults to 1000.
|
||||
#
|
||||
# With these defaults, falco could send up to 1000 notifications after
|
||||
# an initial quiet period, and then up to 1 notification per second
|
||||
# afterward. It would gain the full burst back after 1000 seconds of
|
||||
# no activity.
|
||||
|
||||
outputs:
|
||||
rate: 1
|
||||
max_burst: 1000
|
||||
|
||||
# Where security notifications should go.
|
||||
# Multiple outputs can be enabled.
|
||||
@@ -16,8 +57,13 @@ log_syslog: true
|
||||
syslog_output:
|
||||
enabled: true
|
||||
|
||||
# If keep_alive is set to true, the file will be opened once and
|
||||
# continuously written to, with each output message on its own
|
||||
# line. If keep_alive is set to false, the file will be re-opened
|
||||
# for each output message.
|
||||
file_output:
|
||||
enabled: false
|
||||
keep_alive: false
|
||||
filename: ./events.txt
|
||||
|
||||
stdout_output:
|
||||
@@ -28,7 +74,15 @@ stdout_output:
|
||||
# program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX"
|
||||
# - logging (alternate method than syslog):
|
||||
# program: logger -t falco-test
|
||||
# - send over a network connection:
|
||||
# program: nc host.example.com 80
|
||||
|
||||
# If keep_alive is set to true, the program will be started once and
|
||||
# continuously written to, with each output message on its own
|
||||
# line. If keep_alive is set to false, the program will be re-spawned
|
||||
# for each output message.
|
||||
|
||||
program_output:
|
||||
enabled: false
|
||||
keep_alive: false
|
||||
program: mail -s "Falco Notification" someone@example.com
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
if(NOT DEFINED FALCO_ETC_DIR)
|
||||
set(FALCO_ETC_DIR "/etc")
|
||||
set(FALCO_ETC_DIR "/etc/falco")
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED FALCO_RULES_DEST_FILENAME)
|
||||
set(FALCO_RULES_DEST_FILENAME "falco_rules.yaml")
|
||||
set(FALCO_LOCAL_RULES_DEST_FILENAME "falco_rules.local.yaml")
|
||||
set(FALCO_APP_RULES_DEST_FILENAME "application_rules.yaml")
|
||||
endif()
|
||||
|
||||
if(DEFINED FALCO_COMPONENT)
|
||||
@@ -11,9 +13,25 @@ install(FILES falco_rules.yaml
|
||||
COMPONENT "${FALCO_COMPONENT}"
|
||||
DESTINATION "${FALCO_ETC_DIR}"
|
||||
RENAME "${FALCO_RULES_DEST_FILENAME}")
|
||||
|
||||
install(FILES falco_rules.local.yaml
|
||||
COMPONENT "${FALCO_COMPONENT}"
|
||||
DESTINATION "${FALCO_ETC_DIR}"
|
||||
RENAME "${FALCO_LOCAL_RULES_DEST_FILENAME}")
|
||||
|
||||
# Intentionally *not* installing application_rules.yaml. Not needed
|
||||
# when falco is embedded in other projects.
|
||||
else()
|
||||
install(FILES falco_rules.yaml
|
||||
DESTINATION "${FALCO_ETC_DIR}"
|
||||
RENAME "${FALCO_RULES_DEST_FILENAME}")
|
||||
DESTINATION "${FALCO_ETC_DIR}"
|
||||
RENAME "${FALCO_RULES_DEST_FILENAME}")
|
||||
|
||||
install(FILES falco_rules.local.yaml
|
||||
DESTINATION "${FALCO_ETC_DIR}"
|
||||
RENAME "${FALCO_LOCAL_RULES_DEST_FILENAME}")
|
||||
|
||||
install(FILES application_rules.yaml
|
||||
DESTINATION "${FALCO_ETC_DIR}"
|
||||
RENAME "${FALCO_APP_RULES_DEST_FILENAME}")
|
||||
endif()
|
||||
|
||||
|
||||
169
rules/application_rules.yaml
Normal file
169
rules/application_rules.yaml
Normal file
@@ -0,0 +1,169 @@
|
||||
################################################################
|
||||
# By default all application-related rules are disabled for
|
||||
# performance reasons. Depending on the application(s) you use,
|
||||
# uncomment the corresponding rule definitions for
|
||||
# application-specific activity monitoring.
|
||||
################################################################
|
||||
|
||||
# Elasticsearch ports
|
||||
- macro: elasticsearch_cluster_port
|
||||
condition: fd.sport=9300
|
||||
- macro: elasticsearch_api_port
|
||||
condition: fd.sport=9200
|
||||
- macro: elasticsearch_port
|
||||
condition: elasticsearch_cluster_port or elasticsearch_api_port
|
||||
|
||||
# - rule: Elasticsearch unexpected network inbound traffic
|
||||
# desc: inbound network traffic to elasticsearch on a port other than the standard ports
|
||||
# condition: user.name = elasticsearch and inbound and not elasticsearch_port
|
||||
# output: "Inbound network traffic to Elasticsearch on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# - rule: Elasticsearch unexpected network outbound traffic
|
||||
# desc: outbound network traffic from elasticsearch on a port other than the standard ports
|
||||
# condition: user.name = elasticsearch and outbound and not elasticsearch_cluster_port
|
||||
# output: "Outbound network traffic from Elasticsearch on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
|
||||
# ActiveMQ ports
|
||||
- macro: activemq_cluster_port
|
||||
condition: fd.sport=61616
|
||||
- macro: activemq_web_port
|
||||
condition: fd.sport=8161
|
||||
- macro: activemq_port
|
||||
condition: activemq_web_port or activemq_cluster_port
|
||||
|
||||
# - rule: Activemq unexpected network inbound traffic
|
||||
# desc: inbound network traffic to activemq on a port other than the standard ports
|
||||
# condition: user.name = activemq and inbound and not activemq_port
|
||||
# output: "Inbound network traffic to ActiveMQ on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# - rule: Activemq unexpected network outbound traffic
|
||||
# desc: outbound network traffic from activemq on a port other than the standard ports
|
||||
# condition: user.name = activemq and outbound and not activemq_cluster_port
|
||||
# output: "Outbound network traffic from ActiveMQ on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
|
||||
# Cassandra ports
|
||||
# https://docs.datastax.com/en/cassandra/2.0/cassandra/security/secureFireWall_r.html
|
||||
- macro: cassandra_thrift_client_port
|
||||
condition: fd.sport=9160
|
||||
- macro: cassandra_cql_port
|
||||
condition: fd.sport=9042
|
||||
- macro: cassandra_cluster_port
|
||||
condition: fd.sport=7000
|
||||
- macro: cassandra_ssl_cluster_port
|
||||
condition: fd.sport=7001
|
||||
- macro: cassandra_jmx_port
|
||||
condition: fd.sport=7199
|
||||
- macro: cassandra_port
|
||||
condition: >
|
||||
cassandra_thrift_client_port or
|
||||
cassandra_cql_port or cassandra_cluster_port or
|
||||
cassandra_ssl_cluster_port or cassandra_jmx_port
|
||||
|
||||
# - rule: Cassandra unexpected network inbound traffic
|
||||
# desc: inbound network traffic to cassandra on a port other than the standard ports
|
||||
# condition: user.name = cassandra and inbound and not cassandra_port
|
||||
# output: "Inbound network traffic to Cassandra on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# - rule: Cassandra unexpected network outbound traffic
|
||||
# desc: outbound network traffic from cassandra on a port other than the standard ports
|
||||
# condition: user.name = cassandra and outbound and not (cassandra_ssl_cluster_port or cassandra_cluster_port)
|
||||
# output: "Outbound network traffic from Cassandra on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# Couchdb ports
|
||||
# https://github.com/davisp/couchdb/blob/master/etc/couchdb/local.ini
|
||||
- macro: couchdb_httpd_port
|
||||
condition: fd.sport=5984
|
||||
- macro: couchdb_httpd_ssl_port
|
||||
condition: fd.sport=6984
|
||||
# xxx can't tell what clustering ports are used. not writing rules for this
|
||||
# yet.
|
||||
|
||||
# Fluentd ports
|
||||
- macro: fluentd_http_port
|
||||
condition: fd.sport=9880
|
||||
- macro: fluentd_forward_port
|
||||
condition: fd.sport=24224
|
||||
|
||||
# - rule: Fluentd unexpected network inbound traffic
|
||||
# desc: inbound network traffic to fluentd on a port other than the standard ports
|
||||
# condition: user.name = td-agent and inbound and not (fluentd_forward_port or fluentd_http_port)
|
||||
# output: "Inbound network traffic to Fluentd on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# - rule: Tdagent unexpected network outbound traffic
|
||||
# desc: outbound network traffic from fluentd on a port other than the standard ports
|
||||
# condition: user.name = td-agent and outbound and not fluentd_forward_port
|
||||
# output: "Outbound network traffic from Fluentd on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# Gearman ports
|
||||
# http://gearman.org/protocol/
|
||||
# - rule: Gearman unexpected network outbound traffic
|
||||
# desc: outbound network traffic from gearman on a port other than the standard ports
|
||||
# condition: user.name = gearman and outbound and outbound and not fd.sport = 4730
|
||||
# output: "Outbound network traffic from Gearman on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# Zookeeper
|
||||
- macro: zookeeper_port
|
||||
condition: fd.sport = 2181
|
||||
|
||||
# Kafka ports
|
||||
# - rule: Kafka unexpected network inbound traffic
|
||||
# desc: inbound network traffic to kafka on a port other than the standard ports
|
||||
# condition: user.name = kafka and inbound and fd.sport != 9092
|
||||
# output: "Inbound network traffic to Kafka on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# Memcached ports
|
||||
# - rule: Memcached unexpected network inbound traffic
|
||||
# desc: inbound network traffic to memcached on a port other than the standard ports
|
||||
# condition: user.name = memcached and inbound and fd.sport != 11211
|
||||
# output: "Inbound network traffic to Memcached on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# - rule: Memcached unexpected network outbound traffic
|
||||
# desc: any outbound network traffic from memcached. memcached never initiates outbound connections.
|
||||
# condition: user.name = memcached and outbound
|
||||
# output: "Unexpected Memcached outbound connection (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
|
||||
# MongoDB ports
|
||||
- macro: mongodb_server_port
|
||||
condition: fd.sport = 27017
|
||||
- macro: mongodb_shardserver_port
|
||||
condition: fd.sport = 27018
|
||||
- macro: mongodb_configserver_port
|
||||
condition: fd.sport = 27019
|
||||
- macro: mongodb_webserver_port
|
||||
condition: fd.sport = 28017
|
||||
|
||||
# - rule: Mongodb unexpected network inbound traffic
|
||||
# desc: inbound network traffic to mongodb on a port other than the standard ports
|
||||
# condition: >
|
||||
# user.name = mongodb and inbound and not (mongodb_server_port or
|
||||
# mongodb_shardserver_port or mongodb_configserver_port or mongodb_webserver_port)
|
||||
# output: "Inbound network traffic to MongoDB on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# MySQL ports
|
||||
# - rule: Mysql unexpected network inbound traffic
|
||||
# desc: inbound network traffic to mysql on a port other than the standard ports
|
||||
# condition: user.name = mysql and inbound and fd.sport != 3306
|
||||
# output: "Inbound network traffic to MySQL on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
|
||||
# - rule: HTTP server unexpected network inbound traffic
|
||||
# desc: inbound network traffic to a http server program on a port other than the standard ports
|
||||
# condition: proc.name in (http_server_binaries) and inbound and fd.sport != 80 and fd.sport != 443
|
||||
# output: "Inbound network traffic to HTTP Server on unexpected port (connection=%fd.name)"
|
||||
# priority: WARNING
|
||||
13
rules/falco_rules.local.yaml
Normal file
13
rules/falco_rules.local.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
####################
|
||||
# Your custom rules!
|
||||
####################
|
||||
|
||||
# Add new rules, like this one
|
||||
# - rule: The program "sudo" is run in a container
|
||||
# desc: An event will trigger every time you run sudo in a container
|
||||
# condition: evt.type = execve and evt.dir=< and container.id != host and proc.name = sudo
|
||||
# output: "Sudo run in container (user=%user.name %container.info parent=%proc.pname cmdline=%proc.cmdline)"
|
||||
# priority: ERROR
|
||||
# tags: [users, container]
|
||||
|
||||
# Or override/append to any rule, macro, or list from the Default Rules
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,14 @@
|
||||
configure_file(debian/postinst.in debian/postinst)
|
||||
configure_file(debian/prerm.in debian/prerm)
|
||||
|
||||
file(COPY "${PROJECT_SOURCE_DIR}/scripts/debian/falco"
|
||||
DESTINATION "${PROJECT_BINARY_DIR}/scripts/debian")
|
||||
|
||||
file(COPY "${PROJECT_SOURCE_DIR}/scripts/rpm/falco"
|
||||
DESTINATION "${PROJECT_BINARY_DIR}/scripts/rpm")
|
||||
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
|
||||
install(PROGRAMS ${SYSDIG_DIR}/scripts/sysdig-probe-loader
|
||||
DESTINATION ${FALCO_BIN_DIR}
|
||||
RENAME falco-probe-loader)
|
||||
endif()
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
NAME=falco
|
||||
|
||||
if [ -x "/etc/init.d/$NAME" ]; then
|
||||
update-rc.d $NAME defaults >/dev/null
|
||||
fi
|
||||
|
||||
32
scripts/debian/postinst.in
Executable file
32
scripts/debian/postinst.in
Executable file
@@ -0,0 +1,32 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
DKMS_PACKAGE_NAME="@PACKAGE_NAME@"
|
||||
DKMS_VERSION="@PROBE_VERSION@"
|
||||
NAME="@PACKAGE_NAME@"
|
||||
|
||||
postinst_found=0
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
for DKMS_POSTINST in /usr/lib/dkms/common.postinst /usr/share/$DKMS_PACKAGE_NAME/postinst; do
|
||||
if [ -f $DKMS_POSTINST ]; then
|
||||
$DKMS_POSTINST $DKMS_PACKAGE_NAME $DKMS_VERSION /usr/share/$DKMS_PACKAGE_NAME "" $2
|
||||
postinst_found=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "$postinst_found" -eq 0 ]; then
|
||||
echo "ERROR: DKMS version is too old and $DKMS_PACKAGE_NAME was not"
|
||||
echo "built with legacy DKMS support."
|
||||
echo "You must either rebuild $DKMS_PACKAGE_NAME with legacy postinst"
|
||||
echo "support or upgrade DKMS to a more current version."
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -x "/etc/init.d/$NAME" ]; then
|
||||
update-rc.d $NAME defaults >/dev/null
|
||||
fi
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
NAME=falco
|
||||
|
||||
if [ -x "/etc/init.d/$NAME" ]; then
|
||||
if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
|
||||
invoke-rc.d $NAME stop || exit $?
|
||||
else
|
||||
/etc/init.d/$NAME stop || exit $?
|
||||
fi
|
||||
fi
|
||||
|
||||
23
scripts/debian/prerm.in
Executable file
23
scripts/debian/prerm.in
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
NAME="@PACKAGE_NAME@"
|
||||
|
||||
if [ -x "/etc/init.d/$NAME" ]; then
|
||||
if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
|
||||
invoke-rc.d $NAME stop || exit $?
|
||||
else
|
||||
/etc/init.d/$NAME stop || exit $?
|
||||
fi
|
||||
fi
|
||||
|
||||
DKMS_PACKAGE_NAME="@PACKAGE_NAME@"
|
||||
DKMS_VERSION="@PROBE_VERSION@"
|
||||
|
||||
case "$1" in
|
||||
remove|upgrade|deconfigure)
|
||||
if [ "$(dkms status -m $DKMS_PACKAGE_NAME -v $DKMS_VERSION)" ]; then
|
||||
dkms remove -m $DKMS_PACKAGE_NAME -v $DKMS_VERSION --all
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
@@ -1 +1,15 @@
|
||||
dkms add -m falco -v %{version} --rpm_safe_upgrade
|
||||
if [ `uname -r | grep -c "BOOT"` -eq 0 ] && [ -e /lib/modules/`uname -r`/build/include ]; then
|
||||
dkms build -m falco -v %{version}
|
||||
dkms install --force -m falco -v %{version}
|
||||
elif [ `uname -r | grep -c "BOOT"` -gt 0 ]; then
|
||||
echo -e ""
|
||||
echo -e "Module build for the currently running kernel was skipped since you"
|
||||
echo -e "are running a BOOT variant of the kernel."
|
||||
else
|
||||
echo -e ""
|
||||
echo -e "Module build for the currently running kernel was skipped since the"
|
||||
echo -e "kernel source for this kernel does not seem to be installed."
|
||||
fi
|
||||
|
||||
/sbin/chkconfig --add falco
|
||||
|
||||
@@ -2,3 +2,5 @@ if [ $1 = 0 ]; then
|
||||
/sbin/service falco stop > /dev/null 2>&1
|
||||
/sbin/chkconfig --del falco
|
||||
fi
|
||||
|
||||
dkms remove -m falco -v %{version} --all --rpm_safe_upgrade
|
||||
|
||||
@@ -4,6 +4,9 @@ import os
|
||||
import re
|
||||
import json
|
||||
import sets
|
||||
import glob
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
from avocado import Test
|
||||
from avocado.utils import process
|
||||
@@ -17,15 +20,17 @@ class FalcoTest(Test):
|
||||
"""
|
||||
self.falcodir = self.params.get('falcodir', '/', default=os.path.join(self.basedir, '../build'))
|
||||
|
||||
self.stdout_contains = self.params.get('stdout_contains', '*', default='')
|
||||
self.stderr_contains = self.params.get('stderr_contains', '*', default='')
|
||||
self.exit_status = self.params.get('exit_status', '*', default=0)
|
||||
self.should_detect = self.params.get('detect', '*', default=False)
|
||||
self.trace_file = self.params.get('trace_file', '*')
|
||||
self.trace_file = self.params.get('trace_file', '*', default='')
|
||||
|
||||
if not os.path.isabs(self.trace_file):
|
||||
if self.trace_file and not os.path.isabs(self.trace_file):
|
||||
self.trace_file = os.path.join(self.basedir, self.trace_file)
|
||||
|
||||
self.json_output = self.params.get('json_output', '*', default=False)
|
||||
self.priority = self.params.get('priority', '*', default='debug')
|
||||
self.rules_file = self.params.get('rules_file', '*', default=os.path.join(self.basedir, '../rules/falco_rules.yaml'))
|
||||
|
||||
if not isinstance(self.rules_file, list):
|
||||
@@ -42,6 +47,8 @@ class FalcoTest(Test):
|
||||
if not os.path.isabs(self.conf_file):
|
||||
self.conf_file = os.path.join(self.basedir, self.conf_file)
|
||||
|
||||
self.run_duration = self.params.get('run_duration', '*', default='')
|
||||
|
||||
self.disabled_rules = self.params.get('disabled_rules', '*', default='')
|
||||
|
||||
if self.disabled_rules == '':
|
||||
@@ -55,6 +62,16 @@ class FalcoTest(Test):
|
||||
for rule in self.disabled_rules:
|
||||
self.disabled_args = self.disabled_args + "-D " + rule + " "
|
||||
|
||||
self.detect_counts = self.params.get('detect_counts', '*', default=False)
|
||||
if self.detect_counts == False:
|
||||
self.detect_counts = {}
|
||||
else:
|
||||
detect_counts = {}
|
||||
for item in self.detect_counts:
|
||||
for item2 in item:
|
||||
detect_counts[item2[0]] = item2[1]
|
||||
self.detect_counts = detect_counts
|
||||
|
||||
self.rules_warning = self.params.get('rules_warning', '*', default=False)
|
||||
if self.rules_warning == False:
|
||||
self.rules_warning = sets.Set()
|
||||
@@ -78,15 +95,23 @@ class FalcoTest(Test):
|
||||
if not isinstance(self.detect_level, list):
|
||||
self.detect_level = [self.detect_level]
|
||||
|
||||
# Doing this in 2 steps instead of simply using
|
||||
# module_is_loaded to avoid logging lsmod output to the log.
|
||||
lsmod_output = process.system_output("lsmod", verbose=False)
|
||||
self.package = self.params.get('package', '*', default='None')
|
||||
|
||||
if linux_modules.parse_lsmod_for_module(lsmod_output, 'sysdig_probe') == {}:
|
||||
self.log.debug("Loading sysdig kernel module")
|
||||
process.run('sudo insmod {}/driver/sysdig-probe.ko'.format(self.falcodir))
|
||||
if self.package == 'None':
|
||||
# Doing this in 2 steps instead of simply using
|
||||
# module_is_loaded to avoid logging lsmod output to the log.
|
||||
lsmod_output = process.system_output("lsmod", verbose=False)
|
||||
|
||||
self.str_variant = self.trace_file
|
||||
if linux_modules.parse_lsmod_for_module(lsmod_output, 'falco_probe') == {}:
|
||||
self.log.debug("Loading falco kernel module")
|
||||
process.run('insmod {}/driver/falco-probe.ko'.format(self.falcodir), sudo=True)
|
||||
|
||||
self.addl_docker_run_args = self.params.get('addl_docker_run_args', '*', default='')
|
||||
|
||||
self.copy_local_driver = self.params.get('copy_local_driver', '*', default=False)
|
||||
|
||||
# Used by possibly_copy_local_driver as well as docker run
|
||||
self.module_dir = os.path.expanduser("~/.sysdig")
|
||||
|
||||
self.outputs = self.params.get('outputs', '*', default='')
|
||||
|
||||
@@ -100,8 +125,26 @@ class FalcoTest(Test):
|
||||
output['file'] = item2[0]
|
||||
output['line'] = item2[1]
|
||||
outputs.append(output)
|
||||
filedir = os.path.dirname(output['file'])
|
||||
# Create the parent directory for the trace file if it doesn't exist.
|
||||
if not os.path.isdir(filedir):
|
||||
os.makedirs(filedir)
|
||||
self.outputs = outputs
|
||||
|
||||
self.disable_tags = self.params.get('disable_tags', '*', default='')
|
||||
|
||||
if self.disable_tags == '':
|
||||
self.disable_tags=[]
|
||||
|
||||
self.run_tags = self.params.get('run_tags', '*', default='')
|
||||
|
||||
if self.run_tags == '':
|
||||
self.run_tags=[]
|
||||
|
||||
def tearDown(self):
|
||||
if self.package != 'None':
|
||||
self.uninstall_package()
|
||||
|
||||
def check_rules_warnings(self, res):
|
||||
|
||||
found_warning = sets.Set()
|
||||
@@ -160,6 +203,28 @@ class FalcoTest(Test):
|
||||
if not events_detected > 0:
|
||||
self.fail("Detected {} events at level {} when should have detected > 0".format(events_detected, level))
|
||||
|
||||
def check_detections_by_rule(self, res):
|
||||
# Get the number of events detected for each rule. Must match the expected counts.
|
||||
match = re.search('Triggered rules by rule name:(.*)', res.stdout, re.DOTALL)
|
||||
if match is None:
|
||||
self.fail("Could not find a block 'Triggered rules by rule name: ...' in falco output")
|
||||
|
||||
triggered_rules = match.group(1)
|
||||
|
||||
for rule, count in self.detect_counts.iteritems():
|
||||
expected = '{}: (\d+)'.format(rule)
|
||||
match = re.search(expected, triggered_rules)
|
||||
|
||||
if match is None:
|
||||
actual_count = 0
|
||||
else:
|
||||
actual_count = int(match.group(1))
|
||||
|
||||
if actual_count != count:
|
||||
self.fail("Different counts for rule {}: expected={}, actual={}".format(rule, count, actual_count))
|
||||
else:
|
||||
self.log.debug("Found expected count for rule {}: {}".format(rule, count))
|
||||
|
||||
def check_outputs(self):
|
||||
for output in self.outputs:
|
||||
# Open the provided file and match each line against the
|
||||
@@ -188,12 +253,112 @@ class FalcoTest(Test):
|
||||
if not attr in obj:
|
||||
self.fail("Falco JSON object {} does not contain property \"{}\"".format(line, attr))
|
||||
|
||||
def install_package(self):
|
||||
|
||||
if self.package.startswith("docker:"):
|
||||
|
||||
image = self.package.split(":", 1)[1]
|
||||
# Remove an existing falco-test container first. Note we don't check the output--docker rm
|
||||
# doesn't have an -i equivalent.
|
||||
res = process.run("docker rm falco-test", ignore_status=True)
|
||||
rules_dir = os.path.abspath(os.path.join(self.basedir, "./rules"))
|
||||
conf_dir = os.path.abspath(os.path.join(self.basedir, "../"))
|
||||
traces_dir = os.path.abspath(os.path.join(self.basedir, "./trace_files"))
|
||||
self.falco_binary_path = "docker run -i -t --name falco-test --privileged " \
|
||||
"-v {}:/host/rules -v {}:/host/conf -v {}:/host/traces " \
|
||||
"-v /var/run/docker.sock:/host/var/run/docker.sock " \
|
||||
"-v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro " \
|
||||
"-v /lib/modules:/host/lib/modules:ro -v {}:/root/.sysdig:ro -v " \
|
||||
"/usr:/host/usr:ro {} {} falco".format(
|
||||
rules_dir, conf_dir, traces_dir,
|
||||
self.module_dir, self.addl_docker_run_args, image)
|
||||
|
||||
elif self.package.endswith(".deb"):
|
||||
self.falco_binary_path = '/usr/bin/falco';
|
||||
|
||||
package_glob = "{}/{}".format(self.falcodir, self.package)
|
||||
|
||||
matches = glob.glob(package_glob)
|
||||
|
||||
if len(matches) != 1:
|
||||
self.fail("Package path {} did not match exactly 1 file. Instead it matched: {}", package_glob, ",".join(matches))
|
||||
|
||||
package_path = matches[0]
|
||||
|
||||
cmdline = "dpkg -i {}".format(package_path)
|
||||
self.log.debug("Installing debian package via \"{}\"".format(cmdline))
|
||||
res = process.run(cmdline, timeout=120, sudo=True)
|
||||
|
||||
def uninstall_package(self):
|
||||
|
||||
if self.package.startswith("docker:"):
|
||||
# Remove the falco-test image. Here we *do* check the return value
|
||||
res = process.run("docker rm falco-test")
|
||||
|
||||
elif self.package.endswith(".deb"):
|
||||
cmdline = "dpkg --purge falco"
|
||||
self.log.debug("Uninstalling debian package via \"{}\"".format(cmdline))
|
||||
res = process.run(cmdline, timeout=120, sudo=True)
|
||||
|
||||
def possibly_copy_driver(self):
|
||||
# Remove the contents of ~/.sysdig regardless of
|
||||
# copy_local_driver.
|
||||
self.log.debug("Checking for module dir {}".format(self.module_dir))
|
||||
if os.path.isdir(self.module_dir):
|
||||
self.log.info("Removing files below directory {}".format(self.module_dir))
|
||||
for rmfile in glob.glob(self.module_dir + "/*"):
|
||||
self.log.debug("Removing file {}".format(rmfile))
|
||||
os.remove(rmfile)
|
||||
|
||||
if self.copy_local_driver:
|
||||
verstr = subprocess.check_output([self.falco_binary_path, "--version"]).rstrip()
|
||||
self.log.info("verstr {}".format(verstr))
|
||||
falco_version = verstr.split(" ")[2]
|
||||
self.log.info("falco_version {}".format(falco_version))
|
||||
arch = subprocess.check_output(["uname", "-m"]).rstrip()
|
||||
self.log.info("arch {}".format(arch))
|
||||
kernel_release = subprocess.check_output(["uname", "-r"]).rstrip()
|
||||
self.log.info("kernel release {}".format(kernel_release))
|
||||
|
||||
# sysdig-probe-loader has a more comprehensive set of ways to
|
||||
# find the config hash. We only look at /boot/config-<kernel release>
|
||||
md5_output = subprocess.check_output(["md5sum", "/boot/config-{}".format(kernel_release)]).rstrip()
|
||||
config_hash = md5_output.split(" ")[0]
|
||||
|
||||
probe_filename = "falco-probe-{}-{}-{}-{}.ko".format(falco_version, arch, kernel_release, config_hash)
|
||||
driver_path = os.path.join(self.falcodir, "driver", "falco-probe.ko")
|
||||
module_path = os.path.join(self.module_dir, probe_filename)
|
||||
self.log.debug("Copying {} to {}".format(driver_path, module_path))
|
||||
shutil.copyfile(driver_path, module_path)
|
||||
|
||||
def test(self):
|
||||
self.log.info("Trace file %s", self.trace_file)
|
||||
|
||||
# Run the provided trace file though falco
|
||||
cmd = '{}/userspace/falco/falco {} {} -c {} -e {} -o json_output={} -v'.format(
|
||||
self.falcodir, self.rules_args, self.disabled_args, self.conf_file, self.trace_file, self.json_output)
|
||||
self.falco_binary_path = '{}/userspace/falco/falco'.format(self.falcodir)
|
||||
|
||||
self.possibly_copy_driver()
|
||||
|
||||
if self.package != 'None':
|
||||
# This sets falco_binary_path as a side-effect.
|
||||
self.install_package()
|
||||
|
||||
trace_arg = self.trace_file
|
||||
|
||||
if self.trace_file:
|
||||
trace_arg = "-e {}".format(self.trace_file)
|
||||
|
||||
# Run falco
|
||||
cmd = '{} {} {} -c {} {} -o json_output={} -o priority={} -v'.format(
|
||||
self.falco_binary_path, self.rules_args, self.disabled_args, self.conf_file, trace_arg, self.json_output, self.priority)
|
||||
|
||||
for tag in self.disable_tags:
|
||||
cmd += ' -T {}'.format(tag)
|
||||
|
||||
for tag in self.run_tags:
|
||||
cmd += ' -t {}'.format(tag)
|
||||
|
||||
if self.run_duration:
|
||||
cmd += ' -M {}'.format(self.run_duration)
|
||||
|
||||
self.falco_proc = process.SubProcess(cmd)
|
||||
|
||||
@@ -204,6 +369,11 @@ class FalcoTest(Test):
|
||||
if match is None:
|
||||
self.fail("Stderr of falco process did not contain content matching {}".format(self.stderr_contains))
|
||||
|
||||
if self.stdout_contains != '':
|
||||
match = re.search(self.stdout_contains, res.stdout)
|
||||
if match is None:
|
||||
self.fail("Stdout of falco process '{}' did not contain content matching {}".format(res.stdout, self.stdout_contains))
|
||||
|
||||
if res.exit_status != self.exit_status:
|
||||
self.error("Falco command \"{}\" exited with unexpected return value {} (!= {})".format(
|
||||
cmd, res.exit_status, self.exit_status))
|
||||
@@ -216,6 +386,8 @@ class FalcoTest(Test):
|
||||
if len(self.rules_events) > 0:
|
||||
self.check_rules_events(res)
|
||||
self.check_detections(res)
|
||||
if len(self.detect_counts) > 0:
|
||||
self.check_detections_by_rule(res)
|
||||
self.check_json_output(res)
|
||||
self.check_outputs()
|
||||
pass
|
||||
|
||||
657
test/falco_tests.yaml
Normal file
657
test/falco_tests.yaml
Normal file
@@ -0,0 +1,657 @@
|
||||
trace_files: !mux
|
||||
|
||||
docker_package:
|
||||
package: docker:sysdig/falco:test
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file: /host/rules/rule_names_with_spaces.yaml
|
||||
trace_file: /host/traces/cat_write.scap
|
||||
conf_file: /host/conf/falco.yaml
|
||||
|
||||
# This uses a volume mount to overwrite and prevent /usr/sbin/dkms
|
||||
# from being run. As a result, it will force falco-probe-loader to
|
||||
# fall back to loading the driver from ~/.sysdig. Setting
|
||||
# copy_local_driver to True copied the driver to ~/.sysdig, so it
|
||||
# will be available. In this case, we're running live for 5 seconds
|
||||
# just to see if falco can load the driver.
|
||||
|
||||
docker_package_local_driver:
|
||||
package: docker:sysdig/falco:test
|
||||
addl_docker_run_args: -v /dev/null:/usr/sbin/dkms
|
||||
copy_local_driver: True
|
||||
detect: False
|
||||
detect_level: WARNING
|
||||
rules_file: /host/rules/tagged_rules.yaml
|
||||
conf_file: /host/conf/falco.yaml
|
||||
run_duration: 5
|
||||
|
||||
debian_package:
|
||||
package: falco*.deb
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_names_with_spaces.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
builtin_rules_no_warnings:
|
||||
detect: False
|
||||
trace_file: trace_files/empty.scap
|
||||
rules_warning: False
|
||||
|
||||
test_warnings:
|
||||
detect: False
|
||||
trace_file: trace_files/empty.scap
|
||||
rules_file: rules/falco_rules_warnings.yaml
|
||||
rules_warning:
|
||||
- no_evttype
|
||||
- evttype_not_equals
|
||||
- leading_not
|
||||
- not_equals_at_end
|
||||
- not_at_end
|
||||
- not_before_trailing_evttype
|
||||
- not_equals_before_trailing_evttype
|
||||
- not_equals_and_not
|
||||
- not_equals_before_in
|
||||
- not_before_in
|
||||
- not_in_before_in
|
||||
- leading_in_not_equals_before_evttype
|
||||
- leading_in_not_equals_at_evttype
|
||||
- not_with_evttypes
|
||||
- not_with_evttypes_addl
|
||||
- not_equals_before_evttype
|
||||
- not_equals_before_in_evttype
|
||||
- not_before_evttype
|
||||
- not_before_evttype_using_in
|
||||
rules_events:
|
||||
- no_warnings: [execve]
|
||||
- no_evttype: [all]
|
||||
- evttype_not_equals: [all]
|
||||
- leading_not: [all]
|
||||
- not_equals_after_evttype: [execve]
|
||||
- not_after_evttype: [execve]
|
||||
- leading_trailing_evttypes: [execve,open]
|
||||
- leading_multtrailing_evttypes: [connect,execve,open]
|
||||
- leading_multtrailing_evttypes_using_in: [connect,execve,open]
|
||||
- not_equals_at_end: [all]
|
||||
- not_at_end: [all]
|
||||
- not_before_trailing_evttype: [all]
|
||||
- not_equals_before_trailing_evttype: [all]
|
||||
- not_equals_and_not: [all]
|
||||
- not_equals_before_in: [all]
|
||||
- not_before_in: [all]
|
||||
- not_in_before_in: [all]
|
||||
- evttype_in: [execve,open]
|
||||
- evttype_in_plus_trailing: [connect,execve,open]
|
||||
- leading_in_not_equals_before_evttype: [all]
|
||||
- leading_in_not_equals_at_evttype: [all]
|
||||
- not_with_evttypes: [all]
|
||||
- not_with_evttypes_addl: [all]
|
||||
- not_equals_before_evttype: [all]
|
||||
- not_equals_before_in_evttype: [all]
|
||||
- not_before_evttype: [all]
|
||||
- not_before_evttype_using_in: [all]
|
||||
- repeated_evttypes: [open]
|
||||
- repeated_evttypes_with_in: [open]
|
||||
- repeated_evttypes_with_separate_in: [open]
|
||||
- repeated_evttypes_with_mix: [open]
|
||||
|
||||
rule_names_with_spaces:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_names_with_spaces.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules_first_empty:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/empty_rules.yaml
|
||||
- rules/single_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules_last_empty:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/empty_rules.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules:
|
||||
detect: True
|
||||
detect_level:
|
||||
- WARNING
|
||||
- INFO
|
||||
- ERROR
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/double_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules_suppress_info:
|
||||
detect: True
|
||||
detect_level:
|
||||
- WARNING
|
||||
- ERROR
|
||||
priority: WARNING
|
||||
detect_counts:
|
||||
- open_from_cat: 8
|
||||
- exec_from_cat: 1
|
||||
- access_from_cat: 0
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/double_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules_overriding:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/override_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
macro_overriding:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/override_macro.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_overriding:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/override_list.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
nested_list_overriding:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/override_nested_list.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_substring:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/list_substring.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_sub_front:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_sub_front.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_sub_mid:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_sub_mid.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_sub_end:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_sub_end.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_sub_bare:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_sub_bare.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_sub_whitespace:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_sub_whitespace.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_order:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_order.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
macro_order:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/macro_order.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
rule_order:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_order.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
invalid_rule_output:
|
||||
exit_status: 1
|
||||
stderr_contains: "Runtime error: Error loading rules:.* Invalid output format 'An open was seen %not_a_real_field': 'invalid formatting token not_a_real_field'. Exiting."
|
||||
rules_file:
|
||||
- rules/invalid_rule_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_rules:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/empty_rules.yaml
|
||||
- rules/single_rule.yaml
|
||||
disabled_rules:
|
||||
- open_from_cat
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_rules_using_regex:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/empty_rules.yaml
|
||||
- rules/single_rule.yaml
|
||||
disabled_rules:
|
||||
- "open.*"
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_rules_using_enabled_flag:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/single_rule_enabled_flag.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_and_enabled_rules_1:
|
||||
exit_status: 1
|
||||
stderr_contains: "Runtime error: You can not specify both disabled .-D/-T. and enabled .-t. rules. Exiting."
|
||||
disable_tags: [a]
|
||||
run_tags: [a]
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_and_enabled_rules_2:
|
||||
exit_status: 1
|
||||
stderr_contains: "Runtime error: You can not specify both disabled .-D/-T. and enabled .-t. rules. Exiting."
|
||||
disabled_rules:
|
||||
- "open.*"
|
||||
run_tags: [a]
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
null_output_field:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/null_output_field.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
stdout_contains: "Warning An open was seen .cport=<NA> command=cat /dev/null."
|
||||
|
||||
file_output:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
conf_file: confs/file_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
outputs:
|
||||
- /tmp/falco_outputs/file_output.txt: Warning An open was seen
|
||||
|
||||
program_output:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
conf_file: confs/program_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
outputs:
|
||||
- /tmp/falco_outputs/program_output.txt: Warning An open was seen
|
||||
|
||||
detect_counts:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
trace_file: traces-positive/falco-event-generator.scap
|
||||
detect_counts:
|
||||
- "Write below binary dir": 1
|
||||
- "Read sensitive file untrusted": 3
|
||||
- "Run shell untrusted": 1
|
||||
- "Write below rpm database": 1
|
||||
- "Write below etc": 1
|
||||
- "System procs network activity": 1
|
||||
- "Mkdir binary dirs": 1
|
||||
- "System user interactive": 1
|
||||
- "DB program spawned process": 1
|
||||
- "Non sudo setuid": 1
|
||||
- "Create files below dev": 1
|
||||
- "Modify binary dirs": 2
|
||||
- "Change thread namespace": 2
|
||||
|
||||
disabled_tags_a:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
disable_tags: [a]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 1
|
||||
- open_3: 1
|
||||
- open_4: 0
|
||||
- open_5: 0
|
||||
- open_6: 1
|
||||
- open_7: 0
|
||||
- open_8: 0
|
||||
- open_9: 0
|
||||
- open_10: 0
|
||||
- open_11: 1
|
||||
- open_12: 1
|
||||
- open_13: 1
|
||||
|
||||
disabled_tags_b:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
disable_tags: [b]
|
||||
detect_counts:
|
||||
- open_1: 1
|
||||
- open_2: 0
|
||||
- open_3: 1
|
||||
- open_4: 0
|
||||
- open_5: 1
|
||||
- open_6: 0
|
||||
- open_7: 0
|
||||
- open_8: 0
|
||||
- open_9: 1
|
||||
- open_10: 0
|
||||
- open_11: 1
|
||||
- open_12: 1
|
||||
- open_13: 1
|
||||
|
||||
disabled_tags_c:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
disable_tags: [c]
|
||||
detect_counts:
|
||||
- open_1: 1
|
||||
- open_2: 1
|
||||
- open_3: 0
|
||||
- open_4: 1
|
||||
- open_5: 0
|
||||
- open_6: 0
|
||||
- open_7: 0
|
||||
- open_8: 1
|
||||
- open_9: 0
|
||||
- open_10: 0
|
||||
- open_11: 1
|
||||
- open_12: 1
|
||||
- open_13: 1
|
||||
|
||||
disabled_tags_ab:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
disable_tags: [a, b]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 0
|
||||
- open_3: 1
|
||||
- open_4: 0
|
||||
- open_5: 0
|
||||
- open_6: 0
|
||||
- open_7: 0
|
||||
- open_8: 0
|
||||
- open_9: 0
|
||||
- open_10: 0
|
||||
- open_11: 1
|
||||
- open_12: 1
|
||||
- open_13: 1
|
||||
|
||||
disabled_tags_abc:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
disable_tags: [a, b, c]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 0
|
||||
- open_3: 0
|
||||
- open_4: 0
|
||||
- open_5: 0
|
||||
- open_6: 0
|
||||
- open_7: 0
|
||||
- open_8: 0
|
||||
- open_9: 0
|
||||
- open_10: 0
|
||||
- open_11: 1
|
||||
- open_12: 1
|
||||
- open_13: 1
|
||||
|
||||
run_tags_a:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [a]
|
||||
detect_counts:
|
||||
- open_1: 1
|
||||
- open_2: 0
|
||||
- open_3: 0
|
||||
- open_4: 1
|
||||
- open_5: 1
|
||||
- open_6: 0
|
||||
- open_7: 1
|
||||
- open_8: 1
|
||||
- open_9: 1
|
||||
- open_10: 1
|
||||
- open_11: 0
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
run_tags_b:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [b]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 1
|
||||
- open_3: 0
|
||||
- open_4: 1
|
||||
- open_5: 0
|
||||
- open_6: 1
|
||||
- open_7: 1
|
||||
- open_8: 1
|
||||
- open_9: 0
|
||||
- open_10: 1
|
||||
- open_11: 0
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
run_tags_c:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [c]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 0
|
||||
- open_3: 1
|
||||
- open_4: 0
|
||||
- open_5: 1
|
||||
- open_6: 1
|
||||
- open_7: 1
|
||||
- open_8: 0
|
||||
- open_9: 1
|
||||
- open_10: 1
|
||||
- open_11: 0
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
run_tags_ab:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [a, b]
|
||||
detect_counts:
|
||||
- open_1: 1
|
||||
- open_2: 1
|
||||
- open_3: 0
|
||||
- open_4: 1
|
||||
- open_5: 1
|
||||
- open_6: 1
|
||||
- open_7: 1
|
||||
- open_8: 1
|
||||
- open_9: 1
|
||||
- open_10: 1
|
||||
- open_11: 0
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
run_tags_bc:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [b, c]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 1
|
||||
- open_3: 1
|
||||
- open_4: 1
|
||||
- open_5: 1
|
||||
- open_6: 1
|
||||
- open_7: 1
|
||||
- open_8: 1
|
||||
- open_9: 1
|
||||
- open_10: 1
|
||||
- open_11: 0
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
run_tags_abc:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [a, b, c]
|
||||
detect_counts:
|
||||
- open_1: 1
|
||||
- open_2: 1
|
||||
- open_3: 1
|
||||
- open_4: 1
|
||||
- open_5: 1
|
||||
- open_6: 1
|
||||
- open_7: 1
|
||||
- open_8: 1
|
||||
- open_9: 1
|
||||
- open_10: 1
|
||||
- open_11: 0
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
run_tags_d:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/tagged_rules.yaml
|
||||
trace_file: trace_files/open-multiple-files.scap
|
||||
run_tags: [d]
|
||||
detect_counts:
|
||||
- open_1: 0
|
||||
- open_2: 0
|
||||
- open_3: 0
|
||||
- open_4: 0
|
||||
- open_5: 0
|
||||
- open_6: 0
|
||||
- open_7: 0
|
||||
- open_8: 0
|
||||
- open_9: 0
|
||||
- open_10: 0
|
||||
- open_11: 1
|
||||
- open_12: 0
|
||||
- open_13: 0
|
||||
|
||||
list_append_failure:
|
||||
exit_status: 1
|
||||
stderr_contains: "List my_list has 'append' key but no list by that name already exists. Exiting"
|
||||
rules_file:
|
||||
- rules/list_append_failure.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_append:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/list_append.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
list_append_false:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/list_append_false.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
macro_append_failure:
|
||||
exit_status: 1
|
||||
stderr_contains: "Macro my_macro has 'append' key but no macro by that name already exists. Exiting"
|
||||
rules_file:
|
||||
- rules/macro_append_failure.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
macro_append:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/macro_append.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
macro_append_false:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/macro_append_false.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
rule_append_failure:
|
||||
exit_status: 1
|
||||
stderr_contains: "Rule my_rule has 'append' key but no rule by that name already exists. Exiting"
|
||||
rules_file:
|
||||
- rules/rule_append_failure.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
rule_append:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_append.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
rule_append_false:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/rule_append_false.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
@@ -1,147 +0,0 @@
|
||||
trace_files: !mux
|
||||
builtin_rules_no_warnings:
|
||||
detect: False
|
||||
trace_file: trace_files/empty.scap
|
||||
rules_warning: False
|
||||
|
||||
test_warnings:
|
||||
detect: False
|
||||
trace_file: trace_files/empty.scap
|
||||
rules_file: rules/falco_rules_warnings.yaml
|
||||
rules_warning:
|
||||
- no_evttype
|
||||
- evttype_not_equals
|
||||
- leading_not
|
||||
- not_equals_at_end
|
||||
- not_at_end
|
||||
- not_before_trailing_evttype
|
||||
- not_equals_before_trailing_evttype
|
||||
- not_equals_and_not
|
||||
- not_equals_before_in
|
||||
- not_before_in
|
||||
- not_in_before_in
|
||||
- leading_in_not_equals_before_evttype
|
||||
- leading_in_not_equals_at_evttype
|
||||
- not_with_evttypes
|
||||
- not_with_evttypes_addl
|
||||
- not_equals_before_evttype
|
||||
- not_equals_before_in_evttype
|
||||
- not_before_evttype
|
||||
- not_before_evttype_using_in
|
||||
rules_events:
|
||||
- no_warnings: [execve]
|
||||
- no_evttype: [all]
|
||||
- evttype_not_equals: [all]
|
||||
- leading_not: [all]
|
||||
- not_equals_after_evttype: [execve]
|
||||
- not_after_evttype: [execve]
|
||||
- leading_trailing_evttypes: [execve,open]
|
||||
- leading_multtrailing_evttypes: [connect,execve,open]
|
||||
- leading_multtrailing_evttypes_using_in: [connect,execve,open]
|
||||
- not_equals_at_end: [all]
|
||||
- not_at_end: [all]
|
||||
- not_before_trailing_evttype: [all]
|
||||
- not_equals_before_trailing_evttype: [all]
|
||||
- not_equals_and_not: [all]
|
||||
- not_equals_before_in: [all]
|
||||
- not_before_in: [all]
|
||||
- not_in_before_in: [all]
|
||||
- evttype_in: [execve,open]
|
||||
- evttype_in_plus_trailing: [connect,execve,open]
|
||||
- leading_in_not_equals_before_evttype: [all]
|
||||
- leading_in_not_equals_at_evttype: [all]
|
||||
- not_with_evttypes: [all]
|
||||
- not_with_evttypes_addl: [all]
|
||||
- not_equals_before_evttype: [all]
|
||||
- not_equals_before_in_evttype: [all]
|
||||
- not_before_evttype: [all]
|
||||
- not_before_evttype_using_in: [all]
|
||||
- repeated_evttypes: [open]
|
||||
- repeated_evttypes_with_in: [open]
|
||||
- repeated_evttypes_with_separate_in: [open]
|
||||
- repeated_evttypes_with_mix: [open]
|
||||
|
||||
rule_names_with_spaces:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_names_with_spaces.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules_first_empty:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/empty_rules.yaml
|
||||
- rules/single_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules_last_empty:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/empty_rules.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
multiple_rules:
|
||||
detect: True
|
||||
detect_level:
|
||||
- WARNING
|
||||
- INFO
|
||||
- ERROR
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/double_rule.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
invalid_rule_output:
|
||||
exit_status: 1
|
||||
stderr_contains: "Runtime error: Error loading rules:.* Invalid output format 'An open was seen %not_a_real_field': 'invalid formatting token not_a_real_field'. Exiting."
|
||||
rules_file:
|
||||
- rules/invalid_rule_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_rules:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/empty_rules.yaml
|
||||
- rules/single_rule.yaml
|
||||
disabled_rules:
|
||||
- open_from_cat
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_rules_using_regex:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/empty_rules.yaml
|
||||
- rules/single_rule.yaml
|
||||
disabled_rules:
|
||||
- "open.*"
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
disabled_rules_using_enabled_flag:
|
||||
detect: False
|
||||
rules_file:
|
||||
- rules/single_rule_enabled_flag.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
|
||||
file_output:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
conf_file: confs/file_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
outputs:
|
||||
- /tmp/falco_outputs/file_output.txt: Warning An open was seen
|
||||
|
||||
program_output:
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
conf_file: confs/program_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
outputs:
|
||||
- /tmp/falco_outputs/program_output.txt: Warning An open was seen
|
||||
196
test/falco_traces.yaml.in
Normal file
196
test/falco_traces.yaml.in
Normal file
@@ -0,0 +1,196 @@
|
||||
has_json_output: !mux
|
||||
yes:
|
||||
json_output: True
|
||||
no:
|
||||
json_output: False
|
||||
|
||||
traces: !mux
|
||||
change-thread-namespace:
|
||||
trace_file: traces-positive/change-thread-namespace.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "Change thread namespace": 2
|
||||
|
||||
container-privileged:
|
||||
trace_file: traces-positive/container-privileged.scap
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
detect_counts:
|
||||
- "Launch Privileged Container": 1
|
||||
|
||||
container-sensitive-mount:
|
||||
trace_file: traces-positive/container-sensitive-mount.scap
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
detect_counts:
|
||||
- "Launch Sensitive Mount Container": 1
|
||||
|
||||
create-files-below-dev:
|
||||
trace_file: traces-positive/create-files-below-dev.scap
|
||||
detect: True
|
||||
detect_level: ERROR
|
||||
detect_counts:
|
||||
- "Create files below dev": 1
|
||||
|
||||
db-program-spawned-process:
|
||||
trace_file: traces-positive/db-program-spawned-process.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "DB program spawned process": 1
|
||||
|
||||
falco-event-generator:
|
||||
trace_file: traces-positive/falco-event-generator.scap
|
||||
detect: True
|
||||
detect_level: [ERROR, WARNING, INFO, NOTICE, DEBUG]
|
||||
detect_counts:
|
||||
- "Write below binary dir": 1
|
||||
- "Read sensitive file untrusted": 3
|
||||
- "Run shell untrusted": 1
|
||||
- "Write below rpm database": 1
|
||||
- "Write below etc": 1
|
||||
- "System procs network activity": 1
|
||||
- "Mkdir binary dirs": 1
|
||||
- "System user interactive": 1
|
||||
- "DB program spawned process": 1
|
||||
- "Non sudo setuid": 1
|
||||
- "Create files below dev": 1
|
||||
- "Modify binary dirs": 2
|
||||
- "Change thread namespace": 2
|
||||
|
||||
installer-fbash-manages-service:
|
||||
trace_file: traces-info/installer-fbash-manages-service.scap
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
detect_counts:
|
||||
- "Installer bash manages service": 4
|
||||
|
||||
installer-bash-non-https-connection:
|
||||
trace_file: traces-positive/installer-bash-non-https-connection.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "Installer bash non https connection": 1
|
||||
|
||||
installer-fbash-runs-pkgmgmt:
|
||||
trace_file: traces-info/installer-fbash-runs-pkgmgmt.scap
|
||||
detect: True
|
||||
detect_level: [NOTICE, INFO]
|
||||
detect_counts:
|
||||
- "Installer bash runs pkgmgmt program": 4
|
||||
- "Installer bash non https connection": 4
|
||||
|
||||
installer-bash-starts-network-server:
|
||||
trace_file: traces-positive/installer-bash-starts-network-server.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "Installer bash starts network server": 2
|
||||
- "Installer bash non https connection": 3
|
||||
|
||||
installer-bash-starts-session:
|
||||
trace_file: traces-positive/installer-bash-starts-session.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "Installer bash starts session": 1
|
||||
- "Installer bash non https connection": 3
|
||||
|
||||
mkdir-binary-dirs:
|
||||
trace_file: traces-positive/mkdir-binary-dirs.scap
|
||||
detect: True
|
||||
detect_level: ERROR
|
||||
detect_counts:
|
||||
- "Mkdir binary dirs": 1
|
||||
|
||||
modify-binary-dirs:
|
||||
trace_file: traces-positive/modify-binary-dirs.scap
|
||||
detect: True
|
||||
detect_level: ERROR
|
||||
detect_counts:
|
||||
- "Modify binary dirs": 1
|
||||
|
||||
modify-package-repo-list-installer:
|
||||
trace_file: traces-info/modify-package-repo-list-installer.scap
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
detect_counts:
|
||||
- "Write below etc in installer": 1
|
||||
|
||||
non-sudo-setuid:
|
||||
trace_file: traces-positive/non-sudo-setuid.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "Non sudo setuid": 1
|
||||
|
||||
read-sensitive-file-after-startup:
|
||||
trace_file: traces-positive/read-sensitive-file-after-startup.scap
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
detect_counts:
|
||||
- "Read sensitive file untrusted": 1
|
||||
|
||||
read-sensitive-file-untrusted:
|
||||
trace_file: traces-positive/read-sensitive-file-untrusted.scap
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
detect_counts:
|
||||
- "Read sensitive file untrusted": 1
|
||||
|
||||
run-shell-untrusted:
|
||||
trace_file: traces-positive/run-shell-untrusted.scap
|
||||
detect: True
|
||||
detect_level: DEBUG
|
||||
detect_counts:
|
||||
- "Run shell untrusted": 1
|
||||
|
||||
system-binaries-network-activity:
|
||||
trace_file: traces-positive/system-binaries-network-activity.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "System procs network activity": 1
|
||||
|
||||
system-user-interactive:
|
||||
trace_file: traces-positive/system-user-interactive.scap
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
detect_counts:
|
||||
- "System user interactive": 1
|
||||
|
||||
user-mgmt-binaries:
|
||||
trace_file: traces-positive/user-mgmt-binaries.scap
|
||||
detect: True
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "User mgmt binaries": 1
|
||||
|
||||
write-binary-dir:
|
||||
trace_file: traces-positive/write-binary-dir.scap
|
||||
detect: True
|
||||
detect_level: ERROR
|
||||
detect_counts:
|
||||
- "Write below binary dir": 4
|
||||
|
||||
write-etc:
|
||||
trace_file: traces-positive/write-etc.scap
|
||||
detect: True
|
||||
detect_level: ERROR
|
||||
detect_counts:
|
||||
- "Write below etc": 1
|
||||
|
||||
write-etc-installer:
|
||||
trace_file: traces-info/write-etc-installer.scap
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
detect_counts:
|
||||
- "Write below etc in installer": 1
|
||||
|
||||
write-rpm-database:
|
||||
trace_file: traces-positive/write-rpm-database.scap
|
||||
detect: True
|
||||
detect_level: ERROR
|
||||
detect_counts:
|
||||
- "Write below rpm database": 1
|
||||
@@ -13,23 +13,35 @@ if (substr(script.basename, 1, 1) != '/') {
|
||||
|
||||
results = paste(script.basename, "results.json", sep='/')
|
||||
output = "./output.png"
|
||||
metric = "cpu"
|
||||
|
||||
GetoptLong(
|
||||
"results=s", "Path to results file",
|
||||
"benchmark=s", "Benchmark from results file to graph",
|
||||
"variant=s@", "Variant(s) to include in graph. Can be specified multiple times",
|
||||
"output=s", "Output graph file"
|
||||
"output=s", "Output graph file",
|
||||
"metric=s", "Metric to graph. Can be one of (cpu|drops)"
|
||||
)
|
||||
|
||||
if (metric == "cpu") {
|
||||
data_metric="cpu_usage"
|
||||
yaxis_label="CPU Usage (%)"
|
||||
title="Falco/Sysdig/Multimatch CPU Usage: %s"
|
||||
} else if (metric == "drops") {
|
||||
data_metric="drop_pct"
|
||||
yaxis_label="Event Drops (%)"
|
||||
title="Falco/Sysdig/Multimatch Event Drops: %s"
|
||||
}
|
||||
|
||||
res <- fromJSON(results, flatten=TRUE)
|
||||
|
||||
res2 = res[res$benchmark == benchmark & res$variant %in% variant,]
|
||||
|
||||
plot <- ggplot(data=res2, aes(x=sample, y=cpu_usage, group=variant, colour=variant)) +
|
||||
plot <- ggplot(data=res2, aes(x=sample, y=get(data_metric), group=variant, colour=variant)) +
|
||||
geom_line() +
|
||||
ylab("CPU Usage (%)") +
|
||||
ylab(yaxis_label) +
|
||||
xlab("Time") +
|
||||
ggtitle(sprintf("Falco/Sysdig CPU Usage: %s", benchmark))
|
||||
ggtitle(sprintf(title, benchmark))
|
||||
theme(legend.position=c(.2, .88));
|
||||
|
||||
print(paste("Writing graph to", output, sep=" "))
|
||||
|
||||
12
test/rules/list_append.yaml
Normal file
12
test/rules/list_append.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
- list: my_list
|
||||
items: [not-cat]
|
||||
|
||||
- list: my_list
|
||||
append: true
|
||||
items: [cat]
|
||||
|
||||
- rule: Open From Cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and proc.name in (my_list)
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
3
test/rules/list_append_failure.yaml
Normal file
3
test/rules/list_append_failure.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
- list: my_list
|
||||
items: [not-cat]
|
||||
append: true
|
||||
12
test/rules/list_append_false.yaml
Normal file
12
test/rules/list_append_false.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
- list: my_list
|
||||
items: [cat]
|
||||
|
||||
- list: my_list
|
||||
append: false
|
||||
items: [not-cat]
|
||||
|
||||
- rule: Open From Cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and proc.name in (my_list)
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
14
test/rules/list_order.yaml
Normal file
14
test/rules/list_order.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
- list: cat_binaries
|
||||
items: [not_cat]
|
||||
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (cat_binaries)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
11
test/rules/list_sub_bare.yaml
Normal file
11
test/rules/list_sub_bare.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name=cat_binaries
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
11
test/rules/list_sub_end.yaml
Normal file
11
test/rules/list_sub_end.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (ls, cat_binaries)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
11
test/rules/list_sub_front.yaml
Normal file
11
test/rules/list_sub_front.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (cat_binaries, ps)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
11
test/rules/list_sub_mid.yaml
Normal file
11
test/rules/list_sub_mid.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (ls, cat_binaries, ps)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
11
test/rules/list_sub_whitespace.yaml
Normal file
11
test/rules/list_sub_whitespace.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name= cat_binaries or proc.name=nopey
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
8
test/rules/list_substring.yaml
Normal file
8
test/rules/list_substring.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
- list: my_list
|
||||
items: ['"one string"']
|
||||
|
||||
- rule: my_rule
|
||||
desc: my description
|
||||
condition: evt.type=open and fd.name in (file_my_list)
|
||||
output: my output
|
||||
priority: INFO
|
||||
12
test/rules/macro_append.yaml
Normal file
12
test/rules/macro_append.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
- macro: my_macro
|
||||
condition: proc.name=not-cat
|
||||
|
||||
- macro: my_macro
|
||||
append: true
|
||||
condition: or proc.name=cat
|
||||
|
||||
- rule: Open From Cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and my_macro
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
3
test/rules/macro_append_failure.yaml
Normal file
3
test/rules/macro_append_failure.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
- macro: my_macro
|
||||
condition: proc.name=not-cat
|
||||
append: true
|
||||
12
test/rules/macro_append_false.yaml
Normal file
12
test/rules/macro_append_false.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
- macro: my_macro
|
||||
condition: proc.name=cat
|
||||
|
||||
- macro: my_macro
|
||||
append: false
|
||||
condition: proc.name=not-cat
|
||||
|
||||
- rule: Open From Cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and my_macro
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
14
test/rules/macro_order.yaml
Normal file
14
test/rules/macro_order.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (not_cat)
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (cat_binaries)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
5
test/rules/null_output_field.yaml
Normal file
5
test/rules/null_output_field.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and proc.name=cat
|
||||
output: "An open was seen (cport=%fd.cport command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
2
test/rules/override_list.yaml
Normal file
2
test/rules/override_list.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
- list: cat_capable_binaries
|
||||
items: [not-cat]
|
||||
2
test/rules/override_macro.yaml
Normal file
2
test/rules/override_macro.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
- macro: is_cat
|
||||
condition: proc.name in (not-cat)
|
||||
2
test/rules/override_nested_list.yaml
Normal file
2
test/rules/override_nested_list.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
- list: cat_binaries
|
||||
items: [not-cat]
|
||||
5
test/rules/override_rule.yaml
Normal file
5
test/rules/override_rule.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and proc.name=not-cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
9
test/rules/rule_append.yaml
Normal file
9
test/rules/rule_append.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
- rule: my_rule
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and fd.name=not-a-real-file
|
||||
output: "An open of /dev/null was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
|
||||
- rule: my_rule
|
||||
append: true
|
||||
condition: or fd.name=/dev/null
|
||||
3
test/rules/rule_append_failure.yaml
Normal file
3
test/rules/rule_append_failure.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
- rule: my_rule
|
||||
condition: evt.type=open
|
||||
append: true
|
||||
9
test/rules/rule_append_false.yaml
Normal file
9
test/rules/rule_append_false.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
- rule: my_rule
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and fd.name=/dev/null
|
||||
output: "An open of /dev/null was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
|
||||
- rule: my_rule
|
||||
append: true
|
||||
condition: and fd.name=not-a-real-file
|
||||
17
test/rules/rule_order.yaml
Normal file
17
test/rules/rule_order.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (cat_binaries)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and proc.name=not_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
@@ -1,5 +1,11 @@
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- list: cat_capable_binaries
|
||||
items: [cat_binaries]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name=cat
|
||||
condition: proc.name in (cat_capable_binaries)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
|
||||
93
test/rules/tagged_rules.yaml
Normal file
93
test/rules/tagged_rules.yaml
Normal file
@@ -0,0 +1,93 @@
|
||||
- macro: open_read
|
||||
condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f'
|
||||
|
||||
- rule: open_1
|
||||
desc: open one
|
||||
condition: open_read and fd.name=/tmp/file-1
|
||||
output: Open one (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [a]
|
||||
|
||||
- rule: open_2
|
||||
desc: open two
|
||||
condition: open_read and fd.name=/tmp/file-2
|
||||
output: Open two (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [b]
|
||||
|
||||
- rule: open_3
|
||||
desc: open three
|
||||
condition: open_read and fd.name=/tmp/file-3
|
||||
output: Open three (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [c]
|
||||
|
||||
- rule: open_4
|
||||
desc: open four
|
||||
condition: open_read and fd.name=/tmp/file-4
|
||||
output: Open four (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [a, b]
|
||||
|
||||
- rule: open_5
|
||||
desc: open file
|
||||
condition: open_read and fd.name=/tmp/file-5
|
||||
output: Open file (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [a, c]
|
||||
|
||||
- rule: open_6
|
||||
desc: open six
|
||||
condition: open_read and fd.name=/tmp/file-6
|
||||
output: Open six (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [b, c]
|
||||
|
||||
- rule: open_7
|
||||
desc: open seven
|
||||
condition: open_read and fd.name=/tmp/file-7
|
||||
output: Open seven (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [a, b, c]
|
||||
|
||||
- rule: open_8
|
||||
desc: open eight
|
||||
condition: open_read and fd.name=/tmp/file-8
|
||||
output: Open eight (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [b, a]
|
||||
|
||||
- rule: open_9
|
||||
desc: open nine
|
||||
condition: open_read and fd.name=/tmp/file-9
|
||||
output: Open nine (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [c, a]
|
||||
|
||||
- rule: open_10
|
||||
desc: open ten
|
||||
condition: open_read and fd.name=/tmp/file-10
|
||||
output: Open ten (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [b, c, a]
|
||||
|
||||
- rule: open_11
|
||||
desc: open eleven
|
||||
condition: open_read and fd.name=/tmp/file-11
|
||||
output: Open eleven (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: [d]
|
||||
|
||||
- rule: open_12
|
||||
desc: open twelve
|
||||
condition: open_read and fd.name=/tmp/file-12
|
||||
output: Open twelve (file=%fd.name)
|
||||
priority: WARNING
|
||||
tags: []
|
||||
|
||||
- rule: open_13
|
||||
desc: open thirteen
|
||||
condition: open_read and fd.name=/tmp/file-13
|
||||
output: Open thirteen (file=%fd.name)
|
||||
priority: WARNING
|
||||
|
||||
@@ -143,13 +143,13 @@ function start_subject_prog() {
|
||||
if [ -z $RULES_FILE ]; then
|
||||
RULES_FILE=$SOURCE/../output/rules.yaml
|
||||
fi
|
||||
sudo $ROOT/test_mm -s $SOURCE/search_order.yaml -r $RULES_FILE > ./prog-output.txt 2>&1 &
|
||||
sudo FALCO_STATS_EXTRA_variant=$VARIANT FALCO_STATS_EXTRA_benchmark=$live_test $ROOT/test_mm -S $SOURCE/search_order.yaml -s $STATS_FILE -r $RULES_FILE > ./prog-output.txt 2>&1 &
|
||||
elif [[ $ROOT == *"falco"* ]]; then
|
||||
echo " starting falco..."
|
||||
if [ -z $RULES_FILE ]; then
|
||||
RULES_FILE=$SOURCE/rules/falco_rules.yaml
|
||||
fi
|
||||
sudo $ROOT/userspace/falco/falco -c $SOURCE/falco.yaml -r $RULES_FILE --option=stdout_output.enabled=false > ./prog-output.txt -A 2>&1 &
|
||||
sudo FALCO_STATS_EXTRA_variant=$VARIANT FALCO_STATS_EXTRA_benchmark=$live_test $ROOT/userspace/falco/falco -c $SOURCE/falco.yaml -s $STATS_FILE -r $RULES_FILE --option=stdout_output.enabled=false > ./prog-output.txt -A 2>&1 &
|
||||
elif [[ $ROOT == *"sysdig"* ]]; then
|
||||
echo " starting sysdig..."
|
||||
sudo $ROOT/userspace/sysdig/sysdig -N -z evt.type=none &
|
||||
@@ -323,6 +323,7 @@ usage() {
|
||||
echo " -r/--root: root directory containing falco/sysdig binaries (i.e. where you ran 'cmake')"
|
||||
echo " -s/--source: root directory containing falco/sysdig source code"
|
||||
echo " -R/--results: append test results to this file"
|
||||
echo " -S/--stats: append capture statistics to this file (only works for falco/test_mm)"
|
||||
echo " -o/--output: append program output to this file"
|
||||
echo " -U/--rules: path to rules file (only applicable for falco/test_mm)"
|
||||
echo " -t/--test: test to run. Argument has the following format:"
|
||||
@@ -342,7 +343,7 @@ usage() {
|
||||
echo " -F/--falco-agent: When running an agent, whether or not to enable falco"
|
||||
}
|
||||
|
||||
OPTS=`getopt -o hv:r:s:R:o:U:t:T: --long help,variant:,root:,source:,results:,output:,rules:,test:,tracedir:,agent-autodrop:,falco-agent: -n $0 -- "$@"`
|
||||
OPTS=`getopt -o hv:r:s:R:S:o:U:t:T: --long help,variant:,root:,source:,results:,stats:,output:,rules:,test:,tracedir:,agent-autodrop:,falco-agent: -n $0 -- "$@"`
|
||||
|
||||
if [ $? != 0 ]; then
|
||||
echo "Exiting" >&2
|
||||
@@ -356,6 +357,7 @@ ROOT=`dirname $0`/../build
|
||||
SOURCE=$ROOT
|
||||
SCRIPTDIR=`dirname $0`
|
||||
RESULTS_FILE=`dirname $0`/results.json
|
||||
STATS_FILE=`dirname $0`/capture_stats.json
|
||||
OUTPUT_FILE=`dirname $0`/program-output.txt
|
||||
RULES_FILE=
|
||||
TEST=trace:all
|
||||
@@ -371,6 +373,7 @@ while true; do
|
||||
-r | --root ) ROOT="$2"; shift 2;;
|
||||
-s | --source ) SOURCE="$2"; shift 2;;
|
||||
-R | --results ) RESULTS_FILE="$2"; shift 2;;
|
||||
-S | --stats ) STATS_FILE="$2"; shift 2;;
|
||||
-o | --output ) OUTPUT_FILE="$2"; shift 2;;
|
||||
-U | --rules ) RULES_FILE="$2"; shift 2;;
|
||||
-t | --test ) TEST="$2"; shift 2;;
|
||||
@@ -405,6 +408,11 @@ if [ -z $RESULTS_FILE ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $STATS_FILE ]; then
|
||||
echo "An output file for capture statistics must be provided. Not continuing."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $OUTPUT_FILE ]; then
|
||||
echo "An file for program output must be provided. Not continuing."
|
||||
exit 1
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
SCRIPT=$(readlink -f $0)
|
||||
SCRIPTDIR=$(dirname $SCRIPT)
|
||||
MULT_FILE=$SCRIPTDIR/falco_tests.yaml
|
||||
BRANCH=$1
|
||||
|
||||
function download_trace_files() {
|
||||
@@ -19,56 +18,59 @@ function prepare_multiplex_fileset() {
|
||||
|
||||
dir=$1
|
||||
detect=$2
|
||||
detect_level=$3
|
||||
json_output=$4
|
||||
|
||||
for trace in $SCRIPTDIR/$dir/*.scap ; do
|
||||
[ -e "$trace" ] || continue
|
||||
NAME=`basename $trace .scap`
|
||||
cat << EOF >> $MULT_FILE
|
||||
$NAME-detect-$detect-json-$json_output:
|
||||
|
||||
# falco_traces.yaml might already have an entry for this trace
|
||||
# file, with specific detection levels and counts. If so, skip
|
||||
# it. Otherwise, add a generic entry showing whether or not to
|
||||
# detect anything.
|
||||
grep -q "$NAME:" $SCRIPTDIR/falco_traces.yaml && continue
|
||||
|
||||
cat << EOF >> $SCRIPTDIR/falco_traces.yaml
|
||||
$NAME:
|
||||
detect: $detect
|
||||
detect_level: $detect_level
|
||||
detect_level: WARNING
|
||||
trace_file: $trace
|
||||
json_output: $json_output
|
||||
EOF
|
||||
done
|
||||
}
|
||||
|
||||
function prepare_multiplex_file() {
|
||||
cp $SCRIPTDIR/falco_tests.yaml.in $MULT_FILE
|
||||
cp $SCRIPTDIR/falco_traces.yaml.in $SCRIPTDIR/falco_traces.yaml
|
||||
|
||||
prepare_multiplex_fileset traces-positive True WARNING False
|
||||
prepare_multiplex_fileset traces-negative False WARNING True
|
||||
prepare_multiplex_fileset traces-info True INFO False
|
||||
prepare_multiplex_fileset traces-positive True
|
||||
prepare_multiplex_fileset traces-negative False
|
||||
prepare_multiplex_fileset traces-info True
|
||||
|
||||
prepare_multiplex_fileset traces-positive True WARNING True
|
||||
prepare_multiplex_fileset traces-info True INFO True
|
||||
|
||||
echo "Contents of $MULT_FILE:"
|
||||
cat $MULT_FILE
|
||||
echo "Contents of $SCRIPTDIR/falco_traces.yaml:"
|
||||
cat $SCRIPTDIR/falco_traces.yaml
|
||||
}
|
||||
|
||||
function run_tests() {
|
||||
rm -rf /tmp/falco_outputs
|
||||
mkdir /tmp/falco_outputs
|
||||
CMD="avocado run --multiplex $MULT_FILE --job-results-dir $SCRIPTDIR/job-results -- $SCRIPTDIR/falco_test.py"
|
||||
echo "Running: $CMD"
|
||||
$CMD
|
||||
TEST_RC=$?
|
||||
}
|
||||
|
||||
|
||||
function print_test_failure_details() {
|
||||
echo "Showing full job logs for any tests that failed:"
|
||||
jq '.tests[] | select(.status != "PASS") | .logfile' $SCRIPTDIR/job-results/latest/results.json | xargs cat
|
||||
}
|
||||
|
||||
function run_tests() {
|
||||
rm -rf /tmp/falco_outputs
|
||||
mkdir /tmp/falco_outputs
|
||||
TEST_RC=0
|
||||
for mult in $SCRIPTDIR/falco_traces.yaml $SCRIPTDIR/falco_tests.yaml; do
|
||||
CMD="avocado run --multiplex $mult --job-results-dir $SCRIPTDIR/job-results -- $SCRIPTDIR/falco_test.py"
|
||||
echo "Running: $CMD"
|
||||
$CMD
|
||||
RC=$?
|
||||
TEST_RC=$((TEST_RC+$RC))
|
||||
if [ $RC -ne 0 ]; then
|
||||
print_test_failure_details
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
download_trace_files
|
||||
prepare_multiplex_file
|
||||
run_tests
|
||||
if [ $TEST_RC -ne 0 ]; then
|
||||
print_test_failure_details
|
||||
fi
|
||||
|
||||
exit $TEST_RC
|
||||
|
||||
BIN
test/trace_files/open-multiple-files.scap
Normal file
BIN
test/trace_files/open-multiple-files.scap
Normal file
Binary file not shown.
@@ -4,7 +4,7 @@ include_directories("${PROJECT_SOURCE_DIR}/../sysdig/userspace/libsinsp")
|
||||
include_directories("${PROJECT_BINARY_DIR}/userspace/engine")
|
||||
include_directories("${LUAJIT_INCLUDE}")
|
||||
|
||||
add_library(falco_engine STATIC rules.cpp falco_common.cpp falco_engine.cpp formats.cpp)
|
||||
add_library(falco_engine STATIC rules.cpp falco_common.cpp falco_engine.cpp token_bucket.cpp formats.cpp)
|
||||
|
||||
target_include_directories(falco_engine PUBLIC
|
||||
"${LUAJIT_INCLUDE}")
|
||||
@@ -27,5 +27,3 @@ install(DIRECTORY lua
|
||||
DESTINATION "${FALCO_SHARE_DIR}"
|
||||
FILES_MATCHING PATTERN *.lua)
|
||||
endif()
|
||||
|
||||
add_subdirectory("${PROJECT_SOURCE_DIR}/../falco/rules" "${PROJECT_BINARY_DIR}/rules")
|
||||
|
||||
@@ -18,5 +18,5 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#pragma once
|
||||
|
||||
#define FALCO_ENGINE_LUA_DIR "${FALCO_SHARE_DIR}/lua/"
|
||||
#define FALCO_ENGINE_LUA_DIR "${FALCO_ABSOLUTE_SHARE_DIR}/lua/"
|
||||
#define FALCO_ENGINE_SOURCE_LUA_DIR "${PROJECT_SOURCE_DIR}/../falco/userspace/engine/lua/"
|
||||
|
||||
@@ -21,9 +21,23 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
#include "config_falco_engine.h"
|
||||
#include "falco_common.h"
|
||||
|
||||
std::vector<std::string> falco_common::priority_names = {
|
||||
"Emergency",
|
||||
"Alert",
|
||||
"Critical",
|
||||
"Error",
|
||||
"Warning",
|
||||
"Notice",
|
||||
"Informational",
|
||||
"Debug"};
|
||||
|
||||
falco_common::falco_common()
|
||||
{
|
||||
m_ls = lua_open();
|
||||
if(!m_ls)
|
||||
{
|
||||
throw falco_exception("Cannot open lua");
|
||||
}
|
||||
luaL_openlibs(m_ls);
|
||||
}
|
||||
|
||||
|
||||
@@ -74,6 +74,22 @@ public:
|
||||
|
||||
void set_inspector(sinsp *inspector);
|
||||
|
||||
// Priority levels, as a vector of strings
|
||||
static std::vector<std::string> priority_names;
|
||||
|
||||
// Same as numbers/indices into the above vector
|
||||
enum priority_type
|
||||
{
|
||||
PRIORITY_EMERGENCY = 0,
|
||||
PRIORITY_ALERT = 1,
|
||||
PRIORITY_CRITICAL = 2,
|
||||
PRIORITY_ERROR = 3,
|
||||
PRIORITY_WARNING = 4,
|
||||
PRIORITY_NOTICE = 5,
|
||||
PRIORITY_INFORMATIONAL = 6,
|
||||
PRIORITY_DEBUG = 7
|
||||
};
|
||||
|
||||
protected:
|
||||
lua_State *m_ls;
|
||||
|
||||
|
||||
@@ -40,7 +40,9 @@ string lua_print_stats = "print_stats";
|
||||
using namespace std;
|
||||
|
||||
falco_engine::falco_engine(bool seed_rng)
|
||||
: m_rules(NULL), m_sampling_ratio(1), m_sampling_multiplier(0),
|
||||
: m_rules(NULL), m_next_ruleset_id(0),
|
||||
m_min_priority(falco_common::PRIORITY_DEBUG),
|
||||
m_sampling_ratio(1), m_sampling_multiplier(0),
|
||||
m_replace_container_info(false)
|
||||
{
|
||||
luaopen_lpeg(m_ls);
|
||||
@@ -49,10 +51,14 @@ falco_engine::falco_engine(bool seed_rng)
|
||||
falco_common::init(m_lua_main_filename.c_str(), FALCO_ENGINE_SOURCE_LUA_DIR);
|
||||
falco_rules::init(m_ls);
|
||||
|
||||
m_evttype_filter.reset(new sinsp_evttype_filter());
|
||||
|
||||
if(seed_rng)
|
||||
{
|
||||
srandom((unsigned) getpid());
|
||||
}
|
||||
|
||||
m_default_ruleset_id = find_ruleset_id(m_default_ruleset);
|
||||
}
|
||||
|
||||
falco_engine::~falco_engine()
|
||||
@@ -84,7 +90,7 @@ void falco_engine::load_rules(const string &rules_content, bool verbose, bool al
|
||||
bool json_output = false;
|
||||
falco_formats::init(m_inspector, m_ls, json_output);
|
||||
|
||||
m_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info);
|
||||
m_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info, m_min_priority);
|
||||
}
|
||||
|
||||
void falco_engine::load_rules_file(const string &rules_filename, bool verbose, bool all_events)
|
||||
@@ -105,20 +111,64 @@ void falco_engine::load_rules_file(const string &rules_filename, bool verbose, b
|
||||
load_rules(rules_content, verbose, all_events);
|
||||
}
|
||||
|
||||
void falco_engine::enable_rule(string &pattern, bool enabled)
|
||||
void falco_engine::enable_rule(const string &pattern, bool enabled, const string &ruleset)
|
||||
{
|
||||
m_evttype_filter.enable(pattern, enabled);
|
||||
uint16_t ruleset_id = find_ruleset_id(ruleset);
|
||||
|
||||
m_evttype_filter->enable(pattern, enabled, ruleset_id);
|
||||
}
|
||||
|
||||
unique_ptr<falco_engine::rule_result> falco_engine::process_event(sinsp_evt *ev)
|
||||
void falco_engine::enable_rule(const string &pattern, bool enabled)
|
||||
{
|
||||
enable_rule(pattern, enabled, m_default_ruleset);
|
||||
}
|
||||
|
||||
void falco_engine::enable_rule_by_tag(const set<string> &tags, bool enabled, const string &ruleset)
|
||||
{
|
||||
uint16_t ruleset_id = find_ruleset_id(ruleset);
|
||||
|
||||
m_evttype_filter->enable_tags(tags, enabled, ruleset_id);
|
||||
}
|
||||
|
||||
void falco_engine::enable_rule_by_tag(const set<string> &tags, bool enabled)
|
||||
{
|
||||
enable_rule_by_tag(tags, enabled, m_default_ruleset);
|
||||
}
|
||||
|
||||
void falco_engine::set_min_priority(falco_common::priority_type priority)
|
||||
{
|
||||
m_min_priority = priority;
|
||||
}
|
||||
|
||||
uint16_t falco_engine::find_ruleset_id(const std::string &ruleset)
|
||||
{
|
||||
auto it = m_known_rulesets.lower_bound(ruleset);
|
||||
|
||||
if(it == m_known_rulesets.end() ||
|
||||
it->first != ruleset)
|
||||
{
|
||||
it = m_known_rulesets.emplace_hint(it,
|
||||
std::make_pair(ruleset, m_next_ruleset_id++));
|
||||
}
|
||||
|
||||
return it->second;
|
||||
}
|
||||
|
||||
void falco_engine::evttypes_for_ruleset(std::vector<bool> &evttypes, const std::string &ruleset)
|
||||
{
|
||||
uint16_t ruleset_id = find_ruleset_id(ruleset);
|
||||
|
||||
return m_evttype_filter->evttypes_for_ruleset(evttypes, ruleset_id);
|
||||
}
|
||||
|
||||
unique_ptr<falco_engine::rule_result> falco_engine::process_event(sinsp_evt *ev, uint16_t ruleset_id)
|
||||
{
|
||||
if(should_drop_evt())
|
||||
{
|
||||
return unique_ptr<struct rule_result>();
|
||||
}
|
||||
|
||||
if(!m_evttype_filter.run(ev))
|
||||
if(!m_evttype_filter->run(ev, ruleset_id))
|
||||
{
|
||||
return unique_ptr<struct rule_result>();
|
||||
}
|
||||
@@ -141,7 +191,7 @@ unique_ptr<falco_engine::rule_result> falco_engine::process_event(sinsp_evt *ev)
|
||||
res->evt = ev;
|
||||
const char *p = lua_tostring(m_ls, -3);
|
||||
res->rule = p;
|
||||
res->priority = lua_tostring(m_ls, -2);
|
||||
res->priority_num = (falco_common::priority_type) lua_tonumber(m_ls, -2);
|
||||
res->format = lua_tostring(m_ls, -1);
|
||||
lua_pop(m_ls, 3);
|
||||
}
|
||||
@@ -153,6 +203,11 @@ unique_ptr<falco_engine::rule_result> falco_engine::process_event(sinsp_evt *ev)
|
||||
return res;
|
||||
}
|
||||
|
||||
unique_ptr<falco_engine::rule_result> falco_engine::process_event(sinsp_evt *ev)
|
||||
{
|
||||
return process_event(ev, m_default_ruleset_id);
|
||||
}
|
||||
|
||||
void falco_engine::describe_rule(string *rule)
|
||||
{
|
||||
return m_rules->describe_rule(rule);
|
||||
@@ -180,10 +235,16 @@ void falco_engine::print_stats()
|
||||
}
|
||||
|
||||
void falco_engine::add_evttype_filter(string &rule,
|
||||
list<uint32_t> &evttypes,
|
||||
set<uint32_t> &evttypes,
|
||||
set<string> &tags,
|
||||
sinsp_filter* filter)
|
||||
{
|
||||
m_evttype_filter.add(rule, evttypes, filter);
|
||||
m_evttype_filter->add(rule, evttypes, tags, filter);
|
||||
}
|
||||
|
||||
void falco_engine::clear_filters()
|
||||
{
|
||||
m_evttype_filter.reset(new sinsp_evttype_filter());
|
||||
}
|
||||
|
||||
void falco_engine::set_sampling_ratio(uint32_t sampling_ratio)
|
||||
|
||||
@@ -19,6 +19,8 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
|
||||
#include "sinsp.h"
|
||||
#include "filter.h"
|
||||
@@ -46,23 +48,65 @@ public:
|
||||
void load_rules(const std::string &rules_content, bool verbose, bool all_events);
|
||||
|
||||
//
|
||||
// Enable/Disable any rules matching the provided pattern (regex).
|
||||
// Enable/Disable any rules matching the provided pattern
|
||||
// (regex). When provided, enable/disable these rules in the
|
||||
// context of the provided ruleset. The ruleset (id) can later
|
||||
// be passed as an argument to process_event(). This allows
|
||||
// for different sets of rules being active at once.
|
||||
//
|
||||
void enable_rule(std::string &pattern, bool enabled);
|
||||
void enable_rule(const std::string &pattern, bool enabled, const std::string &ruleset);
|
||||
|
||||
// Wrapper that assumes the default ruleset
|
||||
void enable_rule(const std::string &pattern, bool enabled);
|
||||
|
||||
//
|
||||
// Enable/Disable any rules with any of the provided tags (set, exact matches only)
|
||||
//
|
||||
void enable_rule_by_tag(const std::set<std::string> &tags, bool enabled, const std::string &ruleset);
|
||||
|
||||
// Wrapper that assumes the default ruleset
|
||||
void enable_rule_by_tag(const std::set<std::string> &tags, bool enabled);
|
||||
|
||||
// Only load rules having this priority or more severe.
|
||||
void set_min_priority(falco_common::priority_type priority);
|
||||
|
||||
struct rule_result {
|
||||
sinsp_evt *evt;
|
||||
std::string rule;
|
||||
std::string priority;
|
||||
falco_common::priority_type priority_num;
|
||||
std::string format;
|
||||
};
|
||||
|
||||
//
|
||||
// Return the ruleset id corresponding to this ruleset name,
|
||||
// creating a new one if necessary. If you provide any ruleset
|
||||
// to enable_rule/enable_rule_by_tag(), you should look up the
|
||||
// ruleset id and pass it to process_event().
|
||||
//
|
||||
uint16_t find_ruleset_id(const std::string &ruleset);
|
||||
|
||||
//
|
||||
// Given a ruleset, fill in a bitset containing the event
|
||||
// types for which this ruleset can run.
|
||||
//
|
||||
void evttypes_for_ruleset(std::vector<bool> &evttypes, const std::string &ruleset);
|
||||
|
||||
//
|
||||
// Given an event, check it against the set of rules in the
|
||||
// engine and if a matching rule is found, return details on
|
||||
// the rule that matched. If no rule matched, returns NULL.
|
||||
//
|
||||
// the reutrned rule_result is allocated and must be delete()d.
|
||||
// When ruleset_id is provided, use the enabled/disabled status
|
||||
// associated with the provided ruleset. This is only useful
|
||||
// when you have previously called enable_rule/enable_rule_by_tag
|
||||
// with a ruleset string.
|
||||
//
|
||||
// the returned rule_result is allocated and must be delete()d.
|
||||
std::unique_ptr<rule_result> process_event(sinsp_evt *ev, uint16_t ruleset_id);
|
||||
|
||||
//
|
||||
// Wrapper assuming the default ruleset
|
||||
//
|
||||
std::unique_ptr<rule_result> process_event(sinsp_evt *ev);
|
||||
|
||||
//
|
||||
@@ -77,13 +121,17 @@ public:
|
||||
void print_stats();
|
||||
|
||||
//
|
||||
// Add a filter, which is related to the specified list of
|
||||
// Add a filter, which is related to the specified set of
|
||||
// event types, to the engine.
|
||||
//
|
||||
void add_evttype_filter(std::string &rule,
|
||||
list<uint32_t> &evttypes,
|
||||
std::set<uint32_t> &evttypes,
|
||||
std::set<std::string> &tags,
|
||||
sinsp_filter* filter);
|
||||
|
||||
// Clear all existing filters.
|
||||
void clear_filters();
|
||||
|
||||
//
|
||||
// Set the sampling ratio, which can affect which events are
|
||||
// matched against the set of rules.
|
||||
@@ -116,7 +164,10 @@ private:
|
||||
inline bool should_drop_evt();
|
||||
|
||||
falco_rules *m_rules;
|
||||
sinsp_evttype_filter m_evttype_filter;
|
||||
uint16_t m_next_ruleset_id;
|
||||
std::map<string, uint16_t> m_known_rulesets;
|
||||
std::unique_ptr<sinsp_evttype_filter> m_evttype_filter;
|
||||
falco_common::priority_type m_min_priority;
|
||||
|
||||
//
|
||||
// Here's how the sampling ratio and multiplier influence
|
||||
@@ -142,6 +193,8 @@ private:
|
||||
double m_sampling_multiplier;
|
||||
|
||||
std::string m_lua_main_filename = "rule_loader.lua";
|
||||
std::string m_default_ruleset = "falco-default-ruleset";
|
||||
uint32_t m_default_ruleset_id;
|
||||
|
||||
std::string m_extra;
|
||||
bool m_replace_container_info;
|
||||
|
||||
@@ -24,12 +24,14 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
sinsp* falco_formats::s_inspector = NULL;
|
||||
bool s_json_output = false;
|
||||
bool falco_formats::s_json_output = false;
|
||||
sinsp_evt_formatter_cache *falco_formats::s_formatters = NULL;
|
||||
|
||||
const static struct luaL_reg ll_falco [] =
|
||||
{
|
||||
{"formatter", &falco_formats::formatter},
|
||||
{"free_formatter", &falco_formats::free_formatter},
|
||||
{"free_formatters", &falco_formats::free_formatters},
|
||||
{"format_event", &falco_formats::format_event},
|
||||
{NULL,NULL}
|
||||
};
|
||||
@@ -38,6 +40,10 @@ void falco_formats::init(sinsp* inspector, lua_State *ls, bool json_output)
|
||||
{
|
||||
s_inspector = inspector;
|
||||
s_json_output = json_output;
|
||||
if(!s_formatters)
|
||||
{
|
||||
s_formatters = new sinsp_evt_formatter_cache(s_inspector);
|
||||
}
|
||||
|
||||
luaL_openlib(ls, "formats", ll_falco, 0);
|
||||
}
|
||||
@@ -73,30 +79,68 @@ int falco_formats::free_formatter(lua_State *ls)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int falco_formats::free_formatters(lua_State *ls)
|
||||
{
|
||||
if(s_formatters)
|
||||
{
|
||||
delete(s_formatters);
|
||||
s_formatters = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int falco_formats::format_event (lua_State *ls)
|
||||
{
|
||||
string line;
|
||||
string json_line;
|
||||
|
||||
if (!lua_islightuserdata(ls, -1) ||
|
||||
if (!lua_isstring(ls, -1) ||
|
||||
!lua_isstring(ls, -2) ||
|
||||
!lua_isstring(ls, -3) ||
|
||||
!lua_islightuserdata(ls, -4)) {
|
||||
throw falco_exception("Invalid arguments passed to format_event()\n");
|
||||
lua_pushstring(ls, "Invalid arguments passed to format_event()");
|
||||
lua_error(ls);
|
||||
}
|
||||
sinsp_evt* evt = (sinsp_evt*)lua_topointer(ls, 1);
|
||||
const char *rule = (char *) lua_tostring(ls, 2);
|
||||
const char *level = (char *) lua_tostring(ls, 3);
|
||||
sinsp_evt_formatter* formatter = (sinsp_evt_formatter*)lua_topointer(ls, 4);
|
||||
const char *format = (char *) lua_tostring(ls, 4);
|
||||
|
||||
formatter->tostring(evt, &line);
|
||||
string sformat = format;
|
||||
|
||||
// For JSON output, the formatter returned just the output
|
||||
// string containing the format text and values. Use this to
|
||||
// build a more detailed object containing the event time,
|
||||
// rule, severity, full output, and fields.
|
||||
try {
|
||||
s_formatters->tostring(evt, sformat, &line);
|
||||
|
||||
if(s_json_output)
|
||||
{
|
||||
s_inspector->set_buffer_format(sinsp_evt::PF_JSON);
|
||||
s_formatters->tostring(evt, sformat, &json_line);
|
||||
|
||||
// The formatted string might have a leading newline. If it does, remove it.
|
||||
if (json_line[0] == '\n')
|
||||
{
|
||||
json_line.erase(0, 1);
|
||||
}
|
||||
|
||||
s_inspector->set_buffer_format(sinsp_evt::PF_NORMAL);
|
||||
}
|
||||
}
|
||||
catch (sinsp_exception& e)
|
||||
{
|
||||
string err = "Invalid output format '" + sformat + "': '" + string(e.what()) + "'";
|
||||
lua_pushstring(ls, err.c_str());
|
||||
lua_error(ls);
|
||||
}
|
||||
|
||||
// For JSON output, the formatter returned a json-as-text
|
||||
// object containing all the fields in the original format
|
||||
// message as well as the event time in ns. Use this to build
|
||||
// a more detailed object containing the event time, rule,
|
||||
// severity, full output, and fields.
|
||||
if (s_json_output) {
|
||||
Json::Value event;
|
||||
Json::FastWriter writer;
|
||||
string full_line;
|
||||
|
||||
// Convert the time-as-nanoseconds to a more json-friendly ISO8601.
|
||||
time_t evttime = evt->get_ts()/1000000000;
|
||||
@@ -111,16 +155,26 @@ int falco_formats::format_event (lua_State *ls)
|
||||
event["time"] = iso8601evttime;
|
||||
event["rule"] = rule;
|
||||
event["priority"] = level;
|
||||
// This is the filled-in output line.
|
||||
event["output"] = line;
|
||||
|
||||
line = writer.write(event);
|
||||
full_line = writer.write(event);
|
||||
|
||||
// Json::FastWriter may add a trailing newline. If it
|
||||
// does, remove it.
|
||||
if (line[line.length()-1] == '\n')
|
||||
if (full_line[full_line.length()-1] == '\n')
|
||||
{
|
||||
line.resize(line.length()-1);
|
||||
full_line.resize(full_line.length()-1);
|
||||
}
|
||||
|
||||
// Cheat-graft the output from the formatter into this
|
||||
// string. Avoids an unnecessary json parse just to
|
||||
// merge the formatted fields at the object level.
|
||||
full_line.pop_back();
|
||||
full_line.append(", \"output_fields\": ");
|
||||
full_line.append(json_line);
|
||||
full_line.append("}");
|
||||
line = full_line;
|
||||
}
|
||||
|
||||
lua_pushstring(ls, line.c_str());
|
||||
|
||||
@@ -39,8 +39,13 @@ class falco_formats
|
||||
// falco.free_formatter(formatter)
|
||||
static int free_formatter(lua_State *ls);
|
||||
|
||||
// falco.free_formatters()
|
||||
static int free_formatters(lua_State *ls);
|
||||
|
||||
// formatted_string = falco.format_event(evt, formatter)
|
||||
static int format_event(lua_State *ls);
|
||||
|
||||
static sinsp* s_inspector;
|
||||
static sinsp_evt_formatter_cache *s_formatters;
|
||||
static bool s_json_output;
|
||||
};
|
||||
|
||||
@@ -20,6 +20,7 @@ local compiler = {}
|
||||
|
||||
compiler.verbose = false
|
||||
compiler.all_events = false
|
||||
compiler.trim = parser.trim
|
||||
|
||||
function compiler.set_verbose(verbose)
|
||||
compiler.verbose = verbose
|
||||
@@ -58,14 +59,15 @@ end
|
||||
definition uses another macro).
|
||||
|
||||
--]]
|
||||
function expand_macros(ast, defs, changed)
|
||||
|
||||
function copy(obj)
|
||||
if type(obj) ~= 'table' then return obj end
|
||||
local res = {}
|
||||
for k, v in pairs(obj) do res[copy(k)] = copy(v) end
|
||||
return res
|
||||
end
|
||||
function copy_ast_obj(obj)
|
||||
if type(obj) ~= 'table' then return obj end
|
||||
local res = {}
|
||||
for k, v in pairs(obj) do res[copy_ast_obj(k)] = copy_ast_obj(v) end
|
||||
return res
|
||||
end
|
||||
|
||||
function expand_macros(ast, defs, changed)
|
||||
|
||||
if (ast.type == "Rule") then
|
||||
return expand_macros(ast.filter, defs, changed)
|
||||
@@ -74,7 +76,7 @@ function expand_macros(ast, defs, changed)
|
||||
if (defs[ast.value.value] == nil) then
|
||||
error("Undefined macro '".. ast.value.value .. "' used in filter.")
|
||||
end
|
||||
ast.value = copy(defs[ast.value.value])
|
||||
ast.value = copy_ast_obj(defs[ast.value.value])
|
||||
changed = true
|
||||
return changed
|
||||
end
|
||||
@@ -86,7 +88,7 @@ function expand_macros(ast, defs, changed)
|
||||
if (defs[ast.left.value] == nil) then
|
||||
error("Undefined macro '".. ast.left.value .. "' used in filter.")
|
||||
end
|
||||
ast.left = copy(defs[ast.left.value])
|
||||
ast.left = copy_ast_obj(defs[ast.left.value])
|
||||
changed = true
|
||||
end
|
||||
|
||||
@@ -94,7 +96,7 @@ function expand_macros(ast, defs, changed)
|
||||
if (defs[ast.right.value] == nil) then
|
||||
error("Undefined macro ".. ast.right.value .. " used in filter.")
|
||||
end
|
||||
ast.right = copy(defs[ast.right.value])
|
||||
ast.right = copy_ast_obj(defs[ast.right.value])
|
||||
changed = true
|
||||
end
|
||||
|
||||
@@ -107,7 +109,7 @@ function expand_macros(ast, defs, changed)
|
||||
if (defs[ast.argument.value] == nil) then
|
||||
error("Undefined macro ".. ast.argument.value .. " used in filter.")
|
||||
end
|
||||
ast.argument = copy(defs[ast.argument.value])
|
||||
ast.argument = copy_ast_obj(defs[ast.argument.value])
|
||||
changed = true
|
||||
end
|
||||
return expand_macros(ast.argument, defs, changed)
|
||||
@@ -281,7 +283,7 @@ function get_evttypes(name, ast, source)
|
||||
return evttypes
|
||||
end
|
||||
|
||||
function compiler.compile_macro(line, list_defs)
|
||||
function compiler.compile_macro(line, macro_defs, list_defs)
|
||||
|
||||
for name, items in pairs(list_defs) do
|
||||
line = string.gsub(line, name, table.concat(items, ", "))
|
||||
@@ -300,6 +302,21 @@ function compiler.compile_macro(line, list_defs)
|
||||
check_for_ignored_syscalls_events(ast, 'macro', line)
|
||||
end
|
||||
|
||||
-- Simply as a validation step, try to expand all macros in this
|
||||
-- macro's condition. This changes the ast, so we make a copy
|
||||
-- first.
|
||||
local ast_copy = copy_ast_obj(ast)
|
||||
|
||||
if (ast.type == "Rule") then
|
||||
-- Line is a filter, so expand macro references
|
||||
repeat
|
||||
expanded = expand_macros(ast_copy, macro_defs, false)
|
||||
until expanded == false
|
||||
|
||||
else
|
||||
error("Unexpected top-level AST type: "..ast.type)
|
||||
end
|
||||
|
||||
return ast
|
||||
end
|
||||
|
||||
@@ -309,7 +326,12 @@ end
|
||||
function compiler.compile_filter(name, source, macro_defs, list_defs)
|
||||
|
||||
for name, items in pairs(list_defs) do
|
||||
source = string.gsub(source, name, table.concat(items, ", "))
|
||||
local begin_name_pat = "^("..name..")([%s(),=])"
|
||||
local mid_name_pat = "([%s(),=])("..name..")([%s(),=])"
|
||||
local end_name_pat = "([%s(),=])("..name..")$"
|
||||
source = string.gsub(source, begin_name_pat, table.concat(items, ", ").."%2")
|
||||
source = string.gsub(source, mid_name_pat, "%1"..table.concat(items, ", ").."%3")
|
||||
source = string.gsub(source, end_name_pat, "%1"..table.concat(items, ", "))
|
||||
end
|
||||
|
||||
local ast, error_msg = parser.parse_filter(source)
|
||||
|
||||
@@ -127,10 +127,11 @@ function trim(s)
|
||||
if (type(s) ~= "string") then return s end
|
||||
return (s:gsub("^%s*(.-)%s*$", "%1"))
|
||||
end
|
||||
parser.trim = trim
|
||||
|
||||
local function terminal (tag)
|
||||
-- Rather than trim the whitespace in this way, it would be nicer to exclude it from the capture...
|
||||
return token(V(tag), tag) / function (tok) return { type = tag, value = trim(tok)} end
|
||||
return token(V(tag), tag) / function (tok) val = tok; if tag ~= "String" then val = trim(tok) end; return { type = tag, value = val} end
|
||||
end
|
||||
|
||||
local function unaryboolop (op, e)
|
||||
@@ -237,7 +238,7 @@ local G = {
|
||||
Identifier = V"idStart" * V"idRest"^0;
|
||||
Macro = V"idStart" * V"idRest"^0 * -P".";
|
||||
Int = digit^1;
|
||||
PathString = (alnum + S'-_/*?')^1;
|
||||
PathString = (alnum + S'.-_/*?')^1;
|
||||
Index = V"Int" + V"PathString";
|
||||
FieldName = V"Identifier" * (P"." + V"Identifier")^1 * (P"[" * V"Index" * P"]")^-1;
|
||||
Name = C(V"Identifier") * -V"idRest";
|
||||
|
||||
@@ -58,6 +58,17 @@ function map(f, arr)
|
||||
return res
|
||||
end
|
||||
|
||||
priorities = {"Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"}
|
||||
|
||||
local function priority_num_for(s)
|
||||
s = string.lower(s)
|
||||
for i,v in ipairs(priorities) do
|
||||
if (string.find(string.lower(v), "^"..s)) then
|
||||
return i - 1 -- (numbers start at 0, lua indices start at 1)
|
||||
end
|
||||
end
|
||||
error("Invalid priority level: "..s)
|
||||
end
|
||||
|
||||
--[[
|
||||
Take a filter AST and set it up in the libsinsp runtime, using the filter API.
|
||||
@@ -121,7 +132,16 @@ end
|
||||
-- object. The by_name index is used for things like describing rules,
|
||||
-- and the by_idx index is used to map the relational node index back
|
||||
-- to a rule.
|
||||
local state = {macros={}, lists={}, filter_ast=nil, rules_by_name={}, n_rules=0, rules_by_idx={}}
|
||||
local state = {macros={}, lists={}, filter_ast=nil, rules_by_name={}, macros_by_name={}, lists_by_name={},
|
||||
n_rules=0, rules_by_idx={}, ordered_rule_names={}, ordered_macro_names={}, ordered_list_names={}}
|
||||
|
||||
local function reset_rules(rules_mgr)
|
||||
falco_rules.clear_filters(rules_mgr)
|
||||
state.n_rules = 0
|
||||
state.rules_by_idx = {}
|
||||
state.macros = {}
|
||||
state.lists = {}
|
||||
end
|
||||
|
||||
-- From http://lua-users.org/wiki/TableUtils
|
||||
--
|
||||
@@ -162,7 +182,7 @@ function table.tostring( tbl )
|
||||
end
|
||||
|
||||
|
||||
function load_rules(rules_content, rules_mgr, verbose, all_events, extra, replace_container_info)
|
||||
function load_rules(rules_content, rules_mgr, verbose, all_events, extra, replace_container_info, min_priority)
|
||||
|
||||
compiler.set_verbose(verbose)
|
||||
compiler.set_all_events(all_events)
|
||||
@@ -178,34 +198,75 @@ function load_rules(rules_content, rules_mgr, verbose, all_events, extra, replac
|
||||
error("Rules content \""..rules_content.."\" is not yaml")
|
||||
end
|
||||
|
||||
for i,v in ipairs(rules) do -- iterate over yaml list
|
||||
-- Iterate over yaml list. In this pass, all we're doing is
|
||||
-- populating the set of rules, macros, and lists. We're not
|
||||
-- expanding/compiling anything yet. All that will happen in a
|
||||
-- second pass
|
||||
for i,v in ipairs(rules) do
|
||||
|
||||
if (not (type(v) == "table")) then
|
||||
error ("Unexpected element of type " ..type(v)..". Each element should be a yaml associative array.")
|
||||
end
|
||||
|
||||
if (v['macro']) then
|
||||
local ast = compiler.compile_macro(v['condition'], state.lists)
|
||||
state.macros[v['macro']] = ast.filter.value
|
||||
if state.macros_by_name[v['macro']] == nil then
|
||||
state.ordered_macro_names[#state.ordered_macro_names+1] = v['macro']
|
||||
end
|
||||
|
||||
elseif (v['list']) then
|
||||
-- list items are represented in yaml as a native list, so no
|
||||
-- parsing necessary
|
||||
local items = {}
|
||||
|
||||
-- List items may be references to other lists, so go through
|
||||
-- the items and expand any references to the items in the list
|
||||
for i, item in ipairs(v['items']) do
|
||||
if (state.lists[item] == nil) then
|
||||
items[#items+1] = item
|
||||
else
|
||||
for i, exp_item in ipairs(state.lists[item]) do
|
||||
items[#items+1] = exp_item
|
||||
end
|
||||
for i, field in ipairs({'condition'}) do
|
||||
if (v[field] == nil) then
|
||||
error ("Missing "..field.." in macro with name "..v['macro'])
|
||||
end
|
||||
end
|
||||
|
||||
state.lists[v['list']] = items
|
||||
-- Possibly append to the condition field of an existing macro
|
||||
append = false
|
||||
|
||||
if v['append'] then
|
||||
append = v['append']
|
||||
end
|
||||
|
||||
if append then
|
||||
if state.macros_by_name[v['macro']] == nil then
|
||||
error ("Macro " ..v['macro'].. " has 'append' key but no macro by that name already exists")
|
||||
end
|
||||
|
||||
state.macros_by_name[v['macro']]['condition'] = state.macros_by_name[v['macro']]['condition'] .. " " .. v['condition']
|
||||
|
||||
else
|
||||
state.macros_by_name[v['macro']] = v
|
||||
end
|
||||
|
||||
elseif (v['list']) then
|
||||
|
||||
if state.lists_by_name[v['list']] == nil then
|
||||
state.ordered_list_names[#state.ordered_list_names+1] = v['list']
|
||||
end
|
||||
|
||||
for i, field in ipairs({'items'}) do
|
||||
if (v[field] == nil) then
|
||||
error ("Missing "..field.." in list with name "..v['list'])
|
||||
end
|
||||
end
|
||||
|
||||
-- Possibly append to an existing list
|
||||
append = false
|
||||
|
||||
if v['append'] then
|
||||
append = v['append']
|
||||
end
|
||||
|
||||
if append then
|
||||
if state.lists_by_name[v['list']] == nil then
|
||||
error ("List " ..v['list'].. " has 'append' key but no list by that name already exists")
|
||||
end
|
||||
|
||||
for i, elem in ipairs(v['items']) do
|
||||
table.insert(state.lists_by_name[v['list']]['items'], elem)
|
||||
end
|
||||
else
|
||||
state.lists_by_name[v['list']] = v
|
||||
end
|
||||
|
||||
elseif (v['rule']) then
|
||||
|
||||
@@ -213,88 +274,175 @@ function load_rules(rules_content, rules_mgr, verbose, all_events, extra, replac
|
||||
error ("Missing name in rule")
|
||||
end
|
||||
|
||||
for i, field in ipairs({'condition', 'output', 'desc', 'priority'}) do
|
||||
if (v[field] == nil) then
|
||||
error ("Missing "..field.." in rule with name "..v['rule'])
|
||||
end
|
||||
-- Possibly append to the condition field of an existing rule
|
||||
append = false
|
||||
|
||||
if v['append'] then
|
||||
append = v['append']
|
||||
end
|
||||
|
||||
state.rules_by_name[v['rule']] = v
|
||||
if append then
|
||||
|
||||
local filter_ast, evttypes = compiler.compile_filter(v['rule'], v['condition'],
|
||||
state.macros, state.lists)
|
||||
|
||||
if (filter_ast.type == "Rule") then
|
||||
state.n_rules = state.n_rules + 1
|
||||
|
||||
state.rules_by_idx[state.n_rules] = v
|
||||
|
||||
-- Store the index of this formatter in each relational expression that
|
||||
-- this rule contains.
|
||||
-- This index will eventually be stamped in events passing this rule, and
|
||||
-- we'll use it later to determine which output to display when we get an
|
||||
-- event.
|
||||
mark_relational_nodes(filter_ast.filter.value, state.n_rules)
|
||||
|
||||
install_filter(filter_ast.filter.value)
|
||||
|
||||
-- Pass the filter and event types back up
|
||||
falco_rules.add_filter(rules_mgr, v['rule'], evttypes)
|
||||
|
||||
-- Rule ASTs are merged together into one big AST, with "OR" between each
|
||||
-- rule.
|
||||
if (state.filter_ast == nil) then
|
||||
state.filter_ast = filter_ast.filter.value
|
||||
else
|
||||
state.filter_ast = { type = "BinaryBoolOp", operator = "or", left = state.filter_ast, right = filter_ast.filter.value }
|
||||
end
|
||||
|
||||
-- Enable/disable the rule
|
||||
if (v['enabled'] == nil) then
|
||||
v['enabled'] = true
|
||||
end
|
||||
|
||||
if (v['enabled'] == false) then
|
||||
falco_rules.enable_rule(rules_mgr, v['rule'], 0)
|
||||
end
|
||||
|
||||
-- If the format string contains %container.info, replace it
|
||||
-- with extra. Otherwise, add extra onto the end of the format
|
||||
-- string.
|
||||
if string.find(v['output'], "%container.info", nil, true) ~= nil then
|
||||
|
||||
-- There may not be any extra, or we're not supposed
|
||||
-- to replace it, in which case we use the generic
|
||||
-- "%container.name (id=%container.id)"
|
||||
if replace_container_info == false then
|
||||
v['output'] = string.gsub(v['output'], "%%container.info", "%%container.name (id=%%container.id)")
|
||||
if extra ~= "" then
|
||||
v['output'] = v['output'].." "..extra
|
||||
end
|
||||
else
|
||||
safe_extra = string.gsub(extra, "%%", "%%%%")
|
||||
v['output'] = string.gsub(v['output'], "%%container.info", safe_extra)
|
||||
-- For append rules, all you need is the condition
|
||||
for i, field in ipairs({'condition'}) do
|
||||
if (v[field] == nil) then
|
||||
error ("Missing "..field.." in rule with name "..v['rule'])
|
||||
end
|
||||
else
|
||||
-- Just add the extra to the end
|
||||
if extra ~= "" then
|
||||
v['output'] = v['output'].." "..extra
|
||||
end
|
||||
end
|
||||
|
||||
-- Ensure that the output field is properly formatted by
|
||||
-- creating a formatter from it. Any error will be thrown
|
||||
-- up to the top level.
|
||||
formatter = formats.formatter(v['output'])
|
||||
formats.free_formatter(formatter)
|
||||
if state.rules_by_name[v['rule']] == nil then
|
||||
error ("Rule " ..v['rule'].. " has 'append' key but no rule by that name already exists")
|
||||
end
|
||||
|
||||
state.rules_by_name[v['rule']]['condition'] = state.rules_by_name[v['rule']]['condition'] .. " " .. v['condition']
|
||||
|
||||
else
|
||||
error ("Unexpected type in load_rule: "..filter_ast.type)
|
||||
|
||||
for i, field in ipairs({'condition', 'output', 'desc', 'priority'}) do
|
||||
if (v[field] == nil) then
|
||||
error ("Missing "..field.." in rule with name "..v['rule'])
|
||||
end
|
||||
end
|
||||
|
||||
-- Convert the priority-as-string to a priority-as-number now
|
||||
v['priority_num'] = priority_num_for(v['priority'])
|
||||
|
||||
if v['priority_num'] <= min_priority then
|
||||
-- Note that we can overwrite rules, but the rules are still
|
||||
-- loaded in the order in which they first appeared,
|
||||
-- potentially across multiple files.
|
||||
if state.rules_by_name[v['rule']] == nil then
|
||||
state.ordered_rule_names[#state.ordered_rule_names+1] = v['rule']
|
||||
end
|
||||
|
||||
-- The output field might be a folded-style, which adds a
|
||||
-- newline to the end. Remove any trailing newlines.
|
||||
v['output'] = compiler.trim(v['output'])
|
||||
|
||||
state.rules_by_name[v['rule']] = v
|
||||
end
|
||||
end
|
||||
else
|
||||
error ("Unknown rule object: "..table.tostring(v))
|
||||
end
|
||||
end
|
||||
|
||||
-- We've now loaded all the rules, macros, and list. Now
|
||||
-- compile/expand the rules, macros, and lists. We use
|
||||
-- ordered_rule_{lists,macros,names} to compile them in the order
|
||||
-- in which they appeared in the file(s).
|
||||
reset_rules(rules_mgr)
|
||||
|
||||
for i, name in ipairs(state.ordered_list_names) do
|
||||
|
||||
local v = state.lists_by_name[name]
|
||||
|
||||
-- list items are represented in yaml as a native list, so no
|
||||
-- parsing necessary
|
||||
local items = {}
|
||||
|
||||
-- List items may be references to other lists, so go through
|
||||
-- the items and expand any references to the items in the list
|
||||
for i, item in ipairs(v['items']) do
|
||||
if (state.lists[item] == nil) then
|
||||
items[#items+1] = item
|
||||
else
|
||||
for i, exp_item in ipairs(state.lists[item]) do
|
||||
items[#items+1] = exp_item
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
state.lists[v['list']] = items
|
||||
end
|
||||
|
||||
for i, name in ipairs(state.ordered_macro_names) do
|
||||
|
||||
local v = state.macros_by_name[name]
|
||||
|
||||
local ast = compiler.compile_macro(v['condition'], state.macros, state.lists)
|
||||
state.macros[v['macro']] = ast.filter.value
|
||||
end
|
||||
|
||||
for i, name in ipairs(state.ordered_rule_names) do
|
||||
|
||||
local v = state.rules_by_name[name]
|
||||
|
||||
local filter_ast, evttypes = compiler.compile_filter(v['rule'], v['condition'],
|
||||
state.macros, state.lists)
|
||||
|
||||
if (filter_ast.type == "Rule") then
|
||||
state.n_rules = state.n_rules + 1
|
||||
|
||||
state.rules_by_idx[state.n_rules] = v
|
||||
|
||||
-- Store the index of this formatter in each relational expression that
|
||||
-- this rule contains.
|
||||
-- This index will eventually be stamped in events passing this rule, and
|
||||
-- we'll use it later to determine which output to display when we get an
|
||||
-- event.
|
||||
mark_relational_nodes(filter_ast.filter.value, state.n_rules)
|
||||
|
||||
install_filter(filter_ast.filter.value)
|
||||
|
||||
if (v['tags'] == nil) then
|
||||
v['tags'] = {}
|
||||
end
|
||||
|
||||
-- Pass the filter and event types back up
|
||||
falco_rules.add_filter(rules_mgr, v['rule'], evttypes, v['tags'])
|
||||
|
||||
-- Rule ASTs are merged together into one big AST, with "OR" between each
|
||||
-- rule.
|
||||
if (state.filter_ast == nil) then
|
||||
state.filter_ast = filter_ast.filter.value
|
||||
else
|
||||
state.filter_ast = { type = "BinaryBoolOp", operator = "or", left = state.filter_ast, right = filter_ast.filter.value }
|
||||
end
|
||||
|
||||
-- Enable/disable the rule
|
||||
if (v['enabled'] == nil) then
|
||||
v['enabled'] = true
|
||||
end
|
||||
|
||||
if (v['enabled'] == false) then
|
||||
falco_rules.enable_rule(rules_mgr, v['rule'], 0)
|
||||
end
|
||||
|
||||
-- If the format string contains %container.info, replace it
|
||||
-- with extra. Otherwise, add extra onto the end of the format
|
||||
-- string.
|
||||
if string.find(v['output'], "%container.info", nil, true) ~= nil then
|
||||
|
||||
-- There may not be any extra, or we're not supposed
|
||||
-- to replace it, in which case we use the generic
|
||||
-- "%container.name (id=%container.id)"
|
||||
if replace_container_info == false then
|
||||
v['output'] = string.gsub(v['output'], "%%container.info", "%%container.name (id=%%container.id)")
|
||||
if extra ~= "" then
|
||||
v['output'] = v['output'].." "..extra
|
||||
end
|
||||
else
|
||||
safe_extra = string.gsub(extra, "%%", "%%%%")
|
||||
v['output'] = string.gsub(v['output'], "%%container.info", safe_extra)
|
||||
end
|
||||
else
|
||||
-- Just add the extra to the end
|
||||
if extra ~= "" then
|
||||
v['output'] = v['output'].." "..extra
|
||||
end
|
||||
end
|
||||
|
||||
-- Ensure that the output field is properly formatted by
|
||||
-- creating a formatter from it. Any error will be thrown
|
||||
-- up to the top level.
|
||||
formatter = formats.formatter(v['output'])
|
||||
formats.free_formatter(formatter)
|
||||
else
|
||||
error ("Unexpected type in load_rule: "..filter_ast.type)
|
||||
end
|
||||
end
|
||||
|
||||
io.flush()
|
||||
end
|
||||
|
||||
@@ -369,7 +517,10 @@ function on_event(evt_, rule_id)
|
||||
rule_output_counts.by_name[rule.rule] = rule_output_counts.by_name[rule.rule] + 1
|
||||
end
|
||||
|
||||
return rule.rule, rule.priority, rule.output
|
||||
-- Prefix output with '*' so formatting is permissive
|
||||
output = "*"..rule.output
|
||||
|
||||
return rule.rule, rule.priority_num, output
|
||||
end
|
||||
|
||||
function print_stats()
|
||||
|
||||
@@ -28,6 +28,7 @@ extern "C" {
|
||||
#include "falco_engine.h"
|
||||
const static struct luaL_reg ll_falco_rules [] =
|
||||
{
|
||||
{"clear_filters", &falco_rules::clear_filters},
|
||||
{"add_filter", &falco_rules::add_filter},
|
||||
{"enable_rule", &falco_rules::enable_rule},
|
||||
{NULL,NULL}
|
||||
@@ -44,44 +45,77 @@ void falco_rules::init(lua_State *ls)
|
||||
luaL_openlib(ls, "falco_rules", ll_falco_rules, 0);
|
||||
}
|
||||
|
||||
int falco_rules::add_filter(lua_State *ls)
|
||||
int falco_rules::clear_filters(lua_State *ls)
|
||||
{
|
||||
if (! lua_islightuserdata(ls, -3) ||
|
||||
! lua_isstring(ls, -2) ||
|
||||
! lua_istable(ls, -1))
|
||||
if (! lua_islightuserdata(ls, -1))
|
||||
{
|
||||
throw falco_exception("Invalid arguments passed to add_filter()\n");
|
||||
lua_pushstring(ls, "Invalid arguments passed to clear_filters()");
|
||||
lua_error(ls);
|
||||
}
|
||||
|
||||
falco_rules *rules = (falco_rules *) lua_topointer(ls, -3);
|
||||
const char *rulec = lua_tostring(ls, -2);
|
||||
falco_rules *rules = (falco_rules *) lua_topointer(ls, -1);
|
||||
rules->clear_filters();
|
||||
|
||||
list<uint32_t> evttypes;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void falco_rules::clear_filters()
|
||||
{
|
||||
m_engine->clear_filters();
|
||||
}
|
||||
|
||||
int falco_rules::add_filter(lua_State *ls)
|
||||
{
|
||||
if (! lua_islightuserdata(ls, -4) ||
|
||||
! lua_isstring(ls, -3) ||
|
||||
! lua_istable(ls, -2) ||
|
||||
! lua_istable(ls, -1))
|
||||
{
|
||||
lua_pushstring(ls, "Invalid arguments passed to add_filter()");
|
||||
lua_error(ls);
|
||||
}
|
||||
|
||||
falco_rules *rules = (falco_rules *) lua_topointer(ls, -4);
|
||||
const char *rulec = lua_tostring(ls, -3);
|
||||
|
||||
set<uint32_t> evttypes;
|
||||
|
||||
lua_pushnil(ls); /* first key */
|
||||
while (lua_next(ls, -3) != 0) {
|
||||
// key is at index -2, value is at index
|
||||
// -1. We want the keys.
|
||||
evttypes.insert(luaL_checknumber(ls, -2));
|
||||
|
||||
// Remove value, keep key for next iteration
|
||||
lua_pop(ls, 1);
|
||||
}
|
||||
|
||||
set<string> tags;
|
||||
|
||||
lua_pushnil(ls); /* first key */
|
||||
while (lua_next(ls, -2) != 0) {
|
||||
// key is at index -2, value is at index
|
||||
// -1. We want the keys.
|
||||
evttypes.push_back(luaL_checknumber(ls, -2));
|
||||
tags.insert(lua_tostring(ls, -1));
|
||||
|
||||
// Remove value, keep key for next iteration
|
||||
lua_pop(ls, 1);
|
||||
}
|
||||
|
||||
std::string rule = rulec;
|
||||
rules->add_filter(rule, evttypes);
|
||||
rules->add_filter(rule, evttypes, tags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void falco_rules::add_filter(string &rule, list<uint32_t> &evttypes)
|
||||
void falco_rules::add_filter(string &rule, set<uint32_t> &evttypes, set<string> &tags)
|
||||
{
|
||||
// While the current rule was being parsed, a sinsp_filter
|
||||
// object was being populated by lua_parser. Grab that filter
|
||||
// and pass it to the engine.
|
||||
sinsp_filter *filter = m_lua_parser->get_filter(true);
|
||||
|
||||
m_engine->add_evttype_filter(rule, evttypes, filter);
|
||||
m_engine->add_evttype_filter(rule, evttypes, tags, filter);
|
||||
}
|
||||
|
||||
int falco_rules::enable_rule(lua_State *ls)
|
||||
@@ -90,7 +124,8 @@ int falco_rules::enable_rule(lua_State *ls)
|
||||
! lua_isstring(ls, -2) ||
|
||||
! lua_isnumber(ls, -1))
|
||||
{
|
||||
throw falco_exception("Invalid arguments passed to enable_rule()\n");
|
||||
lua_pushstring(ls, "Invalid arguments passed to enable_rule()");
|
||||
lua_error(ls);
|
||||
}
|
||||
|
||||
falco_rules *rules = (falco_rules *) lua_topointer(ls, -3);
|
||||
@@ -110,7 +145,8 @@ void falco_rules::enable_rule(string &rule, bool enabled)
|
||||
|
||||
void falco_rules::load_rules(const string &rules_content,
|
||||
bool verbose, bool all_events,
|
||||
string &extra, bool replace_container_info)
|
||||
string &extra, bool replace_container_info,
|
||||
falco_common::priority_type min_priority)
|
||||
{
|
||||
lua_getglobal(m_ls, m_lua_load_rules.c_str());
|
||||
if(lua_isfunction(m_ls, -1))
|
||||
@@ -186,7 +222,8 @@ void falco_rules::load_rules(const string &rules_content,
|
||||
lua_pushboolean(m_ls, (all_events ? 1 : 0));
|
||||
lua_pushstring(m_ls, extra.c_str());
|
||||
lua_pushboolean(m_ls, (replace_container_info ? 1 : 0));
|
||||
if(lua_pcall(m_ls, 6, 0, 0) != 0)
|
||||
lua_pushnumber(m_ls, min_priority);
|
||||
if(lua_pcall(m_ls, 7, 0, 0) != 0)
|
||||
{
|
||||
const char* lerr = lua_tostring(m_ls, -1);
|
||||
string err = "Error loading rules:" + string(lerr);
|
||||
|
||||
@@ -18,12 +18,14 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <list>
|
||||
#include <set>
|
||||
|
||||
#include "sinsp.h"
|
||||
|
||||
#include "lua_parser.h"
|
||||
|
||||
#include "falco_common.h"
|
||||
|
||||
class falco_engine;
|
||||
|
||||
class falco_rules
|
||||
@@ -32,15 +34,18 @@ class falco_rules
|
||||
falco_rules(sinsp* inspector, falco_engine *engine, lua_State *ls);
|
||||
~falco_rules();
|
||||
void load_rules(const string &rules_content, bool verbose, bool all_events,
|
||||
std::string &extra, bool replace_container_info);
|
||||
std::string &extra, bool replace_container_info,
|
||||
falco_common::priority_type min_priority);
|
||||
void describe_rule(string *rule);
|
||||
|
||||
static void init(lua_State *ls);
|
||||
static int clear_filters(lua_State *ls);
|
||||
static int add_filter(lua_State *ls);
|
||||
static int enable_rule(lua_State *ls);
|
||||
|
||||
private:
|
||||
void add_filter(string &rule, list<uint32_t> &evttypes);
|
||||
void clear_filters();
|
||||
void add_filter(string &rule, std::set<uint32_t> &evttypes, std::set<string> &tags);
|
||||
void enable_rule(string &rule, bool enabled);
|
||||
|
||||
lua_parser* m_lua_parser;
|
||||
|
||||
91
userspace/engine/token_bucket.cpp
Normal file
91
userspace/engine/token_bucket.cpp
Normal file
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
Copyright (C) 2016 Draios inc.
|
||||
|
||||
This file is part of falco.
|
||||
|
||||
falco is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License version 2 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
falco is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <cstddef>
|
||||
#include <sys/time.h>
|
||||
|
||||
#include "utils.h"
|
||||
#include "token_bucket.h"
|
||||
|
||||
token_bucket::token_bucket()
|
||||
{
|
||||
init(1, 1);
|
||||
}
|
||||
|
||||
token_bucket::~token_bucket()
|
||||
{
|
||||
}
|
||||
|
||||
void token_bucket::init(double rate, double max_tokens, uint64_t now)
|
||||
{
|
||||
m_rate = rate;
|
||||
m_max_tokens = max_tokens;
|
||||
m_tokens = max_tokens;
|
||||
|
||||
if(now == 0)
|
||||
{
|
||||
now = sinsp_utils::get_current_time_ns();
|
||||
}
|
||||
|
||||
m_last_seen = now;
|
||||
}
|
||||
|
||||
bool token_bucket::claim()
|
||||
{
|
||||
uint64_t now = sinsp_utils::get_current_time_ns();
|
||||
|
||||
return claim(1, now);
|
||||
}
|
||||
|
||||
bool token_bucket::claim(double tokens, uint64_t now)
|
||||
{
|
||||
double tokens_gained = m_rate * ((now - m_last_seen) / (1000000000.0));
|
||||
m_last_seen = now;
|
||||
|
||||
m_tokens += tokens_gained;
|
||||
|
||||
//
|
||||
// Cap at max_tokens
|
||||
//
|
||||
if(m_tokens > m_max_tokens)
|
||||
{
|
||||
m_tokens = m_max_tokens;
|
||||
}
|
||||
|
||||
//
|
||||
// If m_tokens is < tokens, can't claim.
|
||||
//
|
||||
if(m_tokens < tokens)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
m_tokens -= tokens;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
double token_bucket::get_tokens()
|
||||
{
|
||||
return m_tokens;
|
||||
}
|
||||
|
||||
uint64_t token_bucket::get_last_seen()
|
||||
{
|
||||
return m_last_seen;
|
||||
}
|
||||
77
userspace/engine/token_bucket.h
Normal file
77
userspace/engine/token_bucket.h
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
Copyright (C) 2016 Draios inc.
|
||||
|
||||
This file is part of falco.
|
||||
|
||||
falco is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License version 2 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
falco is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
// A simple token bucket that accumulates tokens at a fixed rate and allows
|
||||
// for limited bursting in the form of "banked" tokens.
|
||||
class token_bucket
|
||||
{
|
||||
public:
|
||||
token_bucket();
|
||||
virtual ~token_bucket();
|
||||
|
||||
//
|
||||
// Initialize the token bucket and start accumulating tokens
|
||||
//
|
||||
void init(double rate, double max_tokens, uint64_t now = 0);
|
||||
|
||||
//
|
||||
// Try to claim tokens tokens from the token bucket, using a
|
||||
// timestamp of now. Returns true if the tokens could be
|
||||
// claimed. Also updates internal metrics.
|
||||
//
|
||||
bool claim(double tokens, uint64_t now);
|
||||
|
||||
// Simpler version of claim that claims a single token and
|
||||
// uses the current time for now
|
||||
bool claim();
|
||||
|
||||
// Return the current number of tokens available
|
||||
double get_tokens();
|
||||
|
||||
// Return the last time someone tried to claim a token.
|
||||
uint64_t get_last_seen();
|
||||
|
||||
private:
|
||||
|
||||
//
|
||||
// The number of tokens generated per second.
|
||||
//
|
||||
double m_rate;
|
||||
|
||||
//
|
||||
// The maximum number of tokens that can be banked for future
|
||||
// claim()s.
|
||||
//
|
||||
double m_max_tokens;
|
||||
|
||||
//
|
||||
// The current number of tokens
|
||||
//
|
||||
double m_tokens;
|
||||
|
||||
//
|
||||
// The last time claim() was called (or the object was created).
|
||||
// Nanoseconds since the epoch.
|
||||
//
|
||||
uint64_t m_last_seen;
|
||||
};
|
||||
|
||||
@@ -9,7 +9,7 @@ include_directories("${CURL_INCLUDE_DIR}")
|
||||
include_directories("${YAMLCPP_INCLUDE_DIR}")
|
||||
include_directories("${DRAIOS_DEPENDENCIES_DIR}/yaml-${DRAIOS_YAML_VERSION}/target/include")
|
||||
|
||||
add_executable(falco configuration.cpp logger.cpp falco_outputs.cpp falco.cpp)
|
||||
add_executable(falco configuration.cpp logger.cpp falco_outputs.cpp statsfilewriter.cpp falco.cpp)
|
||||
|
||||
target_link_libraries(falco falco_engine sinsp)
|
||||
target_link_libraries(falco
|
||||
@@ -19,7 +19,7 @@ target_link_libraries(falco
|
||||
|
||||
configure_file(config_falco.h.in config_falco.h)
|
||||
|
||||
install(TARGETS falco DESTINATION bin)
|
||||
install(TARGETS falco DESTINATION ${FALCO_BIN_DIR})
|
||||
install(DIRECTORY lua
|
||||
DESTINATION share/falco
|
||||
DESTINATION ${FALCO_SHARE_DIR}
|
||||
FILES_MATCHING PATTERN *.lua)
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
#define FALCO_LUA_DIR "${CMAKE_INSTALL_PREFIX}/${FALCO_SHARE_DIR}/lua/"
|
||||
#define FALCO_SOURCE_DIR "${PROJECT_SOURCE_DIR}"
|
||||
#define FALCO_SOURCE_CONF_FILE "${PROJECT_SOURCE_DIR}/falco.yaml"
|
||||
#define FALCO_INSTALL_CONF_FILE "/etc/falco.yaml"
|
||||
#define FALCO_INSTALL_CONF_FILE "/etc/falco/falco.yaml"
|
||||
#define FALCO_SOURCE_LUA_DIR "${PROJECT_SOURCE_DIR}/userspace/falco/lua/"
|
||||
|
||||
#define PROBE_NAME "${PROBE_NAME}"
|
||||
|
||||
@@ -22,7 +22,8 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
using namespace std;
|
||||
|
||||
falco_configuration::falco_configuration()
|
||||
: m_config(NULL)
|
||||
: m_buffered_outputs(true),
|
||||
m_config(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -51,20 +52,37 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
|
||||
|
||||
init_cmdline_options(cmdline_options);
|
||||
|
||||
m_rules_filenames.push_back(m_config->get_scalar<string>("rules_file", "/etc/falco_rules.yaml"));
|
||||
list<string> rules_files;
|
||||
|
||||
m_config->get_sequence<list<string>>(rules_files, string("rules_file"));
|
||||
|
||||
for(auto &file : rules_files)
|
||||
{
|
||||
// Here, we only include files that exist
|
||||
struct stat buffer;
|
||||
if(stat(file.c_str(), &buffer) == 0)
|
||||
{
|
||||
m_rules_filenames.push_back(file);
|
||||
}
|
||||
}
|
||||
|
||||
m_json_output = m_config->get_scalar<bool>("json_output", false);
|
||||
|
||||
falco_outputs::output_config file_output;
|
||||
file_output.name = "file";
|
||||
if (m_config->get_scalar<bool>("file_output", "enabled", false))
|
||||
{
|
||||
string filename;
|
||||
string filename, keep_alive;
|
||||
filename = m_config->get_scalar<string>("file_output", "filename", "");
|
||||
if (filename == string(""))
|
||||
{
|
||||
throw invalid_argument("Error reading config file (" + m_config_file + "): file output enabled but no filename in configuration block");
|
||||
}
|
||||
file_output.options["filename"] = filename;
|
||||
|
||||
keep_alive = m_config->get_scalar<string>("file_output", "keep_alive", "");
|
||||
file_output.options["keep_alive"] = keep_alive;
|
||||
|
||||
m_outputs.push_back(file_output);
|
||||
}
|
||||
|
||||
@@ -86,13 +104,17 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
|
||||
program_output.name = "program";
|
||||
if (m_config->get_scalar<bool>("program_output", "enabled", false))
|
||||
{
|
||||
string program;
|
||||
string program, keep_alive;
|
||||
program = m_config->get_scalar<string>("program_output", "program", "");
|
||||
if (program == string(""))
|
||||
{
|
||||
throw sinsp_exception("Error reading config file (" + m_config_file + "): program output enabled but no program in configuration block");
|
||||
}
|
||||
program_output.options["program"] = program;
|
||||
|
||||
keep_alive = m_config->get_scalar<string>("program_output", "keep_alive", "");
|
||||
program_output.options["keep_alive"] = keep_alive;
|
||||
|
||||
m_outputs.push_back(program_output);
|
||||
}
|
||||
|
||||
@@ -101,6 +123,28 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
|
||||
throw invalid_argument("Error reading config file (" + m_config_file + "): No outputs configured. Please configure at least one output file output enabled but no filename in configuration block");
|
||||
}
|
||||
|
||||
string log_level = m_config->get_scalar<string>("log_level", "info");
|
||||
|
||||
falco_logger::set_level(log_level);
|
||||
|
||||
m_notifications_rate = m_config->get_scalar<uint32_t>("outputs", "rate", 1);
|
||||
m_notifications_max_burst = m_config->get_scalar<uint32_t>("outputs", "max_burst", 1000);
|
||||
|
||||
string priority = m_config->get_scalar<string>("priority", "debug");
|
||||
vector<string>::iterator it;
|
||||
|
||||
auto comp = [priority] (string &s) {
|
||||
return (strcasecmp(s.c_str(), priority.c_str()) == 0);
|
||||
};
|
||||
|
||||
if((it = std::find_if(falco_common::priority_names.begin(), falco_common::priority_names.end(), comp)) == falco_common::priority_names.end())
|
||||
{
|
||||
throw invalid_argument("Unknown priority \"" + priority + "\"--must be one of emergency, alert, critical, error, warning, notice, informational, debug");
|
||||
}
|
||||
m_min_priority = (falco_common::priority_type) (it - falco_common::priority_names.begin());
|
||||
|
||||
m_buffered_outputs = m_config->get_scalar<bool>("buffered_outputs", true);
|
||||
|
||||
falco_logger::log_stderr = m_config->get_scalar<bool>("log_stderr", false);
|
||||
falco_logger::log_syslog = m_config->get_scalar<bool>("log_syslog", true);
|
||||
}
|
||||
|
||||
@@ -18,6 +18,9 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <yaml-cpp/yaml.h>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
@@ -127,6 +130,27 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
// called with the last variadic arg (where the sequence is expected to be found)
|
||||
template <typename T>
|
||||
void get_sequence(T& ret, const std::string& name)
|
||||
{
|
||||
YAML::Node child_node = m_root[name];
|
||||
if(child_node.IsDefined())
|
||||
{
|
||||
if(child_node.IsSequence())
|
||||
{
|
||||
for(const YAML::Node& item : child_node)
|
||||
{
|
||||
ret.insert(ret.end(), item.as<typename T::value_type>());
|
||||
}
|
||||
}
|
||||
else if(child_node.IsScalar())
|
||||
{
|
||||
ret.insert(ret.end(), child_node.as<typename T::value_type>());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
YAML::Node m_root;
|
||||
};
|
||||
@@ -144,6 +168,12 @@ class falco_configuration
|
||||
std::list<std::string> m_rules_filenames;
|
||||
bool m_json_output;
|
||||
std::vector<falco_outputs::output_config> m_outputs;
|
||||
uint32_t m_notifications_rate;
|
||||
uint32_t m_notifications_max_burst;
|
||||
|
||||
falco_common::priority_type m_min_priority;
|
||||
|
||||
bool m_buffered_outputs;
|
||||
private:
|
||||
void init_cmdline_options(std::list<std::string> &cmdline_options);
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
#include "configuration.h"
|
||||
#include "falco_engine.h"
|
||||
#include "config_falco.h"
|
||||
#include "statsfilewriter.h"
|
||||
|
||||
bool g_terminate = false;
|
||||
//
|
||||
@@ -52,6 +53,7 @@ static void signal_callback(int signal)
|
||||
static void usage()
|
||||
{
|
||||
printf(
|
||||
"falco version " FALCO_VERSION "\n"
|
||||
"Usage: falco [options]\n\n"
|
||||
"Options:\n"
|
||||
" -h, --help Print this page\n"
|
||||
@@ -59,6 +61,7 @@ static void usage()
|
||||
" -A Monitor all events, including those with EF_DROP_FALCO flag.\n"
|
||||
" -d, --daemon Run as a daemon\n"
|
||||
" -D <pattern> Disable any rules matching the regex <pattern>. Can be specified multiple times.\n"
|
||||
" Can not be specified with -t.\n"
|
||||
" -e <events_file> Read the events from <events_file> (in .scap format) instead of tapping into live.\n"
|
||||
" -k <url>, --k8s-api=<url>\n"
|
||||
" Enable Kubernetes support by connecting to the API server\n"
|
||||
@@ -84,6 +87,7 @@ static void usage()
|
||||
" Marathon url is optional and defaults to Mesos address, port 8080.\n"
|
||||
" The API servers can also be specified via the environment variable\n"
|
||||
" FALCO_MESOS_API.\n"
|
||||
" -M <num_seconds> Stop collecting after <num_seconds> reached.\n"
|
||||
" -o, --option <key>=<val> Set the value of option <key> to <val>. Overrides values in configuration file.\n"
|
||||
" <key> can be a two-part <key>.<subkey>\n"
|
||||
" -p <output_format>, --print=<output_format>\n"
|
||||
@@ -97,7 +101,19 @@ static void usage()
|
||||
" -P, --pidfile <pid_file> When run as a daemon, write pid to specified file\n"
|
||||
" -r <rules_file> Rules file (defaults to value set in configuration file, or /etc/falco_rules.yaml).\n"
|
||||
" Can be specified multiple times to read from multiple files.\n"
|
||||
" -s <stats_file> If specified, write statistics related to falco's reading/processing of events\n"
|
||||
" to this file. (Only useful in live mode).\n"
|
||||
" -T <tag> Disable any rules with a tag=<tag>. Can be specified multiple times.\n"
|
||||
" Can not be specified with -t.\n"
|
||||
" -t <tag> Only run those rules with a tag=<tag>. Can be specified multiple times.\n"
|
||||
" Can not be specified with -T/-D.\n"
|
||||
" -U,--unbuffered Turn off output buffering to configured outputs. This causes every\n"
|
||||
" single line emitted by falco to be flushed, which generates higher CPU\n"
|
||||
" usage but is useful when piping those outputs into another process\n"
|
||||
" or into a script.\n"
|
||||
" -V,--validate <rules_file> Read the contents of the specified rules file and exit\n"
|
||||
" -v Verbose output.\n"
|
||||
" --version Print version number.\n"
|
||||
"\n"
|
||||
);
|
||||
}
|
||||
@@ -124,11 +140,25 @@ std::list<string> cmdline_options;
|
||||
//
|
||||
uint64_t do_inspect(falco_engine *engine,
|
||||
falco_outputs *outputs,
|
||||
sinsp* inspector)
|
||||
sinsp* inspector,
|
||||
uint64_t duration_to_tot_ns,
|
||||
string &stats_filename)
|
||||
{
|
||||
uint64_t num_evts = 0;
|
||||
int32_t res;
|
||||
sinsp_evt* ev;
|
||||
StatsFileWriter writer;
|
||||
uint64_t duration_start = 0;
|
||||
|
||||
if (stats_filename != "")
|
||||
{
|
||||
string errstr;
|
||||
|
||||
if (!writer.init(inspector, stats_filename, 5, errstr))
|
||||
{
|
||||
throw falco_exception(errstr);
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Loop through the events
|
||||
@@ -138,6 +168,8 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
|
||||
res = inspector->next(&ev);
|
||||
|
||||
writer.handle();
|
||||
|
||||
if (g_terminate)
|
||||
{
|
||||
break;
|
||||
@@ -160,6 +192,17 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
throw sinsp_exception(inspector->getlasterr().c_str());
|
||||
}
|
||||
|
||||
if (duration_start == 0)
|
||||
{
|
||||
duration_start = ev->get_ts();
|
||||
} else if(duration_to_tot_ns > 0)
|
||||
{
|
||||
if(ev->get_ts() - duration_start >= duration_to_tot_ns)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if(!inspector->is_debug_enabled() &&
|
||||
ev->get_category() & EC_INTERNAL)
|
||||
{
|
||||
@@ -174,7 +217,7 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
unique_ptr<falco_engine::rule_result> res = engine->process_event(ev);
|
||||
if(res)
|
||||
{
|
||||
outputs->handle_event(res->evt, res->rule, res->priority, res->format);
|
||||
outputs->handle_event(res->evt, res->rule, res->priority_num, res->format);
|
||||
}
|
||||
|
||||
num_evts++;
|
||||
@@ -202,6 +245,8 @@ int falco_init(int argc, char **argv)
|
||||
string pidfilename = "/var/run/falco.pid";
|
||||
bool describe_all_rules = false;
|
||||
string describe_rule = "";
|
||||
string validate_rules_file = "";
|
||||
string stats_filename = "";
|
||||
bool verbose = false;
|
||||
bool all_events = false;
|
||||
string* k8s_api = 0;
|
||||
@@ -209,6 +254,7 @@ int falco_init(int argc, char **argv)
|
||||
string* mesos_api = 0;
|
||||
string output_format = "";
|
||||
bool replace_container_info = false;
|
||||
int duration_to_tot = 0;
|
||||
|
||||
// Used for writing trace files
|
||||
int duration_seconds = 0;
|
||||
@@ -216,6 +262,8 @@ int falco_init(int argc, char **argv)
|
||||
int file_limit = 0;
|
||||
unsigned long event_limit = 0L;
|
||||
bool compress = false;
|
||||
bool buffered_outputs = true;
|
||||
bool buffered_cmdline = false;
|
||||
|
||||
// Used for stats
|
||||
uint64_t num_evts;
|
||||
@@ -232,6 +280,9 @@ int falco_init(int argc, char **argv)
|
||||
{"option", required_argument, 0, 'o'},
|
||||
{"print", required_argument, 0, 'p' },
|
||||
{"pidfile", required_argument, 0, 'P' },
|
||||
{"unbuffered", no_argument, 0, 'U' },
|
||||
{"version", no_argument, 0, 0 },
|
||||
{"validate", required_argument, 0, 0 },
|
||||
{"writefile", required_argument, 0, 'w' },
|
||||
|
||||
{0, 0, 0, 0}
|
||||
@@ -241,12 +292,15 @@ int falco_init(int argc, char **argv)
|
||||
{
|
||||
set<string> disabled_rule_patterns;
|
||||
string pattern;
|
||||
string all_rules = ".*";
|
||||
set<string> disabled_rule_tags;
|
||||
set<string> enabled_rule_tags;
|
||||
|
||||
//
|
||||
// Parse the args
|
||||
//
|
||||
while((op = getopt_long(argc, argv,
|
||||
"hc:AdD:e:k:K:Ll:m:o:P:p:r:vw:",
|
||||
"hc:AdD:e:k:K:Ll:m:M:o:P:p:r:s:T:t:UvV:w:",
|
||||
long_options, &long_index)) != -1)
|
||||
{
|
||||
switch(op)
|
||||
@@ -287,6 +341,13 @@ int falco_init(int argc, char **argv)
|
||||
case 'm':
|
||||
mesos_api = new string(optarg);
|
||||
break;
|
||||
case 'M':
|
||||
duration_to_tot = atoi(optarg);
|
||||
if(duration_to_tot <= 0)
|
||||
{
|
||||
throw sinsp_exception(string("invalid duration") + optarg);
|
||||
}
|
||||
break;
|
||||
case 'o':
|
||||
cmdline_options.push_back(optarg);
|
||||
break;
|
||||
@@ -318,9 +379,25 @@ int falco_init(int argc, char **argv)
|
||||
case 'r':
|
||||
rules_filenames.push_back(optarg);
|
||||
break;
|
||||
case 's':
|
||||
stats_filename = optarg;
|
||||
break;
|
||||
case 'T':
|
||||
disabled_rule_tags.insert(optarg);
|
||||
break;
|
||||
case 't':
|
||||
enabled_rule_tags.insert(optarg);
|
||||
break;
|
||||
case 'U':
|
||||
buffered_outputs = false;
|
||||
buffered_cmdline = true;
|
||||
break;
|
||||
case 'v':
|
||||
verbose = true;
|
||||
break;
|
||||
case 'V':
|
||||
validate_rules_file = optarg;
|
||||
break;
|
||||
case 'w':
|
||||
outfile = optarg;
|
||||
break;
|
||||
@@ -333,6 +410,13 @@ int falco_init(int argc, char **argv)
|
||||
|
||||
}
|
||||
|
||||
if(string(long_options[long_index].name) == "version")
|
||||
{
|
||||
printf("falco version %s\n", FALCO_VERSION);
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
inspector = new sinsp();
|
||||
engine = new falco_engine();
|
||||
engine->set_inspector(inspector);
|
||||
@@ -376,6 +460,14 @@ int falco_init(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
if(validate_rules_file != "")
|
||||
{
|
||||
falco_logger::log(LOG_INFO, "Validating rules file: " + validate_rules_file + "...\n");
|
||||
engine->load_rules_file(validate_rules_file, verbose, all_events);
|
||||
falco_logger::log(LOG_INFO, "Ok\n");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
falco_configuration config;
|
||||
if (conf_filename.size())
|
||||
{
|
||||
@@ -394,19 +486,61 @@ int falco_init(int argc, char **argv)
|
||||
config.m_rules_filenames = rules_filenames;
|
||||
}
|
||||
|
||||
engine->set_min_priority(config.m_min_priority);
|
||||
|
||||
if(buffered_cmdline)
|
||||
{
|
||||
config.m_buffered_outputs = buffered_outputs;
|
||||
}
|
||||
|
||||
if(config.m_rules_filenames.size() == 0)
|
||||
{
|
||||
throw std::invalid_argument("You must specify at least one rules file via -r or a rules_file entry in falco.yaml");
|
||||
}
|
||||
|
||||
for (auto filename : config.m_rules_filenames)
|
||||
{
|
||||
engine->load_rules_file(filename, verbose, all_events);
|
||||
falco_logger::log(LOG_INFO, "Parsed rules from file " + filename + "\n");
|
||||
}
|
||||
|
||||
// You can't both disable and enable rules
|
||||
if((disabled_rule_patterns.size() + disabled_rule_tags.size() > 0) &&
|
||||
enabled_rule_tags.size() > 0) {
|
||||
throw std::invalid_argument("You can not specify both disabled (-D/-T) and enabled (-t) rules");
|
||||
}
|
||||
|
||||
for (auto pattern : disabled_rule_patterns)
|
||||
{
|
||||
falco_logger::log(LOG_INFO, "Disabling rules matching pattern: " + pattern + "\n");
|
||||
engine->enable_rule(pattern, false);
|
||||
}
|
||||
|
||||
outputs->init(config.m_json_output);
|
||||
if(disabled_rule_tags.size() > 0)
|
||||
{
|
||||
for(auto tag : disabled_rule_tags)
|
||||
{
|
||||
falco_logger::log(LOG_INFO, "Disabling rules with tag: " + tag + "\n");
|
||||
}
|
||||
engine->enable_rule_by_tag(disabled_rule_tags, false);
|
||||
}
|
||||
|
||||
if(enabled_rule_tags.size() > 0)
|
||||
{
|
||||
|
||||
// Since we only want to enable specific
|
||||
// rules, first disable all rules.
|
||||
engine->enable_rule(all_rules, false);
|
||||
for(auto tag : enabled_rule_tags)
|
||||
{
|
||||
falco_logger::log(LOG_INFO, "Enabling rules with tag: " + tag + "\n");
|
||||
}
|
||||
engine->enable_rule_by_tag(enabled_rule_tags, true);
|
||||
}
|
||||
|
||||
outputs->init(config.m_json_output,
|
||||
config.m_notifications_rate, config.m_notifications_max_burst,
|
||||
config.m_buffered_outputs);
|
||||
|
||||
if(!all_events)
|
||||
{
|
||||
@@ -588,7 +722,9 @@ int falco_init(int argc, char **argv)
|
||||
|
||||
num_evts = do_inspect(engine,
|
||||
outputs,
|
||||
inspector);
|
||||
inspector,
|
||||
uint64_t(duration_to_tot*ONE_SECOND_IN_NS),
|
||||
stats_filename);
|
||||
|
||||
duration = ((double)clock()) / CLOCKS_PER_SEC - duration;
|
||||
|
||||
|
||||
@@ -27,16 +27,32 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
using namespace std;
|
||||
|
||||
falco_outputs::falco_outputs()
|
||||
: m_initialized(false),
|
||||
m_buffered(true)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
falco_outputs::~falco_outputs()
|
||||
{
|
||||
if(m_initialized)
|
||||
{
|
||||
lua_getglobal(m_ls, m_lua_output_cleanup.c_str());
|
||||
|
||||
if(!lua_isfunction(m_ls, -1))
|
||||
{
|
||||
throw falco_exception("No function " + m_lua_output_cleanup + " found. ");
|
||||
}
|
||||
|
||||
if(lua_pcall(m_ls, 0, 0, 0) != 0)
|
||||
{
|
||||
const char* lerr = lua_tostring(m_ls, -1);
|
||||
throw falco_exception(string(lerr));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void falco_outputs::init(bool json_output)
|
||||
void falco_outputs::init(bool json_output, uint32_t rate, uint32_t max_burst, bool buffered)
|
||||
{
|
||||
// The engine must have been given an inspector by now.
|
||||
if(! m_inspector)
|
||||
@@ -52,11 +68,17 @@ void falco_outputs::init(bool json_output)
|
||||
falco_formats::init(m_inspector, m_ls, json_output);
|
||||
|
||||
falco_logger::init(m_ls);
|
||||
|
||||
m_notifications_tb.init(rate, max_burst);
|
||||
|
||||
m_buffered = buffered;
|
||||
|
||||
m_initialized = true;
|
||||
}
|
||||
|
||||
void falco_outputs::add_output(output_config oc)
|
||||
{
|
||||
uint8_t nargs = 1;
|
||||
uint8_t nargs = 2;
|
||||
lua_getglobal(m_ls, m_lua_add_output.c_str());
|
||||
|
||||
if(!lua_isfunction(m_ls, -1))
|
||||
@@ -64,11 +86,12 @@ void falco_outputs::add_output(output_config oc)
|
||||
throw falco_exception("No function " + m_lua_add_output + " found. ");
|
||||
}
|
||||
lua_pushstring(m_ls, oc.name.c_str());
|
||||
lua_pushnumber(m_ls, (m_buffered ? 1 : 0));
|
||||
|
||||
// If we have options, build up a lua table containing them
|
||||
if (oc.options.size())
|
||||
{
|
||||
nargs = 2;
|
||||
nargs = 3;
|
||||
lua_createtable(m_ls, 0, oc.options.size());
|
||||
|
||||
for (auto it = oc.options.cbegin(); it != oc.options.cend(); ++it)
|
||||
@@ -86,18 +109,25 @@ void falco_outputs::add_output(output_config oc)
|
||||
|
||||
}
|
||||
|
||||
void falco_outputs::handle_event(sinsp_evt *ev, string &level, string &priority, string &format)
|
||||
void falco_outputs::handle_event(sinsp_evt *ev, string &rule, falco_common::priority_type priority, string &format)
|
||||
{
|
||||
if(!m_notifications_tb.claim())
|
||||
{
|
||||
falco_logger::log(LOG_DEBUG, "Skipping rate-limited notification for rule " + rule + "\n");
|
||||
return;
|
||||
}
|
||||
|
||||
lua_getglobal(m_ls, m_lua_output_event.c_str());
|
||||
|
||||
if(lua_isfunction(m_ls, -1))
|
||||
{
|
||||
lua_pushlightuserdata(m_ls, ev);
|
||||
lua_pushstring(m_ls, level.c_str());
|
||||
lua_pushstring(m_ls, priority.c_str());
|
||||
lua_pushstring(m_ls, rule.c_str());
|
||||
lua_pushstring(m_ls, falco_common::priority_names[priority].c_str());
|
||||
lua_pushnumber(m_ls, priority);
|
||||
lua_pushstring(m_ls, format.c_str());
|
||||
|
||||
if(lua_pcall(m_ls, 4, 0, 0) != 0)
|
||||
if(lua_pcall(m_ls, 5, 0, 0) != 0)
|
||||
{
|
||||
const char* lerr = lua_tostring(m_ls, -1);
|
||||
string err = "Error invoking function output: " + string(lerr);
|
||||
|
||||
@@ -19,6 +19,7 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
#pragma once
|
||||
|
||||
#include "falco_common.h"
|
||||
#include "token_bucket.h"
|
||||
|
||||
//
|
||||
// This class acts as the primary interface between a program and the
|
||||
@@ -40,7 +41,7 @@ public:
|
||||
std::map<std::string, std::string> options;
|
||||
};
|
||||
|
||||
void init(bool json_output);
|
||||
void init(bool json_output, uint32_t rate, uint32_t max_burst, bool buffered);
|
||||
|
||||
void add_output(output_config oc);
|
||||
|
||||
@@ -48,10 +49,18 @@ public:
|
||||
// ev is an event that has matched some rule. Pass the event
|
||||
// to all configured outputs.
|
||||
//
|
||||
void handle_event(sinsp_evt *ev, std::string &level, std::string &priority, std::string &format);
|
||||
void handle_event(sinsp_evt *ev, std::string &rule, falco_common::priority_type priority, std::string &format);
|
||||
|
||||
private:
|
||||
bool m_initialized;
|
||||
|
||||
// Rate limits notifications
|
||||
token_bucket m_notifications_tb;
|
||||
|
||||
bool m_buffered;
|
||||
|
||||
std::string m_lua_add_output = "add_output";
|
||||
std::string m_lua_output_event = "output_event";
|
||||
std::string m_lua_output_cleanup = "output_cleanup";
|
||||
std::string m_lua_main_filename = "output.lua";
|
||||
};
|
||||
|
||||
@@ -20,18 +20,62 @@ along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
#include "logger.h"
|
||||
#include "chisel_api.h"
|
||||
|
||||
#include "falco_common.h"
|
||||
|
||||
const static struct luaL_reg ll_falco [] =
|
||||
{
|
||||
{"syslog", &falco_logger::syslog},
|
||||
{NULL,NULL}
|
||||
};
|
||||
|
||||
int falco_logger::level = LOG_INFO;
|
||||
|
||||
void falco_logger::init(lua_State *ls)
|
||||
{
|
||||
luaL_openlib(ls, "falco", ll_falco, 0);
|
||||
}
|
||||
|
||||
void falco_logger::set_level(string &level)
|
||||
{
|
||||
if(level == "emergency")
|
||||
{
|
||||
falco_logger::level = LOG_EMERG;
|
||||
}
|
||||
else if(level == "alert")
|
||||
{
|
||||
falco_logger::level = LOG_ALERT;
|
||||
}
|
||||
else if(level == "critical")
|
||||
{
|
||||
falco_logger::level = LOG_CRIT;
|
||||
}
|
||||
else if(level == "error")
|
||||
{
|
||||
falco_logger::level = LOG_ERR;
|
||||
}
|
||||
else if(level == "warning")
|
||||
{
|
||||
falco_logger::level = LOG_WARNING;
|
||||
}
|
||||
else if(level == "notice")
|
||||
{
|
||||
falco_logger::level = LOG_NOTICE;
|
||||
}
|
||||
else if(level == "info")
|
||||
{
|
||||
falco_logger::level = LOG_INFO;
|
||||
}
|
||||
else if(level == "debug")
|
||||
{
|
||||
falco_logger::level = LOG_DEBUG;
|
||||
}
|
||||
else
|
||||
{
|
||||
throw falco_exception("Unknown log level " + level);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int falco_logger::syslog(lua_State *ls) {
|
||||
int priority = luaL_checknumber(ls, 1);
|
||||
|
||||
@@ -49,6 +93,12 @@ bool falco_logger::log_stderr = true;
|
||||
bool falco_logger::log_syslog = true;
|
||||
|
||||
void falco_logger::log(int priority, const string msg) {
|
||||
|
||||
if(priority > falco_logger::level)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (falco_logger::log_syslog) {
|
||||
::syslog(priority, "%s", msg.c_str());
|
||||
}
|
||||
|
||||
@@ -32,11 +32,15 @@ class falco_logger
|
||||
public:
|
||||
static void init(lua_State *ls);
|
||||
|
||||
// Will throw exception if level is unknown.
|
||||
static void set_level(string &level);
|
||||
|
||||
// value = falco.syslog(level, message)
|
||||
static int syslog(lua_State *ls);
|
||||
|
||||
static void log(int priority, const string msg);
|
||||
|
||||
static int level;
|
||||
static bool log_stderr;
|
||||
static bool log_syslog;
|
||||
};
|
||||
|
||||
@@ -18,22 +18,25 @@
|
||||
|
||||
local mod = {}
|
||||
|
||||
levels = {"Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"}
|
||||
|
||||
mod.levels = levels
|
||||
|
||||
local outputs = {}
|
||||
|
||||
function mod.stdout(level, msg)
|
||||
function mod.stdout(priority, priority_num, buffered, msg)
|
||||
if buffered == 0 then
|
||||
io.stdout:setvbuf 'no'
|
||||
end
|
||||
print (msg)
|
||||
end
|
||||
|
||||
function mod.stdout_cleanup()
|
||||
io.stdout:flush()
|
||||
end
|
||||
|
||||
function mod.file_validate(options)
|
||||
if (not type(options.filename) == 'string') then
|
||||
error("File output needs to be configured with a valid filename")
|
||||
end
|
||||
|
||||
file, err = io.open(options.filename, "a+")
|
||||
local file, err = io.open(options.filename, "a+")
|
||||
if file == nil then
|
||||
error("Error with file output: "..err)
|
||||
end
|
||||
@@ -41,51 +44,100 @@ function mod.file_validate(options)
|
||||
|
||||
end
|
||||
|
||||
function mod.file(level, msg, options)
|
||||
file = io.open(options.filename, "a+")
|
||||
function mod.file(priority, priority_num, buffered, msg, options)
|
||||
if options.keep_alive == "true" then
|
||||
if file == nil then
|
||||
file = io.open(options.filename, "a+")
|
||||
if buffered == 0 then
|
||||
file:setvbuf 'no'
|
||||
end
|
||||
end
|
||||
else
|
||||
file = io.open(options.filename, "a+")
|
||||
end
|
||||
|
||||
file:write(msg, "\n")
|
||||
file:close()
|
||||
|
||||
if options.keep_alive == nil or
|
||||
options.keep_alive ~= "true" then
|
||||
file:close()
|
||||
file = nil
|
||||
end
|
||||
end
|
||||
|
||||
function mod.syslog(level, msg, options)
|
||||
falco.syslog(level, msg)
|
||||
function mod.file_cleanup()
|
||||
if file ~= nil then
|
||||
file:flush()
|
||||
file:close()
|
||||
file = nil
|
||||
end
|
||||
end
|
||||
|
||||
function mod.program(level, msg, options)
|
||||
function mod.syslog(priority, priority_num, buffered, msg, options)
|
||||
falco.syslog(priority_num, msg)
|
||||
end
|
||||
|
||||
function mod.syslog_cleanup()
|
||||
end
|
||||
|
||||
function mod.program(priority, priority_num, buffered, msg, options)
|
||||
-- XXX Ideally we'd check that the program ran
|
||||
-- successfully. However, the luajit we're using returns true even
|
||||
-- when the shell can't run the program.
|
||||
|
||||
file = io.popen(options.program, "w")
|
||||
-- Note: options are all strings
|
||||
if options.keep_alive == "true" then
|
||||
if file == nil then
|
||||
file = io.popen(options.program, "w")
|
||||
if buffered == 0 then
|
||||
file:setvbuf 'no'
|
||||
end
|
||||
end
|
||||
else
|
||||
file = io.popen(options.program, "w")
|
||||
end
|
||||
|
||||
file:write(msg, "\n")
|
||||
file:close()
|
||||
end
|
||||
|
||||
local function level_of(s)
|
||||
s = string.lower(s)
|
||||
for i,v in ipairs(levels) do
|
||||
if (string.find(string.lower(v), "^"..s)) then
|
||||
return i - 1 -- (syslog levels start at 0, lua indices start at 1)
|
||||
end
|
||||
if options.keep_alive == nil or
|
||||
options.keep_alive ~= "true" then
|
||||
file:close()
|
||||
file = nil
|
||||
end
|
||||
error("Invalid severity level: "..s)
|
||||
end
|
||||
|
||||
function output_event(event, rule, priority, format)
|
||||
local level = level_of(priority)
|
||||
format = "*%evt.time: "..levels[level+1].." "..format
|
||||
formatter = formats.formatter(format)
|
||||
msg = formats.format_event(event, rule, levels[level+1], formatter)
|
||||
function mod.program_cleanup()
|
||||
if file ~= nil then
|
||||
file:flush()
|
||||
file:close()
|
||||
file = nil
|
||||
end
|
||||
end
|
||||
|
||||
function output_event(event, rule, priority, priority_num, format)
|
||||
-- If format starts with a *, remove it, as we're adding our own
|
||||
-- prefix here.
|
||||
if format:sub(1,1) == "*" then
|
||||
format = format:sub(2)
|
||||
end
|
||||
|
||||
format = "*%evt.time: "..priority.." "..format
|
||||
|
||||
msg = formats.format_event(event, rule, priority, format)
|
||||
|
||||
for index,o in ipairs(outputs) do
|
||||
o.output(level, msg, o.config)
|
||||
o.output(priority, priority_num, o.buffered, msg, o.config)
|
||||
end
|
||||
|
||||
formats.free_formatter(formatter)
|
||||
end
|
||||
|
||||
function add_output(output_name, config)
|
||||
function output_cleanup()
|
||||
formats.free_formatters()
|
||||
for index,o in ipairs(outputs) do
|
||||
o.cleanup()
|
||||
end
|
||||
end
|
||||
|
||||
function add_output(output_name, buffered, config)
|
||||
if not (type(mod[output_name]) == 'function') then
|
||||
error("rule_loader.add_output(): invalid output_name: "..output_name)
|
||||
end
|
||||
@@ -96,7 +148,9 @@ function add_output(output_name, config)
|
||||
mod[output_name.."_validate"](config)
|
||||
end
|
||||
|
||||
table.insert(outputs, {output = mod[output_name], config=config})
|
||||
table.insert(outputs, {output = mod[output_name],
|
||||
cleanup = mod[output_name.."_cleanup"],
|
||||
buffered=buffered, config=config})
|
||||
end
|
||||
|
||||
return mod
|
||||
|
||||
120
userspace/falco/statsfilewriter.cpp
Normal file
120
userspace/falco/statsfilewriter.cpp
Normal file
@@ -0,0 +1,120 @@
|
||||
#include <sys/time.h>
|
||||
#include <signal.h>
|
||||
|
||||
#include "statsfilewriter.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
static bool g_save_stats = false;
|
||||
static void timer_handler (int signum)
|
||||
{
|
||||
g_save_stats = true;
|
||||
}
|
||||
|
||||
extern char **environ;
|
||||
|
||||
StatsFileWriter::StatsFileWriter()
|
||||
: m_num_stats(0), m_inspector(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
StatsFileWriter::~StatsFileWriter()
|
||||
{
|
||||
m_output.close();
|
||||
}
|
||||
|
||||
bool StatsFileWriter::init(sinsp *inspector, string &filename, uint32_t interval_sec, string &errstr)
|
||||
{
|
||||
struct itimerval timer;
|
||||
struct sigaction handler;
|
||||
|
||||
m_inspector = inspector;
|
||||
|
||||
m_output.exceptions ( ofstream::failbit | ofstream::badbit );
|
||||
m_output.open(filename, ios_base::app);
|
||||
|
||||
memset (&handler, 0, sizeof (handler));
|
||||
handler.sa_handler = &timer_handler;
|
||||
if (sigaction(SIGALRM, &handler, NULL) == -1)
|
||||
{
|
||||
errstr = string("Could not set up signal handler for periodic timer: ") + strerror(errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
timer.it_value.tv_sec = interval_sec;
|
||||
timer.it_value.tv_usec = 0;
|
||||
timer.it_interval = timer.it_value;
|
||||
if (setitimer(ITIMER_REAL, &timer, NULL) == -1)
|
||||
{
|
||||
errstr = string("Could not set up periodic timer: ") + strerror(errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
// (Undocumented) feature. Take any environment keys prefixed
|
||||
// with FALCO_STATS_EXTRA_XXX and add them to the output. Used by
|
||||
// run_performance_tests.sh.
|
||||
for(uint32_t i=0; environ[i]; i++)
|
||||
{
|
||||
char *p = strstr(environ[i], "=");
|
||||
if(!p)
|
||||
{
|
||||
errstr = string("Could not find environment separator in ") + string(environ[i]);
|
||||
return false;
|
||||
}
|
||||
string key(environ[i], p-environ[i]);
|
||||
string val(p+1, strlen(environ[i])-(p-environ[i])-1);
|
||||
if(key.compare(0, 18, "FALCO_STATS_EXTRA_") == 0)
|
||||
{
|
||||
string sub = key.substr(18);
|
||||
if (m_extra != "")
|
||||
{
|
||||
m_extra += ", ";
|
||||
}
|
||||
m_extra += "\"" + sub + "\": " + "\"" + val + "\"";
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void StatsFileWriter::handle()
|
||||
{
|
||||
if (g_save_stats)
|
||||
{
|
||||
scap_stats cstats;
|
||||
scap_stats delta;
|
||||
|
||||
g_save_stats = false;
|
||||
m_num_stats++;
|
||||
m_inspector->get_capture_stats(&cstats);
|
||||
|
||||
if(m_num_stats == 1)
|
||||
{
|
||||
delta = cstats;
|
||||
}
|
||||
else
|
||||
{
|
||||
delta.n_evts = cstats.n_evts - m_last_stats.n_evts;
|
||||
delta.n_drops = cstats.n_drops - m_last_stats.n_drops;
|
||||
delta.n_preemptions = cstats.n_preemptions - m_last_stats.n_preemptions;
|
||||
}
|
||||
|
||||
m_output << "{\"sample\": " << m_num_stats;
|
||||
if(m_extra != "")
|
||||
{
|
||||
m_output << ", " << m_extra;
|
||||
}
|
||||
m_output << ", \"cur\": {" <<
|
||||
"\"events\": " << cstats.n_evts <<
|
||||
", \"drops\": " << cstats.n_drops <<
|
||||
", \"preemptions\": " << cstats.n_preemptions <<
|
||||
"}, \"delta\": {" <<
|
||||
"\"events\": " << delta.n_evts <<
|
||||
", \"drops\": " << delta.n_drops <<
|
||||
", \"preemptions\": " << delta.n_preemptions <<
|
||||
"}, \"drop_pct\": " << (delta.n_evts == 0 ? 0 : (100.0*delta.n_drops/delta.n_evts)) <<
|
||||
"}," << endl;
|
||||
|
||||
m_last_stats = cstats;
|
||||
}
|
||||
}
|
||||
32
userspace/falco/statsfilewriter.h
Normal file
32
userspace/falco/statsfilewriter.h
Normal file
@@ -0,0 +1,32 @@
|
||||
#pragma once
|
||||
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include <sinsp.h>
|
||||
|
||||
// Periodically collects scap stats files and writes them to a file as
|
||||
// json.
|
||||
|
||||
class StatsFileWriter {
|
||||
public:
|
||||
StatsFileWriter();
|
||||
virtual ~StatsFileWriter();
|
||||
|
||||
// Returns success as bool. On false fills in errstr.
|
||||
bool init(sinsp *inspector, std::string &filename,
|
||||
uint32_t interval_sec,
|
||||
string &errstr);
|
||||
|
||||
// Should be called often (like for each event in a sinsp
|
||||
// loop).
|
||||
void handle();
|
||||
|
||||
protected:
|
||||
uint32_t m_num_stats;
|
||||
sinsp *m_inspector;
|
||||
std::ofstream m_output;
|
||||
std::string m_extra;
|
||||
scap_stats m_last_stats;
|
||||
};
|
||||
Reference in New Issue
Block a user