mirror of
https://github.com/falcosecurity/falco.git
synced 2026-03-20 11:42:06 +00:00
Compare commits
137 Commits
agent/0.80
...
0.14.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
62c1a0440d | ||
|
|
ca7106c652 | ||
|
|
5e9bbd139c | ||
|
|
bd4c3ffa39 | ||
|
|
477fcb56f1 | ||
|
|
c111f282dd | ||
|
|
50c6515da5 | ||
|
|
9e0e3da617 | ||
|
|
513cf2ed8b | ||
|
|
a78212cc62 | ||
|
|
074a906af3 | ||
|
|
4fcd44e73a | ||
|
|
ec07f7c240 | ||
|
|
b8a25c6e71 | ||
|
|
edc8eb2fd8 | ||
|
|
21c7eece25 | ||
|
|
36a1cdd9bc | ||
|
|
ddf55d3c8e | ||
|
|
b76f60d419 | ||
|
|
889fcc8b50 | ||
|
|
6863675b76 | ||
|
|
c28892eba3 | ||
|
|
5e5742f87d | ||
|
|
611877e4d3 | ||
|
|
29b597dd9c | ||
|
|
840fc4bb41 | ||
|
|
ea303ba32f | ||
|
|
3bd0081753 | ||
|
|
67cde2980d | ||
|
|
e4e6d8845d | ||
|
|
67d1e6c51d | ||
|
|
f7c17bb1a5 | ||
|
|
21f16f0cb0 | ||
|
|
d1329af3bd | ||
|
|
1308d7fc35 | ||
|
|
c24fa324d2 | ||
|
|
e15ee1d28d | ||
|
|
1da02bf3ff | ||
|
|
4696519deb | ||
|
|
e321d7c8de | ||
|
|
bd7a9733fd | ||
|
|
3fd573e432 | ||
|
|
cd53c58808 | ||
|
|
c6169e1aaa | ||
|
|
b79670a79a | ||
|
|
1f28f85bdf | ||
|
|
ff4f7ca13b | ||
|
|
071e8de075 | ||
|
|
32f8e304eb | ||
|
|
6eac49e5ae | ||
|
|
53c7e101fe | ||
|
|
774046d57e | ||
|
|
438f647984 | ||
|
|
8c6ebd586d | ||
|
|
c531d91493 | ||
|
|
48d01203ef | ||
|
|
43126362c3 | ||
|
|
ef9c4ee6ab | ||
|
|
38771923ca | ||
|
|
5b060d2c0f | ||
|
|
47828f259f | ||
|
|
e614e64331 | ||
|
|
a3e336f782 | ||
|
|
7d24eba1b6 | ||
|
|
7dbdb00109 | ||
|
|
a2319d2b8a | ||
|
|
8d60d374f7 | ||
|
|
6ca316a7cc | ||
|
|
bc34e438ce | ||
|
|
7fa6fc1b70 | ||
|
|
e4ffa55d58 | ||
|
|
f746c4cd57 | ||
|
|
0499811762 | ||
|
|
6445cdb950 | ||
|
|
6b82ecfa79 | ||
|
|
fc70c635d1 | ||
|
|
2352b96d6b | ||
|
|
ff299c1d43 | ||
|
|
fb3f2178ba | ||
|
|
a5ef1c4f4f | ||
|
|
5e38f130cc | ||
|
|
eaaff5a773 | ||
|
|
81e2e672f0 | ||
|
|
071e7dff17 | ||
|
|
e8ba42cae4 | ||
|
|
470710366b | ||
|
|
24ca38a819 | ||
|
|
ab0413a9ee | ||
|
|
6acb13e6bb | ||
|
|
fdbe62fdae | ||
|
|
d63542d8ff | ||
|
|
7289315837 | ||
|
|
25efce033b | ||
|
|
8bc4a5e38f | ||
|
|
c05319927a | ||
|
|
1e32d637b2 | ||
|
|
ccf35552dd | ||
|
|
ec0c109d2a | ||
|
|
46b0fd833c | ||
|
|
bed5993500 | ||
|
|
bed360497e | ||
|
|
3afe04629a | ||
|
|
bebdff3d67 | ||
|
|
9543514270 | ||
|
|
46405510e2 | ||
|
|
42285687d4 | ||
|
|
8b82a08148 | ||
|
|
19d251ef4b | ||
|
|
66ba09ea3b | ||
|
|
4867c47d4b | ||
|
|
526f32b54b | ||
|
|
26ca866162 | ||
|
|
893554e0f0 | ||
|
|
c5523d89a7 | ||
|
|
81dcee23a9 | ||
|
|
81a38fb909 | ||
|
|
e9e9bd85c3 | ||
|
|
70f768d9ea | ||
|
|
c3b0f0d96d | ||
|
|
2a7851c77b | ||
|
|
512a36dfe1 | ||
|
|
73e1ae616a | ||
|
|
b496116fe3 | ||
|
|
2a0911dcfd | ||
|
|
af57f2b5c8 | ||
|
|
30ae3447c3 | ||
|
|
9d3392e9b9 | ||
|
|
6be4830342 | ||
|
|
e6bf402117 | ||
|
|
94df00e512 | ||
|
|
3ee76637f4 | ||
|
|
e8aee19f6c | ||
|
|
74556e5f6e | ||
|
|
809d20c294 | ||
|
|
b0ae29c23a | ||
|
|
d1b6b2be87 | ||
|
|
e00181d553 |
18
.github/stale.yaml
vendored
Normal file
18
.github/stale.yaml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# Number of days of inactivity before an issue becomes stale
|
||||
daysUntilStale: 60
|
||||
# Number of days of inactivity before a stale issue is closed
|
||||
daysUntilClose: 7
|
||||
# Issues with these labels will never be considered stale
|
||||
exemptLabels:
|
||||
- cncf
|
||||
- roadmap
|
||||
- enhancement
|
||||
# Label to use when marking an issue as stale
|
||||
staleLabel: wontfix
|
||||
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||
markComment: >
|
||||
This issue has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs. Thank you
|
||||
for your contributions.
|
||||
# Comment to post when closing a stale issue. Set to `false` to disable
|
||||
closeComment: false
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -18,3 +18,4 @@ docker/event-generator/mysqld
|
||||
docker/event-generator/httpd
|
||||
docker/event-generator/sha1sum
|
||||
docker/event-generator/vipw
|
||||
.vscode/*
|
||||
28
.travis.yml
28
.travis.yml
@@ -1,4 +1,22 @@
|
||||
language: c
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco .
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
language: cpp
|
||||
compiler: gcc
|
||||
env:
|
||||
- BUILD_TYPE=Debug
|
||||
- BUILD_TYPE=Release
|
||||
@@ -6,11 +24,9 @@ sudo: required
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
- sudo apt-get update
|
||||
install:
|
||||
- sudo apt-get --force-yes install g++-4.8
|
||||
- sudo apt-get install rpm linux-headers-$(uname -r)
|
||||
- sudo apt-get install rpm linux-headers-$(uname -r) libelf-dev
|
||||
- git clone https://github.com/draios/sysdig.git ../sysdig
|
||||
- sudo apt-get install -y python-pip libvirt-dev jq dkms
|
||||
- cd ..
|
||||
@@ -25,8 +41,6 @@ before_script:
|
||||
- export KERNELDIR=/lib/modules/$(uname -r)/build
|
||||
script:
|
||||
- set -e
|
||||
- export CC="gcc-4.8"
|
||||
- export CXX="g++-4.8"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DDRAIOS_DEBUG_FLAGS="-D_DEBUG -DNDEBUG"
|
||||
@@ -34,7 +48,7 @@ script:
|
||||
- make package
|
||||
- cp falco*.deb ../docker/local
|
||||
- cd ../docker/local
|
||||
- docker build -t sysdig/falco:test .
|
||||
- docker build -t falcosecurity/falco:test .
|
||||
- cd ../..
|
||||
- sudo test/run_regression_tests.sh $TRAVIS_BRANCH
|
||||
notifications:
|
||||
|
||||
234
CHANGELOG.md
234
CHANGELOG.md
@@ -2,6 +2,240 @@
|
||||
|
||||
This file documents all notable changes to Falco. The release numbering uses [semantic versioning](http://semver.org).
|
||||
|
||||
## v0.14.0
|
||||
|
||||
Released 2019-02-06
|
||||
|
||||
## Major Changes
|
||||
|
||||
* Rules versioning support: The falco engine and executable now have an *engine version* that represents the fields they support. Similarly, rules files have an optional *required_engine_version: NNN* object that names the minimum engine version required to read that rules file. Any time the engine adds new fields, event sources, etc, the engine version will be incremented, and any time a rules file starts using new fields, event sources, etc, the required engine version will be incremented. [[#492](https://github.com/falcosecurity/falco/pull/492)]
|
||||
|
||||
* Allow SSL for K8s audit endpoint/embedded webserver [[#471](https://github.com/falcosecurity/falco/pull/471)]
|
||||
|
||||
* Add stale issues bot that automatically flags old github issues as stale after 60 days of inactivity and closes issues after 67 days of inactivity. [[#500](https://github.com/falcosecurity/falco/pull/500)]
|
||||
|
||||
* Support bundle: When run with `--support`, falco will print a json object containing necessary information like falco version, command line, operating system information, and falco rules files contents. This could be useful when reporting issues. [[#517](https://github.com/falcosecurity/falco/pull/517)]
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* Support new third-party library dependencies from open source sysdig. [[#498](https://github.com/falcosecurity/falco/pull/498)]
|
||||
|
||||
* Add CII best practices badge. [[#499](https://github.com/falcosecurity/falco/pull/499)]
|
||||
|
||||
* Fix kernel module builds when running on centos as a container by installing gcc 5 by hand instead of directly from debian/unstable. [[#501](https://github.com/falcosecurity/falco/pull/501)]
|
||||
|
||||
* Mount `/etc` when running as a container, which allows container to build kernel module/ebpf program on COS/Minikube. [[#475](https://github.com/falcosecurity/falco/pull/475)]
|
||||
|
||||
* Improved way to specify the source of generic event objects [[#480](https://github.com/falcosecurity/falco/pull/480)]
|
||||
|
||||
* Readability/clarity improvements to K8s Audit/K8s Daemonset READMEs. [[#503](https://github.com/falcosecurity/falco/pull/503)]
|
||||
|
||||
* Add additional RBAC permissions to track deployments/daemonsets/replicasets. [[#514](https://github.com/falcosecurity/falco/pull/514)]
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Fix formatting of nodejs examples README [[#502](https://github.com/falcosecurity/falco/pull/502)]
|
||||
|
||||
## Rule Changes
|
||||
|
||||
* Remove FPs for `Launch Sensitive Mount Container` rule [[#509](https://github.com/falcosecurity/falco/pull/509/files)]
|
||||
|
||||
* Update Container rules/macros to use the more reliable `container.image.{repository,tag}` that always return the repository/tag of an image instead of `container.image`, which may not for some docker daemon versions. [[#513](https://github.com/falcosecurity/falco/pull/513)]
|
||||
|
||||
## v0.13.1
|
||||
|
||||
Released 2019-01-16
|
||||
|
||||
## Major Changes
|
||||
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* Unbuffer outputs by default. This helps make output readable when used in environments like K8s. [[#494](https://github.com/falcosecurity/falco/pull/494)]
|
||||
|
||||
* Improved documentation for running Falco within K8s and getting K8s Audit Logging to work with Minikube and Falco as a Daemonset within K8s. [[#496](https://github.com/falcosecurity/falco/pull/496)]
|
||||
|
||||
* Fix AWS Permissions for Kubernetes Response Engine [[#465](https://github.com/falcosecurity/falco/pull/465)]
|
||||
|
||||
* Tighten compilation flags to include `-Wextra` and `-Werror` [[#479](https://github.com/falcosecurity/falco/pull/479)]
|
||||
|
||||
* Add `k8s.ns.name` to outputs when `-pk` argument is used [[#472](https://github.com/falcosecurity/falco/pull/472)]
|
||||
|
||||
* Remove kubernetes-response-engine from system:masters [[#488](https://github.com/falcosecurity/falco/pull/488)]
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Ensure `-pc`/`-pk` only apply to syscall rules and not k8s_audit rules [[#495](https://github.com/falcosecurity/falco/pull/495)]
|
||||
|
||||
* Fix a potential crash that could occur when using the falco engine and rulesets [[#468](https://github.com/falcosecurity/falco/pull/468)]
|
||||
|
||||
* Fix a regression where format output options were mistakenly removed [[#485](https://github.com/falcosecurity/falco/pull/485)]
|
||||
|
||||
## Rule Changes
|
||||
|
||||
* Fix FPs related to calico and writing files below etc [[#481](https://github.com/falcosecurity/falco/pull/481)]
|
||||
|
||||
* Fix FPs related to `apt-config`/`apt-cache`, `apk` [[#490](https://github.com/falcosecurity/falco/pull/490)]
|
||||
|
||||
* New rules `Launch Package Management Process in Container`, `Netcat Remote Code Execution in Container`, `Lauch Suspicious Network Tool in Container` look for host-level network tools like `netcat`, package management tools like `apt-get`, or network tool binaries being run in a container. [[#490](https://github.com/falcosecurity/falco/pull/490)]
|
||||
|
||||
* Fix the `inbound` and `outbound` macros so they work with sendto/recvfrom/sendmsg/recvmsg. [[#470](https://github.com/falcosecurity/falco/pull/470)]
|
||||
|
||||
* Fix FPs related to prometheus/openshift writing config below /etc. [[#470](https://github.com/falcosecurity/falco/pull/470)]
|
||||
|
||||
|
||||
## v0.13.0
|
||||
|
||||
Released 2018-11-09
|
||||
|
||||
## Major Changes
|
||||
|
||||
* **Support for K8s Audit Events** : Falco now supports [K8s Audit Events](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-backends) as a second stream of events in addition to syscalls. For full details on the feature, see the [wiki](https://github.com/falcosecurity/falco/wiki/K8s-Audit-Event-Support).
|
||||
|
||||
* Transparent Config/Rule Reloading: On SIGHUP, Falco will now reload all config files/rules files and start processing new events. Allows rules changes without having to restart falco [[#457](https://github.com/falcosecurity/falco/pull/457)] [[#432](https://github.com/falcosecurity/falco/issues/432)]
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* The reference integration of falco into a action engine now supports aws actions like lambda, etc. [[#460](https://github.com/falcosecurity/falco/pull/460)]
|
||||
|
||||
* Add netcat to falco docker images, which allows easier integration of program outputs to external servers [[#456](https://github.com/falcosecurity/falco/pull/456)] [[#433](https://github.com/falcosecurity/falco/issues/433)]
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Links cleanup related to the draios/falco -> falcosecurity/falco move [[#447](https://github.com/falcosecurity/falco/pull/447)]
|
||||
|
||||
* Properly load/unload kernel module when the falco service is started/stopped [[#459](https://github.com/falcosecurity/falco/pull/459)] [[#418](https://github.com/falcosecurity/falco/issues/418)]
|
||||
|
||||
## Rule Changes
|
||||
|
||||
* Better coverage (e.g. reduced FPs) for critical stack, hids systems, ufw, cloud-init, etc. [[#445](https://github.com/falcosecurity/falco/pull/445)]
|
||||
|
||||
* New rules `Launch Package Management Process in Container`, `Netcat Remote Code Execution in Container`, and `Lauch Suspicious Network Tool in Container` look for running various suspicious programs in a container. [[#461](https://github.com/falcosecurity/falco/pull/461)]
|
||||
|
||||
* Misc changes to address false positives in GKE, Istio, etc. [[#455](https://github.com/falcosecurity/falco/pull/455)] [[#439](https://github.com/falcosecurity/falco/issues/439)]
|
||||
|
||||
## v0.12.1
|
||||
|
||||
Released 2018-09-11
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Fig regression in libcurl configure script [[#416](https://github.com/draios/falco/pull/416)]
|
||||
|
||||
## v0.12.0
|
||||
|
||||
Released 2018-09-11
|
||||
|
||||
## Major Changes
|
||||
|
||||
* Improved IPv6 Support to fully support use of IPv6 addresses in events, connections and filters [[#sysdig/1204](https://github.com/draios/sysdig/pull/1204)]
|
||||
|
||||
* Ability to associate connections with dns names: new filterchecks `fd.*ip.name` allow looking up the DNS name for a connection's IP address. This can be used to identify or restrict connections by dns names e.g. `evt.type=connect and fd.sip.name=github.com`. [[#412](https://github.com/draios/falco/pull/412)] [[#sysdig/1213](https://github.com/draios/sysdig/pull/1213)]
|
||||
|
||||
* New filterchecks `user.loginuid` and `user.loginname` can be used to match the login uid, which stays consistent across sudo/su. This can be used to find the actual user running a given process [[#sysdig/1189](https://github.com/draios/sysdig/pull/1189)]
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* Upgrade zlib to 1.2.11, openssl to 1.0.2n, and libcurl to 7.60.0 to address software vulnerabilities [[#402](https://github.com/draios/falco/pull/402)]
|
||||
* New `endswith` operator can be used for suffix matching on strings [[#sysdig/1209](https://github.com/draios/sysdig/pull/1209)]
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Better control of specifying location of lua source code [[#406](https://github.com/draios/falco/pull/406)]
|
||||
|
||||
## Rule Changes
|
||||
|
||||
* None for this release.
|
||||
|
||||
## v0.11.1
|
||||
|
||||
Released 2018-07-31
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Fix a problem that caused the kernel module to not load on certain kernel versions [[#397](https://github.com/draios/falco/pull/397)] [[#394](https://github.com/draios/falco/issues/394)]
|
||||
|
||||
## v0.11.0
|
||||
|
||||
Released 2018-07-24
|
||||
|
||||
## Major Changes
|
||||
|
||||
* **EBPF Support** (Beta): Falco can now read events via an ebpf program loaded into the kernel instead of the `falco-probe` kernel module. Full docs [here](https://github.com/draios/sysdig/wiki/eBPF-(beta)). [[#365](https://github.com/draios/falco/pull/365)]
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* Rules may now have an `skip-if-unknown-filter` property. If set to true, a rule will be skipped if its condition/output property refers to a filtercheck (e.g. `fd.some-new-attibute`) that is not present in the current falco version. [[#364](https://github.com/draios/falco/pull/364)] [[#345](https://github.com/draios/falco/issues/345)]
|
||||
* Small changes to Falco `COPYING` file so github automatically recognizes license [[#380](https://github.com/draios/falco/pull/380)]
|
||||
* New example integration showing how to connect Falco with Anchore to dynamically create falco rules based on negative scan results [[#390](https://github.com/draios/falco/pull/390)]
|
||||
* New example integration showing how to connect Falco, [nats](https://nats.io/), and K8s to run flexible "playbooks" based on Falco events [[#389](https://github.com/draios/falco/pull/389)]
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Ensure all rules are enabled by default [[#379](https://github.com/draios/falco/pull/379)]
|
||||
* Fix libcurl compilation problems [[#374](https://github.com/draios/falco/pull/374)]
|
||||
* Add gcc-6 to docker container, which improves compatibility when building kernel module [[#382](https://github.com/draios/falco/pull/382)] [[#371](https://github.com/draios/falco/issues/371)]
|
||||
* Ensure the /lib/modules symlink to /host/lib/modules is set correctly [[#392](https://github.com/draios/falco/issues/392)]
|
||||
|
||||
## Rule Changes
|
||||
|
||||
* Add additional binary writing programs [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Add additional package management programs [[#388](https://github.com/draios/falco/pull/388)] [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Expand write_below_etc handling for additional programs [[#388](https://github.com/draios/falco/pull/388)] [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Expand set of programs allowed to write to `/etc/pki` [[#388](https://github.com/draios/falco/pull/388)]
|
||||
* Expand set of root written directories/files [[#388](https://github.com/draios/falco/pull/388)] [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Let pam-config read sensitive files [[#388](https://github.com/draios/falco/pull/388)]
|
||||
* Add additional trusted containers: openshift, datadog, docker ucp agent, gliderlabs logspout [[#388](https://github.com/draios/falco/pull/388)]
|
||||
* Let coreos update-ssh-keys write to /home/core/.ssh [[#388](https://github.com/draios/falco/pull/388)]
|
||||
* Expand coverage for MS OMS [[#388](https://github.com/draios/falco/issues/388)] [[#387](https://github.com/draios/falco/issues/387)]
|
||||
* Expand the set of shell spawning programs [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Add additional mysql programs/directories [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Let program `id` open network connections [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* Opt-in rule for protecting tomcat shell spawns [[#366](https://github.com/draios/falco/pull/366)]
|
||||
* New rule `Write below monitored directory` [[#366](https://github.com/draios/falco/pull/366)]
|
||||
|
||||
## v0.10.0
|
||||
|
||||
Released 2018-04-24
|
||||
|
||||
## Major Changes
|
||||
|
||||
* **Rules Directory Support**: Falco will read rules files from `/etc/falco/rules.d` in addition to `/etc/falco/falco_rules.yaml` and `/etc/falco/falco_rules.local.yaml`. Also, when the argument to `-r`/falco.yaml `rules_file` is a directory, falco will read rules files from that directory. [[#348](https://github.com/draios/falco/pull/348)] [[#187](https://github.com/draios/falco/issues/187)]
|
||||
* Properly support all syscalls (e.g. those without parameter extraction by the kernel module) in falco conditions, so they can be included in `evt.type=<name>` conditions. [[#352](https://github.com/draios/falco/pull/352)]
|
||||
* When packaged as a container, start building kernel module with gcc 5.0 instead of gcc 4.9. [[#331](https://github.com/draios/falco/pull/331)]
|
||||
* New example puppet module for falco. [[#341](https://github.com/draios/falco/pull/341)] [[#115](https://github.com/draios/falco/issues/115)]
|
||||
* When signaled with `USR1`, falco will close/reopen log files. Include a [logrotate](https://github.com/logrotate/logrotate) example that shows how to use this feature for log rotation. [[#347](https://github.com/draios/falco/pull/347)] [[#266](https://github.com/draios/falco/issues/266)]
|
||||
* To improve resource usage, further restrict the set of system calls available to falco [[#351](https://github.com/draios/falco/pull/351)] [[draios/sysdig#1105](https://github.com/draios/sysdig/pull/1105)]
|
||||
|
||||
## Minor Changes
|
||||
|
||||
* Add gdb to the development Docker image (sysdig/falco:dev) to aid in debugging. [[#323](https://github.com/draios/falco/pull/323)]
|
||||
* You can now specify -V multiple times on the command line to validate multiple rules files at once. [[#329](https://github.com/draios/falco/pull/329)]
|
||||
* When run with `-v`, falco will print *dangling* macros/lists that are not used by any rules. [[#329](https://github.com/draios/falco/pull/329)]
|
||||
* Add an example demonstrating cryptomining attack that exploits an open docker daemon using host mounts. [[#336](https://github.com/draios/falco/pull/336)]
|
||||
* New falco.yaml option `json_include_output_property` controls whether the formatted string "output" is included in the json object when json output is enabled. [[#342](https://github.com/draios/falco/pull/342)]
|
||||
* Centralize testing event types for consideration by falco into a single function [[draios/sysdig#1105](https://github.com/draios/sysdig/pull/1105)) [[#356](https://github.com/draios/falco/pull/356)]
|
||||
* If a rule has an attribute `warn_evttypes`, falco will not complain about `evt.type` restrictions on that rule [[#355](https://github.com/draios/falco/pull/355)]
|
||||
* When run with `-i`, print all ignored events/syscalls and exit. [[#359](https://github.com/draios/falco/pull/359)]
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
* Minor bug fixes to k8s daemonset configuration. [[#325](https://github.com/draios/falco/pull/325)] [[#296](https://github.com/draios/falco/pull/296)] [[#295](https://github.com/draios/falco/pull/295)]
|
||||
* Ensure `--validate` can be used interchangeably with `-V`. [[#334](https://github.com/draios/falco/pull/334)] [[#322](https://github.com/draios/falco/issues/322)]
|
||||
* Rule conditions like `fd.net` can now be used with the `in` operator e.g. `evt.type=connect and fd.net in ("127.0.0.1/24")`. [[draios/sysdig#1091](https://github.com/draios/sysdig/pull/1091)] [[#343](https://github.com/draios/falco/pull/343)]
|
||||
* Ensure that `keep_alive` can be used both with file and program output at the same time. [[#335](https://github.com/draios/falco/pull/335)]
|
||||
* Make it possible to append to a skipped macro/rule without falco complaining [[#346](https://github.com/draios/falco/pull/346)] [[#305](https://github.com/draios/falco/issues/305)]
|
||||
* Ensure rule order is preserved even when rules do not contain any `evt.type` restriction. [[#354](https://github.com/draios/falco/issues/354)] [[#355](https://github.com/draios/falco/pull/355)]
|
||||
|
||||
## Rule Changes
|
||||
|
||||
* Make it easier to extend the `Change thread namespace` rule via a `user_known_change_thread_namespace_binaries` list. [[#324](https://github.com/draios/falco/pull/324)]
|
||||
* Various FP fixes from users. [[#321](https://github.com/draios/falco/pull/321)] [[#326](https://github.com/draios/falco/pull/326)] [[#344](https://github.com/draios/falco/pull/344)] [[#350](https://github.com/draios/falco/pull/350)]
|
||||
* New rule `Disallowed SSH Connection` detects attempts ssh connection attempts to hosts outside of an expected set. In order to be effective, you need to override the macro `allowed_ssh_hosts` in a user rules file. [[#321](https://github.com/draios/falco/pull/321)]
|
||||
* New rule `Unexpected K8s NodePort Connection` detects attempts to contact the K8s NodePort range from a program running inside a container. In order to be effective, you need to override the macro `nodeport_containers` in a user rules file. [[#321](https://github.com/draios/falco/pull/321)]
|
||||
* Improve `Modify binary dirs` rule to work with new syscalls [[#353](https://github.com/draios/falco/pull/353)]
|
||||
* New rule `Unexpected UDP Traffic` checks for udp traffic not on a list of expected ports. Somewhat FP-prone, so it must be explicitly enabled by overriding the macro `do_unexpected_udp_check` in a user rules file. [[#320](https://github.com/draios/falco/pull/320)] [[#357](https://github.com/draios/falco/pull/357)]
|
||||
|
||||
## v0.9.0
|
||||
|
||||
Released 2018-01-18
|
||||
|
||||
@@ -1,3 +1,20 @@
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco .
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
if(CPACK_GENERATOR MATCHES "DEB")
|
||||
list(APPEND CPACK_INSTALL_COMMANDS "mkdir -p _CPack_Packages/${CPACK_TOPLEVEL_TAG}/${CPACK_GENERATOR}/${CPACK_PACKAGE_FILE_NAME}/etc/init.d/")
|
||||
list(APPEND CPACK_INSTALL_COMMANDS "cp scripts/debian/falco _CPack_Packages/${CPACK_TOPLEVEL_TAG}/${CPACK_GENERATOR}/${CPACK_PACKAGE_FILE_NAME}/etc/init.d")
|
||||
|
||||
222
CMakeLists.txt
222
CMakeLists.txt
@@ -1,7 +1,26 @@
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco .
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
cmake_minimum_required(VERSION 2.8.2)
|
||||
|
||||
project(falco)
|
||||
|
||||
option(BUILD_WARNINGS_AS_ERRORS "Enable building with -Wextra -Werror flags")
|
||||
|
||||
if(NOT DEFINED FALCO_VERSION)
|
||||
set(FALCO_VERSION "0.1.1dev")
|
||||
endif()
|
||||
@@ -18,8 +37,15 @@ if(NOT DRAIOS_DEBUG_FLAGS)
|
||||
set(DRAIOS_DEBUG_FLAGS "-D_DEBUG")
|
||||
endif()
|
||||
|
||||
set(CMAKE_C_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS "-Wall -ggdb --std=c++0x ${DRAIOS_FEATURE_FLAGS}")
|
||||
set(CMAKE_COMMON_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS}")
|
||||
|
||||
if(BUILD_WARNINGS_AS_ERRORS)
|
||||
set(CMAKE_SUPPRESSED_WARNINGS "-Wno-unused-parameter -Wno-missing-field-initializers -Wno-sign-compare -Wno-type-limits -Wno-implicit-fallthrough -Wno-format-truncation")
|
||||
set(CMAKE_COMMON_FLAGS "${CMAKE_COMMON_FLAGS} -Wextra -Werror ${CMAKE_SUPPRESSED_WARNINGS}")
|
||||
endif()
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_COMMON_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS "--std=c++0x ${CMAKE_COMMON_FLAGS}")
|
||||
|
||||
set(CMAKE_C_FLAGS_DEBUG "${DRAIOS_DEBUG_FLAGS}")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${DRAIOS_DEBUG_FLAGS}")
|
||||
@@ -78,8 +104,10 @@ else()
|
||||
set(ZLIB_INCLUDE "${ZLIB_SRC}")
|
||||
set(ZLIB_LIB "${ZLIB_SRC}/libz.a")
|
||||
ExternalProject_Add(zlib
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/zlib-1.2.8.tar.gz"
|
||||
URL_MD5 "44d667c142d7cda120332623eab69f40"
|
||||
# START CHANGE for CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/zlib-1.2.11.tar.gz"
|
||||
URL_MD5 "1c9f62f0778697a09d36121ead88e08e"
|
||||
# END CHANGE for CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843
|
||||
CONFIGURE_COMMAND "./configure"
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
@@ -117,6 +145,32 @@ set(JSONCPP_SRC "${SYSDIG_DIR}/userspace/libsinsp/third-party/jsoncpp")
|
||||
set(JSONCPP_INCLUDE "${JSONCPP_SRC}")
|
||||
set(JSONCPP_LIB_SRC "${JSONCPP_SRC}/jsoncpp.cpp")
|
||||
|
||||
#
|
||||
# nlohmann-json
|
||||
#
|
||||
option(USE_BUNDLED_NJSON "Enable building of the bundled nlohmann-json" ${USE_BUNDLED_DEPS})
|
||||
|
||||
if(NOT USE_BUNDLED_NJSON)
|
||||
find_path(NJSON_INCLUDE json.hpp PATH_SUFFIXES nlohmann)
|
||||
if(NJSON_INCLUDE)
|
||||
message(STATUS "Found nlohmann-json: include: ${NJSON_INCLUDE}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system nlohmann-json")
|
||||
endif()
|
||||
else()
|
||||
# No distinction needed for windows. The implementation is
|
||||
# solely in json.hpp.
|
||||
set(NJSON_SRC "${PROJECT_BINARY_DIR}/njson-prefix/src/njson")
|
||||
message(STATUS "Using bundled nlohmann-json in '${NJSON_SRC}'")
|
||||
set(NJSON_INCLUDE "${NJSON_SRC}/single_include")
|
||||
ExternalProject_Add(njson
|
||||
URL "http://download.draios.com/dependencies/njson-3.3.0.tar.gz"
|
||||
URL_MD5 "e26760e848656a5da400662e6c5d999a"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
#
|
||||
# curses
|
||||
#
|
||||
@@ -215,8 +269,10 @@ else()
|
||||
message(STATUS "Using bundled openssl in '${OPENSSL_BUNDLE_DIR}'")
|
||||
|
||||
ExternalProject_Add(openssl
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/openssl-1.0.2j.tar.gz"
|
||||
URL_MD5 "96322138f0b69e61b7212bc53d5e912b"
|
||||
# START CHANGE for CVE-2017-3735, CVE-2017-3731, CVE-2017-3737, CVE-2017-3738, CVE-2017-3736
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/openssl-1.0.2n.tar.gz"
|
||||
URL_MD5 "13bdc1b1d1ff39b6fd42a255e74676a4"
|
||||
# END CHANGE for CVE-2017-3735, CVE-2017-3731, CVE-2017-3737, CVE-2017-3738, CVE-2017-3736
|
||||
CONFIGURE_COMMAND ./config shared --prefix=${OPENSSL_INSTALL_DIR}
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
@@ -246,8 +302,10 @@ else()
|
||||
|
||||
ExternalProject_Add(curl
|
||||
DEPENDS openssl
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/curl-7.56.0.tar.bz2"
|
||||
URL_MD5 "e0caf257103e0c77cee5be7e9ac66ca4"
|
||||
# START CHANGE for CVE-2017-8816, CVE-2017-8817, CVE-2017-8818, CVE-2018-1000007
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/curl-7.61.0.tar.bz2"
|
||||
URL_MD5 "31d0a9f48dc796a7db351898a1e5058a"
|
||||
# END CHANGE for CVE-2017-8816, CVE-2017-8817, CVE-2017-8818, CVE-2018-1000007
|
||||
CONFIGURE_COMMAND ./configure ${CURL_SSL_OPTION} --disable-shared --enable-optimize --disable-curldebug --disable-rt --enable-http --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-sspi --disable-ntlm-wb --disable-tls-srp --without-winssl --without-darwinssl --without-polarssl --without-cyassl --without-nss --without-axtls --without-ca-path --without-ca-bundle --without-libmetalink --without-librtmp --without-winidn --without-libidn --without-nghttp2 --without-libssh2 --disable-threaded-resolver
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
@@ -389,6 +447,152 @@ else()
|
||||
INSTALL_COMMAND sh -c "cp -R ${PROJECT_BINARY_DIR}/lyaml-prefix/src/lyaml/lib/* ${PROJECT_SOURCE_DIR}/userspace/engine/lua")
|
||||
endif()
|
||||
|
||||
option(USE_BUNDLED_TBB "Enable building of the bundled tbb" ${USE_BUNDLED_DEPS})
|
||||
if(NOT USE_BUNDLED_TBB)
|
||||
find_path(TBB_INCLUDE tbb.h PATH_SUFFIXES tbb)
|
||||
find_library(TBB_LIB NAMES tbb)
|
||||
if(TBB_INCLUDE AND TBB_LIB)
|
||||
message(STATUS "Found tbb: include: ${TBB_INCLUDE}, lib: ${TBB_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system tbb")
|
||||
endif()
|
||||
else()
|
||||
set(TBB_SRC "${PROJECT_BINARY_DIR}/tbb-prefix/src/tbb")
|
||||
|
||||
message(STATUS "Using bundled tbb in '${TBB_SRC}'")
|
||||
|
||||
set(TBB_INCLUDE "${TBB_SRC}/include/")
|
||||
set(TBB_LIB "${TBB_SRC}/build/lib_release/libtbb.a")
|
||||
ExternalProject_Add(tbb
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/tbb-2018_U5.tar.gz"
|
||||
URL_MD5 "ff3ae09f8c23892fbc3008c39f78288f"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE} tbb_build_dir=${TBB_SRC}/build tbb_build_prefix=lib extra_inc=big_iron.inc
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${TBB_LIB}
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
#
|
||||
# civetweb
|
||||
#
|
||||
option(USE_BUNDLED_CIVETWEB "Enable building of the bundled civetweb" ${USE_BUNDLED_DEPS})
|
||||
|
||||
if(NOT USE_BUNDLED_CIVETWEB)
|
||||
find_library(CIVETWEB_LIB NAMES civetweb)
|
||||
if(CIVETWEB_LIB)
|
||||
message(STATUS "Found civetweb: lib: ${CIVETWEB_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system civetweb")
|
||||
endif()
|
||||
else()
|
||||
set(CIVETWEB_SRC "${PROJECT_BINARY_DIR}/civetweb-prefix/src/civetweb/")
|
||||
set(CIVETWEB_LIB "${CIVETWEB_SRC}/install/lib/libcivetweb.a")
|
||||
set(CIVETWEB_INCLUDE_DIR "${CIVETWEB_SRC}/install/include")
|
||||
message(STATUS "Using bundled civetweb in '${CIVETWEB_SRC}'")
|
||||
set(CIVETWEB_DEPENDENCIES "")
|
||||
if(USE_BUNDLED_OPENSSL)
|
||||
list(APPEND CIVETWEB_DEPENDENCIES "openssl")
|
||||
endif()
|
||||
ExternalProject_Add(civetweb
|
||||
DEPENDS ${CIVETWEB_DEPENDENCIES}
|
||||
URL "http://s3.amazonaws.com/download.draios.com/dependencies/civetweb-1.11.tar.gz"
|
||||
URL_MD5 "b6d2175650a27924bccb747cbe084cd4"
|
||||
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E make_directory ${CIVETWEB_SRC}/install/lib
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${CIVETWEB_SRC}/install/include
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_COMMAND ${CMD_MAKE} COPT="-DNO_FILES" WITH_CPP=1
|
||||
INSTALL_COMMAND ${CMD_MAKE} install-lib install-headers PREFIX=${CIVETWEB_SRC}/install WITH_CPP=1)
|
||||
endif()
|
||||
|
||||
option(USE_BUNDLED_CARES "Enable building of the bundled c-ares" ${USE_BUNDLED_DEPS})
|
||||
if(NOT USE_BUNDLED_CARES)
|
||||
find_path(CARES_INCLUDE NAMES cares/ares.h)
|
||||
find_library(CARES_LIB NAMES libcares.a)
|
||||
if(CARES_INCLUDE AND CARES_LIB)
|
||||
message(STATUS "Found c-ares: include: ${CARES_INCLUDE}, lib: ${CARES_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system c-ares")
|
||||
endif()
|
||||
else()
|
||||
set(CARES_SRC "${PROJECT_BINARY_DIR}/c-ares-prefix/src/c-ares")
|
||||
message(STATUS "Using bundled c-ares in '${CARES_SRC}'")
|
||||
set(CARES_INCLUDE "${CARES_SRC}/target/include")
|
||||
set(CARES_LIB "${CARES_SRC}/target/lib/libcares.a")
|
||||
ExternalProject_Add(c-ares
|
||||
URL "https://download.sysdig.com/dependencies/c-ares-1.13.0.tar.gz"
|
||||
URL_MD5 "d2e010b43537794d8bedfb562ae6bba2"
|
||||
CONFIGURE_COMMAND ./configure --prefix=${CARES_SRC}/target
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${CARES_INCLUDE} ${CARES_LIB}
|
||||
INSTALL_COMMAND ${CMD_MAKE} install)
|
||||
endif()
|
||||
|
||||
option(USE_BUNDLED_PROTOBUF "Enable building of the bundled protobuf" ${USE_BUNDLED_DEPS})
|
||||
if(NOT USE_BUNDLED_PROTOBUF)
|
||||
find_program(PROTOC NAMES protoc)
|
||||
find_path(PROTOBUF_INCLUDE NAMES google/protobuf/message.h)
|
||||
find_library(PROTOBUF_LIB NAMES libprotobuf.a)
|
||||
if(PROTOC AND PROTOBUF_INCLUDE AND PROTOBUF_LIB)
|
||||
message(STATUS "Found protobuf: compiler: ${PROTOC}, include: ${PROTOBUF_INCLUDE}, lib: ${PROTOBUF_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system protobuf")
|
||||
endif()
|
||||
else()
|
||||
set(PROTOBUF_SRC "${PROJECT_BINARY_DIR}/protobuf-prefix/src/protobuf")
|
||||
message(STATUS "Using bundled protobuf in '${PROTOBUF_SRC}'")
|
||||
set(PROTOC "${PROTOBUF_SRC}/target/bin/protoc")
|
||||
set(PROTOBUF_INCLUDE "${PROTOBUF_SRC}/target/include")
|
||||
set(PROTOBUF_LIB "${PROTOBUF_SRC}/target/lib/libprotobuf.a")
|
||||
ExternalProject_Add(protobuf
|
||||
DEPENDS openssl zlib
|
||||
URL "https://github.com/google/protobuf/releases/download/v3.5.0/protobuf-cpp-3.5.0.tar.gz"
|
||||
URL_MD5 "e4ba8284a407712168593e79e6555eb2"
|
||||
# TODO what if using system zlib?
|
||||
CONFIGURE_COMMAND /usr/bin/env CPPFLAGS=-I${ZLIB_INCLUDE} LDFLAGS=-L${ZLIB_SRC} ./configure --with-zlib --prefix=${PROTOBUF_SRC}/target
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${PROTOC} ${PROTOBUF_INCLUDE} ${PROTOBUF_LIB}
|
||||
# TODO s390x support
|
||||
INSTALL_COMMAND make install)
|
||||
endif()
|
||||
|
||||
option(USE_BUNDLED_GRPC "Enable building of the bundled grpc" ${USE_BUNDLED_DEPS})
|
||||
if(NOT USE_BUNDLED_GRPC)
|
||||
find_path(GRPC_INCLUDE grpc++/impl/codegen/rpc_method.h)
|
||||
find_library(GRPC_LIB NAMES libgrpc_unsecure.a)
|
||||
find_library(GRPCPP_LIB NAMES libgrpc++_unsecure.a)
|
||||
if(GRPC_INCLUDE AND GRPC_LIB AND GRPCPP_LIB)
|
||||
message(STATUS "Found grpc: include: ${GRPC_INCLUDE}, C lib: ${GRPC_LIB}, C++ lib: ${GRPC_PP_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system grpc")
|
||||
endif()
|
||||
else()
|
||||
set(GRPC_SRC "${PROJECT_BINARY_DIR}/grpc-prefix/src/grpc")
|
||||
message(STATUS "Using bundled grpc in '${GRPC_SRC}'")
|
||||
set(GRPC_INCLUDE "${GRPC_SRC}/include")
|
||||
set(GRPC_LIB "${GRPC_SRC}/libs/opt/libgrpc_unsecure.a")
|
||||
set(GRPCPP_LIB "${GRPC_SRC}/libs/opt/libgrpc++_unsecure.a")
|
||||
|
||||
get_filename_component(PROTOC_DIR ${PROTOC} DIRECTORY)
|
||||
|
||||
ExternalProject_Add(grpc
|
||||
DEPENDS protobuf zlib c-ares
|
||||
URL "http://download.draios.com/dependencies/grpc-1.8.1.tar.gz"
|
||||
URL_MD5 "2fc42c182a0ed1b48ad77397f76bb3bc"
|
||||
CONFIGURE_COMMAND ""
|
||||
# TODO what if using system openssl, protobuf or cares?
|
||||
BUILD_COMMAND HAS_SYSTEM_ZLIB=false LDFLAGS=-static PATH=${PROTOC_DIR}:$ENV{PATH} PKG_CONFIG_PATH=${OPENSSL_BUNDLE_DIR}:${PROTOBUF_SRC}:${CARES_SRC} make grpc_cpp_plugin static_cxx static_c
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${GRPC_LIB} ${GRPCPP_LIB}
|
||||
# TODO s390x support
|
||||
# TODO what if using system zlib
|
||||
PATCH_COMMAND rm -rf third_party/zlib && ln -s ${ZLIB_SRC} third_party/zlib && wget https://download.sysdig.com/dependencies/grpc-1.1.4-Makefile.patch && patch < grpc-1.1.4-Makefile.patch
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
|
||||
install(FILES falco.yaml
|
||||
DESTINATION "${FALCO_ETC_DIR}")
|
||||
|
||||
@@ -427,7 +631,7 @@ set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "http://www.sysdig.org")
|
||||
set(CPACK_DEBIAN_PACKAGE_DEPENDS "dkms (>= 2.1.0.0)")
|
||||
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_BINARY_DIR}/scripts/debian/postinst;${CMAKE_BINARY_DIR}/scripts/debian/prerm;${PROJECT_SOURCE_DIR}/scripts/debian/postrm;${PROJECT_SOURCE_DIR}/cpack/debian/conffiles")
|
||||
|
||||
set(CPACK_RPM_PACKAGE_LICENSE "GPLv2")
|
||||
set(CPACK_RPM_PACKAGE_LICENSE "Apache v2.0")
|
||||
set(CPACK_RPM_PACKAGE_URL "http://www.sysdig.org")
|
||||
set(CPACK_RPM_PACKAGE_REQUIRES "dkms, gcc, make, kernel-devel, perl")
|
||||
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/scripts/rpm/postinstall")
|
||||
|
||||
39
CODE_OF_CONDUCT
Normal file
39
CODE_OF_CONDUCT
Normal file
@@ -0,0 +1,39 @@
|
||||
## CNCF Community Code of Conduct v1.0
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of fostering
|
||||
an open and welcoming community, we pledge to respect all people who contribute
|
||||
through reporting issues, posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free experience for
|
||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
||||
religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information, such as physical or electronic addresses,
|
||||
without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
||||
commit themselves to fairly and consistently applying these principles to every aspect
|
||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a CNCF project maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
487
COPYING
487
COPYING
@@ -1,351 +1,202 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Preamble
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
License is intended to guarantee your freedom to share and change free
|
||||
software--to make sure the software is free for all its users. This
|
||||
General Public License applies to most of the Free Software
|
||||
Foundation's software and to any other program whose authors commit to
|
||||
using it. (Some other Free Software Foundation software is covered by
|
||||
the GNU Lesser General Public License instead.) You can apply it to
|
||||
your programs, too.
|
||||
1. Definitions.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
this service if you wish), that you receive source code or can get it
|
||||
if you want it, that you can change the software or use pieces of it
|
||||
in new free programs; and that you know you can do these things.
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
anyone to deny you these rights or to ask you to surrender the rights.
|
||||
These restrictions translate to certain responsibilities for you if you
|
||||
distribute copies of the software, or if you modify it.
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must give the recipients all the rights that
|
||||
you have. You must make sure that they, too, receive or can get the
|
||||
source code. And you must show them these terms so they know their
|
||||
rights.
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and
|
||||
(2) offer you this license which gives you legal permission to copy,
|
||||
distribute and/or modify the software.
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain
|
||||
that everyone understands that there is no warranty for this free
|
||||
software. If the software is modified by someone else and passed on, we
|
||||
want its recipients to know that what they have is not the original, so
|
||||
that any problems introduced by others will not reflect on the original
|
||||
authors' reputations.
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
Finally, any free program is threatened constantly by software
|
||||
patents. We wish to avoid the danger that redistributors of a free
|
||||
program will individually obtain patent licenses, in effect making the
|
||||
program proprietary. To prevent this, we have made it clear that any
|
||||
patent must be licensed for everyone's free use or not licensed at all.
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
0. This License applies to any program or other work which contains
|
||||
a notice placed by the copyright holder saying it may be distributed
|
||||
under the terms of this General Public License. The "Program", below,
|
||||
refers to any such program or work, and a "work based on the Program"
|
||||
means either the Program or any derivative work under copyright law:
|
||||
that is to say, a work containing the Program or a portion of it,
|
||||
either verbatim or with modifications and/or translated into another
|
||||
language. (Hereinafter, translation is included without limitation in
|
||||
the term "modification".) Each licensee is addressed as "you".
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
Activities other than copying, distribution and modification are not
|
||||
covered by this License; they are outside its scope. The act of
|
||||
running the Program is not restricted, and the output from the Program
|
||||
is covered only if its contents constitute a work based on the
|
||||
Program (independent of having been made by running the Program).
|
||||
Whether that is true depends on what the Program does.
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's
|
||||
source code as you receive it, in any medium, provided that you
|
||||
conspicuously and appropriately publish on each copy an appropriate
|
||||
copyright notice and disclaimer of warranty; keep intact all the
|
||||
notices that refer to this License and to the absence of any warranty;
|
||||
and give any other recipients of the Program a copy of this License
|
||||
along with the Program.
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and
|
||||
you may at your option offer warranty protection in exchange for a fee.
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion
|
||||
of it, thus forming a work based on the Program, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
above, provided that you also meet all of these conditions:
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
a) You must cause the modified files to carry prominent notices
|
||||
stating that you changed the files and the date of any change.
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
b) You must cause any work that you distribute or publish, that in
|
||||
whole or in part contains or is derived from the Program or any
|
||||
part thereof, to be licensed as a whole at no charge to all third
|
||||
parties under the terms of this License.
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
c) If the modified program normally reads commands interactively
|
||||
when run, you must cause it, when started running for such
|
||||
interactive use in the most ordinary way, to print or display an
|
||||
announcement including an appropriate copyright notice and a
|
||||
notice that there is no warranty (or else, saying that you provide
|
||||
a warranty) and that users may redistribute the program under
|
||||
these conditions, and telling the user how to view a copy of this
|
||||
License. (Exception: if the Program itself is interactive but
|
||||
does not normally print such an announcement, your work based on
|
||||
the Program is not required to print an announcement.)
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
These requirements apply to the modified work as a whole. If
|
||||
identifiable sections of that work are not derived from the Program,
|
||||
and can be reasonably considered independent and separate works in
|
||||
themselves, then this License, and its terms, do not apply to those
|
||||
sections when you distribute them as separate works. But when you
|
||||
distribute the same sections as part of a whole which is a work based
|
||||
on the Program, the distribution of the whole must be on the terms of
|
||||
this License, whose permissions for other licensees extend to the
|
||||
entire whole, and thus to each and every part regardless of who wrote it.
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest
|
||||
your rights to work written entirely by you; rather, the intent is to
|
||||
exercise the right to control the distribution of derivative or
|
||||
collective works based on the Program.
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
In addition, mere aggregation of another work not based on the Program
|
||||
with the Program (or with a work based on the Program) on a volume of
|
||||
a storage or distribution medium does not bring the other work under
|
||||
the scope of this License.
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
3. You may copy and distribute the Program (or a work based on it,
|
||||
under Section 2) in object code or executable form under the terms of
|
||||
Sections 1 and 2 above provided that you also do one of the following:
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
a) Accompany it with the complete corresponding machine-readable
|
||||
source code, which must be distributed under the terms of Sections
|
||||
1 and 2 above on a medium customarily used for software interchange; or,
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
b) Accompany it with a written offer, valid for at least three
|
||||
years, to give any third party, for a charge no more than your
|
||||
cost of physically performing source distribution, a complete
|
||||
machine-readable copy of the corresponding source code, to be
|
||||
distributed under the terms of Sections 1 and 2 above on a medium
|
||||
customarily used for software interchange; or,
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
c) Accompany it with the information you received as to the offer
|
||||
to distribute corresponding source code. (This alternative is
|
||||
allowed only for noncommercial distribution and only if you
|
||||
received the program in object code or executable form with such
|
||||
an offer, in accord with Subsection b above.)
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
The source code for a work means the preferred form of the work for
|
||||
making modifications to it. For an executable work, complete source
|
||||
code means all the source code for all modules it contains, plus any
|
||||
associated interface definition files, plus the scripts used to
|
||||
control compilation and installation of the executable. However, as a
|
||||
special exception, the source code distributed need not include
|
||||
anything that is normally distributed (in either source or binary
|
||||
form) with the major components (compiler, kernel, and so on) of the
|
||||
operating system on which the executable runs, unless that component
|
||||
itself accompanies the executable.
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
If distribution of executable or object code is made by offering
|
||||
access to copy from a designated place, then offering equivalent
|
||||
access to copy the source code from the same place counts as
|
||||
distribution of the source code, even though third parties are not
|
||||
compelled to copy the source along with the object code.
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program
|
||||
except as expressly provided under this License. Any attempt
|
||||
otherwise to copy, modify, sublicense or distribute the Program is
|
||||
void, and will automatically terminate your rights under this License.
|
||||
However, parties who have received copies, or rights, from you under
|
||||
this License will not have their licenses terminated so long as such
|
||||
parties remain in full compliance.
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
5. You are not required to accept this License, since you have not
|
||||
signed it. However, nothing else grants you permission to modify or
|
||||
distribute the Program or its derivative works. These actions are
|
||||
prohibited by law if you do not accept this License. Therefore, by
|
||||
modifying or distributing the Program (or any work based on the
|
||||
Program), you indicate your acceptance of this License to do so, and
|
||||
all its terms and conditions for copying, distributing or modifying
|
||||
the Program or works based on it.
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the
|
||||
Program), the recipient automatically receives a license from the
|
||||
original licensor to copy, distribute or modify the Program subject to
|
||||
these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties to
|
||||
this License.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot
|
||||
distribute so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you
|
||||
may not distribute the Program at all. For example, if a patent
|
||||
license would not permit royalty-free redistribution of the Program by
|
||||
all those who receive copies directly or indirectly through you, then
|
||||
the only way you could satisfy both it and this License would be to
|
||||
refrain entirely from distribution of the Program.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under
|
||||
any particular circumstance, the balance of the section is intended to
|
||||
apply and the section as a whole is intended to apply in other
|
||||
circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any
|
||||
patents or other property right claims or to contest validity of any
|
||||
such claims; this section has the sole purpose of protecting the
|
||||
integrity of the free software distribution system, which is
|
||||
implemented by public license practices. Many people have made
|
||||
generous contributions to the wide range of software distributed
|
||||
through that system in reliance on consistent application of that
|
||||
system; it is up to the author/donor to decide if he or she is willing
|
||||
to distribute software through any other system and a licensee cannot
|
||||
impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
may add an explicit geographical distribution limitation excluding
|
||||
those countries, so that distribution is permitted only in or among
|
||||
countries not thus excluded. In such case, this License incorporates
|
||||
the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of this License which applies to it and "any
|
||||
later version", you have the option of following the terms and conditions
|
||||
either of that version or of any later version published by the Free
|
||||
Software Foundation. If the Program does not specify a version number of
|
||||
this License, you may choose any version ever published by the Free Software
|
||||
Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
* In addition, as a special exception, the copyright holders give
|
||||
* permission to link the code of portions of this program with the
|
||||
* OpenSSL library under certain conditions as described in each
|
||||
* individual source file, and distribute linked combinations
|
||||
* including the two.
|
||||
* You must obey the GNU General Public License in all respects
|
||||
* for all of the code used other than OpenSSL. If you modify
|
||||
* file(s) with this exception, you may extend this exception to your
|
||||
* version of the file(s), but you are not obligated to do so. If you
|
||||
* do not wish to do so, delete this exception statement from your
|
||||
* version.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than `show w' and `show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
55
GOVERNANCE
Normal file
55
GOVERNANCE
Normal file
@@ -0,0 +1,55 @@
|
||||
# Process for becoming a maintainer
|
||||
|
||||
* Express interest to the existing maintainers that you or your organization is interested in becoming a
|
||||
maintainer. Becoming a maintainer generally means that you are going to be spending substantial
|
||||
time (>25%) on Falco for the foreseeable future. You should have domain expertise and be extremely
|
||||
proficient in C++. Ultimately your goal is to become a maintainer that will represent your
|
||||
organization.
|
||||
* We will expect you to start contributing increasingly complicated PRs, under the guidance
|
||||
of the existing maintainers.
|
||||
* We may ask you to do some PRs from our backlog.
|
||||
* As you gain experience with the code base and our standards, we will ask you to do code reviews
|
||||
for incoming PRs (i.e., all maintainers are expected to shoulder a proportional share of
|
||||
community reviews).
|
||||
* After a period of approximately 2-3 months of working together and making sure we see eye to eye,
|
||||
the existing maintainers will confer and decide whether to grant maintainer status or not.
|
||||
We make no guarantees on the length of time this will take, but 2-3 months is the approximate
|
||||
goal.
|
||||
|
||||
## Maintainer responsibilities
|
||||
|
||||
* Monitor Slack (delayed response is perfectly acceptable).
|
||||
* Triage GitHub issues and perform pull request reviews for other maintainers and the community.
|
||||
* During GitHub issue triage, apply all applicable [labels](https://github.com/falcosecurity/falco/labels)
|
||||
to each new issue. Labels are extremely useful for future issue follow up. Which labels to apply
|
||||
is somewhat subjective so just use your best judgment.
|
||||
* Make sure that ongoing PRs are moving forward at the right pace, or close them.
|
||||
* Participate when called upon in the security releases. Note that although this should be a rare
|
||||
occurrence, if a serious vulnerability is found, the process may take up to several full days of
|
||||
work to implement. This reality should be taken into account when discussing time commitment
|
||||
obligations with employers.
|
||||
* In general continue to be willing to spend at least 25% of one's time working on Falco (~1.25
|
||||
business days per week).
|
||||
|
||||
## When does a maintainer lose maintainer status
|
||||
|
||||
If a maintainer is no longer interested or cannot perform the maintainer duties listed above, they
|
||||
should volunteer to be moved to emeritus status. In extreme cases this can also occur by a vote of
|
||||
the maintainers per the voting process below.
|
||||
|
||||
# Conflict resolution and voting
|
||||
|
||||
In general, we prefer that technical issues and maintainer membership are amicably worked out
|
||||
between the persons involved. If a dispute cannot be decided independently, the maintainers can be
|
||||
called in to decide an issue. If the maintainers themselves cannot decide an issue, the issue will
|
||||
be resolved by voting. The voting process is a simple majority in which each senior maintainer
|
||||
receives two votes and each normal maintainer receives one vote.
|
||||
|
||||
# Adding new projects to the falcosecurity GitHub organization
|
||||
|
||||
New projects will be added to the falcosecurity organization via GitHub issue discussion in one of the
|
||||
existing projects in the organization. Once sufficient discussion has taken place (~3-5 business
|
||||
days but depending on the volume of conversation), the maintainers of *the project where the issue
|
||||
was opened* (since different projects in the organization may have different maintainers) will
|
||||
decide whether the new project should be added. See the section above on voting if the maintainers
|
||||
cannot easily decide.
|
||||
9
MAINTAINERS
Normal file
9
MAINTAINERS
Normal file
@@ -0,0 +1,9 @@
|
||||
Current maintainers:
|
||||
@mstemm - Mark Stemm <mark.stemm@sysdig.com>
|
||||
@ldegio - Loris Degioanni <loris@sysdig.com>
|
||||
|
||||
Community Management:
|
||||
@mfdii - Michael Ducy <michael@sysdig.com>
|
||||
|
||||
Emeritus maintainers:
|
||||
@henridf - Henri Dubois-Ferriere <henri.dubois-ferriere@sysdig.com>
|
||||
34
README.md
34
README.md
@@ -1,19 +1,23 @@
|
||||
# Sysdig Falco
|
||||
# Falco
|
||||
|
||||
#### Latest release
|
||||
|
||||
**v0.9.0**
|
||||
Read the [change log](https://github.com/draios/falco/blob/dev/CHANGELOG.md)
|
||||
**v0.14.0**
|
||||
Read the [change log](https://github.com/falcosecurity/falco/blob/dev/CHANGELOG.md)
|
||||
|
||||
Dev Branch: [](https://travis-ci.org/falcosecurity/falco)<br />
|
||||
Master Branch: [](https://travis-ci.org/falcosecurity/falco)<br />
|
||||
CII Best Practices: [](https://bestpractices.coreinfrastructure.org/projects/2317)
|
||||
|
||||
Dev Branch: [](https://travis-ci.org/draios/falco)<br />
|
||||
Master Branch: [](https://travis-ci.org/draios/falco)
|
||||
|
||||
## Overview
|
||||
Sysdig Falco is a behavioral activity monitor designed to detect anomalous activity in your applications. Powered by sysdig’s system call capture infrastructure, falco lets you continuously monitor and detect container, application, host, and network activity... all in one place, from one source of data, with one set of rules.
|
||||
Falco is a behavioral activity monitor designed to detect anomalous activity in your applications. Powered by [sysdig’s](https://github.com/draios/sysdig) system call capture infrastructure, Falco lets you continuously monitor and detect container, application, host, and network activity... all in one place, from one source of data, with one set of rules.
|
||||
|
||||
Falco is hosted by the Cloud Native Computing Foundation (CNCF) as a sandbox level project. If you are an organization that wants to help shape the evolution of technologies that are container-packaged, dynamically-scheduled and microservices-oriented, consider joining the CNCF. For details read the [Falco CNCF project proposal](https://github.com/cncf/toc/tree/master/proposals/falco.adoc).
|
||||
|
||||
#### What kind of behaviors can Falco detect?
|
||||
|
||||
Falco can detect and alert on any behavior that involves making Linux system calls. Thanks to Sysdig's core decoding and state tracking functionality, falco alerts can be triggered by the use of specific system calls, their arguments, and by properties of the calling process. For example, you can easily detect things like:
|
||||
Falco can detect and alert on any behavior that involves making Linux system calls. Falco alerts can be triggered by the use of specific system calls, their arguments, and by properties of the calling process. For example, you can easily detect things like:
|
||||
|
||||
- A shell is run inside a container
|
||||
- A container is running in privileged mode, or is mounting a sensitive path like `/proc` from the host.
|
||||
@@ -24,27 +28,27 @@ Falco can detect and alert on any behavior that involves making Linux system cal
|
||||
|
||||
#### How Falco Compares to Other Security Tools like SELinux, Auditd, etc.
|
||||
|
||||
One of the questions we often get when we talk about Sysdig Falco is “How does it compare to other tools like SELinux, AppArmor, Auditd, etc. that also have security policies?”. We wrote a [blog post](https://sysdig.com/blog/selinux-seccomp-falco-technical-discussion/) comparing Falco to other tools.
|
||||
One of the questions we often get when we talk about Falco is “How does it compare to other tools like SELinux, AppArmor, Auditd, etc. that also have security policies?”. We wrote a [blog post](https://sysdig.com/blog/selinux-seccomp-falco-technical-discussion/) comparing Falco to other tools.
|
||||
|
||||
|
||||
Documentation
|
||||
---
|
||||
[Visit the wiki](https://github.com/draios/falco/wiki) for full documentation on falco.
|
||||
[Visit the wiki](https://github.com/falcosecurity/falco/wiki) for full documentation on falco.
|
||||
|
||||
Join the Community
|
||||
---
|
||||
* Follow us on [Twitter](https://twitter.com/sysdig) for general falco and sysdig news.
|
||||
* This is our [blog](https://sysdig.com/blog/), where you can find the latest [falco](https://sysdig.com/blog/tag/falco/) posts.
|
||||
* Join our [Public Slack](https://slack.sysdig.com) channel for sysdig and falco announcements and discussions.
|
||||
* [Website](https://falco.org) for Falco.
|
||||
* We are working on a blog for the Falco project. In the meantime you can find [Falco](https://sysdig.com/blog/tag/falco/) posts over on the Sysdig blog.
|
||||
* Join our [Public Slack](https://slack.sysdig.com) channel for open source sysdig and Falco announcements and discussions.
|
||||
|
||||
License Terms
|
||||
---
|
||||
Falco is licensed to you under the [GPL 2.0](./COPYING) open source license.
|
||||
Falco is licensed to you under the [Apache 2.0](./COPYING) open source license.
|
||||
|
||||
Contributor License Agreements
|
||||
---
|
||||
### Background
|
||||
As we did for sysdig, we are formalizing the way that we accept contributions of code from the contributing community. We must now ask that contributions to falco be provided subject to the terms and conditions of a [Contributor License Agreement (CLA)](./cla). The CLA comes in two forms, applicable to contributions by individuals, or by legal entities such as corporations and their employees. We recognize that entering into a CLA with us involves real consideration on your part, and we’ve tried to make this process as clear and simple as possible.
|
||||
We are formalizing the way that we accept contributions of code from the contributing community. We must now ask that contributions to falco be provided subject to the terms and conditions of a [Contributor License Agreement (CLA)](./cla). The CLA comes in two forms, applicable to contributions by individuals, or by legal entities such as corporations and their employees. We recognize that entering into a CLA with us involves real consideration on your part, and we’ve tried to make this process as clear and simple as possible.
|
||||
|
||||
We’ve modeled our CLA off of industry standards, such as [the CLA used by Kubernetes](https://github.com/kubernetes/kubernetes/blob/master/CONTRIBUTING.md). Note that this agreement is not a transfer of copyright ownership, this simply is a license agreement for contributions, intended to clarify the intellectual property license granted with contributions from any person or entity. It is for your protection as a contributor as well as the protection of falco; it does not change your rights to use your own contributions for any other purpose.
|
||||
|
||||
@@ -75,7 +79,7 @@ falco-CLA-1.0-signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use a real name of a natural person who is an authorized representative of the contributing entity; pseudonyms or anonymous contributions are not allowed.
|
||||
|
||||
**Government contributions**: Employees or officers of the United States Government, must review the [Government Contributor License Agreement](https://github.com/draios/falco/blob/dev/cla/falco_govt_contributor_agreement.txt), must be an authorized representative of the contributing entity, and indicate agreement to it on behalf of the contributing entity by adding the following lines to every GIT commit message:
|
||||
**Government contributions**: Employees or officers of the United States Government, must review the [Government Contributor License Agreement](https://github.com/falcosecurity/falco/blob/dev/cla/falco_govt_contributor_agreement.txt), must be an authorized representative of the contributing entity, and indicate agreement to it on behalf of the contributing entity by adding the following lines to every GIT commit message:
|
||||
|
||||
```
|
||||
falco-CLA-1.0-contributing-govt-entity: Full Legal Name of Entity
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM debian:unstable
|
||||
|
||||
MAINTAINER Sysdig <support@sysdig.com>
|
||||
LABEL maintainer="Sysdig <support@sysdig.com>"
|
||||
|
||||
ENV FALCO_REPOSITORY dev
|
||||
|
||||
@@ -17,18 +17,47 @@ ADD http://download.draios.com/apt-draios-priority /etc/apt/preferences.d/
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
bash-completion \
|
||||
curl \
|
||||
jq \
|
||||
gnupg2 \
|
||||
bc \
|
||||
clang-7 \
|
||||
ca-certificates \
|
||||
curl \
|
||||
dkms \
|
||||
gnupg2 \
|
||||
gcc \
|
||||
gcc-5 \
|
||||
gdb && rm -rf /var/lib/apt/lists/*
|
||||
gcc-6 \
|
||||
gdb \
|
||||
jq \
|
||||
libc6-dev \
|
||||
libelf-dev \
|
||||
llvm-7 \
|
||||
netcat \
|
||||
xz-utils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# gcc 5 is no longer included in debian unstable, but we need it to
|
||||
# build centos kernels, which are 3.x based and explicitly want a gcc
|
||||
# version 3, 4, or 5 compiler. So grab copies we've saved from debian
|
||||
# snapshots with the prefix https://snapshot.debian.org/archive/debian/20190122T000000Z.
|
||||
|
||||
RUN curl -o cpp-5_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/cpp-5_5.5.0-12_amd64.deb \
|
||||
&& curl -o gcc-5-base_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/gcc-5-base_5.5.0-12_amd64.deb \
|
||||
&& curl -o gcc-5_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/gcc-5_5.5.0-12_amd64.deb \
|
||||
&& curl -o libasan2_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libasan2_5.5.0-12_amd64.deb \
|
||||
&& curl -o libgcc-5-dev_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libgcc-5-dev_5.5.0-12_amd64.deb \
|
||||
&& curl -o libisl15_0.18-4_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libisl15_0.18-4_amd64.deb \
|
||||
&& curl -o libmpx0_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libmpx0_5.5.0-12_amd64.deb \
|
||||
&& dpkg -i cpp-5_5.5.0-12_amd64.deb gcc-5-base_5.5.0-12_amd64.deb gcc-5_5.5.0-12_amd64.deb libasan2_5.5.0-12_amd64.deb libgcc-5-dev_5.5.0-12_amd64.deb libisl15_0.18-4_amd64.deb libmpx0_5.5.0-12_amd64.deb \
|
||||
&& rm -f cpp-5_5.5.0-12_amd64.deb gcc-5-base_5.5.0-12_amd64.deb gcc-5_5.5.0-12_amd64.deb libasan2_5.5.0-12_amd64.deb libgcc-5-dev_5.5.0-12_amd64.deb libisl15_0.18-4_amd64.deb libmpx0_5.5.0-12_amd64.deb
|
||||
|
||||
# Since our base Debian image ships with GCC 7 which breaks older kernels, revert the
|
||||
# default to gcc-5.
|
||||
RUN rm -rf /usr/bin/gcc && ln -s /usr/bin/gcc-5 /usr/bin/gcc
|
||||
|
||||
RUN rm -rf /usr/bin/clang \
|
||||
&& rm -rf /usr/bin/llc \
|
||||
&& ln -s /usr/bin/clang-7 /usr/bin/clang \
|
||||
&& ln -s /usr/bin/llc-7 /usr/bin/llc
|
||||
|
||||
RUN curl -s https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public | apt-key add - \
|
||||
&& curl -s -o /etc/apt/sources.list.d/draios.list http://download.draios.com/$FALCO_REPOSITORY/deb/draios.list \
|
||||
&& apt-get update \
|
||||
@@ -36,7 +65,20 @@ RUN curl -s https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public |
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
# Some base images have an empty /lib/modules by default
|
||||
# If it's not empty, docker build will fail instead of
|
||||
# silently overwriting the existing directory
|
||||
RUN rm -df /lib/modules \
|
||||
&& ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
|
||||
# debian:unstable head contains binutils 2.31, which generates
|
||||
# binaries that are incompatible with kernels < 4.16. So manually
|
||||
# forcibly install binutils 2.30-22 instead.
|
||||
RUN curl -s -o binutils_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils_2.30-22_amd64.deb \
|
||||
&& curl -s -o libbinutils_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/libbinutils_2.30-22_amd64.deb \
|
||||
&& curl -s -o binutils-x86-64-linux-gnu_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils-x86-64-linux-gnu_2.30-22_amd64.deb \
|
||||
&& curl -s -o binutils-common_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils-common_2.30-22_amd64.deb \
|
||||
&& dpkg -i *binutils*.deb
|
||||
|
||||
COPY ./docker-entrypoint.sh /
|
||||
|
||||
|
||||
@@ -1,4 +1,22 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#set -e
|
||||
|
||||
# Set the SYSDIG_SKIP_LOAD variable to skip loading the sysdig kernel module
|
||||
|
||||
@@ -1,2 +1,19 @@
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco .
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
image:
|
||||
docker build -t sysdig/falco-event-generator:latest .
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
/*
|
||||
Copyright (C) 2016 Draios inc.
|
||||
Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
|
||||
This file is part of falco.
|
||||
|
||||
falco is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License version 2 as
|
||||
published by the Free Software Foundation.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
falco is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with falco. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <cstdio>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM debian:unstable
|
||||
|
||||
MAINTAINER Sysdig <support@sysdig.com>
|
||||
LABEL maintainer="Sysdig <support@sysdig.com>"
|
||||
|
||||
ENV FALCO_VERSION 0.1.1dev
|
||||
|
||||
@@ -17,23 +17,64 @@ ADD http://download.draios.com/apt-draios-priority /etc/apt/preferences.d/
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
bash-completion \
|
||||
curl \
|
||||
jq \
|
||||
gnupg2 \
|
||||
bc \
|
||||
clang-7 \
|
||||
ca-certificates \
|
||||
curl \
|
||||
dkms \
|
||||
gnupg2 \
|
||||
gcc \
|
||||
gcc-5 \
|
||||
dkms && rm -rf /var/lib/apt/lists/*
|
||||
gcc-6 \
|
||||
jq \
|
||||
libc6-dev \
|
||||
libelf-dev \
|
||||
llvm-7 \
|
||||
netcat \
|
||||
xz-utils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# gcc 5 is no longer included in debian unstable, but we need it to
|
||||
# build centos kernels, which are 3.x based and explicitly want a gcc
|
||||
# version 3, 4, or 5 compiler. So grab copies we've saved from debian
|
||||
# snapshots with the prefix https://snapshot.debian.org/archive/debian/20190122T000000Z.
|
||||
|
||||
RUN curl -o cpp-5_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/cpp-5_5.5.0-12_amd64.deb \
|
||||
&& curl -o gcc-5-base_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/gcc-5-base_5.5.0-12_amd64.deb \
|
||||
&& curl -o gcc-5_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/gcc-5_5.5.0-12_amd64.deb \
|
||||
&& curl -o libasan2_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libasan2_5.5.0-12_amd64.deb \
|
||||
&& curl -o libgcc-5-dev_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libgcc-5-dev_5.5.0-12_amd64.deb \
|
||||
&& curl -o libisl15_0.18-4_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libisl15_0.18-4_amd64.deb \
|
||||
&& curl -o libmpx0_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libmpx0_5.5.0-12_amd64.deb \
|
||||
&& dpkg -i cpp-5_5.5.0-12_amd64.deb gcc-5-base_5.5.0-12_amd64.deb gcc-5_5.5.0-12_amd64.deb libasan2_5.5.0-12_amd64.deb libgcc-5-dev_5.5.0-12_amd64.deb libisl15_0.18-4_amd64.deb libmpx0_5.5.0-12_amd64.deb \
|
||||
&& rm -f cpp-5_5.5.0-12_amd64.deb gcc-5-base_5.5.0-12_amd64.deb gcc-5_5.5.0-12_amd64.deb libasan2_5.5.0-12_amd64.deb libgcc-5-dev_5.5.0-12_amd64.deb libisl15_0.18-4_amd64.deb libmpx0_5.5.0-12_amd64.deb
|
||||
|
||||
# Since our base Debian image ships with GCC 7 which breaks older kernels, revert the
|
||||
# default to gcc-5.
|
||||
RUN rm -rf /usr/bin/gcc && ln -s /usr/bin/gcc-5 /usr/bin/gcc
|
||||
|
||||
RUN ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
RUN rm -rf /usr/bin/clang \
|
||||
&& rm -rf /usr/bin/llc \
|
||||
&& ln -s /usr/bin/clang-7 /usr/bin/clang \
|
||||
&& ln -s /usr/bin/llc-7 /usr/bin/llc
|
||||
|
||||
# Some base images have an empty /lib/modules by default
|
||||
# If it's not empty, docker build will fail instead of
|
||||
# silently overwriting the existing directory
|
||||
RUN rm -df /lib/modules \
|
||||
&& ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
|
||||
ADD falco-${FALCO_VERSION}-x86_64.deb /
|
||||
RUN dpkg -i /falco-${FALCO_VERSION}-x86_64.deb
|
||||
|
||||
# debian:unstable head contains binutils 2.31, which generates
|
||||
# binaries that are incompatible with kernels < 4.16. So manually
|
||||
# forcibly install binutils 2.30-22 instead.
|
||||
RUN curl -s -o binutils_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils_2.30-22_amd64.deb \
|
||||
&& curl -s -o libbinutils_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/libbinutils_2.30-22_amd64.deb \
|
||||
&& curl -s -o binutils-x86-64-linux-gnu_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils-x86-64-linux-gnu_2.30-22_amd64.deb \
|
||||
&& curl -s -o binutils-common_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils-common_2.30-22_amd64.deb \
|
||||
&& dpkg -i *binutils*.deb
|
||||
|
||||
COPY ./docker-entrypoint.sh /
|
||||
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
@@ -1,4 +1,22 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#set -e
|
||||
|
||||
# Set the SYSDIG_SKIP_LOAD variable to skip loading the sysdig kernel module
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM debian:unstable
|
||||
|
||||
MAINTAINER Sysdig <support@sysdig.com>
|
||||
LABEL maintainer="Sysdig <support@sysdig.com>"
|
||||
|
||||
ENV FALCO_REPOSITORY stable
|
||||
|
||||
@@ -17,17 +17,46 @@ ADD http://download.draios.com/apt-draios-priority /etc/apt/preferences.d/
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
bash-completion \
|
||||
curl \
|
||||
jq \
|
||||
bc \
|
||||
clang-7 \
|
||||
ca-certificates \
|
||||
curl \
|
||||
dkms \
|
||||
gnupg2 \
|
||||
gcc \
|
||||
gcc-5 && rm -rf /var/lib/apt/lists/*
|
||||
gcc-6 \
|
||||
jq \
|
||||
libc6-dev \
|
||||
libelf-dev \
|
||||
llvm-7 \
|
||||
netcat \
|
||||
xz-utils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# gcc 5 is no longer included in debian unstable, but we need it to
|
||||
# build centos kernels, which are 3.x based and explicitly want a gcc
|
||||
# version 3, 4, or 5 compiler. So grab copies we've saved from debian
|
||||
# snapshots with the prefix https://snapshot.debian.org/archive/debian/20190122T000000Z.
|
||||
|
||||
RUN curl -o cpp-5_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/cpp-5_5.5.0-12_amd64.deb \
|
||||
&& curl -o gcc-5-base_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/gcc-5-base_5.5.0-12_amd64.deb \
|
||||
&& curl -o gcc-5_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/gcc-5_5.5.0-12_amd64.deb \
|
||||
&& curl -o libasan2_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libasan2_5.5.0-12_amd64.deb \
|
||||
&& curl -o libgcc-5-dev_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libgcc-5-dev_5.5.0-12_amd64.deb \
|
||||
&& curl -o libisl15_0.18-4_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libisl15_0.18-4_amd64.deb \
|
||||
&& curl -o libmpx0_5.5.0-12_amd64.deb https://s3.amazonaws.com/download.draios.com/dependencies/libmpx0_5.5.0-12_amd64.deb \
|
||||
&& dpkg -i cpp-5_5.5.0-12_amd64.deb gcc-5-base_5.5.0-12_amd64.deb gcc-5_5.5.0-12_amd64.deb libasan2_5.5.0-12_amd64.deb libgcc-5-dev_5.5.0-12_amd64.deb libisl15_0.18-4_amd64.deb libmpx0_5.5.0-12_amd64.deb \
|
||||
&& rm -f cpp-5_5.5.0-12_amd64.deb gcc-5-base_5.5.0-12_amd64.deb gcc-5_5.5.0-12_amd64.deb libasan2_5.5.0-12_amd64.deb libgcc-5-dev_5.5.0-12_amd64.deb libisl15_0.18-4_amd64.deb libmpx0_5.5.0-12_amd64.deb
|
||||
|
||||
# Since our base Debian image ships with GCC 7 which breaks older kernels, revert the
|
||||
# default to gcc-5.
|
||||
RUN rm -rf /usr/bin/gcc && ln -s /usr/bin/gcc-5 /usr/bin/gcc
|
||||
|
||||
RUN rm -rf /usr/bin/clang \
|
||||
&& rm -rf /usr/bin/llc \
|
||||
&& ln -s /usr/bin/clang-7 /usr/bin/clang \
|
||||
&& ln -s /usr/bin/llc-7 /usr/bin/llc
|
||||
|
||||
RUN curl -s https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public | apt-key add - \
|
||||
&& curl -s -o /etc/apt/sources.list.d/draios.list http://download.draios.com/$FALCO_REPOSITORY/deb/draios.list \
|
||||
&& apt-get update \
|
||||
@@ -35,7 +64,20 @@ RUN curl -s https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public |
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
# Some base images have an empty /lib/modules by default
|
||||
# If it's not empty, docker build will fail instead of
|
||||
# silently overwriting the existing directory
|
||||
RUN rm -df /lib/modules \
|
||||
&& ln -s $SYSDIG_HOST_ROOT/lib/modules /lib/modules
|
||||
|
||||
# debian:unstable head contains binutils 2.31, which generates
|
||||
# binaries that are incompatible with kernels < 4.16. So manually
|
||||
# forcibly install binutils 2.30-22 instead.
|
||||
RUN curl -s -o binutils_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils_2.30-22_amd64.deb \
|
||||
&& curl -s -o libbinutils_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/libbinutils_2.30-22_amd64.deb \
|
||||
&& curl -s -o binutils-x86-64-linux-gnu_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils-x86-64-linux-gnu_2.30-22_amd64.deb \
|
||||
&& curl -s -o binutils-common_2.30-22_amd64.deb http://snapshot.debian.org/archive/debian/20180622T211149Z/pool/main/b/binutils/binutils-common_2.30-22_amd64.deb \
|
||||
&& dpkg -i *binutils*.deb
|
||||
|
||||
COPY ./docker-entrypoint.sh /
|
||||
|
||||
|
||||
@@ -1,4 +1,22 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#set -e
|
||||
|
||||
# Set the SYSDIG_SKIP_LOAD variable to skip loading the sysdig kernel module
|
||||
|
||||
38
examples/k8s_audit_config/README.md
Normal file
38
examples/k8s_audit_config/README.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Introduction
|
||||
|
||||
The files in this directory can be used to configure k8s audit logging. The relevant files are:
|
||||
|
||||
* [audit-policy.yaml](./audit-policy.yaml): The k8s audit log configuration we used to create the rules in [k8s_audit_rules.yaml](../../rules/k8s_audit_rules.yaml). You may find it useful as a reference when creating your own K8s Audit Log configuration.
|
||||
* [webhook-config.yaml.in](./webhook-config.yaml.in): A (templated) webhook configuration that sends audit events to an ip associated with the falco service, port 8765. It is templated in that the *actual* ip is defined in an environment variable `FALCO_SERVICE_CLUSTERIP`, which can be plugged in using a program like `envsubst`. You may find it useful as a starting point when deciding how to route audit events to the embedded webserver within falco.
|
||||
|
||||
These files are only needed when using Minikube, which doesn't currently
|
||||
have the ability to provide an audit config/webhook config directly
|
||||
from the minikube commandline. See [this issue](https://github.com/kubernetes/minikube/issues/2741) for more details.
|
||||
|
||||
* [apiserver-config.patch.sh](./apiserver-config.patch.sh): A script that changes the configuration file `/etc/kubernetes/manifests/kube-apiserver.yaml` to add necessary config options and mounts for the kube-apiserver container that runs within the minikube vm.
|
||||
|
||||
A way to use these files with minikube to run falco and enable audit logging would be the following:
|
||||
|
||||
#### Start Minikube with Audit Logging Enabled
|
||||
|
||||
Run the following to start minikube with Audit Logging Enabled:
|
||||
|
||||
```
|
||||
minikube start --kubernetes-version v1.11.0 --mount --mount-string $PWD:/tmp/k8s_audit_config --feature-gates AdvancedAuditing=true
|
||||
```
|
||||
|
||||
#### Create a Falco DaemonSet and Supporting Accounts/Services
|
||||
|
||||
Follow the [K8s Using Daemonset](../../integrations/k8s-using-daemonset/README.md) instructions to create a falco service account, service, configmap, and daemonset.
|
||||
|
||||
#### Configure Audit Logging with a Policy and Webhook
|
||||
|
||||
Run the following commands to fill in the template file with the ClusterIP ip address you created with the `falco-service` service above, and configure audit logging to use a policy and webhook that directs the right events to the falco daemonset. Although services like `falco-service.default.svc.cluster.local` can not be resolved from the kube-apiserver container within the minikube vm (they're run as pods but not *really* a part of the cluster), the ClusterIPs associated with those services are routable.
|
||||
|
||||
```
|
||||
FALCO_SERVICE_CLUSTERIP=$(kubectl get service falco-service -o=jsonpath={.spec.clusterIP}) envsubst < webhook-config.yaml.in > webhook-config.yaml
|
||||
minikube ssh sudo bash /tmp/k8s_audit_config/apiserver-config.patch.sh
|
||||
```
|
||||
|
||||
K8s audit events will then be routed to the falco daemonset within the cluster, which you can observe via `kubectl logs -f $(kubectl get pods -l app=falco-example -o jsonpath={.items[0].metadata.name})`.
|
||||
|
||||
40
examples/k8s_audit_config/apiserver-config.patch.sh
Normal file
40
examples/k8s_audit_config/apiserver-config.patch.sh
Normal file
@@ -0,0 +1,40 @@
|
||||
#!/bin/sh
|
||||
|
||||
IFS=''
|
||||
|
||||
FILENAME="/etc/kubernetes/manifests/kube-apiserver.yaml"
|
||||
|
||||
if grep audit-webhook-config-file $FILENAME ; then
|
||||
echo audit-webhook patch already applied
|
||||
exit 0
|
||||
fi
|
||||
|
||||
TMPFILE="/tmp/kube-apiserver.yaml.patched"
|
||||
rm -f "$TMPFILE"
|
||||
|
||||
while read LINE
|
||||
do
|
||||
echo "$LINE" >> "$TMPFILE"
|
||||
case "$LINE" in
|
||||
*"- kube-apiserver"*)
|
||||
echo " - --audit-log-path=/tmp/k8s_audit_config/audit.log" >> "$TMPFILE"
|
||||
echo " - --audit-policy-file=/tmp/k8s_audit_config/audit-policy.yaml" >> "$TMPFILE"
|
||||
echo " - --audit-webhook-config-file=/tmp/k8s_audit_config/webhook-config.yaml" >> "$TMPFILE"
|
||||
echo " - --audit-webhook-batch-max-wait=5s" >> "$TMPFILE"
|
||||
;;
|
||||
*"volumeMounts:"*)
|
||||
echo " - mountPath: /tmp/k8s_audit_config/" >> "$TMPFILE"
|
||||
echo " name: data" >> "$TMPFILE"
|
||||
;;
|
||||
*"volumes:"*)
|
||||
echo " - hostPath:" >> "$TMPFILE"
|
||||
echo " path: /tmp/k8s_audit_config" >> "$TMPFILE"
|
||||
echo " name: data" >> "$TMPFILE"
|
||||
;;
|
||||
|
||||
esac
|
||||
done < "$FILENAME"
|
||||
|
||||
cp "$FILENAME" "/tmp/kube-apiserver.yaml.original"
|
||||
cp "$TMPFILE" "$FILENAME"
|
||||
|
||||
76
examples/k8s_audit_config/audit-policy.yaml
Normal file
76
examples/k8s_audit_config/audit-policy.yaml
Normal file
@@ -0,0 +1,76 @@
|
||||
apiVersion: audit.k8s.io/v1beta1 # This is required.
|
||||
kind: Policy
|
||||
# Don't generate audit events for all requests in RequestReceived stage.
|
||||
omitStages:
|
||||
- "RequestReceived"
|
||||
rules:
|
||||
# Log pod changes at RequestResponse level
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: ""
|
||||
# Resource "pods" doesn't match requests to any subresource of pods,
|
||||
# which is consistent with the RBAC policy.
|
||||
resources: ["pods", "deployments"]
|
||||
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: "rbac.authorization.k8s.io"
|
||||
# Resource "pods" doesn't match requests to any subresource of pods,
|
||||
# which is consistent with the RBAC policy.
|
||||
resources: ["clusterroles", "clusterrolebindings"]
|
||||
|
||||
# Log "pods/log", "pods/status" at Metadata level
|
||||
- level: Metadata
|
||||
resources:
|
||||
- group: ""
|
||||
resources: ["pods/log", "pods/status"]
|
||||
|
||||
# Don't log requests to a configmap called "controller-leader"
|
||||
- level: None
|
||||
resources:
|
||||
- group: ""
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["controller-leader"]
|
||||
|
||||
# Don't log watch requests by the "system:kube-proxy" on endpoints or services
|
||||
- level: None
|
||||
users: ["system:kube-proxy"]
|
||||
verbs: ["watch"]
|
||||
resources:
|
||||
- group: "" # core API group
|
||||
resources: ["endpoints", "services"]
|
||||
|
||||
# Don't log authenticated requests to certain non-resource URL paths.
|
||||
- level: None
|
||||
userGroups: ["system:authenticated"]
|
||||
nonResourceURLs:
|
||||
- "/api*" # Wildcard matching.
|
||||
- "/version"
|
||||
|
||||
# Log the request body of configmap changes in kube-system.
|
||||
- level: Request
|
||||
resources:
|
||||
- group: "" # core API group
|
||||
resources: ["configmaps"]
|
||||
# This rule only applies to resources in the "kube-system" namespace.
|
||||
# The empty string "" can be used to select non-namespaced resources.
|
||||
namespaces: ["kube-system"]
|
||||
|
||||
# Log configmap and secret changes in all other namespaces at the RequestResponse level.
|
||||
- level: RequestResponse
|
||||
resources:
|
||||
- group: "" # core API group
|
||||
resources: ["secrets", "configmaps"]
|
||||
|
||||
# Log all other resources in core and extensions at the Request level.
|
||||
- level: Request
|
||||
resources:
|
||||
- group: "" # core API group
|
||||
- group: "extensions" # Version of group should NOT be included.
|
||||
|
||||
# A catch-all rule to log all other requests at the Metadata level.
|
||||
- level: Metadata
|
||||
# Long-running requests like watches that fall under this rule will not
|
||||
# generate an audit event in RequestReceived.
|
||||
omitStages:
|
||||
- "RequestReceived"
|
||||
14
examples/k8s_audit_config/webhook-config.yaml.in
Normal file
14
examples/k8s_audit_config/webhook-config.yaml.in
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- name: falco
|
||||
cluster:
|
||||
server: http://$FALCO_SERVICE_CLUSTERIP:8765/k8s_audit
|
||||
contexts:
|
||||
- context:
|
||||
cluster: falco
|
||||
user: ""
|
||||
name: default-context
|
||||
current-context: default-context
|
||||
preferences: {}
|
||||
users: []
|
||||
@@ -1,21 +1,22 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2013-2014 My Company inc.
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of my-software
|
||||
# This file is part of falco.
|
||||
#
|
||||
# my-software is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# my-software is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with my-software. If not, see <http://www.gnu.org/licenses/>.
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
function install_rpm {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#Demo of falco with bash exec via poorly designed REST API.
|
||||
# Demo of falco with bash exec via poorly designed REST API.
|
||||
|
||||
## Introduction
|
||||
|
||||
@@ -42,7 +42,7 @@ This starts the following containers:
|
||||
Run the following commands to execute arbitrary commands like 'ls', 'pwd', etc:
|
||||
|
||||
```
|
||||
$ curl http://localhost:8080/api/exec/ls
|
||||
$ curl http://localhost:8181/api/exec/ls
|
||||
|
||||
demo.yml
|
||||
node_modules
|
||||
@@ -52,7 +52,7 @@ server.js
|
||||
```
|
||||
|
||||
```
|
||||
$ curl http://localhost:8080/api/exec/pwd
|
||||
$ curl http://localhost:8181/api/exec/pwd
|
||||
|
||||
.../examples/nodejs-bad-rest-api
|
||||
```
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
express_server:
|
||||
container_name: express_server
|
||||
image: node:latest
|
||||
command: bash -c "apt-get -y update && apt-get -y install runit && npm install && runsv /usr/src/app"
|
||||
command: bash -c "apt-get -y update && apt-get -y install runit && cd /usr/src/app && npm install && runsv /usr/src/app"
|
||||
ports:
|
||||
- "8181:8181"
|
||||
volumes:
|
||||
|
||||
@@ -2,6 +2,6 @@
|
||||
"name": "bad-rest-api",
|
||||
"main": "server.js",
|
||||
"dependencies": {
|
||||
"express": "~4.0.0"
|
||||
"express": "~4.16.0"
|
||||
}
|
||||
}
|
||||
|
||||
41
falco.yaml
41
falco.yaml
@@ -1,3 +1,21 @@
|
||||
#
|
||||
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
|
||||
#
|
||||
# This file is part of falco .
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# File(s) or Directories containing Falco rules, loaded at startup.
|
||||
# The name "rules_file" is only for backwards compatibility.
|
||||
# If the entry is a file, it will be read directly. If the entry is a directory,
|
||||
@@ -13,6 +31,7 @@
|
||||
rules_file:
|
||||
- /etc/falco/falco_rules.yaml
|
||||
- /etc/falco/falco_rules.local.yaml
|
||||
- /etc/falco/k8s_audit_rules.yaml
|
||||
- /etc/falco/rules.d
|
||||
|
||||
# Whether to output events in json or text
|
||||
@@ -41,8 +60,8 @@ log_level: info
|
||||
priority: debug
|
||||
|
||||
# Whether or not output to any of the output channels below is
|
||||
# buffered. Defaults to true
|
||||
buffered_outputs: true
|
||||
# buffered. Defaults to false
|
||||
buffered_outputs: false
|
||||
|
||||
# A throttling mechanism implemented as a token bucket limits the
|
||||
# rate of falco notifications. This throttling is controlled by the following configuration
|
||||
@@ -82,6 +101,24 @@ file_output:
|
||||
stdout_output:
|
||||
enabled: true
|
||||
|
||||
# Falco contains an embedded webserver that can be used to accept K8s
|
||||
# Audit Events. These config options control the behavior of that
|
||||
# webserver. (By default, the webserver is disabled).
|
||||
#
|
||||
# The ssl_certificate is a combination SSL Certificate and corresponding
|
||||
# key contained in a single file. You can generate a key/cert as follows:
|
||||
#
|
||||
# $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem
|
||||
# $ cat certificate.pem key.pem > falco.pem
|
||||
# $ sudo cp falco.pem /etc/falco/falco.pem
|
||||
|
||||
webserver:
|
||||
enabled: true
|
||||
listen_port: 8765
|
||||
k8s_audit_endpoint: /k8s_audit
|
||||
ssl_enabled: false
|
||||
ssl_certificate: /etc/falco/falco.pem
|
||||
|
||||
# Possible additional things you might want to do with program output:
|
||||
# - send to a slack webhook:
|
||||
# program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX"
|
||||
|
||||
13
integrations/anchore-falco/Dockerfile
Normal file
13
integrations/anchore-falco/Dockerfile
Normal file
@@ -0,0 +1,13 @@
|
||||
FROM python:3-stretch
|
||||
|
||||
RUN pip install pipenv
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ADD Pipfile /app/Pipfile
|
||||
ADD Pipfile.lock /app/Pipfile.lock
|
||||
RUN pipenv install --system --deploy
|
||||
|
||||
ADD . /app
|
||||
|
||||
CMD ["python", "main.py"]
|
||||
16
integrations/anchore-falco/Pipfile
Normal file
16
integrations/anchore-falco/Pipfile
Normal file
@@ -0,0 +1,16 @@
|
||||
[[source]]
|
||||
url = "https://pypi.python.org/simple"
|
||||
verify_ssl = true
|
||||
name = "pypi"
|
||||
|
||||
[dev-packages]
|
||||
doublex-expects = "==0.7.0rc2"
|
||||
doublex = "*"
|
||||
mamba = "*"
|
||||
expects = "*"
|
||||
|
||||
[packages]
|
||||
requests = "*"
|
||||
|
||||
[requires]
|
||||
python_version = "3.6"
|
||||
156
integrations/anchore-falco/Pipfile.lock
generated
Normal file
156
integrations/anchore-falco/Pipfile.lock
generated
Normal file
@@ -0,0 +1,156 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "f2737a14e8f562cf355e13ae09f1eed0f80415effd2aa01b86125e94523da345"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "3.6"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.python.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"certifi": {
|
||||
"hashes": [
|
||||
"sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7",
|
||||
"sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0"
|
||||
],
|
||||
"version": "==2018.4.16"
|
||||
},
|
||||
"chardet": {
|
||||
"hashes": [
|
||||
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
|
||||
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
|
||||
],
|
||||
"version": "==3.0.4"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
"sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e",
|
||||
"sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16"
|
||||
],
|
||||
"version": "==2.7"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
"sha256:63b52e3c866428a224f97cab011de738c36aec0185aa91cfacd418b5d58911d1",
|
||||
"sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.19.1"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
"sha256:a68ac5e15e76e7e5dd2b8f94007233e01effe3e50e8daddf69acfd81cb686baf",
|
||||
"sha256:b5725a0bd4ba422ab0e66e89e030c806576753ea3ee08554382c14e685d117b5"
|
||||
],
|
||||
"version": "==1.23"
|
||||
}
|
||||
},
|
||||
"develop": {
|
||||
"args": {
|
||||
"hashes": [
|
||||
"sha256:a785b8d837625e9b61c39108532d95b85274acd679693b71ebb5156848fcf814"
|
||||
],
|
||||
"version": "==0.1.0"
|
||||
},
|
||||
"clint": {
|
||||
"hashes": [
|
||||
"sha256:05224c32b1075563d0b16d0015faaf9da43aa214e4a2140e51f08789e7a4c5aa"
|
||||
],
|
||||
"version": "==0.5.1"
|
||||
},
|
||||
"coverage": {
|
||||
"hashes": [
|
||||
"sha256:03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba",
|
||||
"sha256:0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed",
|
||||
"sha256:104ab3934abaf5be871a583541e8829d6c19ce7bde2923b2751e0d3ca44db60a",
|
||||
"sha256:15b111b6a0f46ee1a485414a52a7ad1d703bdf984e9ed3c288a4414d3871dcbd",
|
||||
"sha256:198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640",
|
||||
"sha256:1c383d2ef13ade2acc636556fd544dba6e14fa30755f26812f54300e401f98f2",
|
||||
"sha256:28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162",
|
||||
"sha256:2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508",
|
||||
"sha256:337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249",
|
||||
"sha256:3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694",
|
||||
"sha256:3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a",
|
||||
"sha256:3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287",
|
||||
"sha256:3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1",
|
||||
"sha256:4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000",
|
||||
"sha256:56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1",
|
||||
"sha256:5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e",
|
||||
"sha256:69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5",
|
||||
"sha256:6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062",
|
||||
"sha256:701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba",
|
||||
"sha256:7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc",
|
||||
"sha256:76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc",
|
||||
"sha256:7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99",
|
||||
"sha256:7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653",
|
||||
"sha256:7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c",
|
||||
"sha256:8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558",
|
||||
"sha256:9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f",
|
||||
"sha256:9e112fcbe0148a6fa4f0a02e8d58e94470fc6cb82a5481618fea901699bf34c4",
|
||||
"sha256:ac4fef68da01116a5c117eba4dd46f2e06847a497de5ed1d64bb99a5fda1ef91",
|
||||
"sha256:b8815995e050764c8610dbc82641807d196927c3dbed207f0a079833ffcf588d",
|
||||
"sha256:be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9",
|
||||
"sha256:c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd",
|
||||
"sha256:de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d",
|
||||
"sha256:e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6",
|
||||
"sha256:e4d96c07229f58cb686120f168276e434660e4358cc9cf3b0464210b04913e77",
|
||||
"sha256:f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80",
|
||||
"sha256:f8a923a85cb099422ad5a2e345fe877bbc89a8a8b23235824a93488150e45f6e"
|
||||
],
|
||||
"version": "==4.5.1"
|
||||
},
|
||||
"doublex": {
|
||||
"hashes": [
|
||||
"sha256:062af49d9e4148bc47b7512d3fdc8e145dea4671d074ffd54b2464a19d3757ab"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.8.4"
|
||||
},
|
||||
"doublex-expects": {
|
||||
"hashes": [
|
||||
"sha256:5421bd92319c77ccc5a81d595d06e9c9f7f670de342b33e8007a81e70f9fade8"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.7.0rc2"
|
||||
},
|
||||
"expects": {
|
||||
"hashes": [
|
||||
"sha256:37538d7b0fa9c0d53e37d07b0e8c07d89754d3deec1f0f8ed1be27f4f10363dd"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.8.0"
|
||||
},
|
||||
"mamba": {
|
||||
"hashes": [
|
||||
"sha256:63e70a8666039cf143a255000e23f29be4ea4b5b8169f2b053f94eb73a2ea9e2"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.9.3"
|
||||
},
|
||||
"pyhamcrest": {
|
||||
"hashes": [
|
||||
"sha256:6b672c02fdf7470df9674ab82263841ce8333fb143f32f021f6cb26f0e512420",
|
||||
"sha256:7a4bdade0ed98c699d728191a058a60a44d2f9c213c51e2dd1e6fb42f2c6128a",
|
||||
"sha256:8ffaa0a53da57e89de14ced7185ac746227a8894dbd5a3c718bf05ddbd1d56cd",
|
||||
"sha256:bac0bea7358666ce52e3c6c85139632ed89f115e9af52d44b3c36e0bf8cf16a9",
|
||||
"sha256:f30e9a310bcc1808de817a92e95169ffd16b60cbc5a016a49c8d0e8ababfae79"
|
||||
],
|
||||
"version": "==1.9.0"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
|
||||
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
|
||||
],
|
||||
"version": "==1.11.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
89
integrations/anchore-falco/README.md
Normal file
89
integrations/anchore-falco/README.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# Create Falco rule from Anchore policy result
|
||||
|
||||
This integration creates a rule for Sysdig Falco based on Anchore policy result.
|
||||
So that when we will try to run an image which has a ```stop``` final action result
|
||||
in Anchore, Falco will alert us.
|
||||
|
||||
## Getting started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
For running this integration you will need:
|
||||
|
||||
* Python 3.6
|
||||
* pipenv
|
||||
* An [anchore-engine](https://github.com/anchore/anchore-engine) running
|
||||
|
||||
### Configuration
|
||||
|
||||
This integration uses the [same environment variables that anchore-cli](https://github.com/anchore/anchore-cli#configuring-the-anchore-cli):
|
||||
|
||||
* ANCHORE_CLI_USER: The user used to conect to anchore-engine. By default is ```admin```
|
||||
* ANCHORE_CLI_PASS: The password used to connect to anchore-engine.
|
||||
* ANCHORE_CLI_URL: The url where anchore-engine listens. Make sure does not end with a slash. By default is ```http://localhost:8228/v1```
|
||||
* ANCHORE_CLI_SSL_VERIFY: Flag for enabling if HTTP client verifies SSL. By default is ```true```
|
||||
|
||||
### Running
|
||||
|
||||
This is a Python program which generates a Falco rule based on anchore-engine
|
||||
information:
|
||||
|
||||
```
|
||||
pipenv run python main.py
|
||||
```
|
||||
|
||||
And this will output something like:
|
||||
|
||||
|
||||
```yaml
|
||||
- macro: anchore_stop_policy_evaluation_containers
|
||||
condition: container.image.id in ("8626492fecd368469e92258dfcafe055f636cb9cbc321a5865a98a0a6c99b8dd", "e86d9bb526efa0b0401189d8df6e3856d0320a3d20045c87b4e49c8a8bdb22c1")
|
||||
|
||||
- rule: Run Anchore Containers with Stop Policy Evaluation
|
||||
desc: Detect containers which does not receive a positive Policy Evaluation from Anchore Engine.
|
||||
|
||||
condition: evt.type=execve and proc.vpid=1 and container and anchore_stop_policy_evaluation_containers
|
||||
output: A stop policy evaluation container from anchore has started (%container.info image=%container.image)
|
||||
priority: INFO
|
||||
tags: [container]
|
||||
```
|
||||
|
||||
You can save that output to ```/etc/falco/rules.d/anchore-integration-rules.yaml```
|
||||
and Falco will start checking this rule.
|
||||
|
||||
As long as information in anchore-engine can change, it's a good idea to run this
|
||||
integration **periodically** and keep the rule synchronized with anchore-engine
|
||||
policy evaluation result.
|
||||
|
||||
## Tests
|
||||
|
||||
As long as there are contract tests with anchore-engine, it needs a working
|
||||
anchore-engine and its environment variables.
|
||||
|
||||
```
|
||||
pipenv install -d
|
||||
pipenv run mamba --format=documentation
|
||||
```
|
||||
|
||||
## Docker support
|
||||
|
||||
### Build the image
|
||||
|
||||
```
|
||||
docker build -t sysdig/anchore-falco .
|
||||
```
|
||||
|
||||
### Running the image
|
||||
|
||||
An image exists on DockerHub, its name is ```sysdig/anchore-falco```.
|
||||
|
||||
So you can run directly with Docker:
|
||||
|
||||
```
|
||||
docker run --rm -e ANCHORE_CLI_USER=<user-for-custom-anchore-engine> \
|
||||
-e ANCHORE_CLI_PASS=<passsword-for-user-for-custom-anchore-engine> \
|
||||
-e ANCHORE_CLI_URL=http://<custom-anchore-engine-host>:8228/v1 \
|
||||
sysdig/anchore-falco
|
||||
```
|
||||
|
||||
And this will output the Falco rule based on *custom-anchore-engine-host*.
|
||||
25
integrations/anchore-falco/actions.py
Normal file
25
integrations/anchore-falco/actions.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import string
|
||||
|
||||
FALCO_RULE_TEMPLATE = string.Template('''
|
||||
- macro: anchore_stop_policy_evaluation_containers
|
||||
condition: container.image.id in ($images)
|
||||
|
||||
- rule: Run Anchore Containers with Stop Policy Evaluation
|
||||
desc: Detect containers which does not receive a positive Policy Evaluation from Anchore Engine.
|
||||
|
||||
condition: evt.type=execve and proc.vpid=1 and container and anchore_stop_policy_evaluation_containers
|
||||
output: A stop policy evaluation container from anchore has started (%container.info image=%container.image)
|
||||
priority: INFO
|
||||
tags: [container]
|
||||
''')
|
||||
|
||||
|
||||
class CreateFalcoRuleFromAnchoreStopPolicyResults:
|
||||
def __init__(self, anchore_client):
|
||||
self._anchore_client = anchore_client
|
||||
|
||||
def run(self):
|
||||
images = self._anchore_client.get_images_with_policy_result('stop')
|
||||
|
||||
images = ['"{}"'.format(image) for image in images]
|
||||
return FALCO_RULE_TEMPLATE.substitute(images=', '.join(images))
|
||||
39
integrations/anchore-falco/infrastructure.py
Normal file
39
integrations/anchore-falco/infrastructure.py
Normal file
@@ -0,0 +1,39 @@
|
||||
import requests
|
||||
|
||||
|
||||
class AnchoreClient:
|
||||
def __init__(self, user, password, url, ssl_verify):
|
||||
self._user = user
|
||||
self._password = password
|
||||
self._url = url
|
||||
self._ssl_verify = ssl_verify
|
||||
|
||||
def get_images_with_policy_result(self, policy_result):
|
||||
results = []
|
||||
for image in self._get_all_images():
|
||||
final_action = self._evaluate_image(image)
|
||||
|
||||
if final_action == 'stop':
|
||||
results.append(image['image_id'])
|
||||
|
||||
return results
|
||||
|
||||
def _get_all_images(self):
|
||||
response = self._do_get_request(self._url + '/images')
|
||||
return [
|
||||
{
|
||||
'image_id': image['image_detail'][0]['imageId'],
|
||||
'image_digest': image['image_detail'][0]['imageDigest'],
|
||||
'full_tag': image['image_detail'][0]['fulltag']
|
||||
} for image in response.json()]
|
||||
|
||||
def _do_get_request(self, url):
|
||||
return requests.get(url,
|
||||
auth=(self._user, self._password),
|
||||
verify=self._ssl_verify,
|
||||
headers={'Content-Type': 'application/json'})
|
||||
|
||||
def _evaluate_image(self, image):
|
||||
response = self._do_get_request(self._url + '/images/{}/check?tag={}'.format(image['image_digest'], image['full_tag']))
|
||||
if response.status_code == 200:
|
||||
return response.json()[0][image['image_digest']][image['full_tag']][0]['detail']['result']['final_action']
|
||||
21
integrations/anchore-falco/main.py
Normal file
21
integrations/anchore-falco/main.py
Normal file
@@ -0,0 +1,21 @@
|
||||
import os
|
||||
|
||||
import actions, infrastructure
|
||||
|
||||
|
||||
def main():
|
||||
anchore_client = infrastructure.AnchoreClient(
|
||||
os.environ.get('ANCHORE_CLI_USER', 'admin'),
|
||||
os.environ['ANCHORE_CLI_PASS'],
|
||||
os.environ.get('ANCHORE_CLI_URL', 'http://localhost:8228/v1'),
|
||||
os.environ.get('ANCHORE_CLI_SSL_VERIFY', True)
|
||||
)
|
||||
action = actions.CreateFalcoRuleFromAnchoreStopPolicyResults(anchore_client)
|
||||
|
||||
result = action.run()
|
||||
|
||||
print(result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,21 @@
|
||||
from mamba import description, it, before
|
||||
from expects import expect, contain
|
||||
|
||||
from doublex import Stub, when
|
||||
|
||||
import actions
|
||||
import infrastructure
|
||||
|
||||
|
||||
with description(actions.CreateFalcoRuleFromAnchoreStopPolicyResults) as self:
|
||||
with before.each:
|
||||
self.anchore_client = Stub(infrastructure.AnchoreClient)
|
||||
self.action = actions.CreateFalcoRuleFromAnchoreStopPolicyResults(self.anchore_client)
|
||||
|
||||
with it('queries Anchore Server for images with Stop as policy results'):
|
||||
image_id = 'any image id'
|
||||
when(self.anchore_client).get_images_with_policy_result('stop').returns([image_id])
|
||||
|
||||
result = self.action.run()
|
||||
|
||||
expect(result).to(contain(image_id))
|
||||
@@ -0,0 +1,19 @@
|
||||
from mamba import description, it
|
||||
from expects import expect, have_length, be_above
|
||||
|
||||
import os
|
||||
|
||||
import infrastructure
|
||||
|
||||
|
||||
with description(infrastructure.AnchoreClient) as self:
|
||||
with it('retrieves images with stop policy results'):
|
||||
user = os.environ['ANCHORE_CLI_USER']
|
||||
password = os.environ['ANCHORE_CLI_PASS']
|
||||
url = os.environ['ANCHORE_CLI_URL']
|
||||
|
||||
client = infrastructure.AnchoreClient(user, password, url, True)
|
||||
|
||||
result = client.get_images_with_policy_result('stop')
|
||||
|
||||
expect(result).to(have_length(be_above(1)))
|
||||
@@ -4,7 +4,7 @@ This directory gives you the required YAML files to stand up Sysdig Falco on Kub
|
||||
|
||||
The two options are provided to deploy a Daemon Set:
|
||||
- `k8s-with-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes with RBAC enabled.
|
||||
- `k8s-without-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes without RBAC enabled.
|
||||
- `k8s-without-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes without RBAC enabled. **This method is deprecated in favor of RBAC-based installs, and won't be updated going forward.**
|
||||
|
||||
Also provided:
|
||||
- `falco-event-generator-deployment.yaml` - A Kubernetes Deployment to generate sample events. This is useful for testing, but note it will generate a large number of events.
|
||||
@@ -21,11 +21,20 @@ clusterrolebinding "falco-cluster-role-binding" created
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
|
||||
The Daemon Set also relies on a Kubernetes ConfigMap to store the Falco configuration and make the configuration available to the Falco Pods. This allows you to manage custom configuration without rebuilding and redeploying the underlying Pods. In order to create the ConfigMap you'll need to first need to copy the required configuration from their location in this GitHub repo to the `k8s-with-rbac/falco-config/` directory. Any modification of the configuration should be performed on these copies rather than the original files.
|
||||
We also create a service that allows other services to reach the embedded webserver in falco, which listens on https port 8765:
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ kubectl create -f k8s-with-rbac/falco-service.yaml
|
||||
service/falco-service created
|
||||
k8s-using-daemonset$
|
||||
```
|
||||
|
||||
The Daemon Set also relies on a Kubernetes ConfigMap to store the Falco configuration and make the configuration available to the Falco Pods. This allows you to manage custom configuration without rebuilding and redeploying the underlying Pods. In order to create the ConfigMap you'll need to first need to copy the required configuration from their location in this GitHub repo to the `k8s-with-rbac/falco-config/` directory (please note that you will need to create the /falco-config directory). Any modification of the configuration should be performed on these copies rather than the original files.
|
||||
|
||||
```
|
||||
k8s-using-daemonset$ cp ../../falco.yaml k8s-with-rbac/falco-config/
|
||||
k8s-using-daemonset$ cp ../../rules/falco_rules.* k8s-with-rbac/falco-config/
|
||||
k8s-using-daemonset$ cp ../../rules/k8s_audit_rules.yaml k8s-with-rbac/falco-config/
|
||||
```
|
||||
|
||||
If you want to send Falco alerts to a Slack channel, you'll want to modify the `falco.yaml` file to point to your Slack webhook. For more information on getting a webhook URL for your Slack team, refer to the [Slack documentation](https://api.slack.com/incoming-webhooks). Add the below to the bottom of the `falco.yaml` config file you just copied to enable Slack messages.
|
||||
@@ -54,7 +63,7 @@ k8s-using-daemonset$
|
||||
```
|
||||
|
||||
|
||||
## Deploying to Kubernetes without RBAC enabled
|
||||
## Deploying to Kubernetes without RBAC enabled (**Deprecated**)
|
||||
|
||||
If you are running Kubernetes with Legacy Authorization enabled, you can use `kubectl` to deploy the Daemon Set provided in the `k8s-without-rbac` directory. The example provides the ability to post messages to a Slack channel via a webhook. For more information on getting a webhook URL for your Slack team, refer to the [Slack documentation](https://api.slack.com/incoming-webhooks). Modify the [`args`](https://github.com/draios/falco/blob/dev/examples/k8s-using-daemonset/falco-daemonset.yaml#L21) passed to the Falco container to point to the appropriate URL for your webhook.
|
||||
|
||||
@@ -2,14 +2,20 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: falco-account
|
||||
labels:
|
||||
app: falco-example
|
||||
role: security
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: falco-cluster-role
|
||||
labels:
|
||||
app: falco-example
|
||||
role: security
|
||||
rules:
|
||||
- apiGroups: ["extensions",""]
|
||||
resources: ["nodes","namespaces","pods","replicationcontrollers","services","events","configmaps"]
|
||||
resources: ["nodes","namespaces","pods","replicationcontrollers","replicasets","services","daemonsets","deployments","events","configmaps"]
|
||||
verbs: ["get","list","watch"]
|
||||
- nonResourceURLs: ["/healthz", "/healthz/*"]
|
||||
verbs: ["get"]
|
||||
@@ -19,6 +25,9 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: falco-cluster-role-binding
|
||||
namespace: default
|
||||
labels:
|
||||
app: falco-example
|
||||
role: security
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: falco-account
|
||||
@@ -1,24 +1,30 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: falco
|
||||
name: falco-daemonset
|
||||
labels:
|
||||
name: falco-daemonset
|
||||
app: demo
|
||||
app: falco-example
|
||||
role: security
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: falco
|
||||
app: demo
|
||||
app: falco-example
|
||||
role: security
|
||||
spec:
|
||||
serviceAccount: falco-account
|
||||
containers:
|
||||
- name: falco
|
||||
image: sysdig/falco:latest
|
||||
image: falcosecurity/falco:latest
|
||||
securityContext:
|
||||
privileged: true
|
||||
# Uncomment the 3 lines below to enable eBPF support for Falco.
|
||||
# This allows Falco to run on Google COS.
|
||||
# Leave blank for the default probe location, or set to the path
|
||||
# of a precompiled probe.
|
||||
# env:
|
||||
# - name: SYSDIG_BPF_PROBE
|
||||
# value: ""
|
||||
args: [ "/usr/bin/falco", "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token", "-k", "https://kubernetes.default", "-pk"]
|
||||
volumeMounts:
|
||||
- mountPath: /host/var/run/docker.sock
|
||||
@@ -37,6 +43,9 @@ spec:
|
||||
- mountPath: /host/usr
|
||||
name: usr-fs
|
||||
readOnly: true
|
||||
- mountPath: /host/etc/
|
||||
name: etc-fs
|
||||
readOnly: true
|
||||
- mountPath: /etc/falco
|
||||
name: falco-config
|
||||
volumes:
|
||||
@@ -58,6 +67,9 @@ spec:
|
||||
- name: usr-fs
|
||||
hostPath:
|
||||
path: /usr
|
||||
- name: etc-fs
|
||||
hostPath:
|
||||
path: /etc
|
||||
- name: falco-config
|
||||
configMap:
|
||||
name: falco-config
|
||||
@@ -0,0 +1,13 @@
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: falco-service
|
||||
labels:
|
||||
app: falco-example
|
||||
role: security
|
||||
spec:
|
||||
selector:
|
||||
app: falco-example
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8765
|
||||
@@ -15,7 +15,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: falco
|
||||
image: sysdig/falco:latest
|
||||
image: falcosecurity/falco:latest
|
||||
securityContext:
|
||||
privileged: true
|
||||
args: [ "/usr/bin/falco", "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token", "-k", "https://kubernetes.default", "-pk", "-o", "json_output=true", "-o", "program_output.enabled=true", "-o", "program_output.program=jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/see_your_slack_team/apps_settings_for/a_webhook_url"]
|
||||
18
integrations/kubernetes-response-engine/README.md
Normal file
18
integrations/kubernetes-response-engine/README.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# Kubernetes Response Engine for Sysdig Falco
|
||||
|
||||
A response engine for Falco that allows to process security events executing playbooks to respond to security threats.
|
||||
|
||||
## Architecture
|
||||
|
||||
* *[Falco](https://sysdig.com/opensource/falco/)* monitors containers and processes to alert on unexpected behavior. This is defined through the runtime policy built from multiple rules that define what the system should and shouldn't do.
|
||||
* *falco-nats* forwards the alert to a message broker service into a topic compound by `falco.<severity>.<rule_name_slugified>`.
|
||||
* *[NATS](https://nats.io/)*, our message broker, delivers the alert to any subscribers to the different topics.
|
||||
* *[Kubeless](https://kubeless.io/)*, a FaaS framework that runs in Kubernetes, receives the security events and executes the configured playbooks.
|
||||
|
||||
## Glossary
|
||||
|
||||
* *Security event*: Alert sent by Falco when a configured rule matches the behaviour on that host.
|
||||
* *Playbook*: Each piece code executed when an alert is received to respond to that threat in an automated way, some examples include:
|
||||
- sending an alert to Slack
|
||||
- stop the pod killing the container
|
||||
- taint the specific node where the pod is running
|
||||
4
integrations/kubernetes-response-engine/deployment/aws/.gitignore
vendored
Normal file
4
integrations/kubernetes-response-engine/deployment/aws/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
.terraform/*
|
||||
.terraform.*
|
||||
terraform.*
|
||||
aws-auth-patch.yml
|
||||
@@ -0,0 +1,17 @@
|
||||
deploy: rbac create configure
|
||||
|
||||
rbac:
|
||||
kubectl apply -f cluster-role.yaml
|
||||
kubectl apply -f cluster-role-binding.yaml
|
||||
|
||||
create:
|
||||
terraform apply -auto-approve
|
||||
|
||||
configure:
|
||||
kubectl get -n kube-system configmap/aws-auth -o yaml | awk "/mapRoles: \|/{print;print \"$(shell terraform output patch_for_aws_auth)\";next}1" > aws-auth-patch.yml
|
||||
kubectl -n kube-system replace -f aws-auth-patch.yml
|
||||
|
||||
clean:
|
||||
terraform destroy -force
|
||||
kubectl delete -f cluster-role-binding.yaml
|
||||
kubectl delete -f cluster-role.yaml
|
||||
@@ -0,0 +1,23 @@
|
||||
# Terraform manifests for Kubernetes Response Engine running on AWS
|
||||
|
||||
In this directory are the Terraform manifests for creating required infrasturcture
|
||||
for the Kubernetes Response Engine running with AWS technology: SNS for messaging
|
||||
and Lambda for executing the playbooks.
|
||||
|
||||
## Deploy
|
||||
|
||||
For creating the resources, just run default Makefile target:
|
||||
|
||||
```
|
||||
make
|
||||
```
|
||||
|
||||
This will ask for an IAM user which creates the bridge between EKS rbac and AWS IAM.
|
||||
|
||||
## Clean
|
||||
|
||||
You can clean IAM roles and SNS topics with:
|
||||
|
||||
```
|
||||
make clean
|
||||
```
|
||||
@@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-response-engine-cluster-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-response-engine-cluster-role
|
||||
subjects:
|
||||
- kind: User
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
name: kubernetes-response-engine
|
||||
@@ -0,0 +1,25 @@
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-response-engine-cluster-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- delete
|
||||
- list
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- patch
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- create
|
||||
@@ -0,0 +1,29 @@
|
||||
resource "aws_iam_user" "kubernetes-response-engine-user" {
|
||||
name = "kubernetes_response_engine"
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "iam-for-lambda" {
|
||||
name = "iam_for_lambda"
|
||||
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "sts:AssumeRole",
|
||||
"Principal": {
|
||||
"Service": "lambda.amazonaws.com",
|
||||
"AWS": "${aws_iam_user.kubernetes-response-engine-user.arn}"
|
||||
},
|
||||
"Effect": "Allow",
|
||||
"Sid": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "iam-for-lambda" {
|
||||
policy_arn = "arn:aws:iam::aws:policy/CloudWatchFullAccess"
|
||||
role = "${aws_iam_role.iam-for-lambda.name}"
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
locals {
|
||||
patch_for_aws_auth = <<CONFIGMAPAWSAUTH
|
||||
- rolearn: ${aws_iam_role.iam-for-lambda.arn}\n
|
||||
username: kubernetes-response-engine
|
||||
CONFIGMAPAWSAUTH
|
||||
}
|
||||
|
||||
output "patch_for_aws_auth" {
|
||||
value = "${local.patch_for_aws_auth}"
|
||||
}
|
||||
|
||||
output "iam_for_lambda" {
|
||||
value = "${aws_iam_role.iam-for-lambda.arn}"
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
resource "aws_sns_topic" "falco-alerts" {
|
||||
name = "falco-alerts"
|
||||
}
|
||||
@@ -0,0 +1,9 @@
|
||||
deploy:
|
||||
kubectl apply -f nats/
|
||||
kubectl apply -f kubeless/
|
||||
kubectl apply -f .
|
||||
|
||||
clean:
|
||||
kubectl delete -f kubeless/
|
||||
kubectl delete -f nats/
|
||||
kubectl delete -f .
|
||||
@@ -0,0 +1,20 @@
|
||||
# Kubernetes Manifests for Kubernetes Response Engine
|
||||
|
||||
In this directory are the manifests for creating required infrastructure in the
|
||||
Kubernetes cluster
|
||||
|
||||
## Deploy
|
||||
|
||||
For deploying NATS, Falco + Falco-NATS output and Kubeless just run default Makefile target:
|
||||
|
||||
```
|
||||
make
|
||||
```
|
||||
|
||||
## Clean
|
||||
|
||||
You can clean your cluster with:
|
||||
|
||||
```
|
||||
make clean
|
||||
```
|
||||
@@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-response-engine-cluster-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubernetes-response-engine-cluster-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: default
|
||||
@@ -0,0 +1,25 @@
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-response-engine-cluster-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- delete
|
||||
- list
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- patch
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- create
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: kubeless
|
||||
@@ -0,0 +1,366 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: controller-acct
|
||||
namespace: kubeless
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kubeless-controller-deployer
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
- list
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- apps
|
||||
- extensions
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
- list
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- list
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resourceNames:
|
||||
- kubeless-registry-credentials
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- kubeless.io
|
||||
resources:
|
||||
- functions
|
||||
- httptriggers
|
||||
- cronjobtriggers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- delete
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- cronjobs
|
||||
- jobs
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
- deletecollection
|
||||
- list
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
- list
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- monitoring.coreos.com
|
||||
resources:
|
||||
- alertmanagers
|
||||
- prometheuses
|
||||
- servicemonitors
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubeless-controller-deployer
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubeless-controller-deployer
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: controller-acct
|
||||
namespace: kubeless
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: functions.kubeless.io
|
||||
spec:
|
||||
group: kubeless.io
|
||||
names:
|
||||
kind: Function
|
||||
plural: functions
|
||||
singular: function
|
||||
scope: Namespaced
|
||||
version: v1beta1
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: httptriggers.kubeless.io
|
||||
spec:
|
||||
group: kubeless.io
|
||||
names:
|
||||
kind: HTTPTrigger
|
||||
plural: httptriggers
|
||||
singular: httptrigger
|
||||
scope: Namespaced
|
||||
version: v1beta1
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: cronjobtriggers.kubeless.io
|
||||
spec:
|
||||
group: kubeless.io
|
||||
names:
|
||||
kind: CronJobTrigger
|
||||
plural: cronjobtriggers
|
||||
singular: cronjobtrigger
|
||||
scope: Namespaced
|
||||
version: v1beta1
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
builder-image: kubeless/function-image-builder:v1.0.0-alpha.6
|
||||
builder-image-secret: ""
|
||||
deployment: '{}'
|
||||
enable-build-step: "false"
|
||||
function-registry-tls-verify: "true"
|
||||
ingress-enabled: "false"
|
||||
provision-image: kubeless/unzip@sha256:f162c062973cca05459834de6ed14c039d45df8cdb76097f50b028a1621b3697
|
||||
provision-image-secret: ""
|
||||
runtime-images: |-
|
||||
[
|
||||
{
|
||||
"ID": "python",
|
||||
"compiled": false,
|
||||
"versions": [
|
||||
{
|
||||
"name": "python27",
|
||||
"version": "2.7",
|
||||
"runtimeImage": "kubeless/python@sha256:07cfb0f3d8b6db045dc317d35d15634d7be5e436944c276bf37b1c630b03add8",
|
||||
"initImage": "python:2.7"
|
||||
},
|
||||
{
|
||||
"name": "python34",
|
||||
"version": "3.4",
|
||||
"runtimeImage": "kubeless/python@sha256:f19640c547a3f91dbbfb18c15b5e624029b4065c1baf2892144e07c36f0a7c8f",
|
||||
"initImage": "python:3.4"
|
||||
},
|
||||
{
|
||||
"name": "python36",
|
||||
"version": "3.6",
|
||||
"runtimeImage": "kubeless/python@sha256:0c9f8f727d42625a4e25230cfe612df7488b65f283e7972f84108d87e7443d72",
|
||||
"initImage": "python:3.6"
|
||||
}
|
||||
],
|
||||
"depName": "requirements.txt",
|
||||
"fileNameSuffix": ".py"
|
||||
},
|
||||
{
|
||||
"ID": "nodejs",
|
||||
"compiled": false,
|
||||
"versions": [
|
||||
{
|
||||
"name": "node6",
|
||||
"version": "6",
|
||||
"runtimeImage": "kubeless/nodejs@sha256:013facddb0f66c150844192584d823d7dfb2b5b8d79fd2ae98439c86685da657",
|
||||
"initImage": "node:6.10"
|
||||
},
|
||||
{
|
||||
"name": "node8",
|
||||
"version": "8",
|
||||
"runtimeImage": "kubeless/nodejs@sha256:b155d7e20e333044b60009c12a25a97c84eed610f2a3d9d314b47449dbdae0e5",
|
||||
"initImage": "node:8"
|
||||
}
|
||||
],
|
||||
"depName": "package.json",
|
||||
"fileNameSuffix": ".js"
|
||||
},
|
||||
{
|
||||
"ID": "nodejs_distroless",
|
||||
"compiled": false,
|
||||
"versions": [
|
||||
{
|
||||
"name": "node8",
|
||||
"version": "8",
|
||||
"runtimeImage": "henrike42/kubeless/runtimes/nodejs/distroless:0.0.2",
|
||||
"initImage": "node:8"
|
||||
}
|
||||
],
|
||||
"depName": "package.json",
|
||||
"fileNameSuffix": ".js"
|
||||
},
|
||||
{
|
||||
"ID": "ruby",
|
||||
"compiled": false,
|
||||
"versions": [
|
||||
{
|
||||
"name": "ruby24",
|
||||
"version": "2.4",
|
||||
"runtimeImage": "kubeless/ruby@sha256:01665f1a32fe4fab4195af048627857aa7b100e392ae7f3e25a44bd296d6f105",
|
||||
"initImage": "bitnami/ruby:2.4"
|
||||
}
|
||||
],
|
||||
"depName": "Gemfile",
|
||||
"fileNameSuffix": ".rb"
|
||||
},
|
||||
{
|
||||
"ID": "php",
|
||||
"compiled": false,
|
||||
"versions": [
|
||||
{
|
||||
"name": "php72",
|
||||
"version": "7.2",
|
||||
"runtimeImage": "kubeless/php@sha256:9b86066b2640bedcd88acb27f43dfaa2b338f0d74d9d91131ea781402f7ec8ec",
|
||||
"initImage": "composer:1.6"
|
||||
}
|
||||
],
|
||||
"depName": "composer.json",
|
||||
"fileNameSuffix": ".php"
|
||||
},
|
||||
{
|
||||
"ID": "go",
|
||||
"compiled": true,
|
||||
"versions": [
|
||||
{
|
||||
"name": "go1.10",
|
||||
"version": "1.10",
|
||||
"runtimeImage": "kubeless/go@sha256:e2fd49f09b6ff8c9bac6f1592b3119ea74237c47e2955a003983e08524cb3ae5",
|
||||
"initImage": "kubeless/go-init@sha256:983b3f06452321a2299588966817e724d1a9c24be76cf1b12c14843efcdff502"
|
||||
}
|
||||
],
|
||||
"depName": "Gopkg.toml",
|
||||
"fileNameSuffix": ".go"
|
||||
},
|
||||
{
|
||||
"ID": "dotnetcore",
|
||||
"compiled": true,
|
||||
"versions": [
|
||||
{
|
||||
"name": "dotnetcore2.0",
|
||||
"version": "2.0",
|
||||
"runtimeImage": "allantargino/kubeless-dotnetcore@sha256:1699b07d9fc0276ddfecc2f823f272d96fd58bbab82d7e67f2fd4982a95aeadc",
|
||||
"initImage": "allantargino/aspnetcore-build@sha256:0d60f845ff6c9c019362a68b87b3920f3eb2d32f847f2d75e4d190cc0ce1d81c"
|
||||
}
|
||||
],
|
||||
"depName": "project.csproj",
|
||||
"fileNameSuffix": ".cs"
|
||||
},
|
||||
{
|
||||
"ID": "java",
|
||||
"compiled": true,
|
||||
"versions": [
|
||||
{
|
||||
"name": "java1.8",
|
||||
"version": "1.8",
|
||||
"runtimeImage": "kubeless/java@sha256:debf9502545f4c0e955eb60fabb45748c5d98ed9365c4a508c07f38fc7fefaac",
|
||||
"initImage": "kubeless/java-init@sha256:7e5e4376d3ab76c336d4830c9ed1b7f9407415feca49b8c2bf013e279256878f"
|
||||
}
|
||||
],
|
||||
"depName": "pom.xml",
|
||||
"fileNameSuffix": ".java"
|
||||
},
|
||||
{
|
||||
"ID": "ballerina",
|
||||
"compiled": true,
|
||||
"versions": [
|
||||
{
|
||||
"name": "ballerina0.975.0",
|
||||
"version": "0.975.0",
|
||||
"runtimeImage": "kubeless/ballerina@sha256:83e51423972f4b0d6b419bee0b4afb3bb87d2bf1b604ebc4366c430e7cc28a35",
|
||||
"initImage": "kubeless/ballerina-init@sha256:05857ce439a7e290f9d86f8cb38ea3b574670c0c0e91af93af06686fa21ecf4f"
|
||||
}
|
||||
],
|
||||
"depName": "",
|
||||
"fileNameSuffix": ".bal"
|
||||
}
|
||||
]
|
||||
service-type: ClusterIP
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeless-config
|
||||
namespace: kubeless
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
kubeless: controller
|
||||
name: kubeless-controller-manager
|
||||
namespace: kubeless
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
kubeless: controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
kubeless: controller
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: KUBELESS_INGRESS_ENABLED
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ingress-enabled
|
||||
name: kubeless-config
|
||||
- name: KUBELESS_SERVICE_TYPE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: service-type
|
||||
name: kubeless-config
|
||||
- name: KUBELESS_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: KUBELESS_CONFIG
|
||||
value: kubeless-config
|
||||
image: bitnami/kubeless-controller-manager:v1.0.0-alpha.6
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: kubeless-controller-manager
|
||||
serviceAccountName: controller-acct
|
||||
@@ -0,0 +1,73 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: nats-controller-deployer
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- apiGroups:
|
||||
- kubeless.io
|
||||
resources:
|
||||
- functions
|
||||
- natstriggers
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: nats-controller-deployer
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: nats-controller-deployer
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: controller-acct
|
||||
namespace: kubeless
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: natstriggers.kubeless.io
|
||||
spec:
|
||||
group: kubeless.io
|
||||
names:
|
||||
kind: NATSTrigger
|
||||
plural: natstriggers
|
||||
singular: natstrigger
|
||||
scope: Namespaced
|
||||
version: v1beta1
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
kubeless: nats-trigger-controller
|
||||
name: nats-trigger-controller
|
||||
namespace: kubeless
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
kubeless: nats-trigger-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
kubeless: nats-trigger-controller
|
||||
spec:
|
||||
containers:
|
||||
- image: bitnami/nats-trigger-controller:v1.0.0-alpha.6
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: nats-trigger-controller
|
||||
serviceAccountName: controller-acct
|
||||
@@ -0,0 +1,82 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: nats-io
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nats-operator
|
||||
namespace: nats-io
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nats-operator
|
||||
namespace: nats-io
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: nats-operator
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: nats-operator
|
||||
spec:
|
||||
serviceAccountName: nats-operator
|
||||
containers:
|
||||
- name: nats-operator
|
||||
image: connecteverything/nats-operator:0.2.2-v1alpha2
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: MY_POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: MY_POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: nats-io:nats-operator-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: nats-io:nats-operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nats-operator
|
||||
namespace: nats-io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: nats-io:nats-operator
|
||||
rules:
|
||||
# Allow creating CRDs
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs: ["*"]
|
||||
# Allow all actions on NatsClusters
|
||||
- apiGroups:
|
||||
- nats.io
|
||||
resources:
|
||||
- natsclusters
|
||||
verbs: ["*"]
|
||||
# Allow actions on basic Kubernetes objects
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
- secrets
|
||||
- pods
|
||||
- services
|
||||
- endpoints
|
||||
- events
|
||||
verbs: ["*"]
|
||||
@@ -0,0 +1,8 @@
|
||||
apiVersion: "nats.io/v1alpha2"
|
||||
kind: "NatsCluster"
|
||||
metadata:
|
||||
name: "nats"
|
||||
namespace: "nats-io"
|
||||
spec:
|
||||
size: 3
|
||||
version: "1.1.0"
|
||||
@@ -0,0 +1,11 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: isolate
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
isolated: 'true'
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
1
integrations/kubernetes-response-engine/falco-nats/.gitignore
vendored
Normal file
1
integrations/kubernetes-response-engine/falco-nats/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
falco-nats
|
||||
@@ -0,0 +1,5 @@
|
||||
FROM alpine:latest
|
||||
|
||||
COPY ./falco-nats /bin/
|
||||
|
||||
CMD ["/bin/falco-nats"]
|
||||
12
integrations/kubernetes-response-engine/falco-nats/Makefile
Normal file
12
integrations/kubernetes-response-engine/falco-nats/Makefile
Normal file
@@ -0,0 +1,12 @@
|
||||
build:
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s" -o falco-nats main.go
|
||||
|
||||
deps:
|
||||
go get -u github.com/nats-io/go-nats
|
||||
|
||||
clean:
|
||||
rm falco-nats
|
||||
|
||||
docker: build
|
||||
docker build -t sysdig/falco-nats .
|
||||
docker push sysdig/falco-nats
|
||||
27
integrations/kubernetes-response-engine/falco-nats/README.md
Normal file
27
integrations/kubernetes-response-engine/falco-nats/README.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# NATS output for Sysdig Falco
|
||||
|
||||
As Falco does not support a NATS output natively, we have created this small
|
||||
golang utility wich reads Falco alerts from a named pipe and sends them to a
|
||||
NATS server.
|
||||
|
||||
This utility is designed to being run in a sidecar container in the same
|
||||
Pod as Falco.
|
||||
|
||||
## Configuration
|
||||
|
||||
You have a [complete Kubernetes manifest available](https://github.com/draios/falco/tree/kubernetes-response-engine/deployment/falco/falco-daemonset.yaml) for future reading.
|
||||
|
||||
Take a look at sidecar container and to the initContainers directive which
|
||||
craetes the shared pipe between containers.
|
||||
|
||||
### Container image
|
||||
|
||||
You have this adapter available as a container image. Its name is *sysdig/falco-nats*.
|
||||
|
||||
### Parameters Reference
|
||||
|
||||
* -s: Specifies the NATS server URL where message will be published. By default
|
||||
is: *nats://nats.nats-io.svc.cluster.local:4222*
|
||||
|
||||
* -f: Specifies the named pipe path where Falco publishes its alerts. By default
|
||||
is: */var/run/falco/nats*
|
||||
100
integrations/kubernetes-response-engine/falco-nats/main.go
Normal file
100
integrations/kubernetes-response-engine/falco-nats/main.go
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2012-2018 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"github.com/nats-io/go-nats"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var slugRegularExpression = regexp.MustCompile("[^a-z0-9]+")
|
||||
|
||||
func main() {
|
||||
var urls = flag.String("s", "nats://nats.nats-io.svc.cluster.local:4222", "The nats server URLs (separated by comma)")
|
||||
var pipePath = flag.String("f", "/var/run/falco/nats", "The named pipe path")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
nc, err := nats.Connect(*urls)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer nc.Close()
|
||||
|
||||
pipe, err := os.OpenFile(*pipePath, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Printf("Opened pipe %s", *pipePath)
|
||||
|
||||
reader := bufio.NewReader(pipe)
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
log.Printf("Scanning %s", *pipePath)
|
||||
|
||||
for scanner.Scan() {
|
||||
msg := []byte(scanner.Text())
|
||||
|
||||
subj, err := subjectAndRuleSlug(msg)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
nc.Publish(subj, msg)
|
||||
nc.Flush()
|
||||
|
||||
if err := nc.LastError(); err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
log.Printf("Published [%s] : '%s'\n", subj, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func usage() {
|
||||
log.Fatalf("Usage: nats-pub [-s server (%s)] <subject> <msg> \n", nats.DefaultURL)
|
||||
}
|
||||
|
||||
type parsedAlert struct {
|
||||
Priority string `json:"priority"`
|
||||
Rule string `json:"rule"`
|
||||
}
|
||||
|
||||
func subjectAndRuleSlug(alert []byte) (string, error) {
|
||||
var result parsedAlert
|
||||
err := json.Unmarshal(alert, &result)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
subject := "falco." + result.Priority + "." + slugify(result.Rule)
|
||||
subject = strings.ToLower(subject)
|
||||
|
||||
return subject, nil
|
||||
}
|
||||
|
||||
func slugify(input string) string {
|
||||
return strings.Trim(slugRegularExpression.ReplaceAllString(strings.ToLower(input), "_"), "_")
|
||||
}
|
||||
1
integrations/kubernetes-response-engine/falco-sns/.gitignore
vendored
Normal file
1
integrations/kubernetes-response-engine/falco-sns/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
falco-sns
|
||||
@@ -0,0 +1,8 @@
|
||||
FROM alpine:latest
|
||||
MAINTAINER Néstor Salceda<nestor.salceda@sysdig.com>
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
COPY ./falco-sns /bin/
|
||||
|
||||
CMD ["/bin/falco-sns"]
|
||||
12
integrations/kubernetes-response-engine/falco-sns/Makefile
Normal file
12
integrations/kubernetes-response-engine/falco-sns/Makefile
Normal file
@@ -0,0 +1,12 @@
|
||||
build:
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s" -o falco-sns main.go
|
||||
|
||||
deps:
|
||||
go get -u github.com/aws/aws-sdk-go/
|
||||
|
||||
clean:
|
||||
rm falco-sns
|
||||
|
||||
docker: build
|
||||
docker build -t sysdig/falco-sns .
|
||||
docker push sysdig/falco-sns
|
||||
26
integrations/kubernetes-response-engine/falco-sns/README.md
Normal file
26
integrations/kubernetes-response-engine/falco-sns/README.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# SNS output for Sysdig Falco
|
||||
|
||||
As Falco does not support AWS SNS output natively, we have created this small
|
||||
golang utility wich reads Falco alerts from a named pipe and sends them to a
|
||||
SNS topic.
|
||||
|
||||
This utility is designed to being run in a sidecar container in the same
|
||||
Pod as Falco.
|
||||
|
||||
## Configuration
|
||||
|
||||
You have a [complete Kubernetes manifest available](https://github.com/draios/falco/tree/kubernetes-response-engine/deployment/falco/falco-daemonset.yaml) for future reading.
|
||||
|
||||
Take a look at sidecar container and to the initContainers directive which
|
||||
craetes the shared pipe between containers.
|
||||
|
||||
### Container image
|
||||
|
||||
You have this adapter available as a container image. Its name is *sysdig/falco-sns*.
|
||||
|
||||
### Parameters Reference
|
||||
|
||||
* -t: Specifies the ARN SNS topic where message will be published.
|
||||
|
||||
* -f: Specifies the named pipe path where Falco publishes its alerts. By default
|
||||
is: */var/run/falco/nats*
|
||||
101
integrations/kubernetes-response-engine/falco-sns/main.go
Normal file
101
integrations/kubernetes-response-engine/falco-sns/main.go
Normal file
@@ -0,0 +1,101 @@
|
||||
// Copyright 2012-2018 The Sysdig Tech Marketing Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/sns"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var topic = flag.String("t", "", "The AWS SNS topic ARN")
|
||||
var pipePath = flag.String("f", "/var/run/falco/nats", "The named pipe path")
|
||||
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
session, err := session.NewSession(&aws.Config{Region: aws.String(os.Getenv("AWS_DEFAULT_REGION"))})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
svc := sns.New(session)
|
||||
|
||||
pipe, err := os.OpenFile(*pipePath, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Printf("Opened pipe %s", *pipePath)
|
||||
|
||||
reader := bufio.NewReader(pipe)
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
log.Printf("Scanning %s", *pipePath)
|
||||
|
||||
for scanner.Scan() {
|
||||
msg := []byte(scanner.Text())
|
||||
alert := parseAlert(msg)
|
||||
|
||||
params := &sns.PublishInput{
|
||||
Message: aws.String(string(msg)),
|
||||
MessageAttributes: map[string]*sns.MessageAttributeValue{
|
||||
"priority": &sns.MessageAttributeValue{
|
||||
DataType: aws.String("String"),
|
||||
StringValue: aws.String(alert.Priority),
|
||||
},
|
||||
"rule": &sns.MessageAttributeValue{
|
||||
DataType: aws.String("String"),
|
||||
StringValue: aws.String(alert.Rule),
|
||||
},
|
||||
},
|
||||
TopicArn: aws.String(*topic),
|
||||
}
|
||||
|
||||
_, err := svc.Publish(params)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
log.Printf("Published [%s] : '%s'\n", *topic, msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func usage() {
|
||||
log.Fatalf("Usage: falco-sns -t topic <subject> <msg> \n")
|
||||
}
|
||||
|
||||
type parsedAlert struct {
|
||||
Priority string `json:"priority"`
|
||||
Rule string `json:"rule"`
|
||||
}
|
||||
|
||||
func parseAlert(alert []byte) *parsedAlert {
|
||||
var result parsedAlert
|
||||
err := json.Unmarshal(alert, &result)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
104
integrations/kubernetes-response-engine/playbooks/.gitignore
vendored
Normal file
104
integrations/kubernetes-response-engine/playbooks/.gitignore
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# celery beat schedule file
|
||||
celerybeat-schedule
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
21
integrations/kubernetes-response-engine/playbooks/Pipfile
Normal file
21
integrations/kubernetes-response-engine/playbooks/Pipfile
Normal file
@@ -0,0 +1,21 @@
|
||||
[[source]]
|
||||
url = "https://pypi.python.org/simple"
|
||||
verify_ssl = true
|
||||
name = "pypi"
|
||||
|
||||
[dev-packages]
|
||||
mamba = "*"
|
||||
expects = "*"
|
||||
doublex = "*"
|
||||
doublex-expects = "==0.7.0rc2"
|
||||
six = "*"
|
||||
playbooks = {path = "."}
|
||||
|
||||
[packages]
|
||||
kubernetes = "*"
|
||||
requests = "*"
|
||||
"e1839a8" = {path = ".", editable = true}
|
||||
maya = "*"
|
||||
|
||||
[requires]
|
||||
python_version = "*"
|
||||
415
integrations/kubernetes-response-engine/playbooks/Pipfile.lock
generated
Normal file
415
integrations/kubernetes-response-engine/playbooks/Pipfile.lock
generated
Normal file
@@ -0,0 +1,415 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "ee8fff436e311a11069488c3d0955fef8cc3b4dd0d42ef8515e2e5858448623b"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "*"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.python.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"adal": {
|
||||
"hashes": [
|
||||
"sha256:ba52913c38d76b4a4d88eaab41a5763d056ab6d073f106e0605b051ab930f5c1",
|
||||
"sha256:bf79392b8e9e5e82aa6acac3835ba58bbac0ccf7e15befa215863f83d5f6a007"
|
||||
],
|
||||
"version": "==1.2.0"
|
||||
},
|
||||
"asn1crypto": {
|
||||
"hashes": [
|
||||
"sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87",
|
||||
"sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49"
|
||||
],
|
||||
"version": "==0.24.0"
|
||||
},
|
||||
"cachetools": {
|
||||
"hashes": [
|
||||
"sha256:0a258d82933a1dd18cb540aca4ac5d5690731e24d1239a08577b814998f49785",
|
||||
"sha256:4621965b0d9d4c82a79a29edbad19946f5e7702df4afae7d1ed2df951559a8cc"
|
||||
],
|
||||
"version": "==3.0.0"
|
||||
},
|
||||
"certifi": {
|
||||
"hashes": [
|
||||
"sha256:339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c",
|
||||
"sha256:6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a"
|
||||
],
|
||||
"version": "==2018.10.15"
|
||||
},
|
||||
"cffi": {
|
||||
"hashes": [
|
||||
"sha256:151b7eefd035c56b2b2e1eb9963c90c6302dc15fbd8c1c0a83a163ff2c7d7743",
|
||||
"sha256:1553d1e99f035ace1c0544050622b7bc963374a00c467edafac50ad7bd276aef",
|
||||
"sha256:1b0493c091a1898f1136e3f4f991a784437fac3673780ff9de3bcf46c80b6b50",
|
||||
"sha256:2ba8a45822b7aee805ab49abfe7eec16b90587f7f26df20c71dd89e45a97076f",
|
||||
"sha256:3bb6bd7266598f318063e584378b8e27c67de998a43362e8fce664c54ee52d30",
|
||||
"sha256:3c85641778460581c42924384f5e68076d724ceac0f267d66c757f7535069c93",
|
||||
"sha256:3eb6434197633b7748cea30bf0ba9f66727cdce45117a712b29a443943733257",
|
||||
"sha256:495c5c2d43bf6cebe0178eb3e88f9c4aa48d8934aa6e3cddb865c058da76756b",
|
||||
"sha256:4c91af6e967c2015729d3e69c2e51d92f9898c330d6a851bf8f121236f3defd3",
|
||||
"sha256:57b2533356cb2d8fac1555815929f7f5f14d68ac77b085d2326b571310f34f6e",
|
||||
"sha256:770f3782b31f50b68627e22f91cb182c48c47c02eb405fd689472aa7b7aa16dc",
|
||||
"sha256:79f9b6f7c46ae1f8ded75f68cf8ad50e5729ed4d590c74840471fc2823457d04",
|
||||
"sha256:7a33145e04d44ce95bcd71e522b478d282ad0eafaf34fe1ec5bbd73e662f22b6",
|
||||
"sha256:857959354ae3a6fa3da6651b966d13b0a8bed6bbc87a0de7b38a549db1d2a359",
|
||||
"sha256:87f37fe5130574ff76c17cab61e7d2538a16f843bb7bca8ebbc4b12de3078596",
|
||||
"sha256:95d5251e4b5ca00061f9d9f3d6fe537247e145a8524ae9fd30a2f8fbce993b5b",
|
||||
"sha256:9d1d3e63a4afdc29bd76ce6aa9d58c771cd1599fbba8cf5057e7860b203710dd",
|
||||
"sha256:a36c5c154f9d42ec176e6e620cb0dd275744aa1d804786a71ac37dc3661a5e95",
|
||||
"sha256:a6a5cb8809091ec9ac03edde9304b3ad82ad4466333432b16d78ef40e0cce0d5",
|
||||
"sha256:ae5e35a2c189d397b91034642cb0eab0e346f776ec2eb44a49a459e6615d6e2e",
|
||||
"sha256:b0f7d4a3df8f06cf49f9f121bead236e328074de6449866515cea4907bbc63d6",
|
||||
"sha256:b75110fb114fa366b29a027d0c9be3709579602ae111ff61674d28c93606acca",
|
||||
"sha256:ba5e697569f84b13640c9e193170e89c13c6244c24400fc57e88724ef610cd31",
|
||||
"sha256:be2a9b390f77fd7676d80bc3cdc4f8edb940d8c198ed2d8c0be1319018c778e1",
|
||||
"sha256:ca1bd81f40adc59011f58159e4aa6445fc585a32bb8ac9badf7a2c1aa23822f2",
|
||||
"sha256:d5d8555d9bfc3f02385c1c37e9f998e2011f0db4f90e250e5bc0c0a85a813085",
|
||||
"sha256:e55e22ac0a30023426564b1059b035973ec82186ddddbac867078435801c7801",
|
||||
"sha256:e90f17980e6ab0f3c2f3730e56d1fe9bcba1891eeea58966e89d352492cc74f4",
|
||||
"sha256:ecbb7b01409e9b782df5ded849c178a0aa7c906cf8c5a67368047daab282b184",
|
||||
"sha256:ed01918d545a38998bfa5902c7c00e0fee90e957ce036a4000a88e3fe2264917",
|
||||
"sha256:edabd457cd23a02965166026fd9bfd196f4324fe6032e866d0f3bd0301cd486f",
|
||||
"sha256:fdf1c1dc5bafc32bc5d08b054f94d659422b05aba244d6be4ddc1c72d9aa70fb"
|
||||
],
|
||||
"version": "==1.11.5"
|
||||
},
|
||||
"chardet": {
|
||||
"hashes": [
|
||||
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
|
||||
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
|
||||
],
|
||||
"version": "==3.0.4"
|
||||
},
|
||||
"cryptography": {
|
||||
"hashes": [
|
||||
"sha256:02602e1672b62e803e08617ec286041cc453e8d43f093a5f4162095506bc0beb",
|
||||
"sha256:10b48e848e1edb93c1d3b797c83c72b4c387ab0eb4330aaa26da8049a6cbede0",
|
||||
"sha256:17db09db9d7c5de130023657be42689d1a5f60502a14f6f745f6f65a6b8195c0",
|
||||
"sha256:227da3a896df1106b1a69b1e319dce218fa04395e8cc78be7e31ca94c21254bc",
|
||||
"sha256:2cbaa03ac677db6c821dac3f4cdfd1461a32d0615847eedbb0df54bb7802e1f7",
|
||||
"sha256:31db8febfc768e4b4bd826750a70c79c99ea423f4697d1dab764eb9f9f849519",
|
||||
"sha256:4a510d268e55e2e067715d728e4ca6cd26a8e9f1f3d174faf88e6f2cb6b6c395",
|
||||
"sha256:6a88d9004310a198c474d8a822ee96a6dd6c01efe66facdf17cb692512ae5bc0",
|
||||
"sha256:76936ec70a9b72eb8c58314c38c55a0336a2b36de0c7ee8fb874a4547cadbd39",
|
||||
"sha256:7e3b4aecc4040928efa8a7cdaf074e868af32c58ffc9bb77e7bf2c1a16783286",
|
||||
"sha256:8168bcb08403ef144ff1fb880d416f49e2728101d02aaadfe9645883222c0aa5",
|
||||
"sha256:8229ceb79a1792823d87779959184a1bf95768e9248c93ae9f97c7a2f60376a1",
|
||||
"sha256:8a19e9f2fe69f6a44a5c156968d9fc8df56d09798d0c6a34ccc373bb186cee86",
|
||||
"sha256:8d10113ca826a4c29d5b85b2c4e045ffa8bad74fb525ee0eceb1d38d4c70dfd6",
|
||||
"sha256:be495b8ec5a939a7605274b6e59fbc35e76f5ad814ae010eb679529671c9e119",
|
||||
"sha256:dc2d3f3b1548f4d11786616cf0f4415e25b0fbecb8a1d2cd8c07568f13fdde38",
|
||||
"sha256:e4aecdd9d5a3d06c337894c9a6e2961898d3f64fe54ca920a72234a3de0f9cb3",
|
||||
"sha256:e79ab4485b99eacb2166f3212218dd858258f374855e1568f728462b0e6ee0d9",
|
||||
"sha256:f995d3667301e1754c57b04e0bae6f0fa9d710697a9f8d6712e8cca02550910f"
|
||||
],
|
||||
"version": "==2.3.1"
|
||||
},
|
||||
"dateparser": {
|
||||
"hashes": [
|
||||
"sha256:940828183c937bcec530753211b70f673c0a9aab831e43273489b310538dff86",
|
||||
"sha256:b452ef8b36cd78ae86a50721794bc674aa3994e19b570f7ba92810f4e0a2ae03"
|
||||
],
|
||||
"version": "==0.7.0"
|
||||
},
|
||||
"e1839a8": {
|
||||
"editable": true,
|
||||
"path": "."
|
||||
},
|
||||
"google-auth": {
|
||||
"hashes": [
|
||||
"sha256:9ca363facbf2622d9ba828017536ccca2e0f58bd15e659b52f312172f8815530",
|
||||
"sha256:a4cf9e803f2176b5de442763bd339b313d3f1ed3002e3e1eb6eec1d7c9bbc9b4"
|
||||
],
|
||||
"version": "==1.5.1"
|
||||
},
|
||||
"humanize": {
|
||||
"hashes": [
|
||||
"sha256:a43f57115831ac7c70de098e6ac46ac13be00d69abbf60bdcac251344785bb19"
|
||||
],
|
||||
"version": "==0.5.1"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
"sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e",
|
||||
"sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16"
|
||||
],
|
||||
"version": "==2.7"
|
||||
},
|
||||
"kubernetes": {
|
||||
"hashes": [
|
||||
"sha256:0cc9ce02d838da660efa0a67270b4b7d47e6beb8889673cd45c86f897e2d6821",
|
||||
"sha256:54f8e7bb1dd9a55cf416dff76a63c4ae441764280942d9913f2243676f29d02c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==8.0.0"
|
||||
},
|
||||
"maya": {
|
||||
"hashes": [
|
||||
"sha256:6f63bc69aa77309fc220bc02618da8701a21da87c2e7a747ee5ccd56a907c3a5",
|
||||
"sha256:f526bc8596d993f4bd9755668f66aaf61d635bb4149e084d4a2bc0ebe42aa0b6"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.5.0"
|
||||
},
|
||||
"oauthlib": {
|
||||
"hashes": [
|
||||
"sha256:ac35665a61c1685c56336bda97d5eefa246f1202618a1d6f34fccb1bdd404162",
|
||||
"sha256:d883b36b21a6ad813953803edfa563b1b579d79ca758fe950d1bc9e8b326025b"
|
||||
],
|
||||
"version": "==2.1.0"
|
||||
},
|
||||
"pendulum": {
|
||||
"hashes": [
|
||||
"sha256:4173ce3e81ad0d9d61dbce86f4286c43a26a398270df6a0a89f501f0c28ad27d",
|
||||
"sha256:56a347d0457859c84b8cdba161fc37c7df5db9b3becec7881cd770e9d2058b3c",
|
||||
"sha256:738878168eb26e5446da5d1f7b3312ae993a542061be8882099c00ef4866b1a2",
|
||||
"sha256:95536b33ae152e3c831eb236c1bf9ac9dcfb3b5b98fdbe8e9e601eab6c373897",
|
||||
"sha256:c04fcf955e622e97e405e5f6d1b1f4a7adc69d79d82f3609643de69283170d6d",
|
||||
"sha256:dd6500d27bb7ccc029d497da4f9bd09549bd3c0ea276dad894ea2fdf309e83f3",
|
||||
"sha256:ddaf97a061eb5e2ae37857a8cb548e074125017855690d20e443ad8d9f31e164",
|
||||
"sha256:e7df37447824f9af0b58c7915a4caf349926036afd86ad38e7529a6b2f8fc34b",
|
||||
"sha256:e9732b8bb214fad2c72ddcbfec07542effa8a8b704e174347ede1ff8dc679cce",
|
||||
"sha256:f4eee1e1735487d9d25cc435c519fd4380cb1f82cde3ebad1efbc2fc30deca5b"
|
||||
],
|
||||
"version": "==1.5.1"
|
||||
},
|
||||
"pyasn1": {
|
||||
"hashes": [
|
||||
"sha256:b9d3abc5031e61927c82d4d96c1cec1e55676c1a991623cfed28faea73cdd7ca",
|
||||
"sha256:f58f2a3d12fd754aa123e9fa74fb7345333000a035f3921dbdaa08597aa53137"
|
||||
],
|
||||
"version": "==0.4.4"
|
||||
},
|
||||
"pyasn1-modules": {
|
||||
"hashes": [
|
||||
"sha256:a0cf3e1842e7c60fde97cb22d275eb6f9524f5c5250489e292529de841417547",
|
||||
"sha256:a38a8811ea784c0136abfdba73963876328f66172db21a05a82f9515909bfb4e"
|
||||
],
|
||||
"version": "==0.2.2"
|
||||
},
|
||||
"pycparser": {
|
||||
"hashes": [
|
||||
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
|
||||
],
|
||||
"version": "==2.19"
|
||||
},
|
||||
"pyjwt": {
|
||||
"hashes": [
|
||||
"sha256:30b1380ff43b55441283cc2b2676b755cca45693ae3097325dea01f3d110628c",
|
||||
"sha256:4ee413b357d53fd3fb44704577afac88e72e878716116270d722723d65b42176"
|
||||
],
|
||||
"version": "==1.6.4"
|
||||
},
|
||||
"python-dateutil": {
|
||||
"hashes": [
|
||||
"sha256:063df5763652e21de43de7d9e00ccf239f953a832941e37be541614732cdfc93",
|
||||
"sha256:88f9287c0174266bb0d8cedd395cfba9c58e87e5ad86b2ce58859bc11be3cf02"
|
||||
],
|
||||
"version": "==2.7.5"
|
||||
},
|
||||
"pytz": {
|
||||
"hashes": [
|
||||
"sha256:31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca",
|
||||
"sha256:8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6"
|
||||
],
|
||||
"version": "==2018.7"
|
||||
},
|
||||
"pytzdata": {
|
||||
"hashes": [
|
||||
"sha256:10c74b0cfc51a9269031f86ecd11096c9c6a141f5bb15a3b8a88f9979f6361e2",
|
||||
"sha256:279cbd9900d5da9a8f9053e60db0db7f42d9a799673744b76aaeb6b4f14abe77"
|
||||
],
|
||||
"version": "==2018.7"
|
||||
},
|
||||
"pyyaml": {
|
||||
"hashes": [
|
||||
"sha256:3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b",
|
||||
"sha256:3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf",
|
||||
"sha256:40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a",
|
||||
"sha256:558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3",
|
||||
"sha256:a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1",
|
||||
"sha256:aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1",
|
||||
"sha256:bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613",
|
||||
"sha256:d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04",
|
||||
"sha256:d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f",
|
||||
"sha256:e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537",
|
||||
"sha256:e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531"
|
||||
],
|
||||
"version": "==3.13"
|
||||
},
|
||||
"regex": {
|
||||
"hashes": [
|
||||
"sha256:384c78351ceb08b9f04e28552edea9af837d05ad4fda9a187a7bbd82759f29b6",
|
||||
"sha256:41b70db2608726396de185e7571a70391507ab47a64b564f59861ff13f2c50a5",
|
||||
"sha256:50f4b57696883fdbb0494cf1ff1cf6e04790d5e1848dff0b2cf28a2b97614351",
|
||||
"sha256:81515123132f9ab0cc8128d035ba7db7783206e4616bdabd3faba335b9add185",
|
||||
"sha256:91e965833a9f93b3e6abfef815026ccb8a9abe12c0958c723fc6c0d396384602",
|
||||
"sha256:9cb058e53c2488b6cba85a7e6ce6d659b3f33ebe00f613dc9fda46de788a1298",
|
||||
"sha256:b41a81228c3994789d4785d9fef96770f9a6b564a30c10af671bd5a4078da6f4",
|
||||
"sha256:cf20d6539e00021793df23c2a98d57aff84f9402f81ac5896fffb4f8c8a08897",
|
||||
"sha256:f937fdbcdb1e455c23709f5cf6df91a0ecfe8c23268f601606173232958daa8d"
|
||||
],
|
||||
"version": "==2018.11.6"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
"sha256:99dcfdaaeb17caf6e526f32b6a7b780461512ab3f1d992187801694cba42770c",
|
||||
"sha256:a84b8c9ab6239b578f22d1c21d51b696dcfe004032bb80ea832398d6909d7279"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.20.0"
|
||||
},
|
||||
"requests-oauthlib": {
|
||||
"hashes": [
|
||||
"sha256:8886bfec5ad7afb391ed5443b1f697c6f4ae98d0e5620839d8b4499c032ada3f",
|
||||
"sha256:e21232e2465808c0e892e0e4dbb8c2faafec16ac6dc067dd546e9b466f3deac8"
|
||||
],
|
||||
"version": "==1.0.0"
|
||||
},
|
||||
"rsa": {
|
||||
"hashes": [
|
||||
"sha256:14ba45700ff1ec9eeb206a2ce76b32814958a98e372006c8fb76ba820211be66",
|
||||
"sha256:1a836406405730121ae9823e19c6e806c62bbad73f890574fff50efa4122c487"
|
||||
],
|
||||
"version": "==4.0"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
|
||||
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
|
||||
],
|
||||
"version": "==1.11.0"
|
||||
},
|
||||
"snaptime": {
|
||||
"hashes": [
|
||||
"sha256:e3f1eb89043d58d30721ab98cb65023f1a4c2740e3b197704298b163c92d508b"
|
||||
],
|
||||
"version": "==0.2.4"
|
||||
},
|
||||
"tzlocal": {
|
||||
"hashes": [
|
||||
"sha256:4ebeb848845ac898da6519b9b31879cf13b6626f7184c496037b818e238f2c4e"
|
||||
],
|
||||
"version": "==1.5.1"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
"sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39",
|
||||
"sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22"
|
||||
],
|
||||
"version": "==1.24.1"
|
||||
},
|
||||
"websocket-client": {
|
||||
"hashes": [
|
||||
"sha256:8c8bf2d4f800c3ed952df206b18c28f7070d9e3dcbd6ca6291127574f57ee786",
|
||||
"sha256:e51562c91ddb8148e791f0155fdb01325d99bb52c4cdbb291aee7a3563fd0849"
|
||||
],
|
||||
"version": "==0.54.0"
|
||||
}
|
||||
},
|
||||
"develop": {
|
||||
"args": {
|
||||
"hashes": [
|
||||
"sha256:a785b8d837625e9b61c39108532d95b85274acd679693b71ebb5156848fcf814"
|
||||
],
|
||||
"version": "==0.1.0"
|
||||
},
|
||||
"clint": {
|
||||
"hashes": [
|
||||
"sha256:05224c32b1075563d0b16d0015faaf9da43aa214e4a2140e51f08789e7a4c5aa"
|
||||
],
|
||||
"version": "==0.5.1"
|
||||
},
|
||||
"coverage": {
|
||||
"hashes": [
|
||||
"sha256:03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba",
|
||||
"sha256:0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed",
|
||||
"sha256:0bf8cbbd71adfff0ef1f3a1531e6402d13b7b01ac50a79c97ca15f030dba6306",
|
||||
"sha256:10a46017fef60e16694a30627319f38a2b9b52e90182dddb6e37dcdab0f4bf95",
|
||||
"sha256:198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640",
|
||||
"sha256:23d341cdd4a0371820eb2b0bd6b88f5003a7438bbedb33688cd33b8eae59affd",
|
||||
"sha256:28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162",
|
||||
"sha256:2a5b73210bad5279ddb558d9a2bfedc7f4bf6ad7f3c988641d83c40293deaec1",
|
||||
"sha256:2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508",
|
||||
"sha256:337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249",
|
||||
"sha256:3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694",
|
||||
"sha256:3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a",
|
||||
"sha256:3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287",
|
||||
"sha256:3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1",
|
||||
"sha256:4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000",
|
||||
"sha256:56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1",
|
||||
"sha256:5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e",
|
||||
"sha256:69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5",
|
||||
"sha256:6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062",
|
||||
"sha256:701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba",
|
||||
"sha256:7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc",
|
||||
"sha256:76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc",
|
||||
"sha256:7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99",
|
||||
"sha256:7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653",
|
||||
"sha256:7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c",
|
||||
"sha256:8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558",
|
||||
"sha256:9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f",
|
||||
"sha256:be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9",
|
||||
"sha256:c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd",
|
||||
"sha256:de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d",
|
||||
"sha256:e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6",
|
||||
"sha256:f05a636b4564104120111800021a92e43397bc12a5c72fed7036be8556e0029e",
|
||||
"sha256:f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80"
|
||||
],
|
||||
"version": "==4.5.1"
|
||||
},
|
||||
"doublex": {
|
||||
"hashes": [
|
||||
"sha256:bdfa5007ec6f93fcdb05683ef559dd7919b7fe217df41fd240f8d4b2f681ba21"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.9.1"
|
||||
},
|
||||
"doublex-expects": {
|
||||
"hashes": [
|
||||
"sha256:5421bd92319c77ccc5a81d595d06e9c9f7f670de342b33e8007a81e70f9fade8"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.7.0rc2"
|
||||
},
|
||||
"expects": {
|
||||
"hashes": [
|
||||
"sha256:419902ccafe81b7e9559eeb6b7a07ef9d5c5604eddb93000f0642b3b2d594f4c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.9.0"
|
||||
},
|
||||
"mamba": {
|
||||
"hashes": [
|
||||
"sha256:25328151ea94d97a0b461d7256dc7350c99b5f8d2de22d355978378edfeac545"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.10"
|
||||
},
|
||||
"playbooks": {
|
||||
"path": "."
|
||||
},
|
||||
"pyhamcrest": {
|
||||
"hashes": [
|
||||
"sha256:6b672c02fdf7470df9674ab82263841ce8333fb143f32f021f6cb26f0e512420",
|
||||
"sha256:8ffaa0a53da57e89de14ced7185ac746227a8894dbd5a3c718bf05ddbd1d56cd"
|
||||
],
|
||||
"version": "==1.9.0"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
|
||||
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
|
||||
],
|
||||
"version": "==1.11.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
228
integrations/kubernetes-response-engine/playbooks/README.md
Normal file
228
integrations/kubernetes-response-engine/playbooks/README.md
Normal file
@@ -0,0 +1,228 @@
|
||||
# Playbooks
|
||||
|
||||
Following [owasp ideas](https://owaspsummit.org/Working-Sessions/Security-Playbooks/index.html),
|
||||
playbooks are workflows and prescriptive instructions on how to handle specific
|
||||
Security activities or incidents.
|
||||
|
||||
Being more specific, playbooks are actions that are going to be executed when
|
||||
Falco finds a weird behavior in our Kubernetes cluster. We have implemented
|
||||
them with Python and we have found that several Serverless concepts fits well
|
||||
with playbooks, so we use [Kubeless](https://kubeless.io/) for its deployment.
|
||||
|
||||
## Requirements
|
||||
|
||||
* A working Kubernetes cluster
|
||||
* [kubeless cli executable](https://kubeless.io/docs/quick-start/)
|
||||
* Python 3.6
|
||||
* pipenv
|
||||
|
||||
## Deploying a playbook
|
||||
|
||||
Deploying a playbook involves a couple of components, the function that is going
|
||||
to be with Kubeless and a trigger for that function.
|
||||
|
||||
We have automated those steps in a generic script *deploy_playbook* who packages
|
||||
the reaction and its dependencies, uploads to Kubernetes and creates the kubeless
|
||||
trigger.
|
||||
|
||||
```
|
||||
./deploy_playbook -p slack -e SLACK_WEBHOOK_URL="https://..." -t "falco.error.*" -t "falco.info.*"
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
* -p: The playbook to deploy, it must match with the top-level script. In this
|
||||
example *slack.py* that contains the wiring between playbooks and Kubeless
|
||||
functions.
|
||||
|
||||
* -e: Sets configuration settings for Playbook. In this case the URL where we
|
||||
have to post messages. You can specify multiple *-e* flags.
|
||||
|
||||
* -t: Topic to susbcribe. You can specify multiple *-t* flags and a trigger
|
||||
will be created for each topic, so when we receive a message in that topic,
|
||||
our function will be ran. In this case, playbook will be run when a
|
||||
falco.error or falco.info alert is raised.
|
||||
|
||||
### Kubeless 101
|
||||
|
||||
Under the hood, there are several useful commands for checking function state with kubeless.
|
||||
|
||||
|
||||
We can retrieve all functions deployed in our cluster:
|
||||
```
|
||||
kubeless function list
|
||||
```
|
||||
|
||||
And we can see several interesting stats about a function usage:
|
||||
```
|
||||
kubeless function top
|
||||
```
|
||||
|
||||
And we can see bindings between functions and NATS topics:
|
||||
```
|
||||
kubeless trigger nats list
|
||||
```
|
||||
|
||||
### Undeploying a function
|
||||
|
||||
You have to delete every component using kubeless cli tool.
|
||||
|
||||
Generally, it takes 2 steps: Remove the triggers and remove the function.
|
||||
|
||||
Remove the triggers:
|
||||
```
|
||||
kubeless trigger nats delete trigger-name
|
||||
```
|
||||
|
||||
If you have deployed with the script, trigger-name look like:
|
||||
*falco-<playbook>-trigger-<index>* where index is the index of the topic created.
|
||||
Anyway, you can list all triggers and select the name.
|
||||
|
||||
|
||||
Remove the function:
|
||||
```
|
||||
kubeless function delete function-name
|
||||
```
|
||||
|
||||
If you have deployed with the script, the function name will start with *falco-<playbook>*,
|
||||
but you can list all functions and select its name.
|
||||
|
||||
## Testing
|
||||
|
||||
One of the goals of the project was that playbooks were tested.
|
||||
|
||||
You can execute the tests with:
|
||||
|
||||
```
|
||||
pipenv --three install -d
|
||||
export KUBERNETES_LOAD_KUBE_CONFIG=1
|
||||
pipenv run mamba --format=documentation
|
||||
```
|
||||
|
||||
The first line install development tools, which includes test runner and assertions.
|
||||
The second one tells Kubernetes Client to use the same configuration than kubectl and
|
||||
the third one runs the test.
|
||||
|
||||
The tests under *specs/infrastructure* runs against a real Kubernetes cluster,
|
||||
but the *spec/reactions* can be run without any kind of infrastructure.
|
||||
|
||||
## Available Playbooks
|
||||
|
||||
### Delete a Pod
|
||||
|
||||
This playbook kills a pod using Kubernetes API
|
||||
|
||||
```
|
||||
./deploy_playbook -p delete -t "falco.notice.terminal_shell_in_container"
|
||||
```
|
||||
|
||||
In this example, everytime we receive a *Terminal shell in container* alert from
|
||||
Falco, that pod will be deleted.
|
||||
|
||||
### Send message to Slack
|
||||
|
||||
This playbook posts a message to Slack
|
||||
|
||||
```
|
||||
./deploy_playbook -p slack -t "falco.error.*" -e SLACK_WEBHOOK_URL="https://..."
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
* SLACK_WEBHOOK_URL: This is the webhook used for posting messages in Slack
|
||||
|
||||
In this example, when Falco raises an error we will be notified in Slack
|
||||
|
||||
### Taint a Node
|
||||
|
||||
This playbook taints the node which where pod is running.
|
||||
|
||||
```
|
||||
$ ./deploy_playbook -p taint -t “falco.notice.contact_k8s_api_server_from_container”
|
||||
```
|
||||
|
||||
#### Parameters:
|
||||
* TAINT_KEY: This is the taint key. Default value: ‘falco/alert’
|
||||
* TAINT_VALUE: This is the taint value. Default value: ‘true’
|
||||
* TAINT_EFFECT: This is the taint effect. Default value: ‘NoSchedule’
|
||||
|
||||
In this example, we avoid scheduling in the node which originates the Contact
|
||||
K8S API server from container. But we can use a more aggresive approach and
|
||||
use -e TAINT_EFFECT=NoExecute
|
||||
|
||||
### Network isolate a Pod
|
||||
|
||||
This reaction denies all ingress/egress traffic from a Pod. It's intended to
|
||||
be used with Calico or other similar projects for managing networking in
|
||||
Kubernetes.
|
||||
|
||||
```
|
||||
./deploy_playbook -p isolate -t “falco.notice.write_below_binary_dir” -t “falco.error.write_below_etc”
|
||||
```
|
||||
|
||||
So as soon as we notice someone wrote under /bin (and additional binaries) or
|
||||
/etc, we disconnect that pod. It's like a trap for our attackers.
|
||||
|
||||
### Create an incident in Demisto
|
||||
|
||||
This playbook creates an incident in Demisto
|
||||
|
||||
```
|
||||
./deploy_playbook -p demisto -t "falco.*.*" -e DEMISTO_API_KEY=XxXxxXxxXXXx -e DEMISTO_BASE_URL=https://..."
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
|
||||
* DEMISTO_API_KEY: This is the API key used for authenticating against Demisto. Create one under settings -> API keys
|
||||
* DEMISTO_BASE_URL: This is the base URL where your Demisto server lives on. Ensure there's no trailing slash.
|
||||
* VERIFY_SSL: Verify SSL certificates for HTTPS requests. By default is enabled.
|
||||
|
||||
In this example, when Falco raises any kind of alert, the alert will be created in Demisto
|
||||
|
||||
### Start a capture using Sysdig
|
||||
|
||||
This playbook starts to capture information about pod using sysdig and uploads
|
||||
to a s3 bucket.
|
||||
|
||||
```
|
||||
$ ./deploy_playbook -p capture -e CAPTURE_DURATION=300 -e AWS_S3_BUCKET=s3://xxxxxxx -e AWS_ACCESS_KEY_ID=xxxxXXXxxXXxXX -e AWS_SECRET_ACCESS_KEY=xxXxXXxxxxXXX -t "falco.notice.terminal_shell_in_container"
|
||||
```
|
||||
|
||||
#### Parameters:
|
||||
* CAPTURE_DURATION: Captures data for this duration in seconds. By default is
|
||||
120 seconds (2 minutes)
|
||||
* AWS_S3_BUCKET: This is the bucket where data is going to be uploaded. Jobs
|
||||
starts with sysdig- prefix and contain pod name and time where event starts.
|
||||
* AWS_ACCESS_KEY_ID: This is the Amazon access key id.
|
||||
* AWS_SECRET_ACCESS_KEY: This is the Amazon secret access key.
|
||||
|
||||
In this example, when we detect a shell in a container, we start to collect data
|
||||
for 300 seconds. This playbook requires permissions for creating a new pod from
|
||||
a Kubeless function.
|
||||
|
||||
### Create a container in Phantom
|
||||
This playbook creates a container in Phantom
|
||||
|
||||
```
|
||||
./deploy_playbook -p phantom -t "falco.*.*" -e PHANTOM_USER=user -e PHANTOM_PASSWORD=xxxXxxxX -e PHANTOM_BASE_URL=https://..."
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
* PHANTOM_USER: This is the user used to connect to Phantom
|
||||
* PHANTOM_PASSWORD: This is the password used to connect to Phantom
|
||||
* PHANTOM_BASE_URL: This is the base URL where your Phantom server lives on. Ensure there's no trailing slash.
|
||||
* VERIFY_SSL: Verify SSL certificates for HTTPS requests. By default is enabled.
|
||||
|
||||
In this example, when Falco raises any kind of alert, the alert will be created in Phantom.
|
||||
|
||||
## Deploying playbooks to AWS Lambda
|
||||
|
||||
You can deploy functions to AWS Lambda using the `./deploy_playbook_aws` script.
|
||||
|
||||
### Parameters
|
||||
|
||||
* -p: The playbook to deploy, it must match with the top-level script.
|
||||
|
||||
* -e: Sets configuration settings for Playbook. You can specify multiple *-e* flags.
|
||||
|
||||
* -k: EKS cluster name against playbook is going to connect via K8s API.
|
||||
87
integrations/kubernetes-response-engine/playbooks/deploy_playbook
Executable file
87
integrations/kubernetes-response-engine/playbooks/deploy_playbook
Executable file
@@ -0,0 +1,87 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Deploys a playbook
|
||||
|
||||
set -e
|
||||
|
||||
function usage() {
|
||||
cat<<EOF
|
||||
Usage: $0 [options]
|
||||
|
||||
-p playbook Playbook to be deployed. Is the script for Kubeless: slack, taint, isolate.
|
||||
-e environment Environment variables for the Kubeless function. You can pass multiple environment variables passing several -e parameters.
|
||||
-t topic NATS topic to subscribe function. You can bind to multiple topics passing several -t parameters.
|
||||
|
||||
You must pass the playbook and at least one topic to subscribe.
|
||||
|
||||
Example:
|
||||
|
||||
deploy_playbook -p slack -t "falco.error.*" -e SLACK_WEBHOOK_URL=http://foobar.com/...
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
function create_environment_flags {
|
||||
for env in ${environment[*]}; do
|
||||
echo "--env ${env} "
|
||||
done
|
||||
}
|
||||
|
||||
playbook=""
|
||||
environment=()
|
||||
topics=()
|
||||
|
||||
while getopts "p:e:t:" arg; do
|
||||
case $arg in
|
||||
p)
|
||||
playbook="${OPTARG}"
|
||||
;;
|
||||
e)
|
||||
environment+=("${OPTARG}")
|
||||
;;
|
||||
t)
|
||||
topics+=("${OPTARG}")
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ "${playbook}" == "" || ${#topics[@]} -eq 0 ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
pipenv lock --requirements | sed '/^-/ d' > requirements.txt
|
||||
|
||||
mkdir -p kubeless-function
|
||||
|
||||
cp -r playbooks kubeless-function/
|
||||
|
||||
cat > kubeless-function/"${playbook}".py <<EOL
|
||||
import sys
|
||||
import os.path
|
||||
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
|
||||
|
||||
EOL
|
||||
cat functions/"${playbook}".py >> kubeless-function/"${playbook}".py
|
||||
|
||||
|
||||
cd kubeless-function
|
||||
zip ../"${playbook}".zip -r *
|
||||
cd ..
|
||||
|
||||
kubeless function deploy --from-file "${playbook}".zip \
|
||||
--dependencies requirements.txt \
|
||||
$(create_environment_flags ${environment[*]}) \
|
||||
--runtime python3.6 \
|
||||
--handler "${playbook}".handler \
|
||||
falco-"${playbook}"
|
||||
|
||||
rm -fr requirements.txt ${playbook}.zip kubeless-function
|
||||
|
||||
for index in ${!topics[*]}; do
|
||||
kubeless trigger nats create falco-"${playbook}"-trigger-"${index}" \
|
||||
--function-selector created-by=kubeless,function=falco-${playbook} \
|
||||
--trigger-topic "${topics[$index]}"
|
||||
done
|
||||
76
integrations/kubernetes-response-engine/playbooks/deploy_playbook_aws
Executable file
76
integrations/kubernetes-response-engine/playbooks/deploy_playbook_aws
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Deploys a playbook
|
||||
|
||||
set -e
|
||||
|
||||
function usage() {
|
||||
cat<<EOF
|
||||
Usage: $0 [options]
|
||||
|
||||
-p playbook Playbook to be deployed. Is the script for Kubeless: slack, taint, isolate.
|
||||
-e environment Environment variables for the Kubeless function. You can pass multiple environment variables passing several -e parameters.
|
||||
-k kubernetes_cluster Kubernetes cluster from aws eks list-clusters where function will be applied.
|
||||
|
||||
You must pass the playbook and at least one topic to subscribe.
|
||||
|
||||
Example:
|
||||
|
||||
deploy_playbook -p slack -e SLACK_WEBHOOK_URL=http://foobar.com/... -k sysdig_eks
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
function join { local IFS="$1"; shift; echo "$*"; }
|
||||
|
||||
playbook=""
|
||||
environment=("KUBECONFIG=kubeconfig" "KUBERNETES_LOAD_KUBE_CONFIG=1")
|
||||
eks_cluster="${EKS_CLUSTER}"
|
||||
|
||||
while getopts "p:e:k:" arg; do
|
||||
case $arg in
|
||||
p)
|
||||
playbook="${OPTARG}"
|
||||
;;
|
||||
e)
|
||||
environment+=("${OPTARG}")
|
||||
;;
|
||||
k)
|
||||
eks_cluster="${OPTARG}"
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ "${playbook}" == "" ]] || [[ "${eks_cluster}" == "" ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
pipenv lock --requirements | sed '/^-/ d' > requirements.txt
|
||||
|
||||
mkdir -p lambda
|
||||
pip install -t lambda -r requirements.txt
|
||||
pip install -t lambda .
|
||||
|
||||
aws eks update-kubeconfig --name "${eks_cluster}" --kubeconfig lambda/kubeconfig
|
||||
sed -i "s/command: aws-iam-authenticator/command: .\/aws-iam-authenticator/g" lambda/kubeconfig
|
||||
|
||||
cp extra/aws-iam-authenticator lambda/
|
||||
|
||||
cp functions/"${playbook}".py lambda/
|
||||
|
||||
cd lambda
|
||||
zip ../"${playbook}".zip -r *
|
||||
cd ..
|
||||
|
||||
aws lambda create-function \
|
||||
--function-name falco-"${playbook}" \
|
||||
--runtime python2.7 \
|
||||
--role $(terraform output --state=../deployment/aws/terraform.tfstate iam_for_lambda) \
|
||||
--environment Variables={"$(join , ${environment[*]})"} \
|
||||
--handler "${playbook}".handler \
|
||||
--zip-file fileb://./"${playbook}".zip
|
||||
|
||||
rm -fr "${playbook}".zip lambda requirements.txt
|
||||
BIN
integrations/kubernetes-response-engine/playbooks/extra/aws-iam-authenticator
Executable file
BIN
integrations/kubernetes-response-engine/playbooks/extra/aws-iam-authenticator
Executable file
Binary file not shown.
@@ -0,0 +1,20 @@
|
||||
import sys
|
||||
import os.path
|
||||
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
|
||||
|
||||
import os
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
playbook = playbooks.StartSysdigCaptureForContainer(
|
||||
infrastructure.KubernetesClient(),
|
||||
int(os.environ.get('CAPTURE_DURATION', 120)),
|
||||
os.environ['AWS_S3_BUCKET'],
|
||||
os.environ['AWS_ACCESS_KEY_ID'],
|
||||
os.environ['AWS_SECRET_ACCESS_KEY']
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(event['data'])
|
||||
@@ -0,0 +1,11 @@
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
playbook = playbooks.DeletePod(
|
||||
infrastructure.KubernetesClient()
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(playbooks.falco_alert(event))
|
||||
@@ -0,0 +1,22 @@
|
||||
import sys
|
||||
import os.path
|
||||
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
|
||||
|
||||
import os
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
def _to_bool(value):
|
||||
return value.lower() in ('yes', 'true', '1')
|
||||
|
||||
|
||||
playbook = playbooks.CreateIncidentInDemisto(
|
||||
infrastructure.DemistoClient(os.environ['DEMISTO_API_KEY'],
|
||||
os.environ['DEMISTO_BASE_URL']
|
||||
verify_ssl=_to_bool(os.environ.get('VERIFY_SSL', 'True')))
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(event['data'])
|
||||
@@ -0,0 +1,11 @@
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
playbook = playbooks.NetworkIsolatePod(
|
||||
infrastructure.KubernetesClient()
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(playbooks.falco_alert(event))
|
||||
@@ -0,0 +1,25 @@
|
||||
import sys
|
||||
import os.path
|
||||
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
|
||||
|
||||
import os
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
def _to_bool(value):
|
||||
return value.lower() in ('yes', 'true', '1')
|
||||
|
||||
|
||||
playbook = playbooks.CreateContainerInPhantom(
|
||||
infrastructure.PhantomClient(
|
||||
os.environ['PHANTOM_USER'],
|
||||
os.environ['PHANTOM_PASSWORD'],
|
||||
os.environ['PHANTOM_BASE_URL'],
|
||||
verify_ssl=_to_bool(os.environ.get('VERIFY_SSL', 'True'))
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(event['data'])
|
||||
@@ -0,0 +1,12 @@
|
||||
import os
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
playbook = playbooks.AddMessageToSlack(
|
||||
infrastructure.SlackClient(os.environ['SLACK_WEBHOOK_URL'])
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(playbooks.falco_alert(event))
|
||||
@@ -0,0 +1,15 @@
|
||||
import os
|
||||
import playbooks
|
||||
from playbooks import infrastructure
|
||||
|
||||
|
||||
playbook = playbooks.TaintNode(
|
||||
infrastructure.KubernetesClient(),
|
||||
os.environ.get('TAINT_KEY', 'falco/alert'),
|
||||
os.environ.get('TAINT_VALUE', 'true'),
|
||||
os.environ.get('TAINT_EFFECT', 'NoSchedule')
|
||||
)
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
playbook.run(playbooks.falco_alert(event))
|
||||
@@ -0,0 +1,210 @@
|
||||
import json
|
||||
import maya
|
||||
|
||||
|
||||
class DeletePod(object):
|
||||
def __init__(self, k8s_client):
|
||||
self._k8s_client = k8s_client
|
||||
|
||||
def run(self, alert):
|
||||
pod_name = alert['output_fields']['k8s.pod.name']
|
||||
|
||||
self._k8s_client.delete_pod(pod_name)
|
||||
|
||||
|
||||
class AddMessageToSlack(object):
|
||||
def __init__(self, slack_client):
|
||||
self._slack_client = slack_client
|
||||
|
||||
def run(self, alert):
|
||||
message = self._build_slack_message(alert)
|
||||
self._slack_client.post_message(message)
|
||||
|
||||
return message
|
||||
|
||||
def _build_slack_message(self, alert):
|
||||
return {
|
||||
'text': _output_from_alert(alert),
|
||||
'attachments': [{
|
||||
'color': self._color_from(alert['priority']),
|
||||
'fields': [
|
||||
{
|
||||
'title': 'Rule',
|
||||
'value': alert['rule'],
|
||||
'short': False
|
||||
},
|
||||
{
|
||||
'title': 'Priority',
|
||||
'value': alert['priority'],
|
||||
'short': True
|
||||
},
|
||||
{
|
||||
'title': 'Time',
|
||||
'value': str(maya.parse(alert['time'])),
|
||||
'short': True
|
||||
},
|
||||
{
|
||||
'title': 'Kubernetes Pod Name',
|
||||
'value': alert['output_fields']['k8s.pod.name'],
|
||||
'short': True
|
||||
},
|
||||
{
|
||||
'title': 'Container Id',
|
||||
'value': alert['output_fields']['container.id'],
|
||||
'short': True
|
||||
}
|
||||
]
|
||||
}]
|
||||
}
|
||||
|
||||
_COLORS = {
|
||||
'Emergency': '#b12737',
|
||||
'Alert': '#f24141',
|
||||
'Critical': '#fc7335',
|
||||
'Error': '#f28143',
|
||||
'Warning': '#f9c414',
|
||||
'Notice': '#397ec3',
|
||||
'Informational': '#8fc0e7',
|
||||
'Debug': '#8fc0e7',
|
||||
}
|
||||
|
||||
def _color_from(self, priority):
|
||||
return self._COLORS.get(priority, '#eeeeee')
|
||||
|
||||
|
||||
def _output_from_alert(alert):
|
||||
output = alert['output'].split(': ')[1]
|
||||
priority_plus_whitespace_length = len(alert['priority']) + 1
|
||||
|
||||
return output[priority_plus_whitespace_length:]
|
||||
|
||||
|
||||
class TaintNode(object):
|
||||
def __init__(self, k8s_client, key, value, effect):
|
||||
self._k8s_client = k8s_client
|
||||
self._key = key
|
||||
self._value = value
|
||||
self._effect = effect
|
||||
|
||||
def run(self, alert):
|
||||
pod = alert['output_fields']['k8s.pod.name']
|
||||
node = self._k8s_client.find_node_running_pod(pod)
|
||||
|
||||
self._k8s_client.taint_node(node, self._key, self._value, self._effect)
|
||||
|
||||
|
||||
class NetworkIsolatePod(object):
|
||||
def __init__(self, k8s_client):
|
||||
self._k8s_client = k8s_client
|
||||
|
||||
def run(self, alert):
|
||||
pod = alert['output_fields']['k8s.pod.name']
|
||||
|
||||
self._k8s_client.add_label_to_pod(pod, 'isolated', 'true')
|
||||
|
||||
|
||||
class CreateIncidentInDemisto(object):
|
||||
def __init__(self, demisto_client):
|
||||
self._demisto_client = demisto_client
|
||||
|
||||
def run(self, alert):
|
||||
incident = {
|
||||
'type': 'Policy Violation',
|
||||
'name': alert['rule'],
|
||||
'details': _output_from_alert(alert),
|
||||
'severity': self._severity_from(alert['priority']),
|
||||
'occurred': alert['time'],
|
||||
'labels': [
|
||||
{'type': 'Brand', 'value': 'Sysdig'},
|
||||
{'type': 'Application', 'value': 'Falco'},
|
||||
{'type': 'container.id', 'value': alert['output_fields']['container.id']},
|
||||
{'type': 'k8s.pod.name', 'value': alert['output_fields']['k8s.pod.name']}
|
||||
]
|
||||
}
|
||||
self._demisto_client.create_incident(incident)
|
||||
|
||||
return incident
|
||||
|
||||
def _severity_from(self, priority):
|
||||
return self._SEVERITIES.get(priority, 0)
|
||||
|
||||
_SEVERITIES = {
|
||||
'Emergency': 4,
|
||||
'Alert': 4,
|
||||
'Critical': 4,
|
||||
'Error': 3,
|
||||
'Warning': 2,
|
||||
'Notice': 1,
|
||||
'Informational': 5,
|
||||
'Debug': 5,
|
||||
}
|
||||
|
||||
|
||||
class StartSysdigCaptureForContainer(object):
|
||||
def __init__(self, k8s_client, duration_in_seconds, s3_bucket,
|
||||
aws_access_key_id, aws_secret_access_key):
|
||||
self._k8s_client = k8s_client
|
||||
self._duration_in_seconds = duration_in_seconds
|
||||
self._s3_bucket = s3_bucket
|
||||
self._aws_access_key_id = aws_access_key_id
|
||||
self._aws_secret_access_key = aws_secret_access_key
|
||||
|
||||
def run(self, alert):
|
||||
pod = alert['output_fields']['k8s.pod.name']
|
||||
event_time = alert['output_fields']['evt.time']
|
||||
|
||||
self._k8s_client.start_sysdig_capture_for(pod,
|
||||
event_time,
|
||||
self._duration_in_seconds,
|
||||
self._s3_bucket,
|
||||
self._aws_access_key_id,
|
||||
self._aws_secret_access_key)
|
||||
|
||||
|
||||
class CreateContainerInPhantom(object):
|
||||
def __init__(self, phantom_client):
|
||||
self._phantom_client = phantom_client
|
||||
|
||||
def run(self, alert):
|
||||
container = self._build_container_from(alert)
|
||||
self._phantom_client.create_container(container)
|
||||
|
||||
return container
|
||||
|
||||
def _build_container_from(self, alert):
|
||||
return {
|
||||
'description': _output_from_alert(alert),
|
||||
'name': alert['rule'],
|
||||
'start_time': maya.parse(alert['time']).iso8601(),
|
||||
'severity': self._severity_from(alert['priority']),
|
||||
'label': 'events',
|
||||
'status': 'new',
|
||||
'data': {
|
||||
'container.id': alert['output_fields']['container.id'],
|
||||
'k8s.pod.name': alert['output_fields']['k8s.pod.name'],
|
||||
}
|
||||
}
|
||||
|
||||
def _severity_from(self, priority):
|
||||
return self._SEVERITIES.get(priority, 0)
|
||||
|
||||
_SEVERITIES = {
|
||||
'Emergency': 'high',
|
||||
'Alert': 'high',
|
||||
'Critical': 'high',
|
||||
'Error': 'medium',
|
||||
'Warning': 'medium',
|
||||
'Notice': 'low',
|
||||
'Informational': 'low',
|
||||
'Debug': 'low',
|
||||
}
|
||||
|
||||
|
||||
def falco_alert(event):
|
||||
if 'data' in event:
|
||||
return event['data']
|
||||
|
||||
if 'Records' in event:
|
||||
return json.loads(event['Records'][0]['Sns']['Message'])
|
||||
|
||||
return event
|
||||
@@ -0,0 +1,267 @@
|
||||
import os
|
||||
import json
|
||||
from six.moves import http_client
|
||||
|
||||
from kubernetes import client, config
|
||||
import requests
|
||||
|
||||
|
||||
class KubernetesClient(object):
|
||||
def __init__(self):
|
||||
if 'KUBERNETES_LOAD_KUBE_CONFIG' in os.environ:
|
||||
config.load_kube_config()
|
||||
else:
|
||||
config.load_incluster_config()
|
||||
|
||||
self._v1 = client.CoreV1Api()
|
||||
self._batch_v1 = client.BatchV1Api()
|
||||
|
||||
def delete_pod(self, name):
|
||||
namespace = self._find_pod_namespace(name)
|
||||
body = client.V1DeleteOptions()
|
||||
self._v1.delete_namespaced_pod(name=name,
|
||||
namespace=namespace,
|
||||
body=body)
|
||||
|
||||
def exists_pod(self, name):
|
||||
response = self._v1.list_pod_for_all_namespaces(watch=False)
|
||||
for item in response.items:
|
||||
if item.metadata.name == name:
|
||||
if item.metadata.deletion_timestamp is None:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _find_pod_namespace(self, name):
|
||||
response = self._v1.list_pod_for_all_namespaces(watch=False)
|
||||
for item in response.items:
|
||||
if item.metadata.name == name:
|
||||
return item.metadata.namespace
|
||||
|
||||
def find_node_running_pod(self, name):
|
||||
response = self._v1.list_pod_for_all_namespaces(watch=False)
|
||||
for item in response.items:
|
||||
if item.metadata.name == name:
|
||||
return item.spec.node_name
|
||||
|
||||
def taint_node(self, name, key, value, effect):
|
||||
body = client.V1Node(
|
||||
spec=client.V1NodeSpec(
|
||||
taints=[
|
||||
client.V1Taint(key=key, value=value, effect=effect)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
return self._v1.patch_node(name, body)
|
||||
|
||||
def add_label_to_pod(self, name, label, value):
|
||||
namespace = self._find_pod_namespace(name)
|
||||
|
||||
body = client.V1Pod(
|
||||
metadata=client.V1ObjectMeta(
|
||||
labels={label: value}
|
||||
)
|
||||
)
|
||||
|
||||
return self._v1.patch_namespaced_pod(name, namespace, body)
|
||||
|
||||
def start_sysdig_capture_for(self, pod_name, event_time,
|
||||
duration_in_seconds, s3_bucket,
|
||||
aws_access_key_id, aws_secret_access_key):
|
||||
job_name = 'sysdig-{}-{}'.format(pod_name, event_time)
|
||||
|
||||
node_name = self.find_node_running_pod(pod_name)
|
||||
namespace = self._find_pod_namespace(pod_name)
|
||||
body = self._build_sysdig_capture_job_body(job_name,
|
||||
node_name,
|
||||
duration_in_seconds,
|
||||
s3_bucket,
|
||||
aws_access_key_id,
|
||||
aws_secret_access_key)
|
||||
|
||||
return self._batch_v1.create_namespaced_job(namespace, body)
|
||||
|
||||
def _build_sysdig_capture_job_body(self, job_name, node_name,
                                   duration_in_seconds, s3_bucket,
                                   aws_access_key_id, aws_secret_access_key):
    """Build the V1Job manifest for a one-shot sysdig capture.

    The pod runs the 'sysdig/capturer' image privileged, pinned to
    *node_name*, with the host paths sysdig needs mounted read-only
    where possible, and uploads the capture to S3 using the given
    credentials. The same manifest as before, assembled from named
    pieces for readability.
    """
    env = [
        client.V1EnvVar(name='AWS_S3_BUCKET', value=s3_bucket),
        client.V1EnvVar(name='CAPTURE_DURATION',
                        value=str(duration_in_seconds)),
        client.V1EnvVar(name='CAPTURE_FILE_NAME', value=job_name),
        client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
                        value=aws_access_key_id),
        client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
                        value=aws_secret_access_key),
    ]

    # Host filesystem views required by sysdig inside the container.
    mounts = [
        client.V1VolumeMount(mount_path='/host/var/run/docker.sock',
                             name='docker-socket'),
        client.V1VolumeMount(mount_path='/host/dev', name='dev-fs'),
        client.V1VolumeMount(mount_path='/host/proc', name='proc-fs',
                             read_only=True),
        client.V1VolumeMount(mount_path='/host/boot', name='boot-fs',
                             read_only=True),
        client.V1VolumeMount(mount_path='/host/lib/modules',
                             name='lib-modules', read_only=True),
        client.V1VolumeMount(mount_path='/host/usr', name='usr-fs',
                             read_only=True),
        client.V1VolumeMount(mount_path='/dev/shm', name='dshm'),
    ]

    volumes = [
        client.V1Volume(
            name='dshm',
            empty_dir=client.V1EmptyDirVolumeSource(medium='Memory')),
        client.V1Volume(
            name='docker-socket',
            host_path=client.V1HostPathVolumeSource(
                path='/var/run/docker.sock')),
        client.V1Volume(
            name='dev-fs',
            host_path=client.V1HostPathVolumeSource(path='/dev')),
        client.V1Volume(
            name='proc-fs',
            host_path=client.V1HostPathVolumeSource(path='/proc')),
        client.V1Volume(
            name='boot-fs',
            host_path=client.V1HostPathVolumeSource(path='/boot')),
        client.V1Volume(
            name='lib-modules',
            host_path=client.V1HostPathVolumeSource(path='/lib/modules')),
        client.V1Volume(
            name='usr-fs',
            host_path=client.V1HostPathVolumeSource(path='/usr')),
    ]

    capturer = client.V1Container(
        name='capturer',
        image='sysdig/capturer',
        image_pull_policy='Always',
        # Privileged: sysdig needs direct access to the host kernel.
        security_context=client.V1SecurityContext(privileged=True),
        env=env,
        volume_mounts=mounts,
    )

    pod_spec = client.V1PodSpec(
        containers=[capturer],
        volumes=volumes,
        node_name=node_name,
        restart_policy='Never',
    )

    return client.V1Job(
        metadata=client.V1ObjectMeta(name=job_name),
        spec=client.V1JobSpec(
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(name=job_name),
                spec=pod_spec,
            )
        )
    )
|
||||
|
||||
|
||||
class SlackClient(object):
    """Minimal client that posts JSON payloads to a Slack incoming webhook."""

    def __init__(self, slack_webhook_url):
        # Incoming-webhook URL all messages are POSTed to.
        self._slack_webhook_url = slack_webhook_url

    def post_message(self, message):
        """POST *message* (a dict) to the webhook as a JSON body.

        Fire-and-forget: the HTTP response is not inspected.
        """
        payload = json.dumps(message)
        requests.post(self._slack_webhook_url, data=payload)
|
||||
|
||||
|
||||
class DemistoClient(object):
    """Client for creating incidents through the Demisto REST API."""

    def __init__(self, api_key, base_url, verify_ssl=True):
        self._api_key = api_key
        self._base_url = base_url
        # Allows talking to servers with self-signed certificates.
        self._verify_ssl = verify_ssl

    def create_incident(self, incident):
        """POST *incident* (a dict) to /incident.

        Raises RuntimeError with the response body when the server does
        not answer 201 Created; returns None on success.
        """
        response = requests.post(self._base_url + '/incident',
                                 headers=self._headers(),
                                 data=json.dumps(incident),
                                 verify=self._verify_ssl)

        if response.status_code != http_client.CREATED:
            raise RuntimeError(response.text)

    def _headers(self):
        """JSON content negotiation plus API-key authorization."""
        return {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': self._api_key,
        }
|
||||
|
||||
|
||||
class PhantomClient(object):
    """Client for creating containers through the Phantom REST API."""

    def __init__(self, user, password, base_url, verify_ssl=True):
        self._user = user
        self._password = password
        self._base_url = base_url
        # Allows talking to servers with self-signed certificates.
        self._verify_ssl = verify_ssl

    def create_container(self, container):
        """POST *container* (a dict) to /rest/container.

        On success returns a copy of *container* with the server-assigned
        'id' added; on failure raises RuntimeError with the server's
        'message' field.
        """
        response = requests.post(self._base_url + '/rest/container',
                                 data=json.dumps(container),
                                 auth=(self._user, self._password),
                                 verify=self._verify_ssl)

        payload = response.json()
        if 'success' not in payload:
            raise RuntimeError(payload['message'])

    # Do not mutate the caller's dict; return an augmented copy.
        return dict(container, id=payload['id'])
|
||||
11
integrations/kubernetes-response-engine/playbooks/setup.py
Normal file
11
integrations/kubernetes-response-engine/playbooks/setup.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from setuptools import setup

# Packaging metadata for the Falco response-engine playbooks library.
setup(
    name='playbooks',
    version='0.1',
    description='A set of playbooks for Falco alerts',
    url='http://github.com/draios/falco-playbooks',
    author='Néstor Salceda',
    author_email='nestor.salceda@sysdig.com',
    license='',
    packages=['playbooks'],
    zip_safe=False,
)
|
||||
@@ -0,0 +1,32 @@
|
||||
# Integration spec (mamba) for infrastructure.DemistoClient.
# Needs DEMISTO_API_KEY / DEMISTO_BASE_URL env vars and a reachable server.
from mamba import description, it, context, before
from expects import expect, raise_error

import os

from playbooks import infrastructure


with description(infrastructure.DemistoClient) as self:
    with before.each:
        # Fresh client per example; TLS verification disabled for test servers.
        self.demisto_client = infrastructure.DemistoClient(
            os.environ['DEMISTO_API_KEY'],
            os.environ['DEMISTO_BASE_URL'],
            verify_ssl=False
        )

    with it('creates an incident'):
        incident = {
            "type": "Policy Violation",
            "name": "Falco incident",
            "severity": 2,
            "details": "Some incident details"
        }

        self.demisto_client.create_incident(incident)

    with context('when an error happens'):
        with it('raises an exception'):
            # An empty payload makes the API answer non-201 → RuntimeError.
            incident = {}

            expect(lambda: self.demisto_client.create_incident(incident)).\
                to(raise_error(RuntimeError))
|
||||
@@ -0,0 +1,78 @@
|
||||
# Integration spec (mamba) for infrastructure.KubernetesClient.
# Needs kubectl access to a live cluster; the node-name assertion below
# assumes a GKE node pool named 'gke-sysdig-work-default-pool'.
from mamba import description, context, it, before
from expects import expect, be_false, be_true, start_with, equal, have_key, be_none

import subprocess
import os.path
import time

from playbooks import infrastructure


with description(infrastructure.KubernetesClient) as self:
    with before.each:
        self.kubernetes_client = infrastructure.KubernetesClient()

    with context('when checking if a pod exists'):
        with before.each:
            self._create_nginx_pod()

        with context('and pod exists'):
            with it('returns true'):
                expect(self.kubernetes_client.exists_pod('nginx')).to(be_true)

        with context('and pod does not exist'):
            with it('returns false'):
                self.kubernetes_client.delete_pod('nginx')

                expect(self.kubernetes_client.exists_pod('nginx')).to(be_false)

    with it('finds node running pod'):
        self._create_nginx_pod()

        node = self.kubernetes_client.find_node_running_pod('nginx')

        expect(node).to(start_with('gke-sysdig-work-default-pool'))

    with it('taints node'):
        self._create_nginx_pod()

        node_name = self.kubernetes_client.find_node_running_pod('nginx')

        node = self.kubernetes_client.taint_node(node_name,
                                                 'playbooks',
                                                 'true',
                                                 'NoSchedule')

        expect(node.spec.taints[0].effect).to(equal('NoSchedule'))
        expect(node.spec.taints[0].key).to(equal('playbooks'))
        expect(node.spec.taints[0].value).to(equal('true'))

    with it('adds label to a pod'):
        self._create_nginx_pod()

        pod = self.kubernetes_client.add_label_to_pod('nginx',
                                                      'testing',
                                                      'true')

        expect(pod.metadata.labels).to(have_key('testing', 'true'))

    with it('starts sysdig capture for'):
        self._create_nginx_pod()

        job = self.kubernetes_client.start_sysdig_capture_for('nginx',
                                                              int(time.time()),
                                                              10,
                                                              'any s3 bucket',
                                                              'any aws key id',
                                                              'any aws secret key')

        expect(job).not_to(be_none)

    # Helper: apply the nginx deployment manifest (../support/deployment.yaml)
    # via kubectl; return code is ignored (re-creation is harmless here).
    def _create_nginx_pod(self):
        current_directory = os.path.dirname(os.path.realpath(__file__))
        pod_manifesto = os.path.join(current_directory,
                                     '..',
                                     'support',
                                     'deployment.yaml')

        subprocess.call(['kubectl', 'create', '-f', pod_manifesto])
|
||||
@@ -0,0 +1,45 @@
|
||||
# Integration spec (mamba) for infrastructure.PhantomClient.
# Needs PHANTOM_USER / PHANTOM_PASSWORD / PHANTOM_BASE_URL env vars and a
# reachable Phantom server.
from mamba import description, it, before, context
from expects import expect, be_none, raise_error

import os

from playbooks import infrastructure


with description(infrastructure.PhantomClient) as self:
    with before.each:
        # Fresh client per example; TLS verification disabled for test servers.
        self.phantom_client = infrastructure.PhantomClient(
            os.environ['PHANTOM_USER'],
            os.environ['PHANTOM_PASSWORD'],
            os.environ['PHANTOM_BASE_URL'],
            verify_ssl=False
        )

    with it('creates a container in Phantom Server'):
        container = {
            'name': 'My Container',
            'description': 'Useful description of this container.',
            'label': 'events',
            'run_automation': False,
            'severity': 'high',
            'status': 'new',
            'start_time': '2015-03-21T19:28:13.759Z',
        }

        container = self.phantom_client.create_container(container)

        expect(container['id']).not_to(be_none)

    with context('when an error happens'):
        with it('raises an error'):
            # Same payload but without the mandatory 'name' → server error.
            container = {
                'description': 'Useful description of this container.',
                'label': 'events',
                'run_automation': False,
                'severity': 'high',
                'status': 'new',
                'start_time': '2015-03-21T19:28:13.759Z',
            }

            expect(lambda: self.phantom_client.create_container(container))\
                .to(raise_error(RuntimeError))
|
||||
@@ -0,0 +1,16 @@
|
||||
# Integration spec (mamba) for infrastructure.SlackClient.
# Needs the SLACK_WEBHOOK_URL env var; actually posts to the channel.
from mamba import description, it

import os

from playbooks import infrastructure


with description(infrastructure.SlackClient) as self:
    with it('posts a message to #kubeless-demo channel'):
        slack_client = infrastructure.SlackClient(os.environ['SLACK_WEBHOOK_URL'])

        message = {
            'text': 'Hello from Python! :metal:'
        }

        slack_client.post_message(message)
|
||||
@@ -0,0 +1,62 @@
|
||||
# Unit spec (mamba) for playbooks.AddMessageToSlack, with the Slack client
# replaced by a doublex Spy so no network traffic happens.
from mamba import description, it, before, context
from expects import expect, have_key, have_keys, contain

from doublex import Spy
from doublex_expects import have_been_called_with

from playbooks import infrastructure
import playbooks


with description(playbooks.AddMessageToSlack) as self:
    with before.each:
        self.slack_client = Spy(infrastructure.SlackClient)
        self.playbook = playbooks.AddMessageToSlack(self.slack_client)

    with context('when publishing a message to slack'):
        with before.each:
            # Canned Falco alert used as playbook input in every example.
            self.alert = {
                "output": "10:22:15.576767292: Notice Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4",
                "output_fields": {
                    "container.id": "1c76f49f40b4",
                    "evt.arg.uid": "root",
                    "evt.time": 1527157335576767292,
                    "k8s.pod.name": "falco-event-generator-6fd89678f9-cdkvz",
                    "proc.cmdline": "event_generator ",
                    "proc.pname": "event_generator",
                    "user.name": "bin",
                    "user.uid": 2
                },
                "priority": "Notice",
                "rule": "Non sudo setuid",
                "time": "2018-05-24T10:22:15.576767292Z"
            }

            self.message = self.playbook.run(self.alert)

        with it('publishes message to slack'):
            expect(self.slack_client.post_message).to(have_been_called_with(self.message))

        with it('includes falco output'):
            falco_output = 'Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4'

            expect(self.message).to(have_key('text', falco_output))

        with it('includes color based on priority'):
            expect(self.message['attachments'][0]).to(have_key('color'))

        with it('includes priority'):
            expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Priority', value='Notice')))

        with it('includes rule name'):
            expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Rule', value='Non sudo setuid')))

        with it('includes time when alert happened'):
            expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Time', value='Thu, 24 May 2018 10:22:15 GMT')))

        with it('includes kubernetes pod name'):
            expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Kubernetes Pod Name', value='falco-event-generator-6fd89678f9-cdkvz')))

        with it('includes container id'):
            expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Container Id', value='1c76f49f40b4')))
|
||||
@@ -0,0 +1,63 @@
|
||||
# Unit spec (mamba) for playbooks.CreateContainerInPhantom, with the Phantom
# client replaced by a doublex Spy so no network traffic happens.
from mamba import description, it, before, context
from expects import expect, have_key

from doublex import Spy
from doublex_expects import have_been_called_with

from playbooks import infrastructure
import playbooks


with description(playbooks.CreateContainerInPhantom) as self:
    with before.each:
        self.phantom_client = Spy(infrastructure.PhantomClient)
        self.playbook = playbooks.CreateContainerInPhantom(self.phantom_client)

        # Canned Falco alert used as playbook input in every example.
        self.alert = {
            "output": "10:22:15.576767292: Notice Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4",
            "output_fields": {
                "container.id": "1c76f49f40b4",
                "evt.arg.uid": "root",
                "evt.time": 1527157335576767292,
                "k8s.pod.name": "falco-event-generator-6fd89678f9-cdkvz",
                "proc.cmdline": "event_generator ",
                "proc.pname": "event_generator",
                "user.name": "bin",
                "user.uid": 2
            },
            "priority": "Notice",
            "rule": "Non sudo setuid",
            "time": "2018-05-24T10:22:15.576767292Z"
        }

        self.container = self.playbook.run(self.alert)

    with it('creates the container in phantom'):
        expect(self.phantom_client.create_container).to(have_been_called_with(self.container))

    with it('includes falco output'):
        falco_output = 'Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4'

        expect(self.container).to(have_key('description', falco_output))

    with it('includes severity'):
        expect(self.container).to(have_key('severity', 'low'))

    with it('includes rule name'):
        expect(self.container).to(have_key('name', 'Non sudo setuid'))

    with it('includes time when alert happened'):
        expect(self.container).to(have_key('start_time', '2018-05-24T10:22:15.576767Z'))

    with it('includes label'):
        expect(self.container).to(have_key('label', 'events'))

    with it('includes status'):
        expect(self.container).to(have_key('status', 'new'))

    with context('when building additional data'):
        with it('includes kubernetes pod name'):
            expect(self.container['data']).to(have_key('k8s.pod.name', 'falco-event-generator-6fd89678f9-cdkvz'))

        with it('includes container id'):
            expect(self.container['data']).to(have_key('container.id', '1c76f49f40b4'))
|
||||
@@ -0,0 +1,70 @@
|
||||
# Unit spec (mamba) for playbooks.CreateIncidentInDemisto, with the Demisto
# client replaced by a doublex Spy so no network traffic happens.
from mamba import description, it, before, context
from expects import expect, have_key, have_keys, contain

from doublex import Spy
from doublex_expects import have_been_called_with

from playbooks import infrastructure
import playbooks

import os


with description(playbooks.CreateIncidentInDemisto) as self:
    with before.each:
        self.demisto_client = Spy(infrastructure.DemistoClient)
        self.playbook = playbooks.CreateIncidentInDemisto(self.demisto_client)

    with context('when publishing a message to slack'):
        with before.each:
            # Canned Falco alert used as playbook input in every example.
            self.alert = {
                "output": "10:22:15.576767292: Notice Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4",
                "output_fields": {
                    "container.id": "1c76f49f40b4",
                    "evt.arg.uid": "root",
                    "evt.time": 1527157335576767292,
                    "k8s.pod.name": "falco-event-generator-6fd89678f9-cdkvz",
                    "proc.cmdline": "event_generator ",
                    "proc.pname": "event_generator",
                    "user.name": "bin",
                    "user.uid": 2
                },
                "priority": "Notice",
                "rule": "Non sudo setuid",
                "time": "2018-05-24T10:22:15.576767292Z"
            }

            self.incident = self.playbook.run(self.alert)

        with it('creates incident in demisto'):
            expect(self.demisto_client.create_incident).to(have_been_called_with(self.incident))

        with it('sets incident type as Policy Violation'):
            expect(self.incident).to(have_key('type', 'Policy Violation'))

        with it('includes rule name'):
            expect(self.incident).to(have_key('name', 'Non sudo setuid'))

        with it('includes falco output'):
            falco_output = 'Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4'

            expect(self.incident).to(have_key('details', falco_output))

        with it('includes severity'):
            expect(self.incident).to(have_key('severity', 1))

        with it('includes time when alert happened'):
            expect(self.incident).to(have_key('occurred', "2018-05-24T10:22:15.576767292Z"))

        with context('when adding labels'):
            with it('includes Sysdig as Brand'):
                expect(self.incident['labels']).to(contain(have_keys(type='Brand', value='Sysdig')))

            with it('includes Falco as Application'):
                expect(self.incident['labels']).to(contain(have_keys(type='Application', value='Falco')))

            with it('includes container.id'):
                expect(self.incident['labels']).to(contain(have_keys(type='container.id', value='1c76f49f40b4')))

            with it('includes k8s.pod.name'):
                expect(self.incident['labels']).to(contain(have_keys(type='k8s.pod.name', value='falco-event-generator-6fd89678f9-cdkvz')))
|
||||
@@ -0,0 +1,22 @@
|
||||
# Unit spec (mamba) for playbooks.DeletePod, with the Kubernetes client
# replaced by a doublex Spy so no cluster access happens.
from mamba import description, it, before
from expects import expect

from doublex import Spy
from doublex_expects import have_been_called_with

from playbooks import infrastructure
import playbooks


with description(playbooks.DeletePod) as self:
    with before.each:
        self.k8s_client = Spy(infrastructure.KubernetesClient)
        self.playbook = playbooks.DeletePod(self.k8s_client)

    with it('deletes a pod'):
        pod_name = 'a pod name'
        alert = {'output_fields': {'k8s.pod.name': pod_name}}

        self.playbook.run(alert)

        expect(self.k8s_client.delete_pod).to(have_been_called_with(pod_name))
|
||||
@@ -0,0 +1,22 @@
|
||||
# Unit spec (mamba) for playbooks.NetworkIsolatePod, with the Kubernetes
# client replaced by a doublex Spy so no cluster access happens.
from mamba import description, it, before
from expects import expect

from doublex import Spy
from doublex_expects import have_been_called

from playbooks import infrastructure
import playbooks


with description(playbooks.NetworkIsolatePod) as self:
    with before.each:
        self.k8s_client = Spy(infrastructure.KubernetesClient)
        self.playbook = playbooks.NetworkIsolatePod(self.k8s_client)

    with it('adds isolation label to pod'):
        pod_name = 'any pod name'
        alert = {'output_fields': {'k8s.pod.name': pod_name}}

        self.playbook.run(alert)

        expect(self.k8s_client.add_label_to_pod).to(have_been_called)
|
||||
@@ -0,0 +1,40 @@
|
||||
# Unit spec (mamba) for playbooks.StartSysdigCaptureForContainer, with the
# Kubernetes client replaced by a doublex Spy so no cluster access happens.
from mamba import description, it, before
from expects import expect

from doublex import Spy
from doublex_expects import have_been_called_with

from playbooks import infrastructure
import playbooks


with description(playbooks.StartSysdigCaptureForContainer) as self:
    with before.each:
        self.k8s_client = Spy(infrastructure.KubernetesClient)
        # Plain sentinel strings: only pass-through to the client is asserted.
        self.duration_in_seconds = 'any duration in seconds'
        self.s3_bucket = 'any s3 bucket url'
        self.aws_access_key_id = 'any aws access key id'
        self.aws_secret_access_key = 'any aws secret access key'
        self.playbook = playbooks.StartSysdigCaptureForContainer(self.k8s_client,
                                                                 self.duration_in_seconds,
                                                                 self.s3_bucket,
                                                                 self.aws_access_key_id,
                                                                 self.aws_secret_access_key)

    with it('add starts capturing job in same node than Pod alerted'):
        pod_name = 'any pod name'
        event_time = 'any event time'
        alert = {'output_fields': {
            'k8s.pod.name': pod_name,
            'evt.time': event_time,
        }}

        self.playbook.run(alert)

        expect(self.k8s_client.start_sysdig_capture_for)\
            .to(have_been_called_with(pod_name,
                                      event_time,
                                      self.duration_in_seconds,
                                      self.s3_bucket,
                                      self.aws_access_key_id,
                                      self.aws_secret_access_key))
|
||||
@@ -0,0 +1,34 @@
|
||||
# Unit spec (mamba) for playbooks.TaintNode, with the Kubernetes client
# replaced by a doublex Spy so no cluster access happens.
from mamba import description, it, before
from expects import expect

from doublex import Spy, when
from doublex_expects import have_been_called_with

from playbooks import infrastructure
import playbooks


with description(playbooks.TaintNode) as self:
    with before.each:
        self.k8s_client = Spy(infrastructure.KubernetesClient)
        self.key = 'falco/alert'
        self.value = 'true'
        self.effect = 'NoSchedule'
        self.playbook = playbooks.TaintNode(self.k8s_client,
                                            self.key,
                                            self.value,
                                            self.effect)

    with it('taints the node'):
        pod_name = 'any pod name'
        alert = {'output_fields': {'k8s.pod.name': pod_name}}

        # Stub the pod→node lookup so the taint call can be asserted.
        node = 'any node'
        when(self.k8s_client).find_node_running_pod(pod_name).returns(node)

        self.playbook.run(alert)

        expect(self.k8s_client.taint_node).to(have_been_called_with(node,
                                                                    self.key,
                                                                    self.value,
                                                                    self.effect))
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user