diff --git a/.gitignore b/.gitignore
index f392843b..7a98caeb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,11 @@
/build*
+*~
+test/falco_test.pyc
+test/falco_tests.yaml
+test/traces-negative
+test/traces-positive
+test/traces-info
+test/job-results
userspace/falco/lua/re.lua
userspace/falco/lua/lpeg.so
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..fe37c22f
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,46 @@
+language: c
+env:
+ - BUILD_TYPE=Debug
+ - BUILD_TYPE=Release
+before_install:
+ - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
+ - sudo apt-get update
+install:
+ - sudo apt-get --force-yes install g++-4.8
+ - sudo apt-get install rpm linux-headers-$(uname -r)
+ - git clone https://github.com/draios/sysdig.git ../sysdig
+ - sudo apt-get install -y python-pip libvirt-dev jq
+ - cd ..
+ - curl -Lo avocado-36.0-tar.gz https://github.com/avocado-framework/avocado/archive/36.0lts.tar.gz
+ - tar -zxvf avocado-36.0-tar.gz
+ - cd avocado-36.0lts
+ - sudo pip install -r requirements-travis.txt
+ - sudo python setup.py install
+ - cd ../falco
+before_script:
+ - export KERNELDIR=/lib/modules/$(ls /lib/modules | sort | head -1)/build
+script:
+ - set -e
+ - export CC="gcc-4.8"
+ - export CXX="g++-4.8"
+ - wget https://s3.amazonaws.com/download.draios.com/dependencies/cmake-3.3.2.tar.gz
+ - tar -xzf cmake-3.3.2.tar.gz
+ - cd cmake-3.3.2
+ - ./bootstrap --prefix=/usr
+ - make
+ - sudo make install
+ - cd ..
+ - mkdir build
+ - cd build
+ - cmake .. -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+ - make VERBOSE=1
+ - make package
+ - cd ..
+ - sudo test/run_regression_tests.sh
+notifications:
+ webhooks:
+ urls:
+# - https://webhooks.gitter.im/e/fdbc2356fb0ea2f15033
+ on_success: change
+ on_failure: always
+ on_start: never
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3ecd6b72..af721f54 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,27 @@
This file documents all notable changes to Falco. The release numbering uses [semantic versioning](http://semver.org).
+## v0.2.0
+
+Released 2016-06-09
+
+For full handling of `setsid` system calls and session id tracking using `proc.sname`, falco requires sysdig version 0.10.0 or later.
+
+### Major Changes
+
+- Add TravisCI regression tests. Testing involves a variety of positive, negative, and informational trace files with both plain and JSON output. [[#76](https://github.com/draios/falco/pull/76)] [[#83](https://github.com/draios/falco/pull/83)]
+- Fairly big rework of the ruleset to improve coverage, reduce false positives, and handle installation environments effectively. [[#83](https://github.com/draios/falco/pull/83)] [[#87](https://github.com/draios/falco/pull/87)]
+- Not directly a code change, but worth mentioning here: the Wiki has been populated with an initial set of articles, migrating content from the README and adding detail where necessary. [[#90](https://github.com/draios/falco/pull/90)]
+
+### Minor Changes
+
+- Improve JSON output to include the rule name, full output string, time, and severity [[#89](https://github.com/draios/falco/pull/89)]
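+
+  With JSON output enabled, each notification is emitted as a single JSON object carrying those fields, along the lines of (illustrative values):
+  `{"time":"2016-06-09T12:34:56.123456789Z","rule":"write_etc","priority":"Warning","output":"..."}`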
+
+### Bug Fixes
+
+- Improve CMake quote handling [[#84](https://github.com/draios/falco/pull/84)]
+- Remove unnecessary NULL check of a delete [[#85](https://github.com/draios/falco/pull/85)]
+
## v0.1.0
Released 2016-05-17
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 539ce783..a8ade60c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -42,7 +42,7 @@ set(PROBE_DEVICE_NAME "sysdig")
set(CMD_MAKE make)
-set(SYSDIG_DIR ${PROJECT_SOURCE_DIR}/../sysdig)
+set(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig")
include(ExternalProject)
@@ -151,7 +151,7 @@ ExternalProject_Add(lpeg
DEPENDS luajit
URL "http://s3.amazonaws.com/download.draios.com/dependencies/lpeg-1.0.0.tar.gz"
URL_MD5 "0aec64ccd13996202ad0c099e2877ece"
- BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} ${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh
+ BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} "${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh"
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ""
INSTALL_COMMAND "")
@@ -180,9 +180,9 @@ ExternalProject_Add(lyaml
install(FILES falco.yaml
DESTINATION "${DIR_ETC}")
-add_subdirectory(${SYSDIG_DIR}/driver ${PROJECT_BINARY_DIR}/driver)
-add_subdirectory(${SYSDIG_DIR}/userspace/libscap ${PROJECT_BINARY_DIR}/userspace/libscap)
-add_subdirectory(${SYSDIG_DIR}/userspace/libsinsp ${PROJECT_BINARY_DIR}/userspace/libsinsp)
+add_subdirectory("${SYSDIG_DIR}/driver" "${PROJECT_BINARY_DIR}/driver")
+add_subdirectory("${SYSDIG_DIR}/userspace/libscap" "${PROJECT_BINARY_DIR}/userspace/libscap")
+add_subdirectory("${SYSDIG_DIR}/userspace/libsinsp" "${PROJECT_BINARY_DIR}/userspace/libsinsp")
add_subdirectory(rules)
add_subdirectory(scripts)
diff --git a/README.md b/README.md
index 807e6afb..a1e871b9 100644
--- a/README.md
+++ b/README.md
@@ -1,294 +1,35 @@
# Sysdig Falco
-### *Host Activity Monitoring using Sysdig Event Filtering*
-**Table of Contents**
+#### Latest release
-- [Overview](#overview)
-- [Rules](#rules)
-- [Configuration](#configuration)
-- [Installation](#installation)
-- [Running Falco](#running-falco)
+**v0.2.0**
+Read the [change log](https://github.com/draios/falco/blob/dev/CHANGELOG.md)
+Dev Branch: [![Build Status](https://travis-ci.org/draios/falco.svg?branch=dev)](https://travis-ci.org/draios/falco)
+Master Branch: [![Build Status](https://travis-ci.org/draios/falco.svg?branch=master)](https://travis-ci.org/draios/falco)
## Overview
-Sysdig Falco is a behavioral activity monitor designed to secure your applications. Powered by Sysdig’s universal system level visibility, write simple and powerful rules, and then output warnings in the format you need. Continuously monitor and detect container, application, host, and network activity... all in one place, from one source of data, with one set of rules.
-
+Sysdig Falco is a behavioral activity monitor designed to detect anomalous activity in your applications. Powered by sysdig’s system call capture infrastructure, falco lets you continuously monitor and detect container, application, host, and network activity... all in one place, from one source of data, with one set of rules.
#### What kind of behaviors can Falco detect?
-Falco can detect and alert on any behavior that involves making Linux system calls. Thanks to Sysdig's core decoding and state tracking functionality, Falco alerts can be triggered by the use of specific system calls, their arguments, and by properties of the calling process. For example, you can easily detect things like:
+Falco can detect and alert on any behavior that involves making Linux system calls. Thanks to Sysdig's core decoding and state tracking functionality, falco alerts can be triggered by the use of specific system calls, their arguments, and by properties of the calling process. For example, you can easily detect things like:
+
- A shell is run inside a container
- A server process spawns a child process of an unexpected type
-- Unexpected read of a sensitive file (like `/etc/passwd`)
+- Unexpected read of a sensitive file (like `/etc/shadow`)
- A non-device file is written to `/dev`
- A standard system binary (like `ls`) makes an outbound network connection
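+
+As a taste of the rule syntax, the first of these behaviors corresponds to the following rule, taken from the sample rules file ([rules/falco_rules.yaml](rules/falco_rules.yaml)); `container` and `spawned_process` are macros defined in the same file:
+
+```yaml
+- rule: run_shell_in_container
+  desc: a shell was spawned by a non-shell program in a container. Container entrypoints are excluded.
+  condition: container and proc.name = bash and spawned_process and proc.pname exists and not proc.pname in (bash, docker)
+  output: "Shell spawned in a container other than entrypoint (user=%user.name container_id=%container.id container_name=%container.name shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline)"
+  priority: WARNING
+```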
-#### How you use it
-
-Falco is deployed as a long-running daemon. You can install it as a debian/rpm
-package on a regular host or container host, or you can deploy it as a
-container.
-
-Falco is configured via a rules file defining the behaviors and events to
-watch for, and a general configuration file. Rules are expressed in a
-high-level, human-readable language. We've provided a sample rule file
-`./rules/falco_rules.yaml` as a starting point - you can (and will likely
-want!) to adapt it to your environment.
-
-When developing rules, one helpful feature is Falco's ability to read trace
-files saved by sysdig. This allows you to "record" the offending behavior
-once, and replay it with Falco as many times as needed while tweaking your
-rules.
-
-Once deployed, Falco uses the Sysdig kernel module and userspace libraries to
-watch for any events matching one of the conditions defined in the rule
-file. If a matching event occurs, a notification is written to the the
-configured output(s).
-
-
-## Rules
-
-_Call for contributions: If you come up with additional rules which you'd like to see in the core repository - PR welcome!_
-
-A Falco rules file is comprised of two kinds of elements: rules and macro definitions. Macros are simply definitions that can be re-used inside rules and other macros, providing a way to factor out and name common patterns.
-
-#### Conditions
-
-The key part of a rule is the _condition_ field. A condition is simply a boolean predicate on sysdig events.
-Conditions are expressed using the Sysdig [filter syntax](http://www.sysdig.org/wiki/sysdig-user-guide/#filtering). Any Sysdig filter is a valid Falco condition (with the caveat of certain excluded system calls, discussed below). In addition, Falco expressions can contain _macro_ terms, which are not present in Sysdig syntax.
-
-Here's an example of a condition that alerts whenever a bash shell is run inside a container:
-
-`container.id != host and proc.name = bash`
-
-The first clause checks that the event happened in a container (sysdig events have a `container` field that is equal to "host" if the event happened on a regular host). The second clause checks that the process name is `bash`. Note that this condition does not even include a clause with system call! It only uses event metadata. As such, if a bash shell does start up in a container, Falco will output events for every syscall that is done by that shell.
-
-_Tip: If you're new to sysdig and unsure what fields are available, run `sysdig -l` to see the list of supported fields._
-
-#### Rules
-
-Along with a condition, each rule includes the following fields:
-
-* _rule_: a short unique name for the rule
-* _desc_: a longer description of what the rule detects
-* _output_ and _priority_: The output format specifies the message that should be output if a matching event occurs, and follows the Sysdig [output format syntax](http://www.sysdig.org/wiki/sysdig-user-guide/#output-formatting). The priority is a case-insensitive representation of severity and should be one of "emergency", "alert", "critical", "error", "warning", "notice", "informational", or "debug".
-
-A complete rule using the above condition might be:
-
-```yaml
-- condition: container.id != host and proc.name = bash
- output: "shell in a container (%user.name %container.id %proc.name %evt.dir %evt.type %evt.args %fd.name)"
- priority: WARNING
-```
-
-#### Macros
-As noted above, macros provide a way to define common sub-portions of rules in a reusable way. As a very simple example, if we had many rules for events happening in containers, we might to define a `in_container` macro:
-
-```yaml
-- macro: in_container
- condition: container.id != host
-```
-
-With this macro defined, we can then rewrite the above rule's condition as `in_container and proc.name = bash`.
-
-For many more examples of rules and macros, please take a look at the accompanying [rules file](rules/falco_rules.yaml).
-
-
-#### Ignored system calls
-
-For performance reasons, some system calls are currently discarded before Falco processing. The current list is:
-`clock_getres,clock_gettime,clock_nanosleep,clock_settime,close,epoll_create,epoll_create1,epoll_ctl,epoll_pwait,epoll_wait,eventfd,fcntl,fcntl64,fstat,fstat64,fstatat64,fstatfs,fstatfs64,futex,getitimer,gettimeofday,ioprio_get,ioprio_set,llseek,lseek,lstat,lstat64,mmap,mmap2,munmap,nanosleep,poll,ppoll,pread64,preadv,procinfo,pselect6,pwrite64,pwritev,read,readv,recv,recvfrom,recvmmsg,recvmsg,sched_yield,select,send,sendfile,sendfile64,sendmmsg,sendmsg,sendto,setitimer,settimeofday,shutdown,splice,stat,stat64,statfs,statfs64,switch,tee,timer_create,timer_delete,timerfd_create,timerfd_gettime,timerfd_settime,timer_getoverrun,timer_gettime,timer_settime,wait4,write,writev`
-
-
-## Configuration
-
-General configuration is done via a separate yaml file. The
-[config file](falco.yaml) in this repo has comments describing the various
-configuration options.
-
-
-## Installation
-#### Scripted install
-
-To install Falco automatically in one step, simply run the following command as root or with sudo:
-
-`curl -s https://s3.amazonaws.com/download.draios.com/stable/install-falco | sudo bash`
-
-#### Package install
-
-##### RHEL
-
-- Trust the Draios GPG key and configure the yum repository
-```
-rpm --import https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public
-curl -s -o /etc/yum.repos.d/draios.repo http://download.draios.com/stable/rpm/draios.repo
-```
-- Install the EPEL repository
-
-Note: The following command is required only if DKMS is not available in the distribution. You can verify if DKMS is available with yum list dkms
-
-`rpm -i http://mirror.us.leaseweb.net/epel/6/i386/epel-release-6-8.noarch.rpm`
-
-- Install kernel headers
-
-Warning: The following command might not work with any kernel. Make sure to customize the name of the package properly
-
-`yum -y install kernel-devel-$(uname -r)`
-
-- Install Falco
-
-`yum -y install falco`
-
-
-To uninstall, just do `yum erase falco`.
-
-##### Debian
-
-- Trust the Draios GPG key, configure the apt repository, and update the package list
-
-```
-curl -s https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public | apt-key add -
-curl -s -o /etc/apt/sources.list.d/draios.list http://download.draios.com/stable/deb/draios.list
-apt-get update
-```
-
-- Install kernel headers
-
-Warning: The following command might not work with any kernel. Make sure to customize the name of the package properly
-
-`apt-get -y install linux-headers-$(uname -r)`
-
-- Install Falco
-
-`apt-get -y install falco`
-
-To uninstall, just do `apt-get remove falco`.
-
-
-##### Container install (general)
-
-If you have full control of your host operating system, then installing Falco using the normal installation method is the recommended best practice. This method allows full visibility into all containers on the host OS. No changes to the standard automatic/manual installation procedures are required.
-
-However, Falco can also run inside a Docker container. To guarantee a smooth deployment, the kernel headers must be installed in the host operating system, before running Falco.
-
-This can usually be done on Debian-like distributions with:
-`apt-get -y install linux-headers-$(uname -r)`
-
-Or, on RHEL-like distributions:
-`yum -y install kernel-devel-$(uname -r)`
-
-Falco can then be run with:
-
-```
-docker pull sysdig/falco
-docker run -i -t --name falco --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro sysdig/falco
-```
-
-##### Container install (CoreOS)
-
-The recommended way to run Falco on CoreOS is inside of its own Docker container using the install commands in the paragraph above. This method allows full visibility into all containers on the host OS.
-
-This method is automatically updated, includes some nice features such as automatic setup and bash completion, and is a generic approach that can be used on other distributions outside CoreOS as well.
-
-However, some users may prefer to run Falco in the CoreOS toolbox. While not the recommended method, this can be achieved by installing Falco inside the toolbox using the normal installation method, and then manually running the sysdig-probe-loader script:
-
-```
-toolbox --bind=/dev --bind=/var/run/docker.sock
-curl -s https://s3.amazonaws.com/download.draios.com/stable/install-falco | bash
-sysdig-probe-loader
-```
-
-
-
-## Running Falco
-
-Falco is intended to be run as a service. But for experimentation and designing/testing rulesets, you will likely want to run it manually from the command-line.
-
-#### Running Falco as a service (after installing package)
-
-`service falco start`
-
-#### Running Falco in a container
-
-`docker run -i -t --name falco --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro sysdig/falco`
-
-#### Running Falco manually
-
-Do `falco --help` to see the command-line options available when running manually.
-
-
-## Building and running Falco locally from source
-Building Falco requires having `cmake` and `g++` installed.
-
-
-#### Building Falco
-Clone this repo in a directory that also contains the sysdig source repo. The result should be something like:
-
-```
-22:50 vagrant@vagrant-ubuntu-trusty-64:/sysdig
-$ pwd
-/sysdig
-22:50 vagrant@vagrant-ubuntu-trusty-64:/sysdig
-$ ls -l
-total 20
-drwxr-xr-x 1 vagrant vagrant 238 Feb 21 21:44 falco
-drwxr-xr-x 1 vagrant vagrant 646 Feb 21 17:41 sysdig
-```
-
-create a build dir, then setup cmake and run make from that dir:
-
-```
-$ mkdir build
-$ cd build
-$ cmake ..
-$ make
-```
-
-as a result, you should have a falco executable in `build/userspace/falco/falco`.
-
-#### Load latest sysdig kernel module
-
-If you have a binary version of sysdig installed, an older sysdig kernel module may already be loaded. To ensure you are using the latest version, you should unload any existing sysdig kernel module and load the locally built version.
-
-Unload any existing kernel module via:
-
-`$ rmmod sysdig_probe`
-
-To load the locally built version, assuming you are in the `build` dir, use:
-
-`$ insmod driver/sysdig-probe.ko`
-
-#### Running Falco
-
-Assuming you are in the `build` dir, you can run Falco as:
-
-`$ sudo ./userspace/falco/falco -c ../falco.yaml -r ../rules/falco_rules.yaml`
-
-Or instead you can try using some of the simpler rules files in `rules`. Or to get started, try creating a file with this:
-
-Create a file with some [Falco rules](Rule-syntax-and-design). For example:
-```
-- macro: open_write
- condition: >
- (evt.type=open or evt.type=openat) and
- fd.typechar='f' and
- (evt.arg.flags contains O_WRONLY or
- evt.arg.flags contains O_RDWR or
- evt.arg.flags contains O_CREAT or
- evt.arg.flags contains O_TRUNC)
-
-- macro: bin_dir
- condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin)
-
-- rule: write_binary_dir
- desc: an attempt to write to any file below a set of binary directories
- condition: evt.dir = > and open_write and bin_dir
- output: "File below a known binary directory opened for writing (user=%user.name command=%proc.cmdline file=%fd.name)"
- priority: WARNING
-
-```
-
-And you will see an output event for any interactive process that touches a file with "sysdig" or ".txt" in its name!
+Note that much of falco's code comes from
+[sysdig](https://github.com/draios/sysdig), so overall stability is very good
+for an early release. On the other hand, performance is still a work in
+progress. On busy hosts and/or with large rule sets, you may see the current
+version of falco using high CPU. Expect big improvements in coming releases.
+
+Documentation
+---
+[Visit the wiki](https://github.com/draios/falco/wiki) for full documentation on falco.
Join the Community
---
@@ -309,22 +50,26 @@ Contributor License Agreements
We’ve modeled our CLA off of industry standards, such as [the CLA used by Kubernetes](https://github.com/kubernetes/kubernetes/blob/master/CONTRIBUTING.md). Note that this agreement is not a transfer of copyright ownership, this simply is a license agreement for contributions, intended to clarify the intellectual property license granted with contributions from any person or entity. It is for your protection as a contributor as well as the protection of falco; it does not change your rights to use your own contributions for any other purpose.
For some background on why contributor license agreements are necessary, you can read FAQs from many other open source projects:
- - [Django’s excellent CLA FAQ](https://www.djangoproject.com/foundation/cla/faq/)
- - [A well-written chapter from Karl Fogel’s Producing Open Source Software on CLAs](http://producingoss.com/en/copyright-assignment.html)
- - [The Wikipedia article on CLAs](http://en.wikipedia.org/wiki/Contributor_license_agreement)
- As always, we are grateful for your past and present contributions to falco.
+- [Django’s excellent CLA FAQ](https://www.djangoproject.com/foundation/cla/faq/)
+- [A well-written chapter from Karl Fogel’s Producing Open Source Software on CLAs](http://producingoss.com/en/copyright-assignment.html)
+- [The Wikipedia article on CLAs](http://en.wikipedia.org/wiki/Contributor_license_agreement)
- ###What do I need to do in order to contribute code?
- **Individual contributions**: Individuals who wish to make contributions must review the [Individual Contributor License Agreement](./cla/falco_contributor_agreement.txt) and indicate agreement by adding the following line to every GIT commit message:
+As always, we are grateful for your past and present contributions to falco.
- falco-CLA-1.0-signed-off-by: Joe Smith
+### What do I need to do in order to contribute code?
- Use your real name; pseudonyms or anonymous contributions are not allowed.
+**Individual contributions**: Individuals who wish to make contributions must review the [Individual Contributor License Agreement](./cla/falco_contributor_agreement.txt) and indicate agreement by adding the following line to every GIT commit message:
- **Corporate contributions**: Employees of corporations, members of LLCs or LLPs, or others acting on behalf of a contributing entity, must review the [Corporate Contributor License Agreement](./cla/falco_corp_contributor_agreement.txt), must be an authorized representative of the contributing entity, and indicate agreement to it on behalf of the contributing entity by adding the following lines to every GIT commit message:
+    falco-CLA-1.0-signed-off-by: Joe Smith
- falco-CLA-1.0-contributing-entity: Full Legal Name of Entity
- falco-CLA-1.0-signed-off-by: Joe Smith
+Use your real name; pseudonyms or anonymous contributions are not allowed.
- Use a real name of a natural person who is an authorized representative of the contributing entity; pseudonyms or anonymous contributions are not allowed.
+**Corporate contributions**: Employees of corporations, members of LLCs or LLPs, or others acting on behalf of a contributing entity, must review the [Corporate Contributor License Agreement](./cla/falco_corp_contributor_agreement.txt), must be an authorized representative of the contributing entity, and indicate agreement to it on behalf of the contributing entity by adding the following lines to every GIT commit message:
+
+```
+ falco-CLA-1.0-contributing-entity: Full Legal Name of Entity
+ falco-CLA-1.0-signed-off-by: Joe Smith
+```
+
+Use a real name of a natural person who is an authorized representative of the contributing entity; pseudonyms or anonymous contributions are not allowed.
diff --git a/rules/falco_rules.yaml b/rules/falco_rules.yaml
index b0cdb0ab..9b5c6097 100644
--- a/rules/falco_rules.yaml
+++ b/rules/falco_rules.yaml
@@ -38,12 +38,16 @@
- macro: modify
condition: rename or remove
-- macro: spawn_process
- condition: syscall.type = execve and evt.dir=<
+- macro: spawned_process
+ condition: evt.type = execve and evt.dir=<
# File categories
- macro: terminal_file_fd
condition: fd.name=/dev/ptmx or fd.directory=/dev/pts
+
+# This really should be testing that the directory begins with these
+# prefixes, but sysdig's filter doesn't have a "starts with" operator
+# (yet).
- macro: bin_dir
condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin)
@@ -52,6 +56,8 @@
- macro: bin_dir_rename
condition: evt.arg[1] contains /bin/ or evt.arg[1] contains /sbin/ or evt.arg[1] contains /usr/bin/ or evt.arg[1] contains /usr/sbin/
+# This really should be testing that the directory begins with /etc,
+# but sysdig's filter doesn't have a "starts with" operator (yet).
- macro: etc_dir
condition: fd.directory contains /etc
@@ -74,25 +80,31 @@
tac, link, chroot, vdir, chown, touch, ls, dd, uname, true, pwd, date,
chgrp, chmod, mktemp, cat, mknod, sync, ln, false, rm, mv, cp, echo,
readlink, sleep, stty, mkdir, df, dir, rmdir, touch)
-- macro: adduser_binaries
- condition: proc.name in (adduser, deluser, addgroup, delgroup)
-- macro: login_binaries
- condition: proc.name in (bin, login, su, sbin, nologin, bin, faillog, lastlog, newgrp, sg)
-# dpkg -L passwd | grep bin | xargs -L 1 basename | tr "\\n" ","
+# dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" ","
+- macro: login_binaries
+ condition: proc.name in (login, systemd-logind, su, nologin, faillog, lastlog, newgrp, sg)
+
+# dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" ","
- macro: passwd_binaries
condition: >
- proc.name in (sbin, shadowconfig, sbin, grpck, pwunconv, grpconv, pwck,
+ proc.name in (shadowconfig, grpck, pwunconv, grpconv, pwck,
groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod,
- groupadd, groupdel, grpunconv, chgpasswd, userdel, bin, chage, chsh,
+ groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh,
gpasswd, chfn, expiry, passwd, vigr, cpgr)
-# repoquery -l shadow-utils | grep bin | xargs -L 1 basename | tr "\\n" ","
+# repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" ","
- macro: shadowutils_binaries
condition: >
- proc.name in (chage, gpasswd, lastlog, newgrp, sg, adduser, chpasswd,
- groupadd, groupdel, groupmems, groupmod, grpck, grpconv, grpunconv,
- newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw)
+ proc.name in (chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd,
+ groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv,
+ newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd)
+
+- macro: sysdigcloud_binaries
+ condition: proc.name in (setup-backend, dragent)
+
+- macro: sysdigcloud_binaries_parent
+ condition: proc.pname in (setup-backend, dragent)
- macro: docker_binaries
condition: proc.name in (docker, exe)
@@ -103,25 +115,33 @@
- macro: db_server_binaries
condition: proc.name in (mysqld)
-- macro: server_binaries
- condition: http_server_binaries or db_server_binaries or docker_binaries or proc.name in (sshd)
+- macro: db_server_binaries_parent
+ condition: proc.pname in (mysqld)
+- macro: server_binaries
+ condition: (http_server_binaries or db_server_binaries or docker_binaries or proc.name in (sshd))
+
+# The truncated dpkg-preconfigu is intentional: process names are
+# truncated at the sysdig level, so dpkg-preconfigure shows up as dpkg-preconfigu.
- macro: package_mgmt_binaries
- condition: proc.name in (dpkg, rpm)
+ condition: proc.name in (dpkg, dpkg-preconfigu, rpm, rpmkey, yum)
# A canonical set of processes that run other programs with different
# privileges or as a different user.
- macro: userexec_binaries
condition: proc.name in (sudo, su)
+- macro: user_mgmt_binaries
+ condition: (login_binaries or passwd_binaries or shadowutils_binaries)
+
- macro: system_binaries
- condition: coreutils_binaries or adduser_binaries or login_binaries or passwd_binaries or shadowutils_binaries
+ condition: (coreutils_binaries or user_mgmt_binaries)
- macro: mail_binaries
- condition: proc.name in (sendmail, postfix, procmail)
+ condition: proc.name in (sendmail, sendmail-msp, postfix, procmail)
- macro: sensitive_files
- condition: fd.name contains /etc/shadow or fd.name = /etc/sudoers or fd.directory = /etc/sudoers.d or fd.directory = /etc/pam.d or fd.name = /etc/pam.conf
+ condition: (fd.name contains /etc/shadow or fd.name = /etc/sudoers or fd.directory in (/etc/sudoers.d, /etc/pam.d) or fd.name = /etc/pam.conf)
# Indicates that the process is new. Currently detected using time
# since process was started, using a threshold of 5 seconds.
@@ -130,7 +150,7 @@
# Network
- macro: inbound
- condition: (syscall.type=listen and evt.dir=>) or (syscall.type=accept and evt.dir=<)
+ condition: ((syscall.type=listen and evt.dir=>) or (syscall.type=accept and evt.dir=<))
# Currently sendto is an ignored syscall, otherwise this could also check for (syscall.type=sendto and evt.dir=>)
- macro: outbound
@@ -141,7 +161,7 @@
# Ssh
- macro: ssh_error_message
- condition: evt.arg.data contains "Invalid user" or evt.arg.data contains "preauth"
+ condition: (evt.arg.data contains "Invalid user" or evt.arg.data contains "preauth" or evt.arg.data contains "Failed password")
# System
- macro: modules
@@ -149,9 +169,9 @@
- macro: container
condition: container.id != host
- macro: interactive
- condition: (proc.aname=sshd and proc.name != sshd) or proc.name=systemd-logind
+ condition: ((proc.aname=sshd and proc.name != sshd) or proc.name=systemd-logind)
- macro: syslog
- condition: fd.name = /dev/log
+ condition: fd.name in (/dev/log, /run/systemd/journal/syslog)
- macro: cron
condition: proc.name in (cron, crond)
- macro: parent_cron
@@ -169,32 +189,46 @@
- rule: write_binary_dir
desc: an attempt to write to any file below a set of binary directories
- condition: evt.dir = > and open_write and bin_dir
+ condition: evt.dir = < and open_write and not package_mgmt_binaries and bin_dir
output: "File below a known binary directory opened for writing (user=%user.name command=%proc.cmdline file=%fd.name)"
priority: WARNING
- rule: write_etc
- desc: an attempt to write to any file below /etc
- condition: evt.dir = > and open_write and etc_dir
+ desc: an attempt to write to any file below /etc, not in a pipe installer session
+ condition: evt.dir = < and open_write and not shadowutils_binaries and not sysdigcloud_binaries_parent and not package_mgmt_binaries and etc_dir and not proc.sname=fbash
output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline file=%fd.name)"
priority: WARNING
+# Within a fbash session, the severity is lowered to INFO
+- rule: write_etc_installer
+ desc: an attempt to write to any file below /etc, in a pipe installer session
+ condition: evt.dir = < and open_write and not shadowutils_binaries and not sysdigcloud_binaries_parent and not package_mgmt_binaries and etc_dir and proc.sname=fbash
+ output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline file=%fd.name) within pipe installer session"
+ priority: INFO
+
- rule: read_sensitive_file_untrusted
desc: an attempt to read any sensitive file (e.g. files containing user/password/authentication information). Exceptions are made for known trusted programs.
- condition: open_read and not server_binaries and not userexec_binaries and not proc.name in (iptables, ps, systemd-logind, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, bash) and not cron and sensitive_files
+ condition: open_read and not user_mgmt_binaries and not userexec_binaries and not proc.name in (iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, bash, sshd) and not cron and sensitive_files
output: "Sensitive file opened for reading by non-trusted program (user=%user.name command=%proc.cmdline file=%fd.name)"
priority: WARNING
- rule: read_sensitive_file_trusted_after_startup
desc: an attempt to read any sensitive file (e.g. files containing user/password/authentication information) by a trusted program after startup. Trusted programs might read these files at startup to load initial state, but not afterwards.
- condition: open_read and server_binaries and not proc_is_new and sensitive_files
+ condition: open_read and server_binaries and not proc_is_new and sensitive_files and proc.name!="sshd"
output: "Sensitive file opened for reading by trusted program after startup (user=%user.name command=%proc.cmdline file=%fd.name)"
priority: WARNING
-- rule: db_program_spawn_process
- desc: a database-server related program spawning a new process after startup. This shouldn\'t occur and is a follow on from some SQL injection attacks.
- condition: db_server_binaries and not proc_is_new and spawn_process
- output: "Database-related program spawned new process after startup (user=%user.name command=%proc.cmdline)"
+# Only let rpm-related programs write to the rpm database
+- rule: write_rpm_database
+ desc: an attempt to write to the rpm database by any non-rpm related program
+ condition: open_write and not proc.name in (rpm,rpmkey,yum) and fd.directory=/var/lib/rpm
+ output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name)"
+ priority: WARNING
+
+- rule: db_program_spawned_process
+ desc: a database-server related program spawned a new process other than itself. This shouldn\'t occur and is a follow on from some SQL injection attacks.
+ condition: db_server_binaries_parent and not db_server_binaries and spawned_process
+ output: "Database-related program spawned process other than itself (user=%user.name program=%proc.cmdline parent=%proc.pname)"
priority: WARNING
- rule: modify_binary_dirs
@@ -218,11 +252,12 @@
# output: "Loaded .so from unexpected dir (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)"
# priority: WARNING
-- rule: syscall_returns_eaccess
- desc: any system call that returns EACCESS. This is not always a strong indication of a problem, hence the INFO priority.
- condition: evt.res = EACCESS
- output: "System call returned EACCESS (user=%user.name command=%proc.cmdline syscall=%evt.type args=%evt.args)"
- priority: INFO
+# Temporarily disabling this rule as it's tripping over https://github.com/draios/sysdig/issues/598
+# - rule: syscall_returns_eaccess
+# desc: any system call that returns EACCESS. This is not always a strong indication of a problem, hence the INFO priority.
+# condition: evt.res = EACCESS
+# output: "System call returned EACCESS (user=%user.name command=%proc.cmdline syscall=%evt.type args=%evt.args)"
+# priority: INFO
- rule: change_thread_namespace
desc: an attempt to change a program/thread\'s namespace (commonly done as a part of creating a container) by calling setns.
@@ -232,7 +267,7 @@
- rule: run_shell_untrusted
desc: an attempt to spawn a shell by a non-shell program. Exceptions are made for trusted binaries.
- condition: proc.name = bash and evt.dir=< and evt.type=execve and proc.pname exists and not parent_cron and not proc.pname in (bash, sshd, sudo, docker, su, tmux, screen, emacs, systemd, flock, fs-bash, nginx, monit, supervisord)
+ condition: not container and proc.name = bash and spawned_process and proc.pname exists and not parent_cron and not proc.pname in (bash, sshd, sudo, docker, su, tmux, screen, emacs, systemd, login, flock, fbash, nginx, monit, supervisord, dragent)
output: "Shell spawned by untrusted binary (user=%user.name shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline)"
priority: WARNING
@@ -243,13 +278,13 @@
- rule: system_user_interactive
desc: an attempt to run interactive commands by a system (i.e. non-login) user
- condition: spawn_process and system_users and interactive
+ condition: spawned_process and system_users and interactive
output: "System user ran an interactive command (user=%user.name command=%proc.cmdline)"
priority: WARNING
- rule: run_shell_in_container
- desc: an attempt to spawn a shell by a non-shell program in a container. Container entrypoints are excluded.
- condition: container and proc.name = bash and evt.dir=< and evt.type=execve and proc.pname exists and not proc.pname in (bash, docker)
+ desc: a shell was spawned by a non-shell program in a container. Container entrypoints are excluded.
+ condition: container and proc.name = bash and spawned_process and proc.pname exists and not proc.pname in (bash, docker)
output: "Shell spawned in a container other than entrypoint (user=%user.name container_id=%container.id container_name=%container.name shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline)"
priority: WARNING
@@ -260,22 +295,26 @@
output: "Known system binary sent/received network traffic (user=%user.name command=%proc.cmdline connection=%fd.name)"
priority: WARNING
-- rule: ssh_error_syslog
- desc: any ssh errors (failed logins, disconnects, ...) sent to syslog
- condition: syslog and ssh_error_message and evt.dir = <
- output: "sshd sent error message to syslog (error=%evt.buffer)"
- priority: WARNING
+# With the current restriction on system calls handled by falco
+# (e.g. excluding read/write/sendto/recvfrom/etc.), this rule won't
+# trigger.
+# - rule: ssh_error_syslog
+# desc: any ssh errors (failed logins, disconnects, ...) sent to syslog
+# condition: syslog and ssh_error_message and evt.dir = <
+# output: "sshd sent error message to syslog (error=%evt.buffer)"
+# priority: WARNING
+# sshd, sendmail-msp, and sendmail attempt to setuid to root even when running as non-root. Excluding them here to avoid meaningless FPs.
- rule: non_sudo_setuid
desc: an attempt to change users by calling setuid. sudo/su are excluded. user "root" is also excluded, as setuid calls typically involve dropping privileges.
- condition: evt.type=setuid and evt.dir=> and not user.name=root and not userexec_binaries
+ condition: evt.type=setuid and evt.dir=> and not user.name=root and not userexec_binaries and not proc.name in (sshd, sendmail-msp, sendmail)
output: "Unexpected setuid call by non-sudo, non-root program (user=%user.name command=%proc.cmdline uid=%evt.arg.uid)"
priority: WARNING
- rule: user_mgmt_binaries
desc: activity by any programs that can manage users, passwords, or permissions. sudo and su are excluded. Activity in containers is also excluded--some containers create custom users on top of a base linux distribution at startup.
- condition: spawn_process and not proc.name in (su, sudo) and not container and (adduser_binaries or login_binaries or passwd_binaries or shadowutils_binaries)
- output: "User management binary command run outside of container (user=%user.name command=%proc.cmdline)"
+ condition: spawned_process and not proc.name in (su, sudo) and not container and user_mgmt_binaries and not parent_cron and not proc.pname in (systemd, run-parts)
+ output: "User management binary command run outside of container (user=%user.name command=%proc.cmdline parent=%proc.pname)"
priority: WARNING
# (we may need to add additional checks against false positives, see: https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153)
@@ -285,19 +324,47 @@
output: "File created below /dev by untrusted program (user=%user.name command=%proc.cmdline file=%fd.name)"
priority: WARNING
-# fs-bash is a restricted version of bash suitable for use in curl | sh installers.
+# fbash is a small shell script that runs bash, and is suitable for use in curl | fbash installers.
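+# (An illustrative example: curl -s https://some-vendor.example.com/install.sh | fbash)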
- rule: installer_bash_starts_network_server
- desc: an attempt by any program that is a child of fs-bash to start listening for network connections
- condition: evt.type=listen and proc.aname=fs-bash
- output: "Unexpected listen call by a child process of fs-bash (command=%proc.cmdline)"
+ desc: an attempt by a program in a pipe installer session to start listening for network connections
+ condition: evt.type=listen and proc.sname=fbash
+ output: "Unexpected listen call by a process in a fbash session (command=%proc.cmdline)"
priority: WARNING
- rule: installer_bash_starts_session
- desc: an attempt by any program that is a child of fs-bash to start a new session (process group)
- condition: evt.type=setsid and proc.aname=fs-bash
- output: "Unexpected setsid call by a child process of fs-bash (command=%proc.cmdline)"
+ desc: an attempt by a program in a pipe installer session to start a new session
+ condition: evt.type=setsid and proc.sname=fbash
+ output: "Unexpected setsid call by a process in fbash session (command=%proc.cmdline)"
priority: WARNING
+- rule: installer_bash_non_https_connection
+ desc: an attempt by a program in a pipe installer session to make an outgoing connection on a non-http(s) port
+ condition: outbound and not fd.sport in (80, 443, 53) and proc.sname=fbash
+ output: "Outbound connection on non-http(s) port by a process in a fbash session (command=%proc.cmdline connection=%fd.name)"
+ priority: WARNING
+
+# It'd be nice if we could warn when processes in a fbash session try
+# to download from any nonstandard location? This is probably blocked
+# on https://github.com/draios/falco/issues/88 though.
+
+# Notice when processes try to run chkconfig/systemctl, etc., to install a service.
+# Note: this is not a WARNING, as you'd expect some service management
+# as a part of doing the installation.
+- rule: installer_bash_manages_service
+ desc: an attempt by a program in a pipe installer session to manage a system service (systemd/chkconfig)
+ condition: evt.type=execve and proc.name in (chkconfig, systemctl) and proc.sname=fbash
+ output: "Service management program run by process in a fbash session (command=%proc.cmdline)"
+ priority: INFO
+
+# Notice when processes try to run any package management binary within a fbash session.
+# Note: this is not a WARNING, as you'd expect some package management
+# as a part of doing the installation.
+- rule: installer_bash_runs_pkgmgmt
+ desc: an attempt by a program in a pipe installer session to run a package management binary
+ condition: evt.type=execve and package_mgmt_binaries and proc.sname=fbash
+ output: "Package management program run by process in a fbash session (command=%proc.cmdline)"
+ priority: INFO
+
###########################
# Application-Related Rules
###########################
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index f9084aee..b8807b81 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -1,5 +1,5 @@
-file(COPY ${PROJECT_SOURCE_DIR}/scripts/debian/falco
- DESTINATION ${PROJECT_BINARY_DIR}/scripts/debian)
+file(COPY "${PROJECT_SOURCE_DIR}/scripts/debian/falco"
+ DESTINATION "${PROJECT_BINARY_DIR}/scripts/debian")
-file(COPY ${PROJECT_SOURCE_DIR}/scripts/rpm/falco
- DESTINATION ${PROJECT_BINARY_DIR}/scripts/rpm)
+file(COPY "${PROJECT_SOURCE_DIR}/scripts/rpm/falco"
+ DESTINATION "${PROJECT_BINARY_DIR}/scripts/rpm")
diff --git a/test/falco_test.py b/test/falco_test.py
new file mode 100644
index 00000000..adb35767
--- /dev/null
+++ b/test/falco_test.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+import os
+import re
+import json
+
+from avocado import main
+from avocado import Test
+from avocado.utils import process
+from avocado.utils import linux_modules
+
+class FalcoTest(Test):
+
+ def setUp(self):
+ """
+ Load the sysdig kernel module if not already loaded.
+ """
+ self.falcodir = self.params.get('falcodir', '/', default=os.path.join(self.basedir, '../build'))
+
+ self.should_detect = self.params.get('detect', '*')
+ self.trace_file = self.params.get('trace_file', '*')
+ self.json_output = self.params.get('json_output', '*')
+
+ if self.should_detect:
+ self.detect_level = self.params.get('detect_level', '*')
+
+ # Doing this in 2 steps instead of simply using
+ # module_is_loaded to avoid logging lsmod output to the log.
+ lsmod_output = process.system_output("lsmod", verbose=False)
+
+ if linux_modules.parse_lsmod_for_module(lsmod_output, 'sysdig_probe') == {}:
+ self.log.debug("Loading sysdig kernel module")
+ process.run('sudo insmod {}/driver/sysdig-probe.ko'.format(self.falcodir))
+
+ self.str_variant = self.trace_file
+
+ def test(self):
+ self.log.info("Trace file %s", self.trace_file)
+
+ # Run the provided trace file though falco
+ cmd = '{}/userspace/falco/falco -r {}/../rules/falco_rules.yaml -c {}/../falco.yaml -e {} -o json_output={}'.format(
+ self.falcodir, self.falcodir, self.falcodir, self.trace_file, self.json_output)
+
+ self.falco_proc = process.SubProcess(cmd)
+
+ res = self.falco_proc.run(timeout=60, sig=9)
+
+ if res.exit_status != 0:
+ self.error("Falco command \"{}\" exited with non-zero return value {}".format(
+ cmd, res.exit_status))
+
+ # Get the number of events detected.
+ match = re.search('Events detected: (\d+)', res.stdout)
+ if match is None:
+ self.fail("Could not find a line 'Events detected: ' in falco output")
+
+ events_detected = int(match.group(1))
+
+ if not self.should_detect and events_detected > 0:
+ self.fail("Detected {} events when should have detected none".format(events_detected))
+
+ if self.should_detect:
+ if events_detected == 0:
+ self.fail("Detected {} events when should have detected > 0".format(events_detected))
+
+ level_line = '{}: (\d+)'.format(self.detect_level)
+ match = re.search(level_line, res.stdout)
+
+ if match is None:
+ self.fail("Could not find a line '{}: ' in falco output".format(self.detect_level))
+
+ events_detected = int(match.group(1))
+
+ if not events_detected > 0:
+ self.fail("Detected {} events at level {} when should have detected > 0".format(events_detected, self.detect_level))
+
+ if self.json_output:
+ # Just verify that any lines starting with '{' are valid json objects.
+ # Doesn't do any deep inspection of the contents.
+ for line in res.stdout.splitlines():
+ if line.startswith('{'):
+ obj = json.loads(line)
+ for attr in ['time', 'rule', 'priority', 'output']:
+ if attr not in obj:
+ self.fail("Falco JSON object {} does not contain property \"{}\"".format(line, attr))
+ pass
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/run_regression_tests.sh b/test/run_regression_tests.sh
new file mode 100755
index 00000000..b46646a1
--- /dev/null
+++ b/test/run_regression_tests.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+SCRIPT=$(readlink -f $0)
+SCRIPTDIR=$(dirname $SCRIPT)
+MULT_FILE=$SCRIPTDIR/falco_tests.yaml
+
+function download_trace_files() {
+ for TRACE in traces-positive traces-negative traces-info ; do
+ rm -rf $SCRIPTDIR/$TRACE
+ curl -so $SCRIPTDIR/$TRACE.zip https://s3.amazonaws.com/download.draios.com/falco-tests/$TRACE.zip &&
+ unzip -d $SCRIPTDIR $SCRIPTDIR/$TRACE.zip &&
+ rm -rf $SCRIPTDIR/$TRACE.zip
+ done
+}
+
+function prepare_multiplex_fileset() {
+
+ dir=$1
+ detect=$2
+ detect_level=$3
+ json_output=$4
+
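+ # Appends one multiplex entry per trace file in the given directory.
+ # A generated entry looks roughly like this (name and path are illustrative):
+ #
+ #   some-trace-detect-True-json-False:
+ #     detect: True
+ #     detect_level: Warning
+ #     trace_file: /path/to/test/traces-positive/some-trace.scap
+ #     json_output: False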
+ for trace in $SCRIPTDIR/$dir/*.scap ; do
+ [ -e "$trace" ] || continue
+ NAME=`basename $trace .scap`
+ cat << EOF >> $MULT_FILE
+ $NAME-detect-$detect-json-$json_output:
+ detect: $detect
+ detect_level: $detect_level
+ trace_file: $trace
+ json_output: $json_output
+EOF
+ done
+}
+
+function prepare_multiplex_file() {
+ echo "trace_files: !mux" > $MULT_FILE
+
+ prepare_multiplex_fileset traces-positive True Warning False
+ prepare_multiplex_fileset traces-negative False Warning True
+ prepare_multiplex_fileset traces-info True Informational False
+
+ prepare_multiplex_fileset traces-positive True Warning True
+ prepare_multiplex_fileset traces-info True Informational True
+
+ echo "Contents of $MULT_FILE:"
+ cat $MULT_FILE
+}
+
+function run_tests() {
+ CMD="avocado run --multiplex $MULT_FILE --job-results-dir $SCRIPTDIR/job-results -- $SCRIPTDIR/falco_test.py"
+ echo "Running: $CMD"
+ $CMD
+ TEST_RC=$?
+}
+
+
+function print_test_failure_details() {
+ echo "Showing full job logs for any tests that failed:"
+ jq '.tests[] | select(.status != "PASS") | .logfile' $SCRIPTDIR/job-results/latest/results.json | xargs cat
+}
+
+download_trace_files
+prepare_multiplex_file
+run_tests
+if [ $TEST_RC -ne 0 ]; then
+ print_test_failure_details
+fi
+
+exit $TEST_RC
diff --git a/test/utils/run_sysdig.sh b/test/utils/run_sysdig.sh
new file mode 100644
index 00000000..9a1a611f
--- /dev/null
+++ b/test/utils/run_sysdig.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Run sysdig excluding all events that aren't used by falco and also
+# excluding other high-volume events that aren't essential. This
+# results in smaller trace files.
+
+# The remaining arguments are taken from the command line.
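+# For example (illustrative file name), to record a trimmed trace:
+#
+#   ./run_sysdig.sh -w my_trace.scap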
+
+exec sudo sysdig not evt.type in '(mprotect,brk,mq_timedreceive,mq_receive,mq_timedsend,mq_send,getrusage,procinfo,rt_sigprocmask,rt_sigaction,ioctl,clock_getres,clock_gettime,clock_nanosleep,clock_settime,close,epoll_create,epoll_create1,epoll_ctl,epoll_pwait,epoll_wait,eventfd,fcntl,fcntl64,fstat,fstat64,fstatat64,fstatfs,fstatfs64,futex,getitimer,gettimeofday,ioprio_get,ioprio_set,llseek,lseek,lstat,lstat64,mmap,mmap2,munmap,nanosleep,poll,ppoll,pread,pread64,preadv,procinfo,pselect6,pwrite,pwrite64,pwritev,read,readv,recv,recvfrom,recvmmsg,recvmsg,sched_yield,select,send,sendfile,sendfile64,sendmmsg,sendmsg,sendto,setitimer,settimeofday,shutdown,splice,stat,stat64,statfs,statfs64,switch,tee,timer_create,timer_delete,timerfd_create,timerfd_gettime,timerfd_settime,timer_getoverrun,timer_gettime,timer_settime,wait4,write,writev) and user.name!=ec2-user' "$@"
diff --git a/userspace/falco/CMakeLists.txt b/userspace/falco/CMakeLists.txt
index 6dc1fe5e..fb241159 100644
--- a/userspace/falco/CMakeLists.txt
+++ b/userspace/falco/CMakeLists.txt
@@ -1,12 +1,12 @@
-include_directories(${PROJECT_SOURCE_DIR}/../sysdig/userspace/libsinsp/third-party/jsoncpp)
+include_directories("${PROJECT_SOURCE_DIR}/../sysdig/userspace/libsinsp/third-party/jsoncpp")
include_directories("${LUAJIT_INCLUDE}")
-include_directories(${PROJECT_SOURCE_DIR}/../sysdig/userspace/libscap)
-include_directories(${PROJECT_SOURCE_DIR}/../sysdig/userspace/libsinsp)
+include_directories("${PROJECT_SOURCE_DIR}/../sysdig/userspace/libscap")
+include_directories("${PROJECT_SOURCE_DIR}/../sysdig/userspace/libsinsp")
include_directories("${PROJECT_BINARY_DIR}/userspace/falco")
include_directories("${CURL_INCLUDE_DIR}")
include_directories("${YAMLCPP_INCLUDE_DIR}")
-include_directories(${DRAIOS_DEPENDENCIES_DIR}/yaml-${DRAIOS_YAML_VERSION}/target/include)
+include_directories("${DRAIOS_DEPENDENCIES_DIR}/yaml-${DRAIOS_YAML_VERSION}/target/include")
add_executable(falco configuration.cpp formats.cpp fields.cpp rules.cpp logger.cpp falco.cpp)
diff --git a/userspace/falco/falco.cpp b/userspace/falco/falco.cpp
index 6b92058f..d269a473 100644
--- a/userspace/falco/falco.cpp
+++ b/userspace/falco/falco.cpp
@@ -28,6 +28,14 @@ extern "C" {
#include "utils.h"
#include
+bool g_terminate = false;
+//
+// Helper functions
+//
+static void signal_callback(int signal)
+{
+ g_terminate = true;
+}
//
// Program help
@@ -67,6 +75,7 @@ static void display_fatal_err(const string &msg, bool daemon)
string lua_on_event = "on_event";
string lua_add_output = "add_output";
+string lua_print_stats = "print_stats";
// Splitting into key=value or key.subkey=value will be handled by configuration class.
std::list<string> cmdline_options;
@@ -90,7 +99,11 @@ void do_inspect(sinsp* inspector,
res = inspector->next(&ev);
- if(res == SCAP_TIMEOUT)
+ if (g_terminate)
+ {
+ break;
+ }
+ else if(res == SCAP_TIMEOUT)
{
continue;
}
@@ -199,6 +212,26 @@ void add_output(lua_State *ls, output_config oc)
}
+// Print statistics on the rules that triggered.
+void print_stats(lua_State *ls)
+{
+ lua_getglobal(ls, lua_print_stats.c_str());
+
+ if(lua_isfunction(ls, -1))
+ {
+ if(lua_pcall(ls, 0, 0, 0) != 0)
+ {
+ const char* lerr = lua_tostring(ls, -1);
+ string err = "Error invoking function print_stats: " + string(lerr);
+ throw sinsp_exception(err);
+ }
+ }
+ else
+ {
+ throw sinsp_exception("No function " + lua_print_stats + " found in lua rule loader module");
+ }
+
+}
//
// ARGUMENT PARSING AND PROGRAM SETUP
@@ -209,7 +242,6 @@ int falco_init(int argc, char **argv)
sinsp* inspector = NULL;
falco_rules* rules = NULL;
int op;
- sinsp_evt::param_fmt event_buffer_format;
int long_index = 0;
string lua_main_filename;
string scap_filename;
@@ -358,7 +390,7 @@ int falco_init(int argc, char **argv)
rules = new falco_rules(inspector, ls, lua_main_filename);
- falco_formats::init(inspector, ls);
+ falco_formats::init(inspector, ls, config.m_json_output);
falco_fields::init(inspector, ls);
falco_logger::init(ls);
@@ -383,21 +415,25 @@ int falco_init(int argc, char **argv)
inspector->set_hostname_and_port_resolution_mode(false);
- if (config.m_json_output)
- {
- event_buffer_format = sinsp_evt::PF_JSON;
- }
- else
- {
- event_buffer_format = sinsp_evt::PF_NORMAL;
- }
- inspector->set_buffer_format(event_buffer_format);
-
for(std::vector<output_config>::iterator it = config.m_outputs.begin(); it != config.m_outputs.end(); ++it)
{
add_output(ls, *it);
}
+ if(signal(SIGINT, signal_callback) == SIG_ERR)
+ {
+ fprintf(stderr, "An error occurred while setting SIGINT signal handler.\n");
+ result = EXIT_FAILURE;
+ goto exit;
+ }
+
+ if(signal(SIGTERM, signal_callback) == SIG_ERR)
+ {
+ fprintf(stderr, "An error occurred while setting SIGTERM signal handler.\n");
+ result = EXIT_FAILURE;
+ goto exit;
+ }
+
if (scap_filename.size())
{
inspector->open(scap_filename);
@@ -406,7 +442,7 @@ int falco_init(int argc, char **argv)
{
try
{
- inspector->open();
+ inspector->open(200);
}
catch(sinsp_exception e)
{
@@ -478,6 +514,8 @@ int falco_init(int argc, char **argv)
ls);
inspector->close();
+
+ print_stats(ls);
}
catch(sinsp_exception& e)
{
@@ -494,10 +532,7 @@ int falco_init(int argc, char **argv)
exit:
- if(inspector)
- {
- delete inspector;
- }
+ delete inspector;
if(ls)
{
diff --git a/userspace/falco/formats.cpp b/userspace/falco/formats.cpp
index 0ff87068..142df600 100644
--- a/userspace/falco/formats.cpp
+++ b/userspace/falco/formats.cpp
@@ -1,8 +1,11 @@
+#include
+
#include "formats.h"
#include "logger.h"
sinsp* falco_formats::s_inspector = NULL;
+bool s_json_output = false;
const static struct luaL_reg ll_falco [] =
{
@@ -11,9 +14,10 @@ const static struct luaL_reg ll_falco [] =
{NULL,NULL}
};
-void falco_formats::init(sinsp* inspector, lua_State *ls)
+void falco_formats::init(sinsp* inspector, lua_State *ls, bool json_output)
{
s_inspector = inspector;
+ s_json_output = json_output;
luaL_openlib(ls, "falco", ll_falco, 0);
}
@@ -42,15 +46,53 @@ int falco_formats::format_event (lua_State *ls)
{
string line;
- if (!lua_islightuserdata(ls, -1) || !lua_islightuserdata(ls, -2)) {
+ if (!lua_islightuserdata(ls, -1) ||
+ !lua_isstring(ls, -2) ||
+ !lua_isstring(ls, -3) ||
+ !lua_islightuserdata(ls, -4)) {
falco_logger::log(LOG_ERR, "Invalid arguments passed to format_event()\n");
throw sinsp_exception("format_event error");
}
sinsp_evt* evt = (sinsp_evt*)lua_topointer(ls, 1);
- sinsp_evt_formatter* formatter = (sinsp_evt_formatter*)lua_topointer(ls, 2);
+ const char *rule = (char *) lua_tostring(ls, 2);
+ const char *level = (char *) lua_tostring(ls, 3);
+ sinsp_evt_formatter* formatter = (sinsp_evt_formatter*)lua_topointer(ls, 4);
formatter->tostring(evt, &line);
+ // For JSON output, the formatter returned just the output
+ // string containing the format text and values. Use this to
+ // build a more detailed object containing the event time,
+ // rule, severity, full output, and fields.
+ if (s_json_output) {
+ Json::Value event;
+ Json::FastWriter writer;
+
+ // Convert the time-as-nanoseconds to a more json-friendly ISO8601.
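+ // e.g. 1465491296123456789 becomes "2016-06-09T16:54:56.123456789Z" (illustrative value).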
+ time_t evttime = evt->get_ts()/1000000000;
+ char time_sec[20]; // sizeof "YYYY-MM-DDTHH:MM:SS"
+ char time_ns[12]; // sizeof ".sssssssssZ"
+ string iso8601evttime;
+
+ strftime(time_sec, sizeof(time_sec), "%FT%T", gmtime(&evttime));
+ snprintf(time_ns, sizeof(time_ns), ".%09luZ", evt->get_ts() % 1000000000);
+ iso8601evttime = time_sec;
+ iso8601evttime += time_ns;
+ event["time"] = iso8601evttime;
+ event["rule"] = rule;
+ event["priority"] = level;
+ event["output"] = line;
+
+ line = writer.write(event);
+
+ // Json::FastWriter may add a trailing newline. If it
+ // does, remove it.
+ if (line[line.length()-1] == '\n')
+ {
+ line.resize(line.length()-1);
+ }
+ }
+
lua_pushstring(ls, line.c_str());
return 1;
}
diff --git a/userspace/falco/formats.h b/userspace/falco/formats.h
index 73f69b0d..6f369bf3 100644
--- a/userspace/falco/formats.h
+++ b/userspace/falco/formats.h
@@ -13,7 +13,7 @@ class sinsp_evt_formatter;
class falco_formats
{
public:
- static void init(sinsp* inspector, lua_State *ls);
+ static void init(sinsp* inspector, lua_State *ls, bool json_output);
// formatter = falco.formatter(format_string)
static int formatter(lua_State *ls);
diff --git a/userspace/falco/lua/output.lua b/userspace/falco/lua/output.lua
index 78573b94..245f5cb4 100644
--- a/userspace/falco/lua/output.lua
+++ b/userspace/falco/lua/output.lua
@@ -2,12 +2,14 @@ local mod = {}
levels = {"Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"}
+mod.levels = levels
+
local outputs = {}
-function mod.stdout(evt, level, format)
+function mod.stdout(evt, rule, level, format)
format = "*%evt.time: "..levels[level+1].." "..format
formatter = falco.formatter(format)
- msg = falco.format_event(evt, formatter)
+ msg = falco.format_event(evt, rule, levels[level+1], formatter)
print (msg)
end
@@ -24,26 +26,26 @@ function mod.file_validate(options)
end
-function mod.file(evt, level, format, options)
+function mod.file(evt, rule, level, format, options)
format = "%evt.time: "..levels[level+1].." "..format
formatter = falco.formatter(format)
- msg = falco.format_event(evt, formatter)
+ msg = falco.format_event(evt, rule, levels[level+1], formatter)
file = io.open(options.filename, "a+")
file:write(msg, "\n")
file:close()
end
-function mod.syslog(evt, level, format)
+function mod.syslog(evt, rule, level, format)
formatter = falco.formatter(format)
- msg = falco.format_event(evt, formatter)
+ msg = falco.format_event(evt, rule, levels[level+1], formatter)
falco.syslog(level, msg)
end
-function mod.event(event, level, format)
+function mod.event(event, rule, level, format)
for index,o in ipairs(outputs) do
- o.output(event, level, format, o.config)
+ o.output(event, rule, level, format, o.config)
end
end
diff --git a/userspace/falco/lua/rule_loader.lua b/userspace/falco/lua/rule_loader.lua
index f5cc8882..8bb55edf 100644
--- a/userspace/falco/lua/rule_loader.lua
+++ b/userspace/falco/lua/rule_loader.lua
@@ -102,14 +102,13 @@ function set_output(output_format, state)
end
local function priority(s)
- valid_levels = {"emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"}
s = string.lower(s)
- for i,v in ipairs(valid_levels) do
- if (string.find(v, "^"..s)) then
+ for i,v in ipairs(output.levels) do
+ if (string.find(string.lower(v), "^"..s)) then
return i - 1 -- (syslog levels start at 0, lua indices start at 1)
end
end
- error("Invalid severity level: "..level)
+ error("Invalid severity level: "..s)
end
-- Note that the rules_by_name and rules_by_idx refer to the same rule
@@ -230,12 +229,51 @@ function describe_rule(name)
end
end
+local rule_output_counts = {total=0, by_level={}, by_name={}}
+
+for idx=0,table.getn(output.levels)-1,1 do
+ rule_output_counts.by_level[idx] = 0
+end
+
function on_event(evt_, rule_id)
if state.rules_by_idx[rule_id] == nil then
error ("rule_loader.on_event(): event with invalid rule_id: ", rule_id)
end
- output.event(evt_, state.rules_by_idx[rule_id].level, state.rules_by_idx[rule_id].output)
+ rule_output_counts.total = rule_output_counts.total + 1
+ local rule = state.rules_by_idx[rule_id]
+
+ if rule_output_counts.by_level[rule.level] == nil then
+ rule_output_counts.by_level[rule.level] = 1
+ else
+ rule_output_counts.by_level[rule.level] = rule_output_counts.by_level[rule.level] + 1
+ end
+
+ if rule_output_counts.by_name[rule.rule] == nil then
+ rule_output_counts.by_name[rule.rule] = 1
+ else
+ rule_output_counts.by_name[rule.rule] = rule_output_counts.by_name[rule.rule] + 1
+ end
+
+ output.event(evt_, rule.rule, rule.level, rule.output)
end
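+
+-- Print a summary of the rules that triggered. The output looks
+-- roughly like this (illustrative counts):
+--
+--   Events detected: 12
+--   Rule counts by severity:
+--     Error: 0
+--     Warning: 10
+--     Informational: 2
+--   Triggered rules by rule name:
+--     write_etc: 7
+--     run_shell_in_container: 5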
+function print_stats()
+ print("Events detected: "..rule_output_counts.total)
+ print("Rule counts by severity:")
+ for idx, level in ipairs(output.levels) do
+ -- To keep the output concise, counts of 0 are only printed for the
+ -- error, warning, and informational levels.
+ if rule_output_counts.by_level[idx-1] > 0 or level == "Error" or level == "Warning" or level == "Informational" then
+ print (" "..level..": "..rule_output_counts.by_level[idx-1])
+ end
+ end
+
+ print("Triggered rules by rule name:")
+ for name, count in pairs(rule_output_counts.by_name) do
+ print (" "..name..": "..count)
+ end
+end
+
+
+