Compare commits

..

1 Commits

Author SHA1 Message Date
Lorenzo Fontana
c0aa3e25e8 new: handle sinsp::next errors in the inspect loop
Signed-off-by: Lorenzo Fontana <lo@linux.com>
2019-07-31 00:37:46 +00:00
43 changed files with 283 additions and 1323 deletions

3
.gitignore vendored
View File

@@ -3,15 +3,12 @@
*.pyc
test/falco_tests.yaml
test/falco_traces.yaml
test/traces-negative
test/traces-positive
test/traces-info
test/job-results
test/build
test/.phoronix-test-suite
test/results*.json.*
test/build
userspace/falco/lua/re.lua
userspace/falco/lua/lpeg.so

View File

@@ -2,36 +2,6 @@
This file documents all notable changes to Falco. The release numbering uses [semantic versioning](http://semver.org).
## v0.17.0
Released 2019-07-31
## Major Changes
* **The set of supported platforms has changed**. Switch to a reorganized builder image that uses Centos 7 as a base. As a result, falco is no longer supported on Centos 6. The other supported platforms should remain the same [[#719](https://github.com/falcosecurity/falco/pull/719)]
## Minor Changes
* When enabling rules within the falco engine, use rule substrings instead of regexes. [[#743](https://github.com/falcosecurity/falco/pull/743)]
* Additional improvements to the handling and display of rules validation errors [[#744](https://github.com/falcosecurity/falco/pull/744)] [[#747](https://github.com/falcosecurity/falco/pull/747)]
## Bug Fixes
* Fix a problem that would prevent container metadata lookups when falco was daemonized [[#731](https://github.com/falcosecurity/falco/pull/731)]
* Allow rule priorities to be expressed as lowercase and a mix of lower/uppercase [[#737](https://github.com/falcosecurity/falco/pull/737)]
## Rule Changes
* Fix a parentheses bug with the `shell_procs` macro [[#728](https://github.com/falcosecurity/falco/pull/728)]
* Allow additional containers to mount sensitive host paths [[#733](https://github.com/falcosecurity/falco/pull/733)] [[#736](https://github.com/falcosecurity/falco/pull/736)]
* Allow additional containers to truncate log files [[#733](https://github.com/falcosecurity/falco/pull/733)]
* Fix false positives with the `Write below root` rule on GKE [[#739](https://github.com/falcosecurity/falco/pull/739)]
## v0.16.0
Released 2019-07-12

View File

@@ -20,7 +20,7 @@ cmake_minimum_required(VERSION 3.3.2)
project(falco)
if(NOT SYSDIG_DIR)
get_filename_component(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig" REALPATH)
get_filename_component(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig" REALPATH)
endif()
# Custom CMake modules
@@ -53,7 +53,7 @@ if(BUILD_WARNINGS_AS_ERRORS)
endif()
set(CMAKE_C_FLAGS "${CMAKE_COMMON_FLAGS}")
set(CMAKE_CXX_FLAGS "--std=c++11 ${CMAKE_COMMON_FLAGS}")
set(CMAKE_CXX_FLAGS "--std=c++0x ${CMAKE_COMMON_FLAGS}")
set(CMAKE_C_FLAGS_DEBUG "${DRAIOS_DEBUG_FLAGS}")
set(CMAKE_CXX_FLAGS_DEBUG "${DRAIOS_DEBUG_FLAGS}")
@@ -129,7 +129,7 @@ else()
set(ZLIB_LIB "${ZLIB_SRC}/libz.a")
ExternalProject_Add(zlib
# START CHANGE for CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843
URL "https://s3.amazonaws.com/download.draios.com/dependencies/zlib-1.2.11.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/zlib-1.2.11.tar.gz"
URL_MD5 "1c9f62f0778697a09d36121ead88e08e"
# END CHANGE for CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843
CONFIGURE_COMMAND "./configure"
@@ -156,7 +156,7 @@ else()
set(JQ_INCLUDE "${JQ_SRC}")
set(JQ_LIB "${JQ_SRC}/.libs/libjq.a")
ExternalProject_Add(jq
URL "https://s3.amazonaws.com/download.draios.com/dependencies/jq-1.5.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/jq-1.5.tar.gz"
URL_MD5 "0933532b086bd8b6a41c1b162b1731f9"
CONFIGURE_COMMAND ./configure --disable-maintainer-mode --enable-all-static --disable-dependency-tracking
BUILD_COMMAND ${CMD_MAKE} LDFLAGS=-all-static
@@ -188,7 +188,7 @@ else()
message(STATUS "Using bundled nlohmann-json in '${NJSON_SRC}'")
set(NJSON_INCLUDE "${NJSON_SRC}/single_include")
ExternalProject_Add(njson
URL "https://s3.amazonaws.com/download.draios.com/dependencies/njson-3.3.0.tar.gz"
URL "http://download.draios.com/dependencies/njson-3.3.0.tar.gz"
URL_MD5 "e26760e848656a5da400662e6c5d999a"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
@@ -212,7 +212,7 @@ else()
set(CURSES_LIBRARIES "${CURSES_BUNDLE_DIR}/lib/libncurses.a")
message(STATUS "Using bundled ncurses in '${CURSES_BUNDLE_DIR}'")
ExternalProject_Add(ncurses
URL "https://s3.amazonaws.com/download.draios.com/dependencies/ncurses-6.0-20150725.tgz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/ncurses-6.0-20150725.tgz"
URL_MD5 "32b8913312e738d707ae68da439ca1f4"
CONFIGURE_COMMAND ./configure --without-cxx --without-cxx-binding --without-ada --without-manpages --without-progs --without-tests --with-terminfo-dirs=/etc/terminfo:/lib/terminfo:/usr/share/terminfo
BUILD_COMMAND ${CMD_MAKE}
@@ -239,7 +239,7 @@ else()
set(B64_INCLUDE "${B64_SRC}/include")
set(B64_LIB "${B64_SRC}/src/libb64.a")
ExternalProject_Add(b64
URL "https://s3.amazonaws.com/download.draios.com/dependencies/libb64-1.2.src.zip"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/libb64-1.2.src.zip"
URL_MD5 "a609809408327117e2c643bed91b76c5"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE}
@@ -292,7 +292,7 @@ else()
ExternalProject_Add(openssl
# START CHANGE for CVE-2017-3735, CVE-2017-3731, CVE-2017-3737, CVE-2017-3738, CVE-2017-3736
URL "https://s3.amazonaws.com/download.draios.com/dependencies/openssl-1.0.2n.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/openssl-1.0.2n.tar.gz"
URL_MD5 "13bdc1b1d1ff39b6fd42a255e74676a4"
# END CHANGE for CVE-2017-3735, CVE-2017-3731, CVE-2017-3737, CVE-2017-3738, CVE-2017-3736
CONFIGURE_COMMAND ./config shared --prefix=${OPENSSL_INSTALL_DIR}
@@ -325,7 +325,7 @@ else()
ExternalProject_Add(curl
DEPENDS openssl
# START CHANGE for CVE-2017-8816, CVE-2017-8817, CVE-2017-8818, CVE-2018-1000007
URL "https://s3.amazonaws.com/download.draios.com/dependencies/curl-7.61.0.tar.bz2"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/curl-7.61.0.tar.bz2"
URL_MD5 "31d0a9f48dc796a7db351898a1e5058a"
# END CHANGE for CVE-2017-8816, CVE-2017-8817, CVE-2017-8818, CVE-2018-1000007
CONFIGURE_COMMAND ./configure ${CURL_SSL_OPTION} --disable-shared --enable-optimize --disable-curldebug --disable-rt --enable-http --disable-ftp --disable-file --disable-ldap --disable-ldaps --disable-rtsp --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb --disable-smtp --disable-gopher --disable-sspi --disable-ntlm-wb --disable-tls-srp --without-winssl --without-darwinssl --without-polarssl --without-cyassl --without-nss --without-axtls --without-ca-path --without-ca-bundle --without-libmetalink --without-librtmp --without-winidn --without-libidn2 --without-libpsl --without-nghttp2 --without-libssh2 --disable-threaded-resolver --without-brotli
@@ -360,7 +360,7 @@ else()
set(LUAJIT_INCLUDE "${LUAJIT_SRC}")
set(LUAJIT_LIB "${LUAJIT_SRC}/libluajit.a")
ExternalProject_Add(luajit
URL "https://s3.amazonaws.com/download.draios.com/dependencies/LuaJIT-2.0.3.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/LuaJIT-2.0.3.tar.gz"
URL_MD5 "f14e9104be513913810cd59c8c658dc0"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE}
@@ -390,7 +390,7 @@ else()
endif()
ExternalProject_Add(lpeg
DEPENDS ${LPEG_DEPENDENCIES}
URL "https://s3.amazonaws.com/download.draios.com/dependencies/lpeg-1.0.0.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/lpeg-1.0.0.tar.gz"
URL_MD5 "0aec64ccd13996202ad0c099e2877ece"
BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} "${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh" "${LPEG_SRC}/build"
BUILD_IN_SOURCE 1
@@ -425,7 +425,7 @@ else()
set(LIBYAML_LIB "${LIBYAML_SRC}/.libs/libyaml.a")
message(STATUS "Using bundled libyaml in '${LIBYAML_SRC}'")
ExternalProject_Add(libyaml
URL "https://s3.amazonaws.com/download.draios.com/dependencies/libyaml-0.1.4.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/libyaml-0.1.4.tar.gz"
URL_MD5 "4a4bced818da0b9ae7fc8ebc690792a7"
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
@@ -461,7 +461,7 @@ else()
ExternalProject_Add(lyaml
DEPENDS ${LYAML_DEPENDENCIES}
URL "https://s3.amazonaws.com/download.draios.com/dependencies/lyaml-release-v6.0.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/lyaml-release-v6.0.tar.gz"
URL_MD5 "dc3494689a0dce7cf44e7a99c72b1f30"
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
@@ -486,7 +486,7 @@ else()
set(TBB_INCLUDE_DIR "${TBB_SRC}/include/")
set(TBB_LIB "${TBB_SRC}/build/lib_release/libtbb.a")
ExternalProject_Add(tbb
URL "https://s3.amazonaws.com/download.draios.com/dependencies/tbb-2018_U5.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/tbb-2018_U5.tar.gz"
URL_MD5 "ff3ae09f8c23892fbc3008c39f78288f"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE} tbb_build_dir=${TBB_SRC}/build tbb_build_prefix=lib extra_inc=big_iron.inc
@@ -518,7 +518,7 @@ else()
endif()
ExternalProject_Add(civetweb
DEPENDS ${CIVETWEB_DEPENDENCIES}
URL "https://s3.amazonaws.com/download.draios.com/dependencies/civetweb-1.11.tar.gz"
URL "http://s3.amazonaws.com/download.draios.com/dependencies/civetweb-1.11.tar.gz"
URL_MD5 "b6d2175650a27924bccb747cbe084cd4"
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E make_directory ${CIVETWEB_SRC}/install/lib
COMMAND ${CMAKE_COMMAND} -E make_directory ${CIVETWEB_SRC}/install/include
@@ -606,7 +606,7 @@ else()
ExternalProject_Add(grpc
DEPENDS protobuf zlib c-ares
URL "https://s3.amazonaws.com/download.draios.com/dependencies/grpc-1.8.1.tar.gz"
URL "http://download.draios.com/dependencies/grpc-1.8.1.tar.gz"
URL_MD5 "2fc42c182a0ed1b48ad77397f76bb3bc"
CONFIGURE_COMMAND ""
# TODO what if using system openssl, protobuf or cares?

9
OWNERS
View File

@@ -1,12 +1,11 @@
approvers:
- fntlnz
- kris-nova
- leodido
- fntlnz
- mstemm
reviewers:
- fntlnz
- kaizhe
- kris-nova
- leodido
- fntlnz
- mfdii
- kaizhe
- mstemm

View File

@@ -5,7 +5,7 @@
#### Latest release
**v0.17.0**
**v0.16.0**
Read the [change log](https://github.com/falcosecurity/falco/blob/dev/CHANGELOG.md)
Dev Branch: [![Build Status](https://travis-ci.com/falcosecurity/falco.svg?branch=dev)](https://travis-ci.com/falcosecurity/falco)<br />
@@ -45,17 +45,10 @@ See [Falco Documentation](https://falco.org/docs/) to quickly get started using
Join the Community
---
* [Join the mailing list](http://bit.ly/2Mu0wXA) for news and a Google calendar invite for our Falco open source meetings. Note: this is the only way to get a calendar invite for our open meetings.
* [Website](https://falco.org) for Falco.
* We are working on a blog for the Falco project. In the meantime you can find [Falco](https://sysdig.com/blog/tag/falco/) posts over on the Sysdig blog.
* Join our [Public Slack](https://slack.sysdig.com) channel for open source Sysdig and Falco announcements and discussions.
Office hours
---
Falco has bi-weekly office hour style meetings where we plan our work on the project. You can get a Google calendar invite by joining the mailing list. It will automatically be sent.
Wednesdays at 8am Pacific on [Zoom](https://sysdig.zoom.us/j/213235330).
License Terms
---
Falco is licensed to you under the [Apache 2.0](./COPYING) open source license.

View File

@@ -16,32 +16,22 @@ RUN cp /etc/skel/.bashrc /root && cp /etc/skel/.profile /root
ADD http://download.draios.com/apt-draios-priority /etc/apt/preferences.d/
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
bash-completion \
bc \
clang-7 \
ca-certificates \
curl \
dkms \
gnupg2 \
gcc \
jq \
libc6-dev \
libelf-dev \
llvm-7 \
netcat \
xz-utils \
libmpc3 \
binutils \
libgomp1 \
libitm1 \
libatomic1 \
liblsan0 \
libtsan0 \
libmpx2 \
libquadmath0 \
libcc1-0 \
&& rm -rf /var/lib/apt/lists/*
&& apt-get install -y --no-install-recommends \
bash-completion \
bc \
clang-7 \
ca-certificates \
curl \
dkms \
gnupg2 \
gcc \
jq \
libc6-dev \
libelf-dev \
llvm-7 \
netcat \
xz-utils \
&& rm -rf /var/lib/apt/lists/*
# gcc 6 is no longer included in debian unstable, but we need it to
# build kernel modules on the default debian-based ami used by

View File

@@ -80,6 +80,7 @@ buffered_outputs: false
# The rate at which log/alert messages are emitted is governed by a
# token bucket. The rate corresponds to one message every 30 seconds
# with a burst of 10 messages.
syscall_event_drops:
actions:
- log
@@ -87,21 +88,6 @@ syscall_event_drops:
rate: .03333
max_burst: 10
# Options to configure the kernel module check.
# Falco uses a kernel module to obtain the info to match against the rules.
# In order to behave correctly, it needs to ensure that the kernel module is always present and well behaved.
# The following options configure:
# the frequency at which it should check for the kernel module
# the maximum number of consecutive failures after which it should stop
# the exponential backoff mechanism it has to use to check for the module and eventually try to re-insert it automatically.
module_check:
frequency: 10
max_consecutive_failures: 3
backoff:
max_attempts: 5
init_delay: 100
max_delay: 3000
# A throttling mechanism implemented as a token bucket limits the
# rate of falco notifications. This throttling is controlled by the following configuration
# options:
@@ -113,12 +99,14 @@ module_check:
# an initial quiet period, and then up to 1 notification per second
# afterward. It would gain the full burst back after 1000 seconds of
# no activity.
outputs:
rate: 1
max_burst: 1000
# Where security notifications should go.
# Multiple outputs can be enabled.
syslog_output:
enabled: true
@@ -129,6 +117,7 @@ syslog_output:
#
# Also, the file will be closed and reopened if falco is signaled with
# SIGUSR1.
file_output:
enabled: false
keep_alive: false
@@ -147,6 +136,7 @@ stdout_output:
# $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem
# $ cat certificate.pem key.pem > falco.pem
# $ sudo cp falco.pem /etc/falco/falco.pem
webserver:
enabled: true
listen_port: 8765

View File

@@ -18,7 +18,7 @@ For running this integration you will need:
This integration uses the [same environment variables that anchore-cli](https://github.com/anchore/anchore-cli#configuring-the-anchore-cli):
* ANCHORE_CLI_USER: The user used to connect to anchore-engine. By default is ```admin```
* ANCHORE_CLI_USER: The user used to conect to anchore-engine. By default is ```admin```
* ANCHORE_CLI_PASS: The password used to connect to anchore-engine.
* ANCHORE_CLI_URL: The url where anchore-engine listens. Make sure does not end with a slash. By default is ```http://localhost:8228/v1```
* ANCHORE_CLI_SSL_VERIFY: Flag for enabling if HTTP client verifies SSL. By default is ```true```
@@ -81,7 +81,7 @@ So you can run directly with Docker:
```
docker run --rm -e ANCHORE_CLI_USER=<user-for-custom-anchore-engine> \
-e ANCHORE_CLI_PASS=<password-for-user-for-custom-anchore-engine> \
-e ANCHORE_CLI_PASS=<passsword-for-user-for-custom-anchore-engine> \
-e ANCHORE_CLI_URL=http://<custom-anchore-engine-host>:8228/v1 \
sysdig/anchore-falco
```

View File

@@ -72,9 +72,6 @@
- macro: create_symlink
condition: evt.type in (symlink, symlinkat) and evt.dir=<
- macro: chmod
condition: (evt.type in (chmod, fchmod, fchmodat) and evt.dir=<)
# File categories
- macro: bin_dir
condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin)
@@ -908,15 +905,12 @@
- macro: access_repositories
condition: (fd.filename in (repository_files) or fd.directory in (repository_directories))
- macro: modify_repositories
condition: (evt.arg.newpath pmatch (repository_directories))
- rule: Update Package Repository
desc: Detect package repositories get updated
condition: >
((open_write and access_repositories) or (modify and modify_repositories)) and not package_mgmt_procs
open_write and access_repositories and not package_mgmt_procs
output: >
Repository files get updated (user=%user.name command=%proc.cmdline file=%fd.name newpath=%evt.arg.newpath container_id=%container.id image=%container.image.repository)
Repository files get updated (user=%user.name command=%proc.cmdline file=%fd.name container_id=%container.id image=%container.image.repository)
priority:
NOTICE
tags: [filesystem, mitre_persistence]
@@ -1418,12 +1412,6 @@
priority: WARNING
tags: [filesystem, mitre_credential_access, mitre_discovery]
- macro: amazon_linux_running_python_yum
condition: >
(proc.name = python and
proc.pcmdline = "python -m amazon_linux_extras system_motd" and
proc.cmdline startswith "python -c import yum;")
# Only let rpm-related programs write to the rpm database
- rule: Write below rpm database
desc: an attempt to write to the rpm database by any non-rpm related program
@@ -1433,7 +1421,6 @@
and not ansible_running_python
and not python_running_chef
and not exe_running_docker_save
and not amazon_linux_running_python_yum
output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline container_id=%container.id image=%container.image.repository)"
priority: ERROR
tags: [filesystem, software_mgmt, mitre_persistence]
@@ -2387,48 +2374,29 @@
WARNING
tags: [process, mitre_persistence]
- rule: Delete or rename shell history
desc: Detect shell history deletion
- rule: Delete Bash History
desc: Detect bash history deletion
condition: >
(modify and (
evt.arg.name contains "bash_history" or
evt.arg.name contains "zsh_history" or
evt.arg.name contains "fish_read_history" or
evt.arg.name endswith "fish_history" or
evt.arg.oldpath contains "bash_history" or
evt.arg.oldpath contains "zsh_history" or
evt.arg.oldpath contains "fish_read_history" or
evt.arg.oldpath endswith "fish_history" or
evt.arg.path contains "bash_history" or
evt.arg.path contains "zsh_history" or
evt.arg.path contains "fish_read_history" or
evt.arg.path endswith "fish_history")) or
(open_write and (
fd.name contains "bash_history" or
fd.name contains "zsh_history" or
fd.name contains "fish_read_history" or
fd.name endswith "fish_history") and evt.arg.flags contains "O_TRUNC")
((spawned_process and proc.name in (shred, rm, mv) and proc.args contains "bash_history") or
(open_write and fd.name contains "bash_history" and evt.arg.flags contains "O_TRUNC"))
output: >
Shell history had been deleted or renamed (user=%user.name type=%evt.type command=%proc.cmdline fd.name=%fd.name name=%evt.arg.name path=%evt.arg.path oldpath=%evt.arg.oldpath %container.info)
Bash history has been deleted (user=%user.name command=%proc.cmdline file=%fd.name %container.info)
priority:
WARNING
tag: [process, mitre_defense_evation]
- macro: consider_all_chmods
condition: (always_true)
- list: user_known_chmod_applications
items: []
condition: (never_true)
- rule: Set Setuid or Setgid bit
desc: >
When the setuid or setgid bits are set for an application,
this means that the application will run with the privileges of the owning user or group respectively.
Detect setuid or setgid bits set via chmod
condition: consider_all_chmods and chmod and (evt.arg.mode contains "S_ISUID" or evt.arg.mode contains "S_ISGID") and not proc.cmdline in (user_known_chmod_applications)
condition: consider_all_chmods and spawned_process and proc.name = "chmod" and (proc.args contains "+s" or proc.args contains "4777")
output: >
Setuid or setgid bit is set via chmod (fd=%evt.arg.fd filename=%evt.arg.filename mode=%evt.arg.mode user=%user.name
command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
Setuid or setgid bit is set via chmod (user=%user.name command=%proc.cmdline
container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
priority:
NOTICE
tag: [process, mitre_persistence]
@@ -2443,14 +2411,12 @@
- rule: Create Hidden Files or Directories
desc: Detect hidden files or directories created
condition: >
(consider_hidden_file_creation and (
(modify and evt.arg.newpath contains "/.") or
(mkdir and evt.arg.path contains "/.") or
(open_write and evt.arg.flags contains "O_CREAT" and fd.name contains "/." and not fd.name pmatch (exclude_hidden_directories)))
)
((mkdir and consider_hidden_file_creation and evt.arg.path contains "/.") or
(open_write and consider_hidden_file_creation and evt.arg.flags contains "O_CREAT" and
fd.name contains "/." and not fd.name pmatch (exclude_hidden_directories)))
output: >
Hidden file or directory created (user=%user.name command=%proc.cmdline
file=%fd.name newpath=%evt.arg.newpath container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
file=%fd.name container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
priority:
NOTICE
tag: [file, mitre_persistence]
@@ -2480,103 +2446,6 @@
Symlinks created over senstivie files (user=%user.name command=%proc.cmdline target=%evt.arg.target linkpath=%evt.arg.linkpath parent_process=%proc.pname)
priority: NOTICE
tags: [file, mitre_exfiltration]
- list: miner_ports
items: [
25, 3333, 3334, 3335, 3336, 3357, 4444,
5555, 5556, 5588, 5730, 6099, 6666, 7777,
7778, 8000, 8001, 8008, 8080, 8118, 8333,
8888, 8899, 9332, 9999, 14433, 14444,
45560, 45700
]
- list: miner_domains
items: [
"asia1.ethpool.org","ca.minexmr.com",
"cn.stratum.slushpool.com","de.minexmr.com",
"eth-ar.dwarfpool.com","eth-asia.dwarfpool.com",
"eth-asia1.nanopool.org","eth-au.dwarfpool.com",
"eth-au1.nanopool.org","eth-br.dwarfpool.com",
"eth-cn.dwarfpool.com","eth-cn2.dwarfpool.com",
"eth-eu.dwarfpool.com","eth-eu1.nanopool.org",
"eth-eu2.nanopool.org","eth-hk.dwarfpool.com",
"eth-jp1.nanopool.org","eth-ru.dwarfpool.com",
"eth-ru2.dwarfpool.com","eth-sg.dwarfpool.com",
"eth-us-east1.nanopool.org","eth-us-west1.nanopool.org",
"eth-us.dwarfpool.com","eth-us2.dwarfpool.com",
"eu.stratum.slushpool.com","eu1.ethermine.org",
"eu1.ethpool.org","fr.minexmr.com",
"mine.moneropool.com","mine.xmrpool.net",
"pool.minexmr.com","pool.monero.hashvault.pro",
"pool.supportxmr.com","sg.minexmr.com",
"sg.stratum.slushpool.com","stratum-eth.antpool.com",
"stratum-ltc.antpool.com","stratum-zec.antpool.com",
"stratum.antpool.com","us-east.stratum.slushpool.com",
"us1.ethermine.org","us1.ethpool.org",
"us2.ethermine.org","us2.ethpool.org",
"xmr-asia1.nanopool.org","xmr-au1.nanopool.org",
"xmr-eu1.nanopool.org","xmr-eu2.nanopool.org",
"xmr-jp1.nanopool.org","xmr-us-east1.nanopool.org",
"xmr-us-west1.nanopool.org","xmr.crypto-pool.fr",
"xmr.pool.minergate.com"
]
- list: https_miner_domains
items: [
"ca.minexmr.com",
"cn.stratum.slushpool.com",
"de.minexmr.com",
"fr.minexmr.com",
"mine.moneropool.com",
"mine.xmrpool.net",
"pool.minexmr.com",
"sg.minexmr.com",
"stratum-eth.antpool.com",
"stratum-ltc.antpool.com",
"stratum-zec.antpool.com",
"stratum.antpool.com",
"xmr.crypto-pool.fr"
]
- list: http_miner_domains
items: [
"ca.minexmr.com",
"de.minexmr.com",
"fr.minexmr.com",
"mine.moneropool.com",
"mine.xmrpool.net",
"pool.minexmr.com",
"sg.minexmr.com",
"xmr.crypto-pool.fr"
]
# Add rule based on crypto mining IOCs
- macro: minerpool_https
condition: (fd.sport="443" and fd.sip.name in (https_miner_domains))
- macro: minerpool_http
condition: (fd.sport="80" and fd.sip.name in (http_miner_domains))
- macro: minerpool_other
condition: (fd.sport in (miner_ports) and fd.sip.name in (miner_domains))
- macro: net_miner_pool
condition: (evt.type in (sendto, sendmsg) and evt.dir=< and ((minerpool_http) or (minerpool_https) or (minerpool_other)))
- rule: Detect outbound connections to common miner pool ports
desc: Miners typically connect to miner pools on common ports.
condition: net_miner_pool
output: Outbound connection to IP/Port flagged by cryptoioc.ch (command=%proc.cmdline port=%fd.rport ip=%fd.rip container=%container.info image=%container.image.repository)
priority: CRITICAL
tags: [network, mitre_execution]
- rule: Detect crypto miners using the Stratum protocol
desc: Miners typically specify the mining pool to connect to with a URI that begins with 'stratum+tcp'
condition: spawned_process and proc.cmdline contains "stratum+tcp"
output: Possible miner running (command=%proc.cmdline container=%container.info image=%container.image.repository)
priority: CRITICAL
tags: [process, mitre_execution]
# Application rules have moved to application_rules.yaml. Please look
# there if you want to enable them by adding to
# falco_rules.local.yaml.

View File

@@ -19,7 +19,7 @@ configure_file(debian/postinst.in debian/postinst)
configure_file(debian/prerm.in debian/prerm)
if(NOT SYSDIG_DIR)
get_filename_component(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig" REALPATH)
set(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig")
endif()
file(COPY "${PROJECT_SOURCE_DIR}/scripts/debian/falco"

1
test/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
falco_traces.yaml

View File

@@ -40,8 +40,6 @@ class FalcoTest(Test):
build_type = "debug" if build_type == "debug" else "release"
build_dir = os.path.join('/build', build_type)
if not os.path.exists(build_dir):
build_dir = '../build'
self.falcodir = self.params.get('falcodir', '/', default=os.path.join(self.basedir, build_dir))
self.stdout_is = self.params.get('stdout_is', '*', default='')

View File

@@ -123,18 +123,6 @@ trace_files: !mux
trace_file: trace_files/cat_write.scap
all_events: True
multiple_docs:
detect: True
detect_level:
- WARNING
- INFO
- ERROR
rules_file:
- rules/single_rule.yaml
- rules/double_rule.yaml
trace_file: trace_files/cat_write.scap
all_events: True
rules_directory:
detect: True
detect_level:
@@ -414,148 +402,6 @@ trace_files: !mux
- rules/rule_append_failure.yaml
trace_file: trace_files/cat_write.scap
invalid_overwrite_macro:
exit_status: 1
stdout_contains: |+
.*invalid_base_macro.yaml: Ok
.*invalid_overwrite_macro.yaml: Compilation error when compiling "foo": Undefined macro 'foo' used in filter.
---
- macro: some macro
condition: foo
append: false
---
validate_rules_file:
- rules/invalid_base_macro.yaml
- rules/invalid_overwrite_macro.yaml
trace_file: trace_files/cat_write.scap
invalid_append_macro:
exit_status: 1
stdout_contains: |+
.*invalid_base_macro.yaml: Ok
.*invalid_append_macro.yaml: Compilation error when compiling "evt.type=execve foo": 17: syntax error, unexpected 'foo', expecting 'or', 'and'
---
- macro: some macro
condition: evt.type=execve
- macro: some macro
condition: foo
append: true
---
validate_rules_file:
- rules/invalid_base_macro.yaml
- rules/invalid_append_macro.yaml
trace_file: trace_files/cat_write.scap
invalid_overwrite_macro_multiple_docs:
exit_status: 1
stdout_is: |+
Compilation error when compiling "foo": Undefined macro 'foo' used in filter.
---
- macro: some macro
condition: foo
append: false
---
validate_rules_file:
- rules/invalid_overwrite_macro_multiple_docs.yaml
trace_file: trace_files/cat_write.scap
invalid_append_macro_multiple_docs:
exit_status: 1
stdout_is: |+
Compilation error when compiling "evt.type=execve foo": 17: syntax error, unexpected 'foo', expecting 'or', 'and'
---
- macro: some macro
condition: evt.type=execve
- macro: some macro
condition: foo
append: true
---
validate_rules_file:
- rules/invalid_append_macro_multiple_docs.yaml
trace_file: trace_files/cat_write.scap
invalid_overwrite_rule:
exit_status: 1
stdout_contains: |+
.*invalid_base_rule.yaml: Ok
.*invalid_overwrite_rule.yaml: Undefined macro 'bar' used in filter.
---
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: false
---
validate_rules_file:
- rules/invalid_base_rule.yaml
- rules/invalid_overwrite_rule.yaml
trace_file: trace_files/cat_write.scap
invalid_append_rule:
exit_status: 1
stdout_contains: |+
.*invalid_base_rule.yaml: Ok
.*invalid_append_rule.yaml: Compilation error when compiling "evt.type=open bar": 15: syntax error, unexpected 'bar', expecting 'or', 'and'
---
- rule: some rule
desc: some desc
condition: evt.type=open
output: some output
priority: INFO
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: true
---
validate_rules_file:
- rules/invalid_base_rule.yaml
- rules/invalid_append_rule.yaml
trace_file: trace_files/cat_write.scap
invalid_overwrite_rule_multiple_docs:
exit_status: 1
stdout_is: |+
Undefined macro 'bar' used in filter.
---
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: false
---
validate_rules_file:
- rules/invalid_overwrite_rule_multiple_docs.yaml
trace_file: trace_files/cat_write.scap
invalid_append_rule_multiple_docs:
exit_status: 1
stdout_contains: |+
Compilation error when compiling "evt.type=open bar": 15: syntax error, unexpected 'bar', expecting 'or', 'and'
---
- rule: some rule
desc: some desc
condition: evt.type=open
output: some output
priority: INFO
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: true
---
validate_rules_file:
- rules/invalid_append_rule_multiple_docs.yaml
trace_file: trace_files/cat_write.scap
invalid_missing_rule_name:
exit_status: 1
stdout_is: |+

View File

@@ -1,3 +0,0 @@
- macro: some macro
condition: foo
append: true

View File

@@ -1,8 +0,0 @@
---
- macro: some macro
condition: evt.type=execve
---
- macro: some macro
condition: foo
append: true

View File

@@ -1,6 +0,0 @@
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: true

View File

@@ -1,13 +0,0 @@
---
- rule: some rule
desc: some desc
condition: evt.type=open
output: some output
priority: INFO
---
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: true

View File

@@ -1,2 +0,0 @@
- macro: some macro
condition: evt.type=execve

View File

@@ -1,5 +0,0 @@
- rule: some rule
desc: some desc
condition: evt.type=open
output: some output
priority: INFO

View File

@@ -1,3 +0,0 @@
- macro: some macro
condition: foo
append: false

View File

@@ -1,8 +0,0 @@
---
- macro: some macro
condition: evt.type=execve
---
- macro: some macro
condition: foo
append: false

View File

@@ -1,6 +0,0 @@
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: false

View File

@@ -1,13 +0,0 @@
---
- rule: some rule
desc: some desc
condition: evt.type=open
output: some output
priority: INFO
---
- rule: some rule
desc: some desc
condition: bar
output: some output
priority: INFO
append: false

View File

@@ -1,66 +0,0 @@
---
#
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
#
# This file is part of falco.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- required_engine_version: 2
- list: cat_binaries
items: [cat]
- list: cat_capable_binaries
items: [cat_binaries]
- macro: is_cat
condition: proc.name in (cat_capable_binaries)
- rule: open_from_cat
desc: A process named cat does an open
condition: evt.type=open and is_cat
output: "An open was seen (command=%proc.cmdline)"
priority: WARNING
---
#
# Copyright (C) 2016-2018 Draios Inc dba Sysdig.
#
# This file is part of falco.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This ruleset depends on the is_cat macro defined in single_rule.yaml
- rule: exec_from_cat
desc: A process named cat does execve
condition: evt.type=execve and is_cat
output: "An exec was seen (command=%proc.cmdline)"
priority: ERROR
- rule: access_from_cat
desc: A process named cat does an access
condition: evt.type=access and is_cat
output: "An access was seen (command=%proc.cmdline)"
priority: INFO

Binary file not shown.

View File

@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations under
# the License.
#
set(FALCO_TESTS_SOURCES test_base.cpp engine/test_token_bucket.cpp falco/test_webserver.cpp)
set(FALCO_TESTS_SOURCES test_base.cpp engine/test_token_bucket.cpp)
set(FALCO_TESTED_LIBRARIES falco_engine)
@@ -38,10 +38,7 @@ if(FALCO_BUILD_TESTS)
falco_test
PUBLIC "${CATCH2_INCLUDE}"
"${FAKEIT_INCLUDE}"
"${PROJECT_SOURCE_DIR}/userspace/engine"
"${YAMLCPP_INCLUDE_DIR}"
"${CIVETWEB_INCLUDE_DIR}"
"${PROJECT_SOURCE_DIR}/userspace/falco")
"${PROJECT_SOURCE_DIR}/userspace/engine")
include(CMakeParseArguments)
include(CTest)

View File

@@ -1,31 +0,0 @@
/*
Copyright (C) 2016-2019 Draios Inc dba Sysdig.
This file is part of falco.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "webserver.h"
#include <catch.hpp>
TEST_CASE("webserver must accept invalid data", "[!hide][webserver][k8s_audit_handler][accept_data]")
{
// falco_engine* engine = new falco_engine();
// falco_outputs* outputs = new falco_outputs(engine);
// std::string errstr;
// std::string input("{\"kind\": 0}");
//k8s_audit_handler::accept_data(engine, outputs, input, errstr);
REQUIRE(1 == 1);
}

View File

@@ -16,7 +16,7 @@
# limitations under the License.
if(NOT SYSDIG_DIR)
get_filename_component(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig" REALPATH)
set(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig")
endif()
set(FALCO_ENGINE_SOURCE_FILES

View File

@@ -365,54 +365,46 @@ unique_ptr<falco_engine::rule_result> falco_engine::process_k8s_audit_event(json
bool falco_engine::parse_k8s_audit_json(nlohmann::json &j, std::list<json_event> &evts)
{
// Note that nlohmann::basic_json::value can throw nlohmann::basic_json::type_error (302, 306)
try
// If the Kind is EventList, split it into individual events.
if(j.value("kind", "<NA>") == "EventList")
{
// If the kind is EventList, split it into individual events
if(j.value("kind", "<NA>") == "EventList")
{
for(auto &je : j["items"])
{
evts.emplace_back();
je["kind"] = "Event";
uint64_t ns = 0;
if(!sinsp_utils::parse_iso_8601_utc_string(je.value(k8s_audit_time, "<NA>"), ns))
{
return false;
}
std::string tmp;
sinsp_utils::ts_to_string(ns, &tmp, false, true);
evts.back().set_jevt(je, ns);
}
return true;
}
else if(j.value("kind", "<NA>") == "Event")
for(auto &je : j["items"])
{
evts.emplace_back();
je["kind"] = "Event";
uint64_t ns = 0;
if(!sinsp_utils::parse_iso_8601_utc_string(j.value(k8s_audit_time, "<NA>"), ns))
if(!sinsp_utils::parse_iso_8601_utc_string(je.value(k8s_audit_time, "<NA>"), ns))
{
return false;
}
evts.back().set_jevt(j, ns);
return true;
std::string tmp;
sinsp_utils::ts_to_string(ns, &tmp, false, true);
evts.back().set_jevt(je, ns);
}
else
return true;
}
else if(j.value("kind", "<NA>") == "Event")
{
evts.emplace_back();
uint64_t ns = 0;
if(!sinsp_utils::parse_iso_8601_utc_string(j.value(k8s_audit_time, "<NA>"), ns))
{
return false;
}
evts.back().set_jevt(j, ns);
return true;
}
catch(exception &e)
else
{
// Propagate the exception
rethrow_exception(current_exception());
return false;
}
}
unique_ptr<falco_engine::rule_result> falco_engine::process_k8s_audit_event(json_event *ev)

View File

@@ -225,24 +225,9 @@ function split_lines(rules_content)
return lines, indices
end
function get_orig_yaml_obj(rules_lines, row)
local ret = ""
function get_context(rules_lines, row, num_lines)
idx = row
while (idx <= #rules_lines) do
ret = ret..rules_lines[idx].."\n"
idx = idx + 1
if idx > #rules_lines or rules_lines[idx] == "" or string.sub(rules_lines[idx], 1, 1) == '-' then
break
end
end
return ret
end
function get_lines(rules_lines, row, num_lines)
local ret = ""
local ret = "---\n"
idx = row
while (idx < (row + num_lines) and idx <= #rules_lines) do
@@ -250,54 +235,94 @@ function get_lines(rules_lines, row, num_lines)
idx = idx + 1
end
ret = ret.."---"
return ret
end
function build_error(rules_lines, row, num_lines, err)
local ret = err.."\n---\n"..get_lines(rules_lines, row, num_lines).."---"
local ret = err.."\n"..get_context(rules_lines, row, num_lines)
return ret
end
function build_error_with_context(ctx, err)
local ret = err.."\n---\n"..ctx.."---"
return ret
end
function load_rules(sinsp_lua_parser,
json_lua_parser,
rules_content,
rules_mgr,
verbose,
all_events,
extra,
replace_container_info,
min_priority)
function load_rules_doc(rules_mgr, doc, load_state)
local required_engine_version = 0
local lines, indices = split_lines(rules_content)
local status, rules = pcall(yaml.load, rules_content)
if status == false then
local pat = "^([%d]+):([%d]+): "
-- rules is actually an error string
local row = 0
local col = 0
row, col = string.match(rules, pat)
if row ~= nil and col ~= nil then
rules = string.gsub(rules, pat, "")
end
row = tonumber(row)
col = tonumber(col)
return false, build_error(lines, row, 3, rules)
end
if rules == nil then
-- An empty rules file is acceptable
return true, required_engine_version
end
if type(rules) ~= "table" then
return false, build_error(lines, 1, 1, "Rules content is not yaml")
end
-- Look for non-numeric indices--implies that document is not array
-- of objects.
for key, val in pairs(rules) do
if type(key) ~= "number" then
return false, build_error(lines, 1, 1, "Rules content is not yaml array of objects")
end
end
-- Iterate over yaml list. In this pass, all we're doing is
-- populating the set of rules, macros, and lists. We're not
-- expanding/compiling anything yet. All that will happen in a
-- second pass
for i,v in ipairs(doc) do
load_state.cur_item_idx = load_state.cur_item_idx + 1
-- Save back the original object as it appeared in the file. Will be used to provide context.
local context = get_orig_yaml_obj(load_state.lines,
load_state.indices[load_state.cur_item_idx])
for i,v in ipairs(rules) do
if (not (type(v) == "table")) then
return false, build_error_with_context(context, "Unexpected element of type " ..type(v)..". Each element should be a yaml associative array.")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Unexpected element of type " ..type(v)..". Each element should be a yaml associative array.")
end
v['context'] = context
if (v['required_engine_version']) then
load_state.required_engine_version = v['required_engine_version']
if type(load_state.required_engine_version) ~= "number" then
return false, build_error_with_context(v['context'], "Value of required_engine_version must be a number")
required_engine_version = v['required_engine_version']
if type(required_engine_version) ~= "number" then
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Value of required_engine_version must be a number")
end
if falco_rules.engine_version(rules_mgr) < v['required_engine_version'] then
return false, build_error_with_context(v['context'], "Rules require engine version "..v['required_engine_version']..", but engine version is "..falco_rules.engine_version(rules_mgr))
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Rules require engine version "..v['required_engine_version']..", but engine version is "..falco_rules.engine_version(rules_mgr))
end
elseif (v['macro']) then
if (v['macro'] == nil or type(v['macro']) == "table") then
return false, build_error_with_context(v['context'], "Macro name is empty")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Macro name is empty")
end
if v['source'] == nil then
@@ -305,12 +330,12 @@ function load_rules_doc(rules_mgr, doc, load_state)
end
if state.macros_by_name[v['macro']] == nil then
state.ordered_macro_names[#state.ordered_macro_names+1] = v['macro']
state.ordered_macro_names[#state.ordered_macro_names+1] = {["idx"]=i, ["name"]=v['macro']}
end
for j, field in ipairs({'condition'}) do
if (v[field] == nil) then
return false, build_error_with_context(v['context'], "Macro must have property "..field)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Macro must have property "..field)
end
end
@@ -323,14 +348,11 @@ function load_rules_doc(rules_mgr, doc, load_state)
if append then
if state.macros_by_name[v['macro']] == nil then
return false, build_error_with_context(v['context'], "Macro " ..v['macro'].. " has 'append' key but no macro by that name already exists")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Macro " ..v['macro'].. " has 'append' key but no macro by that name already exists")
end
state.macros_by_name[v['macro']]['condition'] = state.macros_by_name[v['macro']]['condition'] .. " " .. v['condition']
-- Add the current object to the context of the base macro
state.macros_by_name[v['macro']]['context'] = state.macros_by_name[v['macro']]['context'].."\n"..v['context']
else
state.macros_by_name[v['macro']] = v
end
@@ -338,7 +360,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
elseif (v['list']) then
if (v['list'] == nil or type(v['list']) == "table") then
return false, build_error_with_context(v['context'], "List name is empty")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "List name is empty")
end
if state.lists_by_name[v['list']] == nil then
@@ -347,7 +369,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
for j, field in ipairs({'items'}) do
if (v[field] == nil) then
return false, build_error_with_context(v['context'], "List must have property "..field)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "List must have property "..field)
end
end
@@ -360,7 +382,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
if append then
if state.lists_by_name[v['list']] == nil then
return false, build_error_with_context(v['context'], "List " ..v['list'].. " has 'append' key but no list by that name already exists")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "List " ..v['list'].. " has 'append' key but no list by that name already exists")
end
for j, elem in ipairs(v['items']) do
@@ -373,7 +395,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
elseif (v['rule']) then
if (v['rule'] == nil or type(v['rule']) == "table") then
return false, build_error_with_context(v['context'], "Rule name is empty")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Rule name is empty")
end
-- By default, if a rule's condition refers to an unknown
@@ -398,26 +420,23 @@ function load_rules_doc(rules_mgr, doc, load_state)
-- For append rules, all you need is the condition
for j, field in ipairs({'condition'}) do
if (v[field] == nil) then
return false, build_error_with_context(v['context'], "Rule must have property "..field)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Rule must have property "..field)
end
end
if state.rules_by_name[v['rule']] == nil then
if state.skipped_rules_by_name[v['rule']] == nil then
return false, build_error_with_context(v['context'], "Rule " ..v['rule'].. " has 'append' key but no rule by that name already exists")
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Rule " ..v['rule'].. " has 'append' key but no rule by that name already exists")
end
else
state.rules_by_name[v['rule']]['condition'] = state.rules_by_name[v['rule']]['condition'] .. " " .. v['condition']
-- Add the current object to the context of the base rule
state.rules_by_name[v['rule']]['context'] = state.rules_by_name[v['rule']]['context'].."\n"..v['context']
end
else
for j, field in ipairs({'condition', 'output', 'desc', 'priority'}) do
if (v[field] == nil) then
return false, build_error_with_context(v['context'], "Rule must have property "..field)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Rule must have property "..field)
end
end
@@ -428,12 +447,12 @@ function load_rules_doc(rules_mgr, doc, load_state)
error("Invalid priority level: "..v['priority'])
end
if v['priority_num'] <= load_state.min_priority then
if v['priority_num'] <= min_priority then
-- Note that we can overwrite rules, but the rules are still
-- loaded in the order in which they first appeared,
-- potentially across multiple files.
if state.rules_by_name[v['rule']] == nil then
state.ordered_rule_names[#state.ordered_rule_names+1] = v['rule']
state.ordered_rule_names[#state.ordered_rule_names+1] = {["idx"]=i, ["name"]=v['rule']}
end
-- The output field might be a folded-style, which adds a
@@ -446,81 +465,11 @@ function load_rules_doc(rules_mgr, doc, load_state)
end
end
else
-- Remove the context from the table, so the table is exactly what was parsed
local context = v['context']
v['context'] = nil
return false, build_error_with_context(context, "Unknown rule object: "..table.tostring(v))
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Unknown rule object: "..table.tostring(v))
end
end
return true, ""
end
function load_rules(sinsp_lua_parser,
json_lua_parser,
rules_content,
rules_mgr,
verbose,
all_events,
extra,
replace_container_info,
min_priority)
local load_state = {lines={}, indices={}, cur_item_idx=0, min_priority=min_priority, required_engine_version=0}
load_state.lines, load_state.indices = split_lines(rules_content)
local status, docs = pcall(yaml.load, rules_content, { all = true })
if status == false then
local pat = "^([%d]+):([%d]+): "
-- docs is actually an error string
local row = 0
local col = 0
row, col = string.match(docs, pat)
if row ~= nil and col ~= nil then
docs = string.gsub(docs, pat, "")
end
row = tonumber(row)
col = tonumber(col)
return false, build_error(load_state.lines, row, 3, docs)
end
if docs == nil then
-- An empty rules file is acceptable
return true, load_state.required_engine_version
end
if type(docs) ~= "table" then
return false, build_error(load_state.lines, 1, 1, "Rules content is not yaml")
end
for docidx, doc in ipairs(docs) do
if type(doc) ~= "table" then
return false, build_error(load_state.lines, 1, 1, "Rules content is not yaml")
end
-- Look for non-numeric indices--implies that document is not array
-- of objects.
for key, val in pairs(doc) do
if type(key) ~= "number" then
return false, build_error(load_state.lines, 1, 1, "Rules content is not yaml array of objects")
end
end
res, errstr = load_rules_doc(rules_mgr, doc, load_state)
if not res then
return res, errstr
end
end
-- We've now loaded all the rules, macros, and lists. Now
-- We've now loaded all the rules, macros, and list. Now
-- compile/expand the rules, macros, and lists. We use
-- ordered_rule_{lists,macros,names} to compile them in the order
-- in which they appeared in the file(s).
@@ -549,14 +498,17 @@ function load_rules(sinsp_lua_parser,
state.lists[v['list']] = {["items"] = items, ["used"] = false}
end
for _, name in ipairs(state.ordered_macro_names) do
for _, obj in ipairs(state.ordered_macro_names) do
local i = obj["idx"]
local name = obj["name"]
local v = state.macros_by_name[name]
local status, ast = compiler.compile_macro(v['condition'], state.macros, state.lists)
if status == false then
return false, build_error_with_context(v['context'], ast)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), ast)
end
if v['source'] == "syscall" then
@@ -568,7 +520,10 @@ function load_rules(sinsp_lua_parser,
state.macros[v['macro']] = {["ast"] = ast.filter.value, ["used"] = false}
end
for _, name in ipairs(state.ordered_rule_names) do
for _, obj in ipairs(state.ordered_rule_names) do
local i = obj["idx"]
local name = obj["name"]
local v = state.rules_by_name[name]
@@ -581,7 +536,7 @@ function load_rules(sinsp_lua_parser,
state.macros, state.lists)
if status == false then
return false, build_error_with_context(v['context'], filter_ast)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), filter_ast)
end
local evtttypes = {}
@@ -719,7 +674,7 @@ function load_rules(sinsp_lua_parser,
formatter = formats.formatter(v['source'], v['output'])
formats.free_formatter(v['source'], formatter)
else
return false, build_error_with_context(v['context'], "Unexpected type in load_rule: "..filter_ast.type)
return false, build_error(lines, indices[i], (indices[i+1]-indices[i]), "Unexpected type in load_rule: "..filter_ast.type)
end
::next_rule::
@@ -742,7 +697,7 @@ function load_rules(sinsp_lua_parser,
io.flush()
return true, load_state.required_engine_version
return true, required_engine_version
end
local rule_fmt = "%-50s %s"

View File

@@ -16,7 +16,7 @@
# limitations under the License.
#
if(NOT SYSDIG_DIR)
get_filename_component(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig" REALPATH)
set(SYSDIG_DIR "${PROJECT_SOURCE_DIR}/../sysdig")
endif()
configure_file("${SYSDIG_DIR}/userspace/sysdig/config_sysdig.h.in" config_sysdig.h)
@@ -27,8 +27,6 @@ add_executable(falco
falco_outputs.cpp
event_drops.cpp
statsfilewriter.cpp
timer.cpp
module_utils.cpp
falco.cpp
"${SYSDIG_DIR}/userspace/sysdig/fields_info.cpp"
webserver.cpp)
@@ -51,11 +49,16 @@ target_link_libraries(falco
configure_file(config_falco.h.in config_falco.h)
add_custom_command(TARGET falco
COMMAND bash ${CMAKE_CURRENT_SOURCE_DIR}/verify_engine_fields ${CMAKE_SOURCE_DIR}
COMMAND bash ${CMAKE_CURRENT_SOURCE_DIR}/verify_engine_fields.sh ${CMAKE_SOURCE_DIR}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Comparing engine fields checksum in falco_engine.h to actual fields"
)
# add_custom_target(verify_engine_fields
# DEPENDS verify_engine_fields.sh falco_engine.h)
# add_dependencies(verify_engine_fields falco)
install(TARGETS falco DESTINATION ${FALCO_BIN_DIR})
install(DIRECTORY lua
DESTINATION ${FALCO_SHARE_DIR}

View File

@@ -27,4 +27,4 @@ limitations under the License.
#define FALCO_INSTALL_CONF_FILE "/etc/falco/falco.yaml"
#define FALCO_SOURCE_LUA_DIR "${PROJECT_SOURCE_DIR}/userspace/falco/lua/"
#define PROBE_NAME "@PROBE_NAME@"
#define PROBE_NAME "${PROBE_NAME}"

View File

@@ -220,15 +220,6 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
m_syscall_evt_drop_rate = m_config->get_scalar<double>("syscall_event_drops", "rate", 0.3333);
m_syscall_evt_drop_max_burst = m_config->get_scalar<double>("syscall_event_drops", "max_burst", 10);
m_module_check_frequency = m_config->get_scalar<uint64_t>("module_check", "frequency", 10);
if(m_module_check_frequency < 10) {
throw invalid_argument("Module check frequency must be higher than 10 seconds");
}
m_module_check_max_consecutive_failures = m_config->get_scalar<int>("module_check", "max_consecutive_failures", 3);
m_module_check_backoff_max_attempts = m_config->get_scalar<int>("module_check", "backoff", "max_attempts", 5);
m_module_check_backoff_init_delay = m_config->get_scalar<uint64_t>("module_check", "backoff", "init_delay", 100);
m_module_check_backoff_max_delay = m_config->get_scalar<uint64_t>("module_check", "backoff", "max_delay", 3000);
m_syscall_evt_simulate_drops = m_config->get_scalar<bool>("syscall_event_drops", "simulate_drops", false);
}

View File

@@ -133,47 +133,6 @@ public:
}
}
/**
* Get a scalar value defined inside a 3 level nested structure like:
* file_output:
* enabled: true
* filename: output_file.txt
*
* get_scalar<bool>("file_output", "enabled", false)
*/
template<typename T>
const T get_scalar(const std::string& key, const std::string& subkey, const std::string& subsubkey, const T& default_value)
{
try
{
auto node = m_root[key][subkey][subsubkey];
if (node.IsDefined())
{
return node.as<T>();
}
}
catch (const YAML::BadConversion& ex)
{
std::cerr << "Cannot read config file (" + m_path + "): wrong type at key " + key + "\n";
throw;
}
return default_value;
}
/**
* Set the second-level node identified by key[key][subkey] to value.
*/
template<typename T>
void set_scalar(const std::string& key, const std::string& subkey, const std::string& subsubkey, const T& value)
{
auto node = m_root;
if (node.IsDefined())
{
node[key][subkey][subsubkey] = value;
}
}
// called with the last variadic arg (where the sequence is expected to be found)
template <typename T>
void get_sequence_from_node(T& ret, const YAML::Node &node)
@@ -257,12 +216,6 @@ class falco_configuration
double m_syscall_evt_drop_rate;
double m_syscall_evt_drop_max_burst;
uint64_t m_module_check_frequency;
int m_module_check_max_consecutive_failures;
int m_module_check_backoff_max_attempts;
uint64_t m_module_check_backoff_init_delay;
uint64_t m_module_check_backoff_max_delay;
// Only used for testing
bool m_syscall_evt_simulate_drops;

View File

@@ -26,7 +26,6 @@ limitations under the License.
#include <vector>
#include <algorithm>
#include <string>
#include <functional>
#include <signal.h>
#include <fcntl.h>
#include <sys/utsname.h>
@@ -47,11 +46,6 @@ limitations under the License.
#include "config_falco.h"
#include "statsfilewriter.h"
#include "webserver.h"
#include "timer.h"
#include "retry.h"
#include "module_utils.h"
typedef function<void(sinsp* inspector)> open_t;
bool g_terminate = false;
bool g_reopen_outputs = false;
@@ -88,27 +82,25 @@ static void usage()
" -h, --help Print this page\n"
" -c Configuration file (default " FALCO_SOURCE_CONF_FILE ", " FALCO_INSTALL_CONF_FILE ")\n"
" -A Monitor all events, including those with EF_DROP_FALCO flag.\n"
" -b, --print-base64 Print data buffers in base64.\n"
" This is useful for encoding binary data that needs to be used over media designed to.\n"
" --cri <path> Path to CRI socket for container metadata.\n"
" Use the specified socket to fetch data from a CRI-compatible runtime.\n"
" -d, --daemon Run as a daemon.\n"
" --disable-source <event_source>\n"
" Disable a specific event source.\n"
" Available event sources are: syscall, k8s_audit.\n"
" It can be passed multiple times.\n"
" Can not disable both the event sources.\n"
" -b, --print-base64 Print data buffers in base64. This is useful for encoding\n"
" binary data that needs to be used over media designed to\n"
" --cri <path> Path to CRI socket for container metadata\n"
" Use the specified socket to fetch data from a CRI-compatible runtime\n"
" -d, --daemon Run as a daemon\n"
" -D <substring> Disable any rules with names having the substring <substring>. Can be specified multiple times.\n"
" Can not be specified with -t.\n"
" -e <events_file> Read the events from <events_file> (in .scap format for sinsp events, or jsonl for\n"
" k8s audit events) instead of tapping into live.\n"
" -k <url>, --k8s-api <url>\n"
" Enable Kubernetes support by connecting to the API server specified as argument.\n"
" E.g. \"http://admin:password@127.0.0.1:8080\".\n"
" The API server can also be specified via the environment variable FALCO_K8S_API.\n"
" -K <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>], --k8s-api-cert <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>]\n"
" Use the provided files names to authenticate user and (optionally) verify the K8S API server identity.\n"
" Each entry must specify full (absolute, or relative to the current directory) path to the respective file.\n"
" -k <url>, --k8s-api=<url>\n"
" Enable Kubernetes support by connecting to the API server\n"
" specified as argument. E.g. \"http://admin:password@127.0.0.1:8080\".\n"
" The API server can also be specified via the environment variable\n"
" FALCO_K8S_API.\n"
" -K <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>], --k8s-api-cert=<bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>]\n"
" Use the provided files names to authenticate user and (optionally) verify the K8S API\n"
" server identity.\n"
" Each entry must specify full (absolute, or relative to the current directory) path\n"
" to the respective file.\n"
" Private key password is optional (needed only if key is password protected).\n"
" CA certificate is optional. For all files, only PEM file format is supported. \n"
" Specifying CA certificate only is obsoleted - when single entry is provided \n"
@@ -119,47 +111,51 @@ static void usage()
" -l <rule> Show the name and description of the rule with name <rule> and exit.\n"
" --list [<source>] List all defined fields. If <source> is provided, only list those fields for\n"
" the source <source>. Current values for <source> are \"syscall\", \"k8s_audit\"\n"
" -m <url[,marathon_url]>, --mesos-api <url[,marathon_url]>\n"
" -m <url[,marathon_url]>, --mesos-api=<url[,marathon_url]>\n"
" Enable Mesos support by connecting to the API server\n"
" specified as argument. E.g. \"http://admin:password@127.0.0.1:5050\".\n"
" Marathon url is optional and defaults to Mesos address, port 8080.\n"
" The API servers can also be specified via the environment variable FALCO_MESOS_API.\n"
" The API servers can also be specified via the environment variable\n"
" FALCO_MESOS_API.\n"
" -M <num_seconds> Stop collecting after <num_seconds> reached.\n"
" -N When used with --list, only print field names.\n"
" -o, --option <key>=<val> Set the value of option <key> to <val>. Overrides values in configuration file.\n"
" <key> can be a two-part <key>.<subkey>\n"
" -p <output_format>, --print <output_format>\n"
" -p <output_format>, --print=<output_format>\n"
" Add additional information to each falco notification's output.\n"
" With -pc or -pcontainer will use a container-friendly format.\n"
" With -pk or -pkubernetes will use a kubernetes-friendly format.\n"
" With -pm or -pmesos will use a mesos-friendly format.\n"
" Additionally, specifying -pc/-pk/-pm will change the interpretation\n"
" of %%container.info in rule output fields.\n"
" of %%container.info in rule output fields\n"
" See the examples section below for more info.\n"
" -P, --pidfile <pid_file> When run as a daemon, write pid to specified file\n"
" -r <rules_file> Rules file/directory (defaults to value set in configuration file, or /etc/falco_rules.yaml).\n"
" Can be specified multiple times to read from multiple files/directories.\n"
" -r <rules_file> Rules file/directory (defaults to value set in configuration file,\n"
" or /etc/falco_rules.yaml). Can be specified multiple times to read\n"
" from multiple files/directories.\n"
" -s <stats_file> If specified, write statistics related to falco's reading/processing of events\n"
" to this file. (Only useful in live mode).\n"
" --stats_interval <msec> When using -s <stats_file>, write statistics every <msec> ms.\n"
" This uses signals, so don't recommend intervals below 200 ms.\n"
" Defaults to 5000 (5 seconds).\n"
" -S <len>, --snaplen <len>\n"
" Capture the first <len> bytes of each I/O buffer.\n"
" By default, the first 80 bytes are captured. Use this\n"
" option with caution, it can generate huge trace files.\n"
" --support Print support information including version, rules files used, etc. and exit.\n"
" (This uses signals, so don't recommend intervals below 200 ms)\n"
" defaults to 5000 (5 seconds)\n"
" -S <len>, --snaplen=<len>\n"
" Capture the first <len> bytes of each I/O buffer.\n"
" By default, the first 80 bytes are captured. Use this\n"
" option with caution, it can generate huge trace files.\n"
" --support Print support information including version, rules files used, etc.\n"
" and exit.\n"
" -T <tag> Disable any rules with a tag=<tag>. Can be specified multiple times.\n"
" Can not be specified with -t.\n"
" -t <tag> Only run those rules with a tag=<tag>. Can be specified multiple times.\n"
" Can not be specified with -T/-D.\n"
" -U,--unbuffered Turn off output buffering to configured outputs.\n"
" This causes every single line emitted by falco to be flushed,\n"
" which generates higher CPU usage but is useful when piping those outputs\n"
" into another process or into a script.\n"
" -V, --validate <rules_file> Read the contents of the specified rules(s) file and exit.\n"
" -U,--unbuffered Turn off output buffering to configured outputs. This causes every\n"
" single line emitted by falco to be flushed, which generates higher CPU\n"
" usage but is useful when piping those outputs into another process\n"
" or into a script.\n"
" -V,--validate <rules_file> Read the contents of the specified rules(s) file and exit\n"
" Can be specified multiple times to validate multiple files.\n"
" -v Verbose output.\n"
" --version Print version number.\n"
" --version Print version number.\n"
"\n"
);
}
@@ -231,8 +227,6 @@ uint64_t do_inspect(falco_engine *engine,
string &stats_filename,
uint64_t stats_interval,
bool all_events,
bool verbose,
bool disable_syscall,
int &result)
{
uint64_t num_evts = 0;
@@ -258,44 +252,20 @@ uint64_t do_inspect(falco_engine *engine,
}
}
// Module check settings
utils::timer t;
t.reset();
uint64_t frequency = config.m_module_check_frequency;
auto num_failures = 0;
auto max_failures = config.m_module_check_max_consecutive_failures;
auto max_attempts = config.m_module_check_backoff_max_attempts;
auto ini_delay = config.m_module_check_backoff_init_delay;
auto max_delay = config.m_module_check_backoff_max_delay;
//
// Loop through the events
//
while(true)
while(1)
{
// Check module every x seconds
if(!disable_syscall && t.seconds_elapsed() > frequency)
{
// Check module is present and loaded with exponential backoff (eg., 100, 200, 400, ...)
// When module is missing or unloaded, try to insert it
// Retries at most <max_attempts> times
// Stops early if module is found
auto found = utils::retry(max_attempts, ini_delay, max_delay, utils::module_predicate, utils::has_module, verbose, true);
// Count how many intervals the module is missing, reset counter when module has been found
num_failures = found ? 0 : num_failures + 1;
// Stop falco if module is missing from <count * stop_after> checks
if (num_failures >= max_failures)
{
result = EXIT_FAILURE;
break;
}
// Reset timer
t.reset();
}
rc = inspector->next(&ev);
try {
rc = inspector->next(&ev);
} catch(sinsp_exception& e) {
string err = "Error handling inspector event: ";
err.append(e.what());
err.append("\n");
falco_logger::log(LOG_ERR, err);
continue;
}
writer.handle();
@@ -307,7 +277,7 @@ uint64_t do_inspect(falco_engine *engine,
if (g_terminate || g_restart)
{
falco_logger::log(LOG_INFO, "SIGHUP received, restarting...\n");
falco_logger::log(LOG_INFO, "SIGHUP Received, restarting...\n");
break;
}
else if(rc == SCAP_TIMEOUT)
@@ -328,11 +298,10 @@ uint64_t do_inspect(falco_engine *engine,
throw sinsp_exception(inspector->getlasterr().c_str());
}
if(duration_start == 0)
if (duration_start == 0)
{
duration_start = ev->get_ts();
}
else if(duration_to_tot_ns > 0)
} else if(duration_to_tot_ns > 0)
{
if(ev->get_ts() - duration_start >= duration_to_tot_ns)
{
@@ -466,9 +435,6 @@ int falco_init(int argc, char **argv)
string list_flds_source = "";
bool print_support = false;
string cri_socket_path;
set<string> disable_sources;
bool disable_syscall = false;
bool disable_k8s_audit = false;
// Used for writing trace files
int duration_seconds = 0;
@@ -488,26 +454,25 @@ int falco_init(int argc, char **argv)
static struct option long_options[] =
{
{"help", no_argument, 0, 'h' },
{"print-base64", no_argument, 0, 'b'},
{"daemon", no_argument, 0, 'd' },
{"k8s-api", required_argument, 0, 'k'},
{"k8s-api-cert", required_argument, 0, 'K' },
{"list", optional_argument, 0},
{"mesos-api", required_argument, 0, 'm'},
{"option", required_argument, 0, 'o'},
{"print", required_argument, 0, 'p' },
{"pidfile", required_argument, 0, 'P' },
{"snaplen", required_argument, 0, 'S' },
{"stats_interval", required_argument, 0},
{"support", no_argument, 0},
{"unbuffered", no_argument, 0, 'U' },
{"version", no_argument, 0, 0 },
{"validate", required_argument, 0, 'V' },
{"writefile", required_argument, 0, 'w' },
{"ignored-events", no_argument, 0, 'i'},
{"cri", required_argument, 0},
{"daemon", no_argument, 0, 'd'},
{"disable-source", required_argument, 0},
{"help", no_argument, 0, 'h'},
{"ignored-events", no_argument, 0, 'i'},
{"k8s-api-cert", required_argument, 0, 'K'},
{"k8s-api", required_argument, 0, 'k'},
{"list", optional_argument, 0},
{"mesos-api", required_argument, 0, 'm'},
{"option", required_argument, 0, 'o'},
{"pidfile", required_argument, 0, 'P'},
{"print-base64", no_argument, 0, 'b'},
{"print", required_argument, 0, 'p'},
{"snaplen", required_argument, 0, 'S'},
{"stats_interval", required_argument, 0},
{"support", no_argument, 0},
{"unbuffered", no_argument, 0, 'U'},
{"validate", required_argument, 0, 'V'},
{"version", no_argument, 0, 0},
{"writefile", required_argument, 0, 'w'},
{0, 0, 0, 0}
};
@@ -651,10 +616,7 @@ int falco_init(int argc, char **argv)
}
else if (string(long_options[long_index].name) == "cri")
{
if(optarg != NULL)
{
cri_socket_path = optarg;
}
cri_socket_path = optarg;
}
else if (string(long_options[long_index].name) == "list")
{
@@ -672,13 +634,6 @@ int falco_init(int argc, char **argv)
{
print_support = true;
}
else if (string(long_options[long_index].name) == "disable-source")
{
if(optarg != NULL)
{
disable_sources.insert(optarg);
}
}
break;
default:
@@ -721,25 +676,6 @@ int falco_init(int argc, char **argv)
return EXIT_SUCCESS;
}
if(disable_sources.size() > 0)
{
auto it = disable_sources.begin();
while(it != disable_sources.end())
{
if(*it != "syscall" && *it != "k8s_audit")
{
it = disable_sources.erase(it);
continue;
}
++it;
}
disable_syscall = disable_sources.count("syscall") > 0;
disable_k8s_audit = disable_sources.count("k8s_audit") > 0;
if (disable_syscall && disable_k8s_audit) {
throw std::invalid_argument("The event source \"syscall\" and \"k8s_audit\" can not be disabled together");
}
}
outputs = new falco_outputs(engine);
outputs->set_inspector(inspector);
@@ -1043,7 +979,7 @@ int falco_init(int argc, char **argv)
g_daemonized = true;
}
if(trace_filename.size())
if (trace_filename.size())
{
// Try to open the trace file as a sysdig
// capture file first.
@@ -1087,43 +1023,17 @@ int falco_init(int argc, char **argv)
}
else
{
open_t open_cb = [](sinsp* inspector) {
inspector->open();
};
open_t open_nodriver_cb = [](sinsp* inspector) {
inspector->open_nodriver();
};
open_t open_f;
// Default mode: both event sources enabled
if (!disable_syscall && !disable_k8s_audit) {
open_f = open_cb;
}
if (disable_syscall) {
open_f = open_nodriver_cb;
}
if (disable_k8s_audit) {
open_f = open_cb;
}
// Check that the kernel module is present at startup, otherwise try to add it
if(!utils::has_module(verbose, false))
{
falco_logger::log(LOG_ERR, "Module not found. Trying to load it ...\n");
if(!utils::ins_module())
{
result = EXIT_FAILURE;
goto exit;
}
}
try
{
open_f(inspector);
inspector->open(200);
}
catch(sinsp_exception &e)
{
rethrow_exception(current_exception());
if(system("modprobe " PROBE_NAME " > /dev/null 2> /dev/null"))
{
falco_logger::log(LOG_ERR, "Unable to load the driver. Exiting.\n");
}
inspector->open();
}
}
@@ -1198,7 +1108,7 @@ int falco_init(int argc, char **argv)
delete mesos_api;
mesos_api = 0;
if(trace_filename.empty() && config.m_webserver_enabled && !disable_k8s_audit)
if(trace_filename.empty() && config.m_webserver_enabled)
{
std::string ssl_option = (config.m_webserver_ssl_enabled ? " (SSL)" : "");
falco_logger::log(LOG_INFO, "Starting internal webserver, listening on port " + to_string(config.m_webserver_listen_port) + ssl_option + "\n");
@@ -1214,7 +1124,9 @@ int falco_init(int argc, char **argv)
}
else
{
uint64_t num_evts = do_inspect(engine,
uint64_t num_evts;
num_evts = do_inspect(engine,
outputs,
inspector,
config,
@@ -1223,8 +1135,6 @@ int falco_init(int argc, char **argv)
stats_filename,
stats_interval,
all_events,
verbose,
disable_syscall,
result);
duration = ((double)clock()) / CLOCKS_PER_SEC - duration;
@@ -1233,11 +1143,11 @@ int falco_init(int argc, char **argv)
if(verbose)
{
fprintf(stdout, "Driver events: %" PRIu64 "\nDriver drops: %" PRIu64 "\n",
fprintf(stderr, "Driver Events:%" PRIu64 "\nDriver Drops:%" PRIu64 "\n",
cstats.n_evts,
cstats.n_drops);
fprintf(stdout, "Elapsed time: %.3lf\nCaptured events: %" PRIu64 "\nEps: %.2lf\n",
fprintf(stderr, "Elapsed time: %.3lf, Captured Events: %" PRIu64 ", %.2lf eps\n",
duration,
num_evts,
num_evts / duration);

View File

@@ -1,120 +0,0 @@
/*
Copyright (C) 2016-2019 Draios Inc dba Sysdig.
This file is part of falco.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "config_falco.h"
#include "logger.h"
#include "module_utils.h"
#include <fstream>
#include <functional>
namespace utils
{
// Return whether the falco kernel module is present in /proc/modules.
//   verbose - additionally log instance count, load state and taint details
//   strict  - require the module to be actually loaded and healthy
//             (>0 instances, state "live", no die/warn/forced-rmmod taint);
//             when false, mere presence in /proc/modules is enough.
bool has_module(bool verbose, bool strict)
{
	// Module names use underscores in /proc/modules even when the on-disk
	// name uses dashes, so compare treating '_' (95) and '-' (45) as equal
	// in BOTH directions. (The original second clause repeated the first,
	// so an underscore on the left never matched a dash on the right.)
	std::function<bool(const char &, const char &)> comparator = [](const char &a, const char &b) {
		return a == b || (a == 45 && b == 95) || (a == 95 && b == 45);
	};
	std::ifstream modules(db);
	std::string line;
	while(std::getline(modules, line))
	{
		// Only attempt the prefix comparison when the line can hold the name
		bool shorter = module.length() <= line.length();
		if(shorter && std::equal(module.begin(), module.end(), line.begin(), comparator))
		{
			bool result = true;
			if(!strict)
			{
				// Non-strict: presence alone is success
				falco_logger::log(LOG_INFO, "Kernel module found: true (not strict)\n");
				modules.close();
				return result;
			}
			// Strict mode: split the /proc/modules line into whitespace columns
			std::istringstream iss(line);
			std::vector<std::string> cols(std::istream_iterator<std::string>{iss}, std::istream_iterator<std::string>());
			// Check the module's number of instances - ie., whether it is loaded or not
			auto ninstances = cols.at(2);
			result = result && std::stoi(ninstances) > 0;
			// Check the module's load state
			auto state = cols.at(4);
			std::transform(state.begin(), state.end(), state.begin(), ::tolower);
			result = result && (state == module_state_live);
			if(verbose)
			{
				falco_logger::log(LOG_INFO, "Kernel module instances: " + ninstances + "\n");
				falco_logger::log(LOG_INFO, "Kernel module load state: " + state + "\n");
			}
			// Check the module's taint state (7th column, only present when tainted)
			if(cols.size() > 6)
			{
				auto taint = cols.at(6);
				auto died = taint.find(taint_die) != std::string::npos;
				auto warn = taint.find(taint_warn) != std::string::npos;
				auto unloaded = taint.find(taint_forced_rmmod) != std::string::npos;
				result = result && !died && !warn && !unloaded;
				if(verbose)
				{
					// Strip the surrounding parentheses before logging the flags
					taint.erase(0, taint.find_first_not_of('('));
					taint.erase(taint.find_last_not_of(')') + 1);
					falco_logger::log(LOG_INFO, "Kernel module taint state: " + taint + "\n");
					std::ostringstream message;
					message << std::boolalpha << "Kernel module presence: " << result << "\n";
					falco_logger::log(LOG_INFO, message.str());
				}
			}
			modules.close();
			return result;
		}
	}
	modules.close();
	return false;
}
// Try to load the falco kernel module via modprobe, discarding its output.
// Returns true on success, false (after logging) when modprobe fails.
bool ins_module()
{
	int rc = system("modprobe " PROBE_NAME " > /dev/null 2> /dev/null");
	if(rc == 0)
	{
		return true;
	}
	// todo > fallback to a custom directory where to look for the module using `modprobe -d build/driver`
	falco_logger::log(LOG_ERR, "Unable to load the module.\n");
	return false;
}
// Predicate fed to retry(): another attempt is needed only when the module
// is absent AND our own insertion attempt fails too (short-circuit keeps
// ins_module() from running when the module is already present).
bool module_predicate(bool has_module)
{
	return !has_module && !ins_module();
}
} // namespace utils

View File

@@ -1,37 +0,0 @@
/*
Copyright (C) 2016-2019 Draios Inc dba Sysdig.
This file is part of falco.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include <string>
namespace utils
{
// Path of the kernel's loaded-modules table, scanned by has_module()
const std::string db("/proc/modules");
// Name of the falco kernel module; PROBE_NAME comes from the build configuration
const std::string module(PROBE_NAME);
// /proc/modules load-state value of a fully loaded module
const std::string module_state_live("live");
// Module's taint state constants
// see: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/panic.c#n351
const std::string taint_die("D");
const std::string taint_forced_rmmod("R");
const std::string taint_warn("W");
// Whether the module appears in /proc/modules (strict: also loaded and untainted)
bool has_module(bool verbose, bool strict);
// Try to load the module via modprobe; true on success
bool ins_module();
// retry() predicate: retry only when the module is absent and ins_module() failed
bool module_predicate(bool has_module);
} // namespace utils

View File

@@ -1,85 +0,0 @@
/*
Copyright (C) 2016-2019 Draios Inc dba Sysdig.
This file is part of falco.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "logger.h"
#include <algorithm>
#include <type_traits>
#include <chrono>
#include <iostream>
#include <thread>
// Backports of C++14 alias templates for toolchains building as C++11.
// NOTE(review): the previous version left the names unqualified (relying on a
// `using namespace std` pulled in by another header) and guarded result_of_t
// with `__cplusplus != 201402L || __cplusplus != 201703L`, which is a
// tautology — it defined the alias unconditionally, shadowing the standard
// one on C++14+. All three aliases now share the same C++14 guard.
#if __cplusplus < 201402L
template< class T >
using decay_t = typename std::decay<T>::type;

template< bool B, class T = void >
using enable_if_t = typename std::enable_if<B,T>::type;

// std::result_of expects a single function type F(Args...), not <F, Args...>
template< class F >
using result_of_t = typename std::result_of<F>::type;
#endif
namespace utils
{
// Invoke `callable` with `args`, retrying up to `max_retries` additional
// times while `retriable(result)` reports the result as worth retrying.
// Between attempts sleep with exponential backoff: the delay starts at
// `initial_delay_ms`, doubles each retry, and is capped at `max_backoff_ms`
// (no sleep at all when `initial_delay_ms` is 0).
// Returns the last result produced by `callable`.
// NOTE(review): `args` are forwarded on every attempt; a second attempt can
// observe values moved-from by the first — callers must pass re-usable
// arguments. Confirm against the call sites.
template<
	typename Predicate,
	typename Callable,
	typename... Args,
	// figure out the callable return type
	typename R = decay_t<result_of_t<Callable &(Args...)>>,
	// require that Predicate is actually a Predicate
	enable_if_t<std::is_convertible<result_of_t<Predicate &(R)>, bool>::value, int> = 0>
R retry(int max_retries,
	uint64_t initial_delay_ms,
	uint64_t max_backoff_ms,
	Predicate &&retriable,
	Callable &&callable,
	Args &&... args)
{
	int retries = 0;
	while(true)
	{
		falco_logger::log(LOG_INFO, "Retry no.: " + std::to_string(retries) + "\n");
		// Keep the callable's actual return type instead of forcing it to
		// bool, so retry() really returns the R its signature promises and
		// the predicate sees the real result.
		R result = callable(std::forward<Args>(args)...);
		if(!retriable(result))
		{
			return result;
		}
		if(retries >= max_retries)
		{
			return result;
		}
		uint64_t delay = 0;
		if(initial_delay_ms > 0)
		{
			// Exponential backoff capped at max_backoff_ms; also guard the
			// shift so a large retry count cannot overflow (UB for shifts
			// >= the operand width).
			uint64_t backoff = (retries < 64) ? (initial_delay_ms << retries) : max_backoff_ms;
			delay = std::min(backoff, max_backoff_ms);
		}
		std::ostringstream message;
		message << "Waiting " << delay << "ms ... \n";
		falco_logger::log(LOG_INFO, message.str());
		// Blocking for `delay` ms
		std::this_thread::sleep_for(std::chrono::milliseconds(delay));
		retries++;
	}
}
}

View File

@@ -1,34 +0,0 @@
/*
Copyright (C) 2016-2019 Draios Inc dba Sysdig.
This file is part of falco.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "timer.h"
#include <chrono>
namespace utils
{
// Restart the stopwatch: subsequent elapsed-time queries measure from now.
void timer::reset()
{
	start = clock::now();
}

// Whole seconds elapsed since the last reset() (sub-second part truncated).
unsigned long long timer::seconds_elapsed() const
{
	const auto elapsed = clock::now() - start;
	return std::chrono::duration_cast<seconds>(elapsed).count();
}
} // namespace utils

View File

@@ -1,37 +0,0 @@
/*
Copyright (C) 2016-2019 Draios Inc dba Sysdig.
This file is part of falco.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "logger.h"
#include <chrono>
namespace utils
{
// Simple monotonic stopwatch: call reset() to (re)start, then
// seconds_elapsed() to read how long ago that was.
struct timer
{
	// steady_clock is monotonic, so readings are immune to wall-clock changes
	typedef std::chrono::steady_clock clock;
	typedef std::chrono::seconds seconds;
	// (Re)start the measurement from the current instant
	void reset();
	// Whole seconds since the last reset(); truncates any sub-second remainder
	unsigned long long seconds_elapsed() const;
private:
	// Instant recorded by the last reset() (clock epoch until first reset)
	clock::time_point start;
};
} // namespace utils

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
#!/bin/sh
set -eu -o pipefail
set -euo pipefail
SOURCE_DIR=$1
OPENSSL=../../openssl-prefix/src/openssl/target/bin/openssl
@@ -11,13 +11,13 @@ if ! command -v ${OPENSSL} version > /dev/null 2>&1; then
fi
NEW_CHECKSUM=$(./falco --list -N | ${OPENSSL} dgst -sha256 | awk '{print $2}')
CUR_CHECKSUM=$(grep FALCO_FIELDS_CHECKSUM "${SOURCE_DIR}/userspace/engine/falco_engine_version.h" | awk '{print $3}' | sed -e 's/"//g')
CUR_CHECKSUM=$(grep FALCO_FIELDS_CHECKSUM ${SOURCE_DIR}/userspace/engine/falco_engine_version.h | awk '{print $3}' | sed -e 's/"//g')
if [ "$NEW_CHECKSUM" != "$CUR_CHECKSUM" ]; then
if [ $NEW_CHECKSUM != $CUR_CHECKSUM ]; then
echo "Set of fields supported by falco/sysdig libraries has changed (new checksum $NEW_CHECKSUM != old checksum $CUR_CHECKSUM)."
echo "Update checksum and/or version in falco_engine_version.h."
exit 1
fi
exit 0
exit 0

View File

@@ -44,31 +44,16 @@ bool k8s_audit_handler::accept_data(falco_engine *engine,
std::list<json_event> jevts;
json j;
try
{
try {
j = json::parse(data);
}
catch(json::parse_error &e)
{
errstr = string("Could not parse data: ") + e.what();
return false;
}
catch(json::out_of_range &e)
catch (json::parse_error& e)
{
errstr = string("Could not parse data: ") + e.what();
return false;
}
bool ok;
try
{
ok = engine->parse_k8s_audit_json(j, jevts);
}
catch(json::type_error &e)
{
ok = false;
}
if(!ok)
if(!engine->parse_k8s_audit_json(j, jevts))
{
errstr = string("Data not recognized as a k8s audit event");
return false;
@@ -175,6 +160,12 @@ void falco_webserver::init(falco_configuration *config,
m_outputs = outputs;
}
// Local stand-in for std::make_unique (only standard since C++14):
// perfect-forwards the constructor arguments into a heap-allocated T
// owned by a unique_ptr.
template<typename T, typename ...Args>
std::unique_ptr<T> make_unique(Args&& ...params)
{
	return std::unique_ptr<T>(new T(std::forward<Args>(params)...));
}
void falco_webserver::start()
{
if(m_server)

View File

@@ -25,14 +25,6 @@ limitations under the License.
#include "falco_engine.h"
#include "falco_outputs.h"
#if __cplusplus < 201402L
template<typename T, typename... Ts>
std::unique_ptr<T> make_unique(Ts&&... params)
{
return std::unique_ptr<T>(new T(std::forward<Ts>(params)...));
}
#endif
class k8s_audit_handler : public CivetHandler
{
public: