Merge pull request #101 from draios/event-specific-filters

Event specific filters
This commit is contained in:
Mark Stemm 2016-07-18 17:56:50 -07:00 committed by GitHub
commit 7a43007e0d
16 changed files with 964 additions and 41 deletions

View File

@ -224,7 +224,7 @@
- rule: db_program_spawned_process
desc: a database-server related program spawned a new process other than itself. This shouldn't occur and is a follow on from some SQL injection attacks.
condition: proc.pname in (db_server_binaries) and not proc.name in (db_server_binaries) and spawned_process
condition: proc.pname in (db_server_binaries) and spawned_process and not proc.name in (db_server_binaries)
output: "Database-related program spawned process other than itself (user=%user.name program=%proc.cmdline parent=%proc.pname)"
priority: WARNING
@ -264,7 +264,7 @@
- rule: run_shell_untrusted
desc: an attempt to spawn a shell by a non-shell program. Exceptions are made for trusted binaries.
condition: not container and proc.name = bash and spawned_process and proc.pname exists and not proc.pname in (cron_binaries, bash, sshd, sudo, docker, su, tmux, screen, emacs, systemd, login, flock, fbash, nginx, monit, supervisord, dragent)
condition: spawned_process and not container and proc.name = bash and proc.pname exists and not proc.pname in (cron_binaries, bash, sshd, sudo, docker, su, tmux, screen, emacs, systemd, login, flock, fbash, nginx, monit, supervisord, dragent)
output: "Shell spawned by untrusted binary (user=%user.name shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline)"
priority: WARNING
@ -281,7 +281,7 @@
- rule: run_shell_in_container
desc: a shell was spawned by a non-shell program in a container. Container entrypoints are excluded.
condition: container and proc.name = bash and spawned_process and proc.pname exists and not proc.pname in (sh, bash, docker)
condition: spawned_process and container and proc.name = bash and proc.pname exists and not proc.pname in (sh, bash, docker)
output: "Shell spawned in a container other than entrypoint (user=%user.name container_id=%container.id container_name=%container.name shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline)"
priority: WARNING
@ -317,7 +317,7 @@
# (we may need to add additional checks against false positives, see: https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153)
- rule: create_files_below_dev
desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev.
condition: (evt.type = creat or evt.arg.flags contains O_CREAT) and proc.name != blkid and fd.directory = /dev and not fd.name in (/dev/null,/dev/stdin,/dev/stdout,/dev/stderr,/dev/tty)
condition: (evt.type = creat or (evt.type = open and evt.arg.flags contains O_CREAT)) and proc.name != blkid and fd.directory = /dev and not fd.name in (/dev/null,/dev/stdin,/dev/stdout,/dev/stderr,/dev/tty)
output: "File created below /dev by untrusted program (user=%user.name command=%proc.cmdline file=%fd.name)"
priority: WARNING

9
test/cpu_monitor.sh Normal file
View File

@ -0,0 +1,9 @@
#!/bin/bash
# cpu_monitor.sh — sample the CPU usage of a process and append JSON records.
#
# Usage: cpu_monitor.sh <pid> <benchmark> <variant> <results-file> <interval>
#
# Watches <pid> with top in batch mode every <interval> seconds, keeps only
# falco/sysdig lines, and appends one JSON object per sample
# ({sample, benchmark, variant, cpu_usage}) to <results-file>.
SUBJ_PID=$1
BENCHMARK=$2
VARIANT=$3
RESULTS_FILE=$4
CPU_INTERVAL=$5
# Fix: quote all expansions so empty or space-containing arguments don't
# break option parsing or get word-split.
# -b: batch mode, -d: sampling delay, -p: restrict to the monitored pid.
# --line-buffered plus awk's fflush() keep the pipeline streaming, so partial
# results survive if the monitor is killed mid-run. $9 is top's %CPU column.
top -d "$CPU_INTERVAL" -b -p "$SUBJ_PID" | grep -E '(falco|sysdig)' --line-buffered | awk -v benchmark="$BENCHMARK" -v variant="$VARIANT" '{printf("{\"sample\": %d, \"benchmark\": \"%s\", \"variant\": \"%s\", \"cpu_usage\": %s},\n", NR, benchmark, variant, $9); fflush();}' >> "$RESULTS_FILE"

BIN
test/empty.scap Normal file

Binary file not shown.

View File

@ -0,0 +1,186 @@
# Test fixture: rules that exercise the rule compiler's evt.type extraction
# and its warnings. Each rule's name describes the evt.type pattern under
# test; falco_tests.yaml.in lists which rules are expected to produce
# warnings and the event types each rule should be associated with.
# The conditions are intentionally artificial -- only their structure matters.
- rule: no_warnings
desc: Rule with no warnings
condition: evt.type=execve
output: "None"
priority: WARNING
- rule: no_evttype
desc: No evttype at all
condition: proc.name=foo
output: "None"
priority: WARNING
- rule: evttype_not_equals
desc: Using != for event type
condition: evt.type!=execve
output: "None"
priority: WARNING
- rule: leading_not
desc: condition starts with not
condition: not evt.type=execve
output: "None"
priority: WARNING
- rule: not_equals_after_evttype
desc: != after evt.type, not affecting results
condition: evt.type=execve and proc.name!=foo
output: "None"
priority: WARNING
- rule: not_after_evttype
desc: not operator after evt.type, not affecting results
condition: evt.type=execve and not proc.name=foo
output: "None"
priority: WARNING
- rule: leading_trailing_evttypes
desc: evttype at beginning and end
condition: evt.type=execve and proc.name=foo or evt.type=open
output: "None"
priority: WARNING
- rule: leading_multtrailing_evttypes
desc: one evttype at beginning, multiple at end
condition: evt.type=execve and proc.name=foo or evt.type=open or evt.type=connect
output: "None"
priority: WARNING
- rule: leading_multtrailing_evttypes_using_in
desc: one evttype at beginning, multiple at end, using in
condition: evt.type=execve and proc.name=foo or evt.type in (open, connect)
output: "None"
priority: WARNING
- rule: not_equals_at_end
desc: not_equals at final evttype
condition: evt.type=execve and proc.name=foo or evt.type=open or evt.type!=connect
output: "None"
priority: WARNING
- rule: not_at_end
desc: not operator for final evttype
condition: evt.type=execve and proc.name=foo or evt.type=open or not evt.type=connect
output: "None"
priority: WARNING
- rule: not_before_trailing_evttype
desc: a not before a trailing event type
condition: evt.type=execve and not proc.name=foo or evt.type=open
output: "None"
priority: WARNING
- rule: not_equals_before_trailing_evttype
desc: a != before a trailing event type
condition: evt.type=execve and proc.name!=foo or evt.type=open
output: "None"
priority: WARNING
- rule: not_equals_and_not
desc: both != and not before event types
condition: evt.type=execve and proc.name!=foo or evt.type=open or not evt.type=connect
output: "None"
priority: WARNING
- rule: not_equals_before_in
desc: != before an in with event types
condition: evt.type=execve and proc.name!=foo or evt.type in (open, connect)
output: "None"
priority: WARNING
- rule: not_before_in
desc: a not before an in with event types
condition: evt.type=execve and not proc.name=foo or evt.type in (open, connect)
output: "None"
priority: WARNING
- rule: not_in_before_in
desc: a not with in before an in with event types
condition: evt.type=execve and not proc.name in (foo, bar) or evt.type in (open, connect)
output: "None"
priority: WARNING
- rule: evttype_in
desc: using in for event types
condition: evt.type in (execve, open)
output: "None"
priority: WARNING
- rule: evttype_in_plus_trailing
desc: using in for event types and a trailing evttype
condition: evt.type in (execve, open) and proc.name=foo or evt.type=connect
output: "None"
priority: WARNING
- rule: leading_in_not_equals_before_evttype
desc: initial in() for event types, then a != before an additional event type
condition: evt.type in (execve, open) and proc.name!=foo or evt.type=connect
output: "None"
priority: WARNING
- rule: leading_in_not_equals_at_evttype
desc: initial in() for event types, then a != with an additional event type
condition: evt.type in (execve, open) or evt.type!=connect
output: "None"
priority: WARNING
- rule: not_with_evttypes
desc: not in for event types
condition: not evt.type in (execve, open)
output: "None"
priority: WARNING
- rule: not_with_evttypes_addl
desc: not in for event types, and an additional event type
condition: not evt.type in (execve, open) or evt.type=connect
output: "None"
priority: WARNING
- rule: not_equals_before_evttype
desc: != before any event type
condition: proc.name!=foo and evt.type=execve
output: "None"
priority: WARNING
- rule: not_equals_before_in_evttype
desc: != before any event type using in
condition: proc.name!=foo and evt.type in (execve, open)
output: "None"
priority: WARNING
- rule: not_before_evttype
desc: not operator before any event type
condition: not proc.name=foo and evt.type=execve
output: "None"
priority: WARNING
- rule: not_before_evttype_using_in
desc: not operator before any event type using in
condition: not proc.name=foo and evt.type in (execve, open)
output: "None"
priority: WARNING
- rule: repeated_evttypes
desc: event types appearing multiple times
condition: evt.type=open or evt.type=open
output: "None"
priority: WARNING
- rule: repeated_evttypes_with_in
desc: event types appearing multiple times with in
condition: evt.type in (open, open)
output: "None"
priority: WARNING
- rule: repeated_evttypes_with_separate_in
desc: event types appearing multiple times with separate ins
condition: evt.type in (open) or evt.type in (open, open)
output: "None"
priority: WARNING
- rule: repeated_evttypes_with_mix
desc: event types appearing multiple times with mix of = and in
condition: evt.type=open or evt.type in (open, open)
output: "None"
priority: WARNING

View File

@ -3,6 +3,7 @@
import os
import re
import json
import sets
from avocado import Test
from avocado.utils import process
@ -16,9 +17,34 @@ class FalcoTest(Test):
"""
self.falcodir = self.params.get('falcodir', '/', default=os.path.join(self.basedir, '../build'))
self.should_detect = self.params.get('detect', '*')
self.should_detect = self.params.get('detect', '*', default=False)
self.trace_file = self.params.get('trace_file', '*')
self.json_output = self.params.get('json_output', '*')
if not os.path.isabs(self.trace_file):
self.trace_file = os.path.join(self.basedir, self.trace_file)
self.json_output = self.params.get('json_output', '*', default=False)
self.rules_file = self.params.get('rules_file', '*', default=os.path.join(self.basedir, '../rules/falco_rules.yaml'))
if not os.path.isabs(self.rules_file):
self.rules_file = os.path.join(self.basedir, self.rules_file)
self.rules_warning = self.params.get('rules_warning', '*', default=False)
if self.rules_warning == False:
self.rules_warning = sets.Set()
else:
self.rules_warning = sets.Set(self.rules_warning)
# Maps from rule name to set of evttypes
self.rules_events = self.params.get('rules_events', '*', default=False)
if self.rules_events == False:
self.rules_events = {}
else:
events = {}
for item in self.rules_events:
for item2 in item:
events[item2[0]] = sets.Set(item2[1])
self.rules_events = events
if self.should_detect:
self.detect_level = self.params.get('detect_level', '*')
@ -33,21 +59,38 @@ class FalcoTest(Test):
self.str_variant = self.trace_file
def test(self):
self.log.info("Trace file %s", self.trace_file)
def check_rules_warnings(self, res):
# Run the provided trace file though falco
cmd = '{}/userspace/falco/falco -r {}/../rules/falco_rules.yaml -c {}/../falco.yaml -e {} -o json_output={}'.format(
self.falcodir, self.falcodir, self.falcodir, self.trace_file, self.json_output)
found_warning = sets.Set()
self.falco_proc = process.SubProcess(cmd)
for match in re.finditer('Rule ([^:]+): warning \(([^)]+)\):', res.stderr):
rule = match.group(1)
warning = match.group(2)
found_warning.add(rule)
res = self.falco_proc.run(timeout=180, sig=9)
self.log.debug("Expected warning rules: {}".format(self.rules_warning))
self.log.debug("Actual warning rules: {}".format(found_warning))
if res.exit_status != 0:
self.error("Falco command \"{}\" exited with non-zero return value {}".format(
cmd, res.exit_status))
if found_warning != self.rules_warning:
self.fail("Expected rules with warnings {} does not match actual rules with warnings {}".format(self.rules_warning, found_warning))
def check_rules_events(self, res):
found_events = {}
for match in re.finditer('Event types for rule ([^:]+): (\S+)', res.stderr):
rule = match.group(1)
events = sets.Set(match.group(2).split(","))
found_events[rule] = events
self.log.debug("Expected events for rules: {}".format(self.rules_events))
self.log.debug("Actual events for rules: {}".format(found_events))
for rule in found_events.keys():
if found_events.get(rule) != self.rules_events.get(rule):
self.fail("rule {}: expected events {} differs from actual events {}".format(rule, self.rules_events.get(rule), found_events.get(rule)))
def check_detections(self, res):
# Get the number of events detected.
match = re.search('Events detected: (\d+)', res.stdout)
if match is None:
@ -73,6 +116,7 @@ class FalcoTest(Test):
if not events_detected > 0:
self.fail("Detected {} events at level {} when should have detected > 0".format(events_detected, self.detect_level))
def check_json_output(self, res):
if self.json_output:
# Just verify that any lines starting with '{' are valid json objects.
# Doesn't do any deep inspection of the contents.
@ -82,6 +126,27 @@ class FalcoTest(Test):
for attr in ['time', 'rule', 'priority', 'output']:
if not attr in obj:
self.fail("Falco JSON object {} does not contain property \"{}\"".format(line, attr))
def test(self):
self.log.info("Trace file %s", self.trace_file)
# Run the provided trace file though falco
cmd = '{}/userspace/falco/falco -r {} -c {}/../falco.yaml -e {} -o json_output={} -v'.format(
self.falcodir, self.rules_file, self.falcodir, self.trace_file, self.json_output)
self.falco_proc = process.SubProcess(cmd)
res = self.falco_proc.run(timeout=180, sig=9)
if res.exit_status != 0:
self.error("Falco command \"{}\" exited with non-zero return value {}".format(
cmd, res.exit_status))
self.check_rules_warnings(res)
if len(self.rules_events) > 0:
self.check_rules_events(res)
self.check_detections(res)
self.check_json_output(res)
pass

62
test/falco_tests.yaml.in Normal file
View File

@ -0,0 +1,62 @@
# Avocado multiplex file describing falco regression tests. Each entry names
# a trace file to replay plus the expected outcome: whether events should be
# detected, which rules file to load, which rules should emit compiler
# warnings (rules_warning), and the event types each rule should map to
# (rules_events). falco_test.py consumes these parameters.
trace_files: !mux
builtin_rules_no_warnings:
detect: False
trace_file: empty.scap
rules_warning: False
test_warnings:
detect: False
trace_file: empty.scap
rules_file: falco_rules_warnings.yaml
rules_warning:
- no_evttype
- evttype_not_equals
- leading_not
- not_equals_at_end
- not_at_end
- not_before_trailing_evttype
- not_equals_before_trailing_evttype
- not_equals_and_not
- not_equals_before_in
- not_before_in
- not_in_before_in
- leading_in_not_equals_before_evttype
- leading_in_not_equals_at_evttype
- not_with_evttypes
- not_with_evttypes_addl
- not_equals_before_evttype
- not_equals_before_in_evttype
- not_before_evttype
- not_before_evttype_using_in
rules_events:
- no_warnings: [execve]
- no_evttype: [all]
- evttype_not_equals: [all]
- leading_not: [all]
- not_equals_after_evttype: [execve]
- not_after_evttype: [execve]
- leading_trailing_evttypes: [execve,open]
- leading_multtrailing_evttypes: [connect,execve,open]
- leading_multtrailing_evttypes_using_in: [connect,execve,open]
- not_equals_at_end: [all]
- not_at_end: [all]
- not_before_trailing_evttype: [all]
- not_equals_before_trailing_evttype: [all]
- not_equals_and_not: [all]
- not_equals_before_in: [all]
- not_before_in: [all]
- not_in_before_in: [all]
- evttype_in: [execve,open]
- evttype_in_plus_trailing: [connect,execve,open]
- leading_in_not_equals_before_evttype: [all]
- leading_in_not_equals_at_evttype: [all]
- not_with_evttypes: [all]
- not_with_evttypes_addl: [all]
- not_equals_before_evttype: [all]
- not_equals_before_in_evttype: [all]
- not_before_evttype: [all]
- not_before_evttype_using_in: [all]
- repeated_evttypes: [open]
- repeated_evttypes_with_in: [open]
- repeated_evttypes_with_separate_in: [open]
- repeated_evttypes_with_mix: [open]

40
test/plot-live.r Normal file
View File

@ -0,0 +1,40 @@
require(jsonlite)
library(ggplot2)
library(GetoptLong)
# Locate the directory containing this script so the default results file can
# be resolved relative to it, regardless of the current working directory.
initial.options <- commandArgs(trailingOnly = FALSE)
file.arg.name <- "--file="
script.name <- sub(file.arg.name, "", initial.options[grep(file.arg.name, initial.options)])
script.basename <- dirname(script.name)
if (substr(script.basename, 1, 1) != '/') {
script.basename = paste(getwd(), script.basename, sep='/')
}
# Defaults; each is overridable via the GetoptLong options below.
results = paste(script.basename, "results.json", sep='/')
output = "./output.png"
GetoptLong(
"results=s", "Path to results file",
"benchmark=s", "Benchmark from results file to graph",
"variant=s@", "Variant(s) to include in graph. Can be specified multiple times",
"output=s", "Output graph file"
)
res <- fromJSON(results, flatten=TRUE)
# Keep only the rows for the requested benchmark and variant(s).
res2 = res[res$benchmark == benchmark & res$variant %in% variant,]
# Fix: the original ended the chain at ggtitle() and left theme() as a dead
# standalone statement, so the legend position was never applied; it also let
# ggsave() fall back to last_plot(). Chain theme() with '+' and pass the plot
# object to ggsave() explicitly.
plot <- ggplot(data=res2, aes(x=sample, y=cpu_usage, group=variant, colour=variant)) +
geom_line() +
ylab("CPU Usage (%)") +
xlab("Time") +
ggtitle(sprintf("Falco/Sysdig CPU Usage: %s", benchmark)) +
theme(legend.position=c(.2, .88))
print(paste("Writing graph to", output, sep=" "))
ggsave(file=output, plot=plot)

35
test/plot-traces.r Normal file
View File

@ -0,0 +1,35 @@
require(jsonlite)
library(ggplot2)
library(reshape)
# One-off plotting script: reads the JSON timing records written by
# run_performance_tests.sh and writes three bar charts comparing
# configurations per trace file.
# NOTE(review): input and output paths are hard-coded to the author's
# machine -- adjust before reuse.
res <- fromJSON("/home/mstemm/results.txt", flatten=TRUE)
# Mean wall-clock time per config, one facet row per trace file.
plot <- ggplot(data=res, aes(x=config, y=elapsed.real)) +
geom_bar(stat = "summary", fun.y = "mean") +
coord_flip() +
facet_grid(shortfile ~ .) +
ylab("Wall Clock Time (sec)") +
xlab("Trace File/Program")
ggsave(file="/mnt/sf_mstemm/res-real.png")
# Mean user time per config.
plot <- ggplot(data=res, aes(x=config, y=elapsed.user)) +
geom_bar(stat = "summary", fun.y = "mean") +
coord_flip() +
facet_grid(shortfile ~ .) +
ylab("User Time (sec)") +
xlab("Trace File/Program")
ggsave(file="/mnt/sf_mstemm/res-user.png")
# Melt sys/user times to long form so they stack within a single bar.
res2 <- melt(res, id.vars = c("config", "shortfile"), measure.vars = c("elapsed.sys", "elapsed.user"))
plot <- ggplot(data=res2, aes(x=config, y=value, fill=variable, order=variable)) +
geom_bar(stat = "summary", fun.y = "mean") +
coord_flip() +
facet_grid(shortfile ~ .) +
ylab("User/System Time (sec)") +
xlab("Trace File/Program")
ggsave(file="/mnt/sf_mstemm/res-sys-user.png")

View File

@ -0,0 +1,316 @@
#!/bin/bash
#set -x
# On interrupt/termination, stop any subject program and cpu monitor that
# this script started before exiting.
trap "cleanup; exit" SIGHUP SIGINT SIGTERM
function download_trace_files() {
# Fetch the perf trace bundle from S3 into $TRACEDIR and unpack it,
# aborting the whole script on any failure (the subshell's || exit 1).
(mkdir -p $TRACEDIR && rm -rf $TRACEDIR/traces-perf && curl -fo $TRACEDIR/traces-perf.zip https://s3.amazonaws.com/download.draios.com/falco-tests/traces-perf.zip && unzip -d $TRACEDIR $TRACEDIR/traces-perf.zip && rm -f $TRACEDIR/traces-perf.zip) || exit 1
}
function time_cmd() {
# Run command $1 against trace file $2 five times, appending one JSON
# timing record per run (wall/user/sys seconds, tagged with benchmark,
# file and $VARIANT) to $RESULTS_FILE; the command's own output goes to
# $OUTPUT_FILE.
cmd="$1"
file="$2"
# Benchmark name is the trace file's basename without the .scap suffix.
benchmark=`basename $file .scap`
echo -n "$benchmark: "
for i in `seq 1 5`; do
echo -n "$i "
time=`date --iso-8601=sec`
# /usr/bin/time (not the shell builtin) supports -a/-o/--format; the
# format string emits a trailing-comma JSON object per run.
/usr/bin/time -a -o $RESULTS_FILE --format "{\"time\": \"$time\", \"benchmark\": \"$benchmark\", \"file\": \"$file\", \"variant\": \"$VARIANT\", \"elapsed\": {\"real\": %e, \"user\": %U, \"sys\": %S}}," $cmd >> $OUTPUT_FILE 2>&1
done
echo ""
}
function run_falco_on() {
    # Time falco (from the build under $ROOT) replaying the given trace
    # file, with stdout alert output disabled so I/O doesn't skew timings.
    local trace="$1"
    local falco_cmd="$ROOT/userspace/falco/falco -c $ROOT/../falco.yaml -r $ROOT/../rules/falco_rules.yaml --option=stdout_output.enabled=false -e $trace"
    time_cmd "$falco_cmd" "$trace"
}
function run_sysdig_on() {
    # Time sysdig replaying the given trace with a filter that matches no
    # events (evt.type=none), so only event-processing cost is measured.
    local trace="$1"
    local sysdig_cmd="$ROOT/userspace/sysdig/sysdig -N -z -r $trace evt.type=none"
    time_cmd "$sysdig_cmd" "$trace"
}
function run_trace() {
    # Replay trace file(s) through the subject program. $1 is either a
    # trace name (without .scap) or "all". Traces are downloaded on demand.
    #
    # Fix: quote $TRACEDIR/$trace_file in test expressions and the array
    # expansion, so empty values or paths with spaces don't break [ ] or
    # get word-split.
    if [ ! -e "$TRACEDIR" ]; then
	download_trace_files
    fi
    trace_file="$1"
    if [ "$trace_file" == "all" ]; then
	files=($TRACEDIR/traces-perf/*.scap)
    else
	files=($TRACEDIR/traces-perf/$trace_file.scap)
    fi
    for file in "${files[@]}"; do
	# A $ROOT path mentioning "falco" selects the falco binary;
	# anything else is assumed to be a sysdig build tree.
	if [[ $ROOT == *"falco"* ]]; then
	    run_falco_on "$file"
	else
	    run_sysdig_on "$file"
	fi
    done
}
function start_monitor_cpu_usage() {
# Launch cpu_monitor.sh in its own session (setsid) so cleanup() can kill
# its entire process group. Reads globals: SUBJ_PID, live_test (set by
# callers), VARIANT, RESULTS_FILE, CPU_INTERVAL.
echo " monitoring cpu usage for sysdig/falco program"
setsid bash `dirname $0`/cpu_monitor.sh $SUBJ_PID $live_test $VARIANT $RESULTS_FILE $CPU_INTERVAL &
# Pid of the monitor; used by cleanup() to tear it down.
CPU_PID=$!
sleep 5
}
function start_subject_prog() {
# Start falco or sysdig (picked by whether $ROOT mentions "falco") in the
# background under sudo, then record the actual subject pid in SUBJ_PID.
# $! only gives the sudo wrapper's pid, so the real child is found via ps
# by parent pid after a settling delay.
echo " starting falco/sysdig program"
# Do a blocking sudo command now just to ensure we have a password
sudo bash -c ""
if [[ $ROOT == *"falco"* ]]; then
sudo $ROOT/userspace/falco/falco -c $ROOT/../falco.yaml -r $ROOT/../rules/falco_rules.yaml --option=stdout_output.enabled=false > ./prog-output.txt 2>&1 &
else
sudo $ROOT/userspace/sysdig/sysdig -N -z evt.type=none &
fi
SUDO_PID=$!
sleep 5
SUBJ_PID=`ps -h -o pid --ppid $SUDO_PID`
if [ -z $SUBJ_PID ]; then
echo "Could not find pid of subject program--did it start successfully? Not continuing."
exit 1
fi
}
function run_htop() {
    # Live workload: run htop (2s refresh) inside a detached screen session
    # for 90 seconds, then shut the session down.
    local session="htop-screen"
    screen -S "$session" -d -m /usr/bin/htop -d2
    sleep 90
    screen -X -S "$session" quit
}
function run_juttle_examples() {
    # Live workload: bring up the juttle-engine example environment for two
    # minutes, then stop and remove its containers. The compose-file list is
    # shared by all three docker-compose invocations; intentionally left
    # unquoted so it expands into separate -f arguments.
    pushd $SCRIPTDIR/../../juttle-engine/examples
    local compose_files="-f dc-juttle-engine.yml -f aws-cloudwatch/dc-aws-cloudwatch.yml -f elastic-newstracker/dc-elastic.yml -f github-tutorial/dc-elastic.yml -f nginx_logs/dc-nginx-logs.yml -f postgres-diskstats/dc-postgres.yml -f cadvisor-influx/dc-cadvisor-influx.yml"
    docker-compose $compose_files up -d
    sleep 120
    docker-compose $compose_files stop
    docker-compose $compose_files rm -fv
    popd
}
function run_kubernetes_demo() {
# Live workload: bring up the kubernetes demo from the sibling
# infrastructure repo, let it run for 10 minutes, then tear it down.
pushd $SCRIPTDIR/../../infrastructure/test-infrastructures/kubernetes-demo
bash run-local.sh
bash init.sh
sleep 600
# NOTE(review): this stops and removes *all* docker containers on the
# host, not just the demo's -- assumes a dedicated test machine.
docker stop $(docker ps -qa)
docker rm -fv $(docker ps -qa)
popd
}
function run_live_test() {
# Run one live workload ($1: htop, juttle-examples or kube-demo) with the
# subject program and cpu monitor running, then tear everything down.
live_test="$1"
echo "Running live test $live_test"
# htop is short-lived, so sample cpu usage more frequently for it.
case "$live_test" in
htop ) CPU_INTERVAL=2;;
* ) CPU_INTERVAL=10;;
esac
start_subject_prog
start_monitor_cpu_usage
echo " starting live program and waiting for it to finish"
case "$live_test" in
htop ) run_htop ;;
juttle-examples ) run_juttle_examples ;;
kube-demo ) run_kubernetes_demo ;;
* ) usage; cleanup; exit 1 ;;
esac
cleanup
}
function cleanup() {
# Stop whatever was started: the subject program (launched under sudo,
# so killed with sudo) and the cpu monitor, if their pids were recorded.
if [ -n "$SUBJ_PID" ] ; then
echo " stopping falco/sysdig program $SUBJ_PID"
sudo kill $SUBJ_PID
fi
if [ -n "$CPU_PID" ] ; then
echo " stopping cpu monitor program $CPU_PID"
# The monitor was started with setsid; kill its whole process group
# (negative pid) so the top|grep|awk pipeline dies too.
kill -- -$CPU_PID
fi
}
function run_live_tests() {
    # Run one named live test, or every live test when $1 is "all".
    #
    # Fixes: quote "$test" in the comparison (unquoted, an empty or
    # space-containing value breaks [ ]), quote the argument passed down,
    # and add the `function` keyword for consistency with the rest of the
    # file. Interface and behavior are otherwise unchanged.
    test="$1"
    if [ "$test" == "all" ]; then
	tests="htop juttle-examples kube-demo"
    else
	tests=$test
    fi
    # $tests is deliberately unquoted here: word splitting yields one
    # iteration per test name.
    for test in $tests; do
	run_live_test "$test"
    done
}
function run_phoronix_test() {
# Run a single phoronix-test-suite benchmark ($1) with the subject
# program and cpu monitor running, then tear everything down.
live_test="$1"
# Short-running benchmarks get a faster cpu sampling interval.
case "$live_test" in
pts/aio-stress | pts/fs-mark | pts/iozone | pts/network-loopback | pts/nginx | pts/pybench | pts/redis | pts/sqlite | pts/unpack-linux ) CPU_INTERVAL=2;;
* ) CPU_INTERVAL=10;;
esac
echo "Running phoronix test $live_test"
start_subject_prog
start_monitor_cpu_usage
echo " starting phoronix test and waiting for it to finish"
# Tag phoronix results with the variant name and run each test once.
TEST_RESULTS_NAME=$VARIANT FORCE_TIMES_TO_RUN=1 phoronix-test-suite default-run $live_test
cleanup
}
# To install and configure phoronix:
# (redhat instructions, adapt as necessary for ubuntu or other distros)
# - install phoronix: yum install phoronix-test-suite.noarch
# - install dependencies not handled by phoronix: yum install libaio-devel pcre-devel popt-devel glibc-static zlib-devel nc bc
# - fix trivial bugs in tests:
# - edit ~/.phoronix-test-suite/installed-tests/pts/network-loopback-1.0.1/network-loopback line "nc -d -l 9999 > /dev/null &" to "nc -d -l 9999 > /dev/null &"
# - edit ~/.phoronix-test-suite/test-profiles/pts/nginx-1.1.0/test-definition.xml line "<Arguments>-n 500000 -c 100 http://localhost:8088/test.html</Arguments>" to "<Arguments>-n 500000 -c 100 http://127.0.0.1:8088/test.html</Arguments>"
# - phoronix batch-install <test list below>
function run_phoronix_tests() {
    # Run one phoronix test, or the built-in suite when $1 is "all".
    #
    # Fix: quote "$test" in the comparison so an empty or space-containing
    # value doesn't break the [ ] test; quote the argument passed down.
    test="$1"
    if [ "$test" == "all" ]; then
	tests="pts/aio-stress pts/apache pts/blogbench pts/compilebench pts/dbench pts/fio pts/fs-mark pts/iozone pts/network-loopback pts/nginx pts/pgbench pts/phpbench pts/postmark pts/pybench pts/redis pts/sqlite pts/unpack-linux"
    else
	tests=$test
    fi
    # $tests unquoted on purpose: word splitting yields one test per loop.
    for test in $tests; do
	run_phoronix_test "$test"
    done
}
run_tests() {
# Dispatch on $TEST, formatted as <kind>:<arg>, e.g. trace:all,
# live:htop, phoronix:pts/redis. PARTS[0] is the kind, PARTS[1] the arg.
IFS=':' read -ra PARTS <<< "$TEST"
case "${PARTS[0]}" in
trace ) run_trace "${PARTS[1]}" ;;
live ) run_live_tests "${PARTS[1]}" ;;
phoronix ) run_phoronix_tests "${PARTS[1]}" ;;
* ) usage; exit 1 ;;
esac
}
usage() {
# Print command line help to stdout.
echo "Usage: $0 [options]"
echo ""
echo "Options:"
echo " -h/--help: show this help"
echo " -v/--variant: a variant name to attach to this set of test results"
echo " -r/--root: root directory containing falco/sysdig binaries (i.e. where you ran 'cmake')"
echo " -R/--results: append test results to this file"
echo " -o/--output: append program output to this file"
echo " -t/--test: test to run. Argument has the following format:"
echo " trace:<trace>: read the specified trace file."
echo " trace:all means run all traces"
echo " live:<live test>: run the specified live test."
echo " live:all means run all live tests."
echo " possible live tests:"
echo " live:htop: run htop -d2"
echo " live:kube-demo: run kubernetes demo from infrastructure repo"
echo " live:juttle-examples: run a juttle demo environment based on docker-compose"
echo " phoronix:<test>: run the specified phoronix test."
echo " if <test> is not 'all', it is passed directly to the command line of \"phoronix-test-suite run <test>\""
echo " if <test> is 'all', a built-in set of phoronix tests will be chosen and run"
echo " -T/--tracedir: Look for trace files in this directory. If doesn't exist, will download trace files from s3"
}
# Parse command line options. getopt validates and normalizes short/long
# options into a canonical list that the while/case loop below consumes.
# Fix: quote "$0" so a script path containing spaces doesn't break getopt.
OPTS=`getopt -o hv:r:R:o:t:T: --long help,variant:,root:,results:,output:,test:,tracedir: -n "$0" -- "$@"`
if [ $? != 0 ]; then
    echo "Exiting" >&2
    exit 1
fi
eval set -- "$OPTS"
# Defaults; each is overridable via an option below.
VARIANT="falco"
ROOT=`dirname $0`/../build
SCRIPTDIR=`dirname $0`
RESULTS_FILE=`dirname $0`/results.json
OUTPUT_FILE=`dirname $0`/program-output.txt
TEST=trace:all
TRACEDIR=/tmp/falco-perf-traces.$USER
CPU_INTERVAL=10
while true; do
    case "$1" in
	-h | --help ) usage; exit 1;;
	-v | --variant ) VARIANT="$2"; shift 2;;
	-r | --root ) ROOT="$2"; shift 2;;
	-R | --results ) RESULTS_FILE="$2"; shift 2;;
	-o | --output ) OUTPUT_FILE="$2"; shift 2;;
	-t | --test ) TEST="$2"; shift 2;;
	-T | --tracedir ) TRACEDIR="$2"; shift 2;;
	* ) break;;
    esac
done
# Validate required settings.
# Fix: quote the variables inside [ -z ... ] -- unquoted, a value containing
# spaces makes the test a syntax error instead of a validation failure.
if [ -z "$VARIANT" ]; then
    echo "A test variant name must be provided. Not continuing."
    exit 1
fi
if [ -z "$ROOT" ]; then
    echo "A root directory containing a falco/sysdig binary must be provided. Not continuing."
    exit 1
fi
# Resolve ROOT to an absolute, symlink-free path (quoted for the same reason).
ROOT=`realpath "$ROOT"`
if [ -z "$RESULTS_FILE" ]; then
    echo "An output file for test results must be provided. Not continuing."
    exit 1
fi
if [ -z "$OUTPUT_FILE" ]; then
    # Fix: grammar in the user-facing message ("An file" -> "A file").
    echo "A file for program output must be provided. Not continuing."
    exit 1
fi
if [ -z "$TEST" ]; then
    echo "A test must be provided. Not continuing."
    exit 1
fi
run_tests

View File

@ -36,7 +36,7 @@ EOF
}
function prepare_multiplex_file() {
echo "trace_files: !mux" > $MULT_FILE
cp $SCRIPTDIR/falco_tests.yaml.in $MULT_FILE
prepare_multiplex_fileset traces-positive True Warning False
prepare_multiplex_fileset traces-negative False Warning True

View File

@ -55,6 +55,7 @@ static void usage()
" -r <rules_file> Rules file (defaults to value set in configuration file, or /etc/falco_rules.yaml).\n"
" -L Show the name and description of all rules and exit.\n"
" -l <rule> Show the name and description of the rule with name <rule> and exit.\n"
" -v Verbose output.\n"
"\n"
);
}
@ -253,6 +254,7 @@ int falco_init(int argc, char **argv)
string pidfilename = "/var/run/falco.pid";
bool describe_all_rules = false;
string describe_rule = "";
bool verbose = false;
static struct option long_options[] =
{
@ -272,7 +274,7 @@ int falco_init(int argc, char **argv)
// Parse the args
//
while((op = getopt_long(argc, argv,
"c:ho:e:r:dp:Ll:",
"c:ho:e:r:dp:Ll:v",
long_options, &long_index)) != -1)
{
switch(op)
@ -301,6 +303,9 @@ int falco_init(int argc, char **argv)
case 'L':
describe_all_rules = true;
break;
case 'v':
verbose = true;
break;
case 'l':
describe_rule = optarg;
break;
@ -394,11 +399,11 @@ int falco_init(int argc, char **argv)
falco_fields::init(inspector, ls);
falco_logger::init(ls);
falco_rules::init(ls);
inspector->set_drop_event_flags(EF_DROP_FALCO);
rules->load_rules(config.m_rules_filename);
inspector->set_filter(rules->get_filter());
rules->load_rules(config.m_rules_filename, verbose);
falco_logger::log(LOG_INFO, "Parsed rules from file " + config.m_rules_filename + "\n");
if (describe_all_rules)

View File

@ -1,6 +1,13 @@
local parser = require("parser")
local compiler = {}
compiler.verbose = false
function compiler.set_verbose(verbose)
compiler.verbose = verbose
parser.set_verbose(verbose)
end
function map(f, arr)
local res = {}
for i,v in ipairs(arr) do
@ -153,7 +160,103 @@ function check_for_ignored_syscalls_events(ast, filter_type, source)
end
end
parser.traverse_ast(ast, "BinaryRelOp", cb)
parser.traverse_ast(ast, {BinaryRelOp=1}, cb)
end
-- Examine the ast and find the event types for which the rule should
-- run. All evt.type references are added as event types up until the
-- first "!=" binary operator or unary not operator. If no event type
-- checks are found afterward in the rule, the rule is considered
-- optimized and is associated with the event type(s).
--
-- Otherwise, the rule is associated with a 'catchall' category and is
-- run for all event types. (Also, a warning is printed).
--
function get_evttypes(name, ast, source)
local evttypes = {}
local evtnames = {}
local found_event = false
local found_not = false
local found_event_after_not = false
function cb(node)
if node.type == "UnaryBoolOp" then
if node.operator == "not" then
found_not = true
end
else
if node.operator == "!=" then
found_not = true
end
if node.left.type == "FieldName" and node.left.value == "evt.type" then
found_event = true
if found_not then
found_event_after_not = true
end
if node.operator == "in" then
for i, v in ipairs(node.right.elements) do
if v.type == "BareString" then
evtnames[v.value] = 1
for id in string.gmatch(events[v.value], "%S+") do
evttypes[id] = 1
end
end
end
else
if node.right.type == "BareString" then
evtnames[node.right.value] = 1
for id in string.gmatch(events[node.right.value], "%S+") do
evttypes[id] = 1
end
end
end
end
end
end
parser.traverse_ast(ast.filter.value, {BinaryRelOp=1, UnaryBoolOp=1} , cb)
if not found_event then
io.stderr:write("Rule "..name..": warning (no-evttype):\n")
io.stderr:write(source.."\n")
io.stderr:write(" did not contain any evt.type restriction, meaning it will run for all event types.\n")
io.stderr:write(" This has a significant performance penalty. Consider adding an evt.type restriction if possible.\n")
evttypes = {}
evtnames = {}
end
if found_event_after_not then
io.stderr:write("Rule "..name..": warning (trailing-evttype):\n")
io.stderr:write(source.."\n")
io.stderr:write(" does not have all evt.type restrictions at the beginning of the condition,\n")
io.stderr:write(" or uses a negative match (i.e. \"not\"/\"!=\") for some evt.type restriction.\n")
io.stderr:write(" This has a performance penalty, as the rule can not be limited to specific event types.\n")
io.stderr:write(" Consider moving all evt.type restrictions to the beginning of the rule and/or\n")
io.stderr:write(" replacing negative matches with positive matches if possible.\n")
evttypes = {}
evtnames = {}
end
evtnames_only = {}
local num_evtnames = 0
for name, dummy in pairs(evtnames) do
table.insert(evtnames_only, name)
num_evtnames = num_evtnames + 1
end
if num_evtnames == 0 then
table.insert(evtnames_only, "all")
end
table.sort(evtnames_only)
if compiler.verbose then
io.stderr:write("Event types for rule "..name..": "..table.concat(evtnames_only, ",").."\n")
end
return evttypes
end
function compiler.compile_macro(line, list_defs)
@ -179,7 +282,7 @@ end
--[[
Parses a single filter, then expands macros using passed-in table of definitions. Returns resulting AST.
--]]
function compiler.compile_filter(source, macro_defs, list_defs)
function compiler.compile_filter(name, source, macro_defs, list_defs)
for name, items in pairs(list_defs) do
source = string.gsub(source, name, table.concat(items, ", "))
@ -206,7 +309,9 @@ function compiler.compile_filter(source, macro_defs, list_defs)
error("Unexpected top-level AST type: "..ast.type)
end
return ast
evttypes = get_evttypes(name, ast, source)
return ast, evttypes
end

View File

@ -11,6 +11,12 @@
local parser = {}
parser.verbose = false
function parser.set_verbose(verbose)
parser.verbose = verbose
end
local lpeg = require "lpeg"
lpeg.locale(lpeg)
@ -297,33 +303,33 @@ parser.print_ast = print_ast
-- have the signature:
-- cb(ast_node, ctx)
-- ctx is optional.
function traverse_ast(ast, node_type, cb, ctx)
function traverse_ast(ast, node_types, cb, ctx)
local t = ast.type
if t == node_type then
if node_types[t] ~= nil then
cb(ast, ctx)
end
if t == "Rule" then
traverse_ast(ast.filter, node_type, cb, ctx)
traverse_ast(ast.filter, node_types, cb, ctx)
elseif t == "Filter" then
traverse_ast(ast.value, node_type, cb, ctx)
traverse_ast(ast.value, node_types, cb, ctx)
elseif t == "BinaryBoolOp" or t == "BinaryRelOp" then
traverse_ast(ast.left, node_type, cb, ctx)
traverse_ast(ast.right, node_type, cb, ctx)
traverse_ast(ast.left, node_types, cb, ctx)
traverse_ast(ast.right, node_types, cb, ctx)
elseif t == "UnaryRelOp" or t == "UnaryBoolOp" then
traverse_ast(ast.argument, node_type, cb, ctx)
traverse_ast(ast.argument, node_types, cb, ctx)
elseif t == "List" then
for i, v in ipairs(ast.elements) do
traverse_ast(v, node_type, cb, ctx)
traverse_ast(v, node_types, cb, ctx)
end
elseif t == "MacroDef" then
traverse_ast(ast.value, node_type, cb, ctx)
traverse_ast(ast.value, node_types, cb, ctx)
elseif t == "FieldName" or t == "Number" or t == "String" or t == "BareString" or t == "Macro" then
-- do nothing, no traversal needed

View File

@ -117,7 +117,9 @@ end
-- to a rule.
local state = {macros={}, lists={}, filter_ast=nil, rules_by_name={}, n_rules=0, rules_by_idx={}}
function load_rules(filename)
function load_rules(filename, rules_mgr, verbose)
compiler.set_verbose(verbose)
local f = assert(io.open(filename, "r"))
local s = f:read("*all")
@ -169,7 +171,8 @@ function load_rules(filename)
v['level'] = priority(v['priority'])
state.rules_by_name[v['rule']] = v
local filter_ast = compiler.compile_filter(v['condition'], state.macros, state.lists)
local filter_ast, evttypes = compiler.compile_filter(v['rule'], v['condition'],
state.macros, state.lists)
if (filter_ast.type == "Rule") then
state.n_rules = state.n_rules + 1
@ -183,6 +186,11 @@ function load_rules(filename)
-- event.
mark_relational_nodes(filter_ast.filter.value, state.n_rules)
install_filter(filter_ast.filter.value)
-- Pass the filter and event types back up
falco_rules.add_filter(rules_mgr, evttypes)
-- Rule ASTs are merged together into one big AST, with "OR" between each
-- rule.
if (state.filter_ast == nil) then
@ -196,7 +204,6 @@ function load_rules(filename)
end
end
install_filter(state.filter_ast)
io.flush()
end

View File

@ -1,4 +1,5 @@
#include "rules.h"
#include "logger.h"
extern "C" {
#include "lua.h"
@ -6,6 +7,11 @@ extern "C" {
#include "lauxlib.h"
}
// Method table registered with Lua (see falco_rules::init): each entry
// becomes callable from the rule-loading Lua code as falco_rules.<name>().
const static struct luaL_reg ll_falco_rules [] =
{
{"add_filter", &falco_rules::add_filter},
{NULL,NULL}
};
falco_rules::falco_rules(sinsp* inspector, lua_State *ls, string lua_main_filename)
{
@ -17,6 +23,48 @@ falco_rules::falco_rules(sinsp* inspector, lua_State *ls, string lua_main_filena
load_compiler(lua_main_filename);
}
// Expose the ll_falco_rules entries to Lua as a module table named
// "falco_rules", so the rule loader script can call back into C++
// (e.g. falco_rules.add_filter()).
void falco_rules::init(lua_State *ls)
{
luaL_openlib(ls, "falco_rules", ll_falco_rules, 0);
}
// Lua binding for falco_rules.add_filter(rules_mgr, evttypes).
// Expected stack on entry: index -2 holds a light userdata pointing at
// the falco_rules instance, index -1 holds a table whose KEYS are the
// event type numbers for the rule currently being loaded. Collects the
// keys into a list and forwards them to the member add_filter() below.
// Returns 0 (no values pushed back to Lua); throws sinsp_exception on
// malformed arguments.
int falco_rules::add_filter(lua_State *ls)
{
	if (! lua_islightuserdata(ls, -2) ||
	    ! lua_istable(ls, -1))
	{
		falco_logger::log(LOG_ERR, "Invalid arguments passed to add_filter()\n");
		throw sinsp_exception("add_filter error");
	}

	// lua_touserdata is the dedicated accessor for light userdata; the
	// original lua_topointer() returns const void* and required a
	// const-stripping C-style cast.
	falco_rules *rules = static_cast<falco_rules *>(lua_touserdata(ls, -2));

	list<uint32_t> evttypes;

	lua_pushnil(ls);  /* first key */
	while (lua_next(ls, -2) != 0) {
		// key is at index -2, value is at index -1.
		// We only want the keys (the event type ids).
		evttypes.push_back(luaL_checknumber(ls, -2));

		// Remove the value, keep the key for the next lua_next() call.
		lua_pop(ls, 1);
	}

	rules->add_filter(evttypes);

	return 0;
}
void falco_rules::add_filter(list<uint32_t> &evttypes)
{
	// Compiling the current rule's condition left a populated
	// sinsp_filter behind in the lua parser. Fetch it and register it
	// with the inspector, restricted to the given set of event types.
	m_inspector->add_evttype_filter(evttypes, m_lua_parser->get_filter(true));
}
void falco_rules::load_compiler(string lua_main_filename)
{
@ -40,18 +88,47 @@ void falco_rules::load_compiler(string lua_main_filename)
}
}
void falco_rules::load_rules(string rules_filename)
void falco_rules::load_rules(string rules_filename, bool verbose)
{
lua_getglobal(m_ls, m_lua_load_rules.c_str());
if(lua_isfunction(m_ls, -1))
{
// Create a table containing all events, so they can
// be mapped to event ids.
sinsp_evttables* einfo = m_inspector->get_event_info_tables();
const struct ppm_event_info* etable = einfo->m_event_info;
const struct ppm_syscall_desc* stable = einfo->m_syscall_info_table;
map<string,string> events_by_name;
for(uint32_t j = 0; j < PPM_EVENT_MAX; j++)
{
auto it = events_by_name.find(etable[j].name);
if (it == events_by_name.end()) {
events_by_name[etable[j].name] = to_string(j);
} else {
string cur = it->second;
cur += " ";
cur += to_string(j);
events_by_name[etable[j].name] = cur;
}
}
lua_newtable(m_ls);
for( auto kv : events_by_name)
{
lua_pushstring(m_ls, kv.first.c_str());
lua_pushstring(m_ls, kv.second.c_str());
lua_settable(m_ls, -3);
}
lua_setglobal(m_ls, m_lua_events.c_str());
// Create a table containing the syscalls/events that
// are ignored by the kernel module. load_rules will
// return an error if any rule references one of these
// syscalls/events.
sinsp_evttables* einfo = m_inspector->get_event_info_tables();
const struct ppm_event_info* etable = einfo->m_event_info;
const struct ppm_syscall_desc* stable = einfo->m_syscall_info_table;
lua_newtable(m_ls);
@ -82,7 +159,9 @@ void falco_rules::load_rules(string rules_filename)
lua_setglobal(m_ls, m_lua_ignored_syscalls.c_str());
lua_pushstring(m_ls, rules_filename.c_str());
if(lua_pcall(m_ls, 1, 0, 0) != 0)
lua_pushlightuserdata(m_ls, this);
lua_pushboolean(m_ls, (verbose ? 1 : 0));
if(lua_pcall(m_ls, 3, 0, 0) != 0)
{
const char* lerr = lua_tostring(m_ls, -1);
string err = "Error loading rules:" + string(lerr);

View File

@ -1,5 +1,7 @@
#pragma once
#include <list>
#include "sinsp.h"
#include "lua_parser.h"
@ -8,13 +10,18 @@ class falco_rules
public:
falco_rules(sinsp* inspector, lua_State *ls, string lua_main_filename);
~falco_rules();
void load_rules(string rules_filename);
void load_rules(string rules_filename, bool verbose);
void describe_rule(string *rule);
sinsp_filter* get_filter();
static void init(lua_State *ls);
static int add_filter(lua_State *ls);
private:
void load_compiler(string lua_main_filename);
void add_filter(list<uint32_t> &evttypes);
lua_parser* m_lua_parser;
sinsp* m_inspector;
lua_State* m_ls;
@ -22,6 +29,7 @@ class falco_rules
string m_lua_load_rules = "load_rules";
string m_lua_ignored_syscalls = "ignored_syscalls";
string m_lua_ignored_events = "ignored_events";
string m_lua_events = "events";
string m_lua_on_event = "on_event";
string m_lua_describe_rule = "describe_rule";
};