diff --git a/integrations/kubernetes-response-engine/deployment/Makefile b/integrations/kubernetes-response-engine/deployment/Makefile index df2c4cb0..c02c718c 100644 --- a/integrations/kubernetes-response-engine/deployment/Makefile +++ b/integrations/kubernetes-response-engine/deployment/Makefile @@ -1,13 +1,9 @@ deploy: kubectl apply -f nats/ - kubectl create configmap falco-config --from-file=falco-config/ || true - kubectl apply -f falco/ kubectl apply -f kubeless/ kubectl apply -f network-policy.yaml clean: kubectl delete -f kubeless/ - kubectl delete configmap falco-config - kubectl delete -f falco/ kubectl delete -f nats/ kubectl delete -f network-policy.yaml diff --git a/integrations/kubernetes-response-engine/deployment/falco-config/falco.yaml b/integrations/kubernetes-response-engine/deployment/falco-config/falco.yaml deleted file mode 100644 index 9ce40534..00000000 --- a/integrations/kubernetes-response-engine/deployment/falco-config/falco.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# File(s) or Directories containing Falco rules, loaded at startup. -# The name "rules_file" is only for backwards compatibility. -# If the entry is a file, it will be read directly. If the entry is a directory, -# every file in that directory will be read, in alphabetical order. -# -# falco_rules.yaml ships with the falco package and is overridden with -# every new software version. falco_rules.local.yaml is only created -# if it doesn't exist. If you want to customize the set of rules, add -# your customizations to falco_rules.local.yaml. -# -# The files will be read in the order presented here, so make sure if -# you have overrides they appear in later files. -rules_file: - - /etc/falco/falco_rules.yaml - - /etc/falco/falco_rules.local.yaml - - /etc/falco/rules.d - -# Whether to output events in json or text -json_output: true - -# When using json output, whether or not to include the "output" property -# itself (e.g. "File below a known binary directory opened for writing -# (user=root ....") in the json output. -json_include_output_property: true - -# Send information logs to stderr and/or syslog Note these are *not* security -# notification logs! These are just Falco lifecycle (and possibly error) logs. -log_stderr: true -log_syslog: true - -# Minimum log level to include in logs. Note: these levels are -# separate from the priority field of rules. This refers only to the -# log level of falco's internal logging. Can be one of "emergency", -# "alert", "critical", "error", "warning", "notice", "info", "debug". -log_level: info - -# Minimum rule priority level to load and run. All rules having a -# priority more severe than this level will be loaded/run. Can be one -# of "emergency", "alert", "critical", "error", "warning", "notice", -# "info", "debug". -priority: debug - -# Whether or not output to any of the output channels below is -# buffered. Defaults to true -buffered_outputs: true - -# A throttling mechanism implemented as a token bucket limits the -# rate of falco notifications. This throttling is controlled by the following configuration -# options: -# - rate: the number of tokens (i.e. right to send a notification) -# gained per second. Defaults to 1. -# - max_burst: the maximum number of tokens outstanding. Defaults to 1000. -# -# With these defaults, falco could send up to 1000 notifications after -# an initial quiet period, and then up to 1 notification per second -# afterward. It would gain the full burst back after 1000 seconds of -# no activity. 
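The outputs block that follows keeps the defaults described above (rate: 1, max_burst: 1000). A deployment that wants Falco to be quieter could override both values in its own falco.yaml; the numbers below are purely illustrative, allowing roughly one notification every 30 seconds with a burst of at most 10:

outputs:
  rate: 0.03333
  max_burst: 10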
- -outputs: - rate: 1 - max_burst: 1000 - -# Where security notifications should go. -# Multiple outputs can be enabled. - -syslog_output: - enabled: true - -# If keep_alive is set to true, the file will be opened once and -# continuously written to, with each output message on its own -# line. If keep_alive is set to false, the file will be re-opened -# for each output message. -# -# Also, the file will be closed and reopened if falco is signaled with -# SIGUSR1. - -file_output: - enabled: true - keep_alive: true - filename: /var/run/falco/nats - -stdout_output: - enabled: true - -# Possible additional things you might want to do with program output: -# - send to a slack webhook: -# program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX" -# - logging (alternate method than syslog): -# program: logger -t falco-test -# - send over a network connection: -# program: nc host.example.com 80 - -# If keep_alive is set to true, the program will be started once and -# continuously written to, with each output message on its own -# line. If keep_alive is set to false, the program will be re-spawned -# for each output message. -# -# Also, the program will be closed and reopened if falco is signaled with -# SIGUSR1. - - - diff --git a/integrations/kubernetes-response-engine/deployment/falco-config/falco_rules.local.yaml b/integrations/kubernetes-response-engine/deployment/falco-config/falco_rules.local.yaml deleted file mode 100644 index 3c8e3bb5..00000000 --- a/integrations/kubernetes-response-engine/deployment/falco-config/falco_rules.local.yaml +++ /dev/null @@ -1,13 +0,0 @@ -#################### -# Your custom rules! -#################### - -# Add new rules, like this one -# - rule: The program "sudo" is run in a container -# desc: An event will trigger every time you run sudo in a container -# condition: evt.type = execve and evt.dir=< and container.id != host and proc.name = sudo -# output: "Sudo run in container (user=%user.name %container.info parent=%proc.pname cmdline=%proc.cmdline)" -# priority: ERROR -# tags: [users, container] - -# Or override/append to any rule, macro, or list from the Default Rules diff --git a/integrations/kubernetes-response-engine/deployment/falco-config/falco_rules.yaml b/integrations/kubernetes-response-engine/deployment/falco-config/falco_rules.yaml deleted file mode 100644 index 58f4ea43..00000000 --- a/integrations/kubernetes-response-engine/deployment/falco-config/falco_rules.yaml +++ /dev/null @@ -1,1540 +0,0 @@ -# Currently disabled as read/write are ignored syscalls. The nearly -# similar open_write/open_read check for files being opened for -# reading/writing. -# - macro: write -# condition: (syscall.type=write and fd.type in (file, directory)) -# - macro: read -# condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) - -- macro: open_write - condition: (evt.type=open or evt.type=openat) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0 - -- macro: open_read - condition: (evt.type=open or evt.type=openat) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0 - -- macro: never_true - condition: (evt.num=0) - -- macro: always_true - condition: (evt.num=>0) - -# In some cases, such as dropped system call events, information about -# the process name may be missing. For some rules that really depend -# on the identity of the process performing an action such as opening -# a file, etc., we require that the process name be known. 
-- macro: proc_name_exists - condition: (proc.name!="") - -- macro: rename - condition: evt.type in (rename, renameat) -- macro: mkdir - condition: evt.type = mkdir -- macro: remove - condition: evt.type in (rmdir, unlink, unlinkat) - -- macro: modify - condition: rename or remove - -- macro: spawned_process - condition: evt.type = execve and evt.dir=< - -# File categories -- macro: bin_dir - condition: fd.directory in (/bin, /sbin, /usr/bin, /usr/sbin) - -- macro: bin_dir_mkdir - condition: > - (evt.arg[1] startswith /bin/ or - evt.arg[1] startswith /sbin/ or - evt.arg[1] startswith /usr/bin/ or - evt.arg[1] startswith /usr/sbin/) - -- macro: bin_dir_rename - condition: > - evt.arg[1] startswith /bin/ or - evt.arg[1] startswith /sbin/ or - evt.arg[1] startswith /usr/bin/ or - evt.arg[1] startswith /usr/sbin/ - -- macro: etc_dir - condition: fd.name startswith /etc/ - -# This detects writes immediately below / or any write anywhere below /root -- macro: root_dir - condition: ((fd.directory=/ or fd.name startswith /root) and fd.name contains "/") - -- list: shell_binaries - items: [bash, csh, ksh, sh, tcsh, zsh, dash] - -- list: shell_mgmt_binaries - items: [add-shell, remove-shell] - -- macro: shell_procs - condition: (proc.name in (shell_binaries)) - -- list: coreutils_binaries - items: [ - truncate, sha1sum, numfmt, fmt, fold, uniq, cut, who, - groups, csplit, sort, expand, printf, printenv, unlink, tee, chcon, stat, - basename, split, nice, "yes", whoami, sha224sum, hostid, users, stdbuf, - base64, unexpand, cksum, od, paste, nproc, pathchk, sha256sum, wc, test, - comm, arch, du, factor, sha512sum, md5sum, tr, runcon, env, dirname, - tsort, join, shuf, install, logname, pinky, nohup, expr, pr, tty, timeout, - tail, "[", seq, sha384sum, nl, head, id, mkfifo, sum, dircolors, ptx, shred, - tac, link, chroot, vdir, chown, touch, ls, dd, uname, "true", pwd, date, - chgrp, chmod, mktemp, cat, mknod, sync, ln, "false", rm, mv, cp, echo, - readlink, sleep, stty, mkdir, df, dir, rmdir, touch - ] - -# dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: login_binaries - items: [ - login, systemd, '"(systemd)"', systemd-logind, su, - nologin, faillog, lastlog, newgrp, sg - ] - -# dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: passwd_binaries - items: [ - shadowconfig, grpck, pwunconv, grpconv, pwck, - groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, - groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, - gpasswd, chfn, expiry, passwd, vigr, cpgr - ] - -# repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | -# awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: shadowutils_binaries - items: [ - chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, - groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, - newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd - ] - -- list: sysdigcloud_binaries - items: [setup-backend, dragent, sdchecks] - -- list: docker_binaries - items: [docker, dockerd, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current] - -- list: k8s_binaries - items: [hyperkube, skydns, kube2sky, exechealthz] - -- list: lxd_binaries - items: [lxd, lxcfs] - -- list: http_server_binaries - items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] - -- list: db_server_binaries - items: [mysqld, 
postgres, sqlplus] - -- list: mysql_mgmt_binaries - items: [mysql_install_d, mysql_ssl_rsa_s] - -- list: postgres_mgmt_binaries - items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] - -- list: db_mgmt_binaries - items: [mysql_mgmt_binaries, postgres_mgmt_binaries] - -- list: nosql_server_binaries - items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] - -- list: gitlab_binaries - items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] - -- macro: server_procs - condition: proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd) - -# The explicit quotes are needed to avoid the - characters being -# interpreted by the filter expression. -- list: rpm_binaries - items: [dnf, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, subscription-ma, - repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, - abrt-action-sav, rpmdb_stat] - -- macro: rpm_procs - condition: proc.name in (rpm_binaries) or proc.name in (salt-minion) - -- list: deb_binaries - items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, apt, apt-get, aptitude, - frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, - apt-listchanges, unattended-upgr, apt-add-reposit - ] - -# The truncated dpkg-preconfigu is intentional, process names are -# truncated at the sysdig level. -- list: package_mgmt_binaries - items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, sane-utils.post] - -- macro: package_mgmt_procs - condition: proc.name in (package_mgmt_binaries) - -- macro: run_by_package_mgmt_binaries - condition: proc.aname in (package_mgmt_binaries, needrestart) - -- list: ssl_mgmt_binaries - items: [ca-certificates] - -- list: dhcp_binaries - items: [dhclient, dhclient-script] - -# A canonical set of processes that run other programs with different -# privileges or as a different user. -- list: userexec_binaries - items: [sudo, su, suexec] - -- list: known_setuid_binaries - items: [ - sshd, dbus-daemon-lau, ping, ping6, critical-stack-, pmmcli, - filemng, PassengerAgent, bwrap, osdetect, nginxmng, sw-engine-fpm, - start-stop-daem - ] - -- list: user_mgmt_binaries - items: [login_binaries, passwd_binaries, shadowutils_binaries] - -- list: dev_creation_binaries - items: [blkid, rename_device, update_engine, sgdisk] - -- list: hids_binaries - items: [aide] - -- list: vpn_binaries - items: [openvpn] - -- list: nomachine_binaries - items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] - -- macro: system_procs - condition: proc.name in (coreutils_binaries, user_mgmt_binaries) - -- list: mail_binaries - items: [ - sendmail, sendmail-msp, postfix, procmail, exim4, - pickup, showq, mailq, dovecot, imap-login, imap, - mailmng-core, pop3-login, dovecot-lda, pop3 - ] - -- list: mail_config_binaries - items: [ - update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, - update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., - postfix.config, postfix-script - ] - -- list: sensitive_file_names - items: [/etc/shadow, /etc/sudoers, /etc/pam.conf] - -- macro: sensitive_files - condition: > - fd.name startswith /etc and - (fd.name in (sensitive_file_names) - or fd.directory in (/etc/sudoers.d, /etc/pam.d)) - -# Indicates that the process is new. Currently detected using time -# since process was started, using a threshold of 5 seconds. 
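The proc_is_new macro defined just below encodes that 5-second threshold in nanoseconds (5000000000). A local rules file wanting a longer grace period could override it; the 30-second value here is only illustrative:

- macro: proc_is_new
  condition: proc.duration <= 30000000000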
-- macro: proc_is_new - condition: proc.duration <= 5000000000 - -# Network -- macro: inbound - condition: > - (((evt.type in (accept,listen) and evt.dir=<)) or - (fd.typechar = 4 or fd.typechar = 6) and - (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and - (evt.rawres >= 0 or evt.res = EINPROGRESS)) - -- macro: outbound - condition: > - (((evt.type = connect and evt.dir=<)) or - (fd.typechar = 4 or fd.typechar = 6) and - (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and - (evt.rawres >= 0 or evt.res = EINPROGRESS)) - -# Very similar to inbound/outbound, but combines the tests together -# for efficiency. -- macro: inbound_outbound - condition: > - (((evt.type in (accept,listen,connect) and evt.dir=<)) or - (fd.typechar = 4 or fd.typechar = 6) and - (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and - (evt.rawres >= 0 or evt.res = EINPROGRESS)) - -- macro: ssh_port - condition: fd.sport=22 - -# In a local/user rules file, you could override this macro to -# enumerate the servers for which ssh connections are allowed. For -# example, you might have a ssh gateway host for which ssh connections -# are allowed. -# -# In the main falco rules file, there isn't any way to know the -# specific hosts for which ssh access is allowed, so this macro just -# repeats ssh_port, which effectively allows ssh from all hosts. In -# the overridden macro, the condition would look something like -# "fd.sip="a.b.c.d" or fd.sip="e.f.g.h" or ..." -- macro: allowed_ssh_hosts - condition: ssh_port - -- rule: Disallowed SSH Connection - desc: Detect any new ssh connection to a host other than those in an allowed group of hosts - condition: (inbound_outbound) and ssh_port and not allowed_ssh_hosts - output: Disallowed SSH Connection (command=%proc.cmdline connection=%fd.name user=%user.name) - priority: NOTICE - tags: [network] - -# Use this to test whether the event occurred within a container. - -# When displaying container information in the output field, use -# %container.info, without any leading term (file=%fd.name -# %container.info user=%user.name, and not file=%fd.name -# container=%container.info user=%user.name). The output will change -# based on the context and whether or not -pk/-pm/-pc was specified on -# the command line. -- macro: container - condition: container.id != host - -- macro: interactive - condition: > - ((proc.aname=sshd and proc.name != sshd) or - proc.name=systemd-logind or proc.name=login) - -- list: cron_binaries - items: [anacron, cron, crond, crontab] - -# https://github.com/liske/needrestart -- list: needrestart_binaries - items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] - -# Possible scripts run by sshkit -- list: sshkit_script_binaries - items: [10_etc_sudoers., 10_passwd_group] - -- list: plesk_binaries - items: [sw-engine, sw-engine-fpm, sw-engine-kv, filemng, f2bmng] - -# System users that should never log into a system. Consider adding your own -# service users (e.g. 'apache' or 'mysqld') here. -- macro: system_users - condition: user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data) - -# These macros will be removed soon. Only keeping them to maintain -# compatiblity with some widely used rules files. 
-# Begin Deprecated -- macro: parent_ansible_running_python - condition: (proc.pname in (python, pypy) and proc.pcmdline contains ansible) - -- macro: parent_bro_running_python - condition: (proc.pname=python and proc.cmdline contains /usr/share/broctl) - -- macro: parent_python_running_denyhosts - condition: > - (proc.cmdline startswith "denyhosts.py /usr/bin/denyhosts.py" or - (proc.pname=python and - (proc.pcmdline contains /usr/sbin/denyhosts or - proc.pcmdline contains /usr/local/bin/denyhosts.py))) - -- macro: parent_python_running_sdchecks - condition: > - (proc.pname in (python, python2.7) and - (proc.pcmdline contains /opt/draios/bin/sdchecks)) - -- macro: parent_linux_image_upgrade_script - condition: proc.pname startswith linux-image- - -- macro: parent_java_running_echo - condition: (proc.pname=java and proc.cmdline startswith "sh -c echo") - -- macro: parent_scripting_running_builds - condition: > - (proc.pname in (php,php5-fpm,php-fpm7.1,python,ruby,ruby2.3,ruby2.1,node,conda) and ( - proc.cmdline startswith "sh -c git" or - proc.cmdline startswith "sh -c date" or - proc.cmdline startswith "sh -c /usr/bin/g++" or - proc.cmdline startswith "sh -c /usr/bin/gcc" or - proc.cmdline startswith "sh -c gcc" or - proc.cmdline startswith "sh -c if type gcc" or - proc.cmdline startswith "sh -c cd '/var/www/edi/';LC_ALL=en_US.UTF-8 git" or - proc.cmdline startswith "sh -c /var/www/edi/bin/sftp.sh" or - proc.cmdline startswith "sh -c /usr/src/app/crxlsx/bin/linux/crxlsx" or - proc.cmdline startswith "sh -c make parent" or - proc.cmdline startswith "node /jenkins/tools" or - proc.cmdline startswith "sh -c '/usr/bin/node'" or - proc.cmdline startswith "sh -c stty -a |" or - proc.pcmdline startswith "node /opt/nodejs/bin/yarn" or - proc.pcmdline startswith "node /usr/local/bin/yarn" or - proc.pcmdline startswith "node /root/.config/yarn" or - proc.pcmdline startswith "node /opt/yarn/bin/yarn.js")) - - -- macro: httpd_writing_ssl_conf - condition: > - (proc.pname=run-httpd and - (proc.cmdline startswith "sed -ri" or proc.cmdline startswith "sed -i") and - (fd.name startswith /etc/httpd/conf.d/ or fd.name startswith /etc/httpd/conf)) - -- macro: parent_Xvfb_running_xkbcomp - condition: (proc.pname=Xvfb and proc.cmdline startswith 'sh -c "/usr/bin/xkbcomp"') - -- macro: parent_nginx_running_serf - condition: (proc.pname=nginx and proc.cmdline startswith "sh -c serf") - -- macro: parent_node_running_npm - condition: (proc.pcmdline startswith "node /usr/local/bin/npm" or - proc.pcmdline startswith "node /usr/local/nodejs/bin/npm" or - proc.pcmdline startswith "node /opt/rh/rh-nodejs6/root/usr/bin/npm") - -- macro: parent_java_running_sbt - condition: (proc.pname=java and proc.pcmdline contains sbt-launch.jar) - -- list: known_container_shell_spawn_cmdlines - items: [] - -- list: known_shell_spawn_binaries - items: [] - -- macro: shell_spawning_containers - condition: (container.image startswith jenkins or - container.image startswith gitlab/gitlab-ce or - container.image startswith gitlab/gitlab-ee) - -## End Deprecated - -- macro: ansible_running_python - condition: (proc.name in (python, pypy) and proc.cmdline contains ansible) - -- macro: chef_running_yum_dump - condition: (proc.name=python and proc.cmdline contains yum-dump.py) - -- macro: python_running_denyhosts - condition: > - (proc.name=python and - (proc.cmdline contains /usr/sbin/denyhosts or - proc.cmdline contains /usr/local/bin/denyhosts.py)) - -# Qualys seems to run a variety of shell subprocesses, at various -# levels. 
This checks at a few levels without the cost of a full -# proc.aname, which traverses the full parent heirarchy. -- macro: run_by_qualys - condition: > - (proc.pname=qualys-cloud-ag or - proc.aname[2]=qualys-cloud-ag or - proc.aname[3]=qualys-cloud-ag or - proc.aname[4]=qualys-cloud-ag) - -- macro: run_by_sumologic_securefiles - condition: > - ((proc.cmdline="usermod -a -G sumologic_collector" or - proc.cmdline="groupadd sumologic_collector") and - (proc.pname=secureFiles.sh and proc.aname[2]=java)) - -- macro: run_by_yum - condition: ((proc.pname=sh and proc.aname[2]=yum) or - (proc.aname[2]=sh and proc.aname[3]=yum)) - -- macro: run_by_ms_oms - condition: > - (proc.aname[3] startswith omsagent- or - proc.aname[3] startswith scx-) - -- macro: run_by_google_accounts_daemon - condition: > - (proc.aname[1] startswith google_accounts or - proc.aname[2] startswith google_accounts) - -# Chef is similar. -- macro: run_by_chef - condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or - proc.aname[2]=chef-client or proc.aname[3]=chef-client or - proc.name=chef-client) - -- macro: run_by_adclient - condition: (proc.aname[2]=adclient or proc.aname[3]=adclient or proc.aname[4]=adclient) - -- macro: run_by_centrify - condition: (proc.aname[2]=centrify or proc.aname[3]=centrify or proc.aname[4]=centrify) - -- macro: run_by_puppet - condition: (proc.aname[2]=puppet or proc.aname[3]=puppet) - -# Also handles running semi-indirectly via scl -- macro: run_by_foreman - condition: > - (user.name=foreman and - (proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or - (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby))) - -- macro: java_running_sdjagent - condition: proc.name=java and proc.cmdline contains sdjagent.jar - -- macro: kubelet_running_loopback - condition: (proc.pname=kubelet and proc.name=loopback) - -- macro: python_mesos_marathon_scripting - condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") - -- macro: splunk_running_forwarder - condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") - -- macro: parent_supervise_running_multilog - condition: (proc.name=multilog and proc.pname=supervise) - -- macro: supervise_writing_status - condition: (proc.name in (supervise,svc) and fd.name startswith "/etc/sb/") - -- macro: pki_realm_writing_realms - condition: (proc.cmdline startswith "bash /usr/local/lib/pki/pki-realm" and fd.name startswith /etc/pki/realms) - -- macro: htpasswd_writing_passwd - condition: (proc.name=htpasswd and fd.name=/etc/nginx/.htpasswd) - -- macro: lvprogs_writing_lvm_archive - condition: (proc.name in (dmeventd,lvcreate) and (fd.name startswith /etc/lvm/archive or - fd.name startswith /etc/lvm/backup)) -- macro: ovsdb_writing_openvswitch - condition: (proc.name=ovsdb-server and fd.directory=/etc/openvswitch) - -- macro: perl_running_plesk - condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or - proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") - -- macro: perl_running_updmap - condition: (proc.cmdline startswith "perl /usr/bin/updmap") - -- macro: perl_running_centrifydc - condition: (proc.cmdline startswith "perl /usr/share/centrifydc") - -- macro: parent_ucf_writing_conf - condition: (proc.pname=ucf and proc.aname[2]=frontend) - -- macro: consul_template_writing_conf - condition: > - ((proc.name=consul-template and fd.name startswith /etc/haproxy) or - (proc.name=reload.sh and proc.aname[2]=consul-template and fd.name 
startswith /etc/ssl)) - -- macro: countly_writing_nginx_conf - condition: (proc.cmdline startswith "nodejs /opt/countly/bin" and fd.name startswith /etc/nginx) - -- macro: ms_oms_writing_conf - condition: > - ((proc.name in (omiagent,omsagent,in_heartbeat_r*,omsadmin.sh,PerformInventor) - or proc.pname in (omi.postinst,omsconfig.posti,scx.postinst,omsadmin.sh,omiagent)) - and (fd.name startswith /etc/opt/omi or fd.name startswith /etc/opt/microsoft/omsagent)) - -- macro: ms_scx_writing_conf - condition: (proc.name in (GetLinuxOS.sh) and fd.name startswith /etc/opt/microsoft/scx) - -- macro: azure_scripts_writing_conf - condition: (proc.pname startswith "bash /var/lib/waagent/" and fd.name startswith /etc/azure) - -- macro: azure_networkwatcher_writing_conf - condition: (proc.name in (NetworkWatcherA) and fd.name=/etc/init.d/AzureNetworkWatcherAgent) - -- macro: couchdb_writing_conf - condition: (proc.name=beam.smp and proc.cmdline contains couchdb and fd.name startswith /etc/couchdb) - -- macro: update_texmf_writing_conf - condition: (proc.name=update-texmf and fd.name startswith /etc/texmf) - -- macro: slapadd_writing_conf - condition: (proc.name=slapadd and fd.name startswith /etc/ldap) - -- macro: symantec_writing_conf - condition: > - ((proc.name=symcfgd and fd.name startswith /etc/symantec) or - (proc.name=navdefutil and fd.name=/etc/symc-defutils.conf)) - -- macro: liveupdate_writing_conf - condition: (proc.cmdline startswith "java LiveUpdate" and fd.name in (/etc/liveupdate.conf, /etc/Product.Catalog.JavaLiveUpdate)) - -- macro: sosreport_writing_files - condition: > - (proc.name=urlgrabber-ext- and proc.aname[3]=sosreport and - (fd.name startswith /etc/pkt/nssdb or fd.name startswith /etc/pki/nssdb)) - -- macro: selinux_writing_conf - condition: (proc.name in (semodule,genhomedircon,sefcontext_comp) and fd.name startswith /etc/selinux) - -- list: veritas_binaries - items: [vxconfigd, sfcache, vxclustadm, vxdctl, vxprint, vxdmpadm, vxdisk, vxdg, vxassist, vxtune] - -- macro: veritas_driver_script - condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") - -- macro: veritas_progs - condition: (proc.name in (veritas_binaries) or veritas_driver_script) - -- macro: veritas_writing_config - condition: (veritas_progs and fd.name startswith /etc/vx) - -- macro: nginx_writing_conf - condition: (proc.name=nginx and fd.name startswith /etc/nginx) - -- macro: nginx_writing_certs - condition: > - (((proc.name=openssl and proc.pname=nginx-launch.sh) or proc.name=nginx-launch.sh) and fd.name startswith /etc/nginx/certs) - -- macro: chef_client_writing_conf - condition: (proc.pcmdline startswith "chef-client /opt/gitlab" and fd.name startswith /etc/gitlab) - -- macro: centrify_writing_krb - condition: (proc.name in (adjoin,addns) and fd.name startswith /etc/krb5) - -- macro: cockpit_writing_conf - condition: > - ((proc.pname=cockpit-kube-la or proc.aname[2]=cockpit-kube-la) - and fd.name startswith /etc/cockpit) - -- macro: ipsec_writing_conf - condition: (proc.name=start-ipsec.sh and fd.directory=/etc/ipsec) - -- macro: exe_running_docker_save - condition: (proc.cmdline startswith "exe /var/lib/docker" and proc.pname in (dockerd, docker)) - -- macro: python_running_get_pip - condition: (proc.cmdline startswith "python get-pip.py") - -- macro: python_running_ms_oms - condition: (proc.cmdline startswith "python /var/lib/waagent/") - -- macro: gugent_writing_guestagent_log - condition: (proc.name=gugent and fd.name=GuestAgent.log) - -- rule: Write below binary dir - desc: an 
attempt to write to any file below a set of binary directories - condition: > - bin_dir and evt.dir = < and open_write - and not package_mgmt_procs - and not exe_running_docker_save - and not python_running_get_pip - and not python_running_ms_oms - output: > - File below a known binary directory opened for writing (user=%user.name - command=%proc.cmdline file=%fd.name parent=%proc.pname pcmdline=%proc.pcmdline gparent=%proc.aname[2]) - priority: ERROR - tags: [filesystem] - -- list: safe_etc_dirs - items: [/etc/cassandra, /etc/ssl/certs/java, /etc/logstash, /etc/nginx/conf.d, /etc/container_environment, /etc/hrmconfig] - -- macro: fluentd_writing_conf_files - condition: (proc.name=start-fluentd and fd.name in (/etc/fluent/fluent.conf, /etc/td-agent/td-agent.conf)) - -- macro: qualys_writing_conf_files - condition: (proc.name=qualys-cloud-ag and fd.name=/etc/qualys/cloud-agent/qagent-log.conf) - -- macro: git_writing_nssdb - condition: (proc.name=git-remote-http and fd.directory=/etc/pki/nssdb) - -- macro: plesk_writing_keys - condition: (proc.name in (plesk_binaries) and fd.name startswith /etc/sw/keys) - -- macro: plesk_install_writing_apache_conf - condition: (proc.cmdline startswith "bash -hB /usr/lib/plesk-9.0/services/webserver.apache configure" - and fd.name="/etc/apache2/apache2.conf.tmp") - -- macro: plesk_running_mktemp - condition: (proc.name=mktemp and proc.aname[3] in (plesk_binaries)) - -- macro: networkmanager_writing_resolv_conf - condition: proc.aname[2]=nm-dispatcher and fd.name=/etc/resolv.conf - -- macro: add_shell_writing_shells_tmp - condition: (proc.name=add-shell and fd.name=/etc/shells.tmp) - -- macro: duply_writing_exclude_files - condition: (proc.name=touch and proc.pcmdline startswith "bash /usr/bin/duply" and fd.name startswith "/etc/duply") - -- macro: xmlcatalog_writing_files - condition: (proc.name=update-xmlcatal and fd.directory=/etc/xml) - -- macro: datadog_writing_conf - condition: ((proc.cmdline startswith "python /opt/datadog-agent" or - proc.cmdline startswith "entrypoint.sh /entrypoint.sh datadog start" or - proc.cmdline startswith "agent.py /opt/datadog-agent") - and fd.name startswith "/etc/dd-agent") - -- macro: curl_writing_pki_db - condition: (proc.name=curl and fd.directory=/etc/pki/nssdb) - -- macro: haproxy_writing_conf - condition: ((proc.name in (update-haproxy-,haproxy_reload.) 
or proc.pname in (update-haproxy-,haproxy_reload,haproxy_reload.)) - and (fd.name=/etc/openvpn/client.map or fd.name startswith /etc/haproxy)) - -- macro: java_writing_conf - condition: (proc.name=java and fd.name=/etc/.java/.systemPrefs/.system.lock) - -- macro: rabbitmq_writing_conf - condition: (proc.name=rabbitmq-server and fd.directory=/etc/rabbitmq) - -- macro: rook_writing_conf - condition: (proc.name=toolbox.sh and container.image startswith rook/toolbox - and fd.directory=/etc/ceph) - -- macro: httpd_writing_conf_logs - condition: (proc.name=httpd and fd.name startswith /etc/httpd/) - -- macro: mysql_writing_conf - condition: ((proc.name=start-mysql.sh or proc.pname=start-mysql.sh) and fd.name startswith /etc/mysql) - -- macro: openvpn_writing_conf - condition: (proc.name in (openvpn,openvpn-entrypo) and fd.name startswith /etc/openvpn) - -- macro: php_handlers_writing_conf - condition: (proc.name=php_handlers_co and fd.name=/etc/psa/php_versions.json) - -- macro: sed_writing_temp_file - condition: > - ((proc.aname[3]=cron_start.sh and fd.name startswith /etc/security/sed) or - (proc.name=sed and (fd.name startswith /etc/apt/sources.list.d/sed or - fd.name startswith /etc/apt/sed or - fd.name startswith /etc/apt/apt.conf.d/sed))) - -- macro: cron_start_writing_pam_env - condition: (proc.cmdline="bash /usr/sbin/start-cron" and fd.name=/etc/security/pam_env.conf) - -# In some cases dpkg-reconfigur runs commands that modify /etc. Not -# putting the full set of package management programs yet. -- macro: dpkg_scripting - condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu)) - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to allow for specific combinations of -# programs writing below specific directories below -# /etc. fluentd_writing_conf_files is a good example to follow, as it -# specifies both the program doing the writing as well as the specific -# files it is allowed to modify. -# -# In this file, it just takes one of the programs in the base macro -# and repeats it. 
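A local override following the fluentd_writing_conf_files pattern names both the writing program and the paths it may modify; the program name and directory below are hypothetical:

- macro: user_known_write_etc_conditions
  condition: (proc.name=confd or (proc.name=my-config-agent and fd.name startswith /etc/myapp))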
- -- macro: user_known_write_etc_conditions - condition: proc.name=confd - -- macro: write_etc_common - condition: > - etc_dir and evt.dir = < and open_write - and proc_name_exists - and not proc.name in (passwd_binaries, shadowutils_binaries, sysdigcloud_binaries, - package_mgmt_binaries, ssl_mgmt_binaries, dhcp_binaries, - dev_creation_binaries, shell_mgmt_binaries, - mail_config_binaries, - sshkit_script_binaries, - ldconfig.real, ldconfig, confd, gpg, insserv, - apparmor_parser, update-mime, tzdata.config, tzdata.postinst, - systemd, systemd-machine, systemd-sysuser, - debconf-show, rollerd, bind9.postinst, sv, - gen_resolvconf., update-ca-certi, certbot, runsv, - qualys-cloud-ag, locales.postins, nomachine_binaries, - adclient, certutil, crlutil, pam-auth-update, parallels_insta, - openshift-launc, update-rc.d) - and not proc.pname in (sysdigcloud_binaries, mail_config_binaries, hddtemp.postins, sshkit_script_binaries, locales.postins, deb_binaries, dhcp_binaries) - and not fd.name pmatch (safe_etc_dirs) - and not fd.name in (/etc/container_environment.sh, /etc/container_environment.json, /etc/motd, /etc/motd.svc) - and not exe_running_docker_save - and not ansible_running_python - and not python_running_denyhosts - and not fluentd_writing_conf_files - and not user_known_write_etc_conditions - and not run_by_centrify - and not run_by_adclient - and not qualys_writing_conf_files - and not git_writing_nssdb - and not plesk_writing_keys - and not plesk_install_writing_apache_conf - and not plesk_running_mktemp - and not networkmanager_writing_resolv_conf - and not run_by_chef - and not add_shell_writing_shells_tmp - and not duply_writing_exclude_files - and not xmlcatalog_writing_files - and not parent_supervise_running_multilog - and not supervise_writing_status - and not pki_realm_writing_realms - and not htpasswd_writing_passwd - and not lvprogs_writing_lvm_archive - and not ovsdb_writing_openvswitch - and not datadog_writing_conf - and not curl_writing_pki_db - and not haproxy_writing_conf - and not java_writing_conf - and not dpkg_scripting - and not parent_ucf_writing_conf - and not rabbitmq_writing_conf - and not rook_writing_conf - and not php_handlers_writing_conf - and not sed_writing_temp_file - and not cron_start_writing_pam_env - and not httpd_writing_conf_logs - and not mysql_writing_conf - and not openvpn_writing_conf - and not consul_template_writing_conf - and not countly_writing_nginx_conf - and not ms_oms_writing_conf - and not ms_scx_writing_conf - and not azure_scripts_writing_conf - and not azure_networkwatcher_writing_conf - and not couchdb_writing_conf - and not update_texmf_writing_conf - and not slapadd_writing_conf - and not symantec_writing_conf - and not liveupdate_writing_conf - and not sosreport_writing_files - and not selinux_writing_conf - and not veritas_writing_config - and not nginx_writing_conf - and not nginx_writing_certs - and not chef_client_writing_conf - and not centrify_writing_krb - and not cockpit_writing_conf - and not ipsec_writing_conf - and not httpd_writing_ssl_conf - -- rule: Write below etc - desc: an attempt to write to any file below /etc - condition: write_etc_common - output: "File below /etc opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname pcmdline=%proc.pcmdline file=%fd.name program=%proc.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4])" - priority: ERROR - tags: [filesystem] - -- list: known_root_files - items: [/root/.monit.state, /root/.auth_tokens, /root/.bash_history, 
/root/.ash_history, /root/.aws/credentials, - /root/.viminfo.tmp, /root/.lesshst, /root/.bzr.log, /root/.gitconfig.lock, /root/.babel.json, /root/.localstack, - /root/.node_repl_history, /root/.mongorc.js, /root/.dbshell, /root/.augeas/history, /root/.rnd] - -- list: known_root_directories - items: [/root/.oracle_jre_usage, /root/.ssh, /root/.subversion, /root/.nami] - -- macro: known_root_conditions - condition: (fd.name startswith /root/orcexec. - or fd.name startswith /root/.m2 - or fd.name startswith /root/.npm - or fd.name startswith /root/.pki - or fd.name startswith /root/.ivy2 - or fd.name startswith /root/.config/Cypress - or fd.name startswith /root/.config/pulse - or fd.name startswith /root/.config/configstore - or fd.name startswith /root/jenkins/workspace - or fd.name startswith /root/.jenkins - or fd.name startswith /root/.cache - or fd.name startswith /root/.sbt - or fd.name startswith /root/.java - or fd.name startswith /root/.glide - or fd.name startswith /root/.sonar - or fd.name startswith /root/.v8flag - or fd.name startswith /root/infaagent - or fd.name startswith /root/.local/lib/python - or fd.name startswith /root/.pm2 - or fd.name startswith /root/.gnupg - or fd.name startswith /root/.pgpass - or fd.name startswith /root/.theano - or fd.name startswith /root/.gradle - or fd.name startswith /root/.android - or fd.name startswith /root/.ansible - or fd.name startswith /root/.crashlytics - or fd.name startswith /root/.dbus - or fd.name startswith /root/.composer - or fd.name startswith /root/.gconf - or fd.name startswith /root/.nv) - -- rule: Write below root - desc: an attempt to write to any file directly below / or /root - condition: > - root_dir and evt.dir = < and open_write - and not fd.name in (known_root_files) - and not fd.directory in (known_root_directories) - and not exe_running_docker_save - and not gugent_writing_guestagent_log - and not known_root_conditions - output: "File below / or /root opened for writing (user=%user.name command=%proc.cmdline parent=%proc.pname file=%fd.name program=%proc.name)" - priority: ERROR - tags: [filesystem] - -- macro: cmp_cp_by_passwd - condition: proc.name in (cmp, cp) and proc.pname in (passwd, run-parts) - -- rule: Read sensitive file trusted after startup - desc: > - an attempt to read any sensitive file (e.g. files containing user/password/authentication - information) by a trusted program after startup. Trusted programs might read these files - at startup to load initial state, but not afterwards. - condition: sensitive_files and open_read and server_procs and not proc_is_new and proc.name!="sshd" - output: > - Sensitive file opened for reading by trusted program after startup (user=%user.name - command=%proc.cmdline parent=%proc.pname file=%fd.name parent=%proc.pname gparent=%proc.aname[2]) - priority: WARNING - tags: [filesystem] - -- list: read_sensitive_file_binaries - items: [ - iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, - vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, - pam-auth-update, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, - scxcimservera, adclient, rtvscand, cockpit-session - ] - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to allow for specific combinations of -# programs accessing sensitive files. -# fluentd_writing_conf_files is a good example to follow, as it -# specifies both the program doing the writing as well as the specific -# files it is allowed to modify. 
-# -# In this file, it just takes one of the macros in the base rule -# and repeats it. - -- macro: user_read_sensitive_file_conditions - condition: cmp_cp_by_passwd - -- rule: Read sensitive file untrusted - desc: > - an attempt to read any sensitive file (e.g. files containing user/password/authentication - information). Exceptions are made for known trusted programs. - condition: > - sensitive_files and open_read - and proc_name_exists - and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, - cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, - vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, - in.proftpd, mandb, salt-minion, postgres_mgmt_binaries) - and not cmp_cp_by_passwd - and not ansible_running_python - and not proc.cmdline contains /usr/bin/mandb - and not run_by_qualys - and not run_by_chef - and not user_read_sensitive_file_conditions - and not perl_running_plesk - and not perl_running_updmap - and not veritas_driver_script - and not perl_running_centrifydc - output: > - Sensitive file opened for reading by non-trusted program (user=%user.name program=%proc.name - command=%proc.cmdline file=%fd.name parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) - priority: WARNING - tags: [filesystem] - -# Only let rpm-related programs write to the rpm database -- rule: Write below rpm database - desc: an attempt to write to the rpm database by any non-rpm related program - condition: fd.name startswith /var/lib/rpm and open_write and not rpm_procs and not ansible_running_python and not chef_running_yum_dump - output: "Rpm database opened for writing by a non-rpm program (command=%proc.cmdline file=%fd.name)" - priority: ERROR - tags: [filesystem, software_mgmt] - -- macro: postgres_running_wal_e - condition: (proc.pname=postgres and proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e") - -- macro: redis_running_prepost_scripts - condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) - -- macro: rabbitmq_running_scripts - condition: > - (proc.pname=beam.smp and - (proc.cmdline startswith "sh -c exec ps" or - proc.cmdline startswith "sh -c exec inet_gethost" or - proc.cmdline= "sh -s unix:cmd" or - proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1")) - -- macro: rabbitmqctl_running_scripts - condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ") - -- rule: DB program spawned process - desc: > - a database-server related program spawned a new process other than itself. - This shouldn\'t occur and is a follow on from some SQL injection attacks. - condition: > - proc.pname in (db_server_binaries) - and spawned_process - and not proc.name in (db_server_binaries) - and not postgres_running_wal_e - output: > - Database-related program spawned process other than itself (user=%user.name - program=%proc.cmdline parent=%proc.pname) - priority: NOTICE - tags: [process, database] - -- rule: Modify binary dirs - desc: an attempt to modify any file below a set of binary directories. 
- condition: (bin_dir_rename) and modify and not package_mgmt_procs and not exe_running_docker_save - output: > - File below known binary directory renamed/removed (user=%user.name command=%proc.cmdline - operation=%evt.type file=%fd.name %evt.args) - priority: ERROR - tags: [filesystem] - -- rule: Mkdir binary dirs - desc: an attempt to create a directory below a set of binary directories. - condition: mkdir and bin_dir_mkdir and not package_mgmt_procs - output: > - Directory below known binary directory created (user=%user.name - command=%proc.cmdline directory=%evt.arg.path) - priority: ERROR - tags: [filesystem] - -# This list allows for easy additions to the set of commands allowed -# to change thread namespace without having to copy and override the -# entire change thread namespace rule. -- list: user_known_change_thread_namespace_binaries - items: [] - -- rule: Change thread namespace - desc: > - an attempt to change a program/thread\'s namespace (commonly done - as a part of creating a container) by calling setns. - condition: > - evt.type = setns - and not proc.name in (docker_binaries, k8s_binaries, lxd_binaries, sysdigcloud_binaries, sysdig, nsenter) - and not proc.name in (user_known_change_thread_namespace_binaries) - and not proc.name startswith "runc:" - and not proc.pname in (sysdigcloud_binaries) - and not java_running_sdjagent - and not kubelet_running_loopback - output: > - Namespace change (setns) by unexpected program (user=%user.name command=%proc.cmdline - parent=%proc.pname %container.info) - priority: NOTICE - tags: [process] - -# The binaries in this list and their descendents are *not* allowed -# spawn shells. This includes the binaries spawning shells directly as -# well as indirectly. For example, apache -> php/perl for -# mod_{php,perl} -> some shell is also not allowed, because the shell -# has apache as an ancestor. 
- -- list: protected_shell_spawning_binaries - items: [ - http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries, - fluentd, flanneld, splunkd, consul, smbd, runsv, PM2 - ] - -- macro: parent_java_running_zookeeper - condition: (proc.pname=java and proc.pcmdline contains org.apache.zookeeper.server) - -- macro: parent_java_running_kafka - condition: (proc.pname=java and proc.pcmdline contains kafka.Kafka) - -- macro: parent_java_running_elasticsearch - condition: (proc.pname=java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch) - -- macro: parent_java_running_activemq - condition: (proc.pname=java and proc.pcmdline contains activemq.jar) - -- macro: parent_java_running_cassandra - condition: (proc.pname=java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon)) - -- macro: parent_java_running_jboss_wildfly - condition: (proc.pname=java and proc.pcmdline contains org.jboss) - -- macro: parent_java_running_glassfish - condition: (proc.pname=java and proc.pcmdline contains com.sun.enterprise.glassfish) - -- macro: parent_java_running_hadoop - condition: (proc.pname=java and proc.pcmdline contains org.apache.hadoop) - -- macro: parent_java_running_datastax - condition: (proc.pname=java and proc.pcmdline contains com.datastax) - -- macro: nginx_starting_nginx - condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf") - -- macro: nginx_running_aws_s3_cp - condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp") - -- macro: consul_running_net_scripts - condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc")) - -- macro: consul_running_alert_checks - condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts") - -- macro: serf_script - condition: (proc.cmdline startswith "sh -c serf") - -- macro: check_process_status - condition: (proc.cmdline startswith "sh -c kill -0 ") - -# In some cases, you may want to consider node processes run directly -# in containers as protected shell spawners. Examples include using -# pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct -# entrypoint of the container, and when the node app is a long-lived -# server using something like express. -# -# However, there are other uses of node related to build pipelines for -# which node is not really a server but instead a general scripting -# tool. In these cases, shells are very likely and in these cases you -# don't want to consider node processes protected shell spawners. -# -# We have to choose one of these cases, so we consider node processes -# as unprotected by default. If you want to consider any node process -# run in a container as a protected shell spawner, override the below -# macro to remove the "never_true" clause, which allows it to take effect. 
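Removing the "never_true" clause from the macro defined just below leaves a local override along these lines:

- macro: possibly_node_in_container
  condition: (proc.pname=node and proc.aname[3]=docker-containe)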
-- macro: possibly_node_in_container - condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe)) - -- macro: protected_shell_spawner - condition: > - (proc.aname in (protected_shell_spawning_binaries) - or parent_java_running_zookeeper - or parent_java_running_kafka - or parent_java_running_elasticsearch - or parent_java_running_activemq - or parent_java_running_cassandra - or parent_java_running_jboss_wildfly - or parent_java_running_glassfish - or parent_java_running_hadoop - or parent_java_running_datastax - or possibly_node_in_container) - -- list: mesos_shell_binaries - items: [mesos-docker-ex, mesos-slave, mesos-health-ch] - -# Note that runsv is both in protected_shell_spawner and the -# exclusions by pname. This means that runsv can itself spawn shells -# (the ./run and ./finish scripts), but the processes runsv can not -# spawn shells. -- rule: Run shell untrusted - desc: an attempt to spawn a shell below a non-shell application. Specific applications are monitored. - condition: > - spawned_process - and shell_procs - and proc.pname exists - and protected_shell_spawner - and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, - needrestart_binaries, - mesos_shell_binaries, - erl_child_setup, exechealthz, - PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, - lb-controller, nvidia-installe, runsv, statsite, erlexec) - and not proc.cmdline in (known_shell_spawn_cmdlines) - and not proc.aname in (unicorn_launche) - and not consul_running_net_scripts - and not consul_running_alert_checks - and not nginx_starting_nginx - and not nginx_running_aws_s3_cp - and not run_by_package_mgmt_binaries - and not serf_script - and not check_process_status - and not run_by_foreman - and not python_mesos_marathon_scripting - and not splunk_running_forwarder - and not postgres_running_wal_e - and not redis_running_prepost_scripts - and not rabbitmq_running_scripts - and not rabbitmqctl_running_scripts - and not user_shell_container_exclusions - output: > - Shell spawned by untrusted binary (user=%user.name shell=%proc.name parent=%proc.pname - cmdline=%proc.cmdline pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] - gggparent=%proc.aname[4] ggggparent=%proc.aname[5]) - priority: DEBUG - tags: [shell] - -- macro: trusted_containers - condition: (container.image startswith sysdig/agent or - (container.image startswith sysdig/falco and - not container.image startswith sysdig/falco-event-generator) or - container.image startswith quay.io/sysdig or - container.image startswith sysdig/sysdig or - container.image startswith gcr.io/google_containers/hyperkube or - container.image startswith quay.io/coreos/flannel or - container.image startswith gcr.io/google_containers/kube-proxy or - container.image startswith calico/node or - container.image startswith rook/toolbox or - container.image startswith registry.access.redhat.com/openshift3/logging-fluentd or - container.image startswith registry.access.redhat.com/openshift3/logging-elasticsearch or - container.image startswith registry.access.redhat.com/openshift3/metrics-cassandra or - container.image startswith openshift3/ose-sti-builder or - container.image startswith registry.access.redhat.com/openshift3/ose-sti-builder or - container.image startswith cloudnativelabs/kube-router or - container.image startswith "consul:" or - container.image startswith mesosphere/mesos-slave or - container.image startswith istio/proxy_ or - container.image startswith 
datadog/docker-dd-agent) - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to specify additional containers that are -# trusted and therefore allowed to run privileged. -# -# In this file, it just takes one of the images in trusted_containers -# and repeats it. -- macro: user_trusted_containers - condition: (container.image startswith sysdig/agent) - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to specify additional containers that are -# allowed to perform sensitive mounts. -# -# In this file, it just takes one of the images in trusted_containers -# and repeats it. -- macro: user_sensitive_mount_containers - condition: (container.image startswith sysdig/agent) - -- rule: Launch Privileged Container - desc: Detect the initial process started in a privileged container. Exceptions are made for known trusted images. - condition: > - evt.type=execve and proc.vpid=1 and container - and container.privileged=true - and not trusted_containers - and not user_trusted_containers - output: Privileged container started (user=%user.name command=%proc.cmdline %container.info image=%container.image) - priority: INFO - tags: [container, cis] - -# For now, only considering a full mount of /etc as -# sensitive. Ideally, this would also consider all subdirectories -# below /etc as well, but the globbing mechanism used by sysdig -# doesn't allow exclusions of a full pattern, only single characters. -- macro: sensitive_mount - condition: (container.mount.dest[/proc*] != "N/A" or - container.mount.dest[/var/run/docker.sock] != "N/A" or - container.mount.dest[/] != "N/A" or - container.mount.dest[/etc] != "N/A" or - container.mount.dest[/root*] != "N/A") - -# The steps libcontainer performs to set up the root program for a container are: -# - clone + exec self to a program runc:[0:PARENT] -# - clone a program runc:[1:CHILD] which sets up all the namespaces -# - clone a second program runc:[2:INIT] + exec to the root program. -# The parent of runc:[2:INIT] is runc:0:PARENT] -# As soon as 1:CHILD is created, 0:PARENT exits, so there's a race -# where at the time 2:INIT execs the root program, 0:PARENT might have -# already exited, or might still be around. So we handle both. -# We also let runc:[1:CHILD] count as the parent process, which can occur -# when we lose events and lose track of state. - -- macro: container_entrypoint - condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], docker-runc, exe)) - -- rule: Launch Sensitive Mount Container - desc: > - Detect the initial process started by a container that has a mount from a sensitive host directory - (i.e. /proc). Exceptions are made for known trusted images. - condition: > - evt.type=execve and proc.vpid=1 and container - and sensitive_mount - and not trusted_containers - and not user_sensitive_mount_containers - output: Container with sensitive mount started (user=%user.name command=%proc.cmdline %container.info image=%container.image mounts=%container.mounts) - priority: INFO - tags: [container, cis] - -# In a local/user rules file, you could override this macro to -# explicitly enumerate the container images that you want to run in -# your environment. In this main falco rules file, there isn't any way -# to know all the containers that can run, so any container is -# alllowed, by using a filter that is guaranteed to evaluate to true -# (the same proc.vpid=1 that's in the Launch Disallowed Container -# rule). 
In the overridden macro, the condition would look something -# like (container.image startswith vendor/container-1 or -# container.image startswith vendor/container-2 or ...) - -- macro: allowed_containers - condition: (proc.vpid=1) - -- rule: Launch Disallowed Container - desc: > - Detect the initial process started by a container that is not in a list of allowed containers. - condition: evt.type=execve and proc.vpid=1 and container and not allowed_containers - output: Container started and not in allowed list (user=%user.name command=%proc.cmdline %container.info image=%container.image) - priority: WARNING - tags: [container] - -# Anything run interactively by root -# - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive -# output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)" -# priority: WARNING - -- rule: System user interactive - desc: an attempt to run interactive commands by a system (i.e. non-login) user - condition: spawned_process and system_users and interactive - output: "System user ran an interactive command (user=%user.name command=%proc.cmdline)" - priority: INFO - tags: [users] - -- rule: Terminal shell in container - desc: A shell was used as the entrypoint/exec point into a container with an attached terminal. - condition: > - spawned_process and container - and shell_procs and proc.tty != 0 - and container_entrypoint - output: > - A shell was spawned in a container with an attached terminal (user=%user.name %container.info - shell=%proc.name parent=%proc.pname cmdline=%proc.cmdline terminal=%proc.tty) - priority: NOTICE - tags: [container, shell] - -# For some container types (mesos), there isn't a container image to -# work with, and the container name is autogenerated, so there isn't -# any stable aspect of the software to work with. In this case, we -# fall back to allowing certain command lines. - -- list: known_shell_spawn_cmdlines - items: [ - '"sh -c uname -p 2> /dev/null"', - '"sh -c uname -s 2>&1"', - '"sh -c uname -r 2>&1"', - '"sh -c uname -v 2>&1"', - '"sh -c uname -a 2>&1"', - '"sh -c ruby -v 2>&1"', - '"sh -c getconf CLK_TCK"', - '"sh -c getconf PAGESIZE"', - '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"', - '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"', - '"sh -c /sbin/ldconfig -p 2>/dev/null"', - '"sh -c stty -a 2>/dev/null"', - '"sh -c stty -a < /dev/tty"', - '"sh -c stty -g < /dev/tty"', - '"sh -c node index.js"', - '"sh -c node index"', - '"sh -c node ./src/start.js"', - '"sh -c node app.js"', - '"sh -c node -e \"require(''nan'')\""', - '"sh -c node -e \"require(''nan'')\")"', - '"sh -c node $NODE_DEBUG_OPTION index.js "', - '"sh -c crontab -l 2"', - '"sh -c lsb_release -a"', - '"sh -c lsb_release -is 2>/dev/null"', - '"sh -c whoami"', - '"sh -c node_modules/.bin/bower-installer"', - '"sh -c /bin/hostname -f 2> /dev/null"', - '"sh -c locale -a"', - '"sh -c -t -i"', - '"sh -c openssl version"', - '"bash -c id -Gn kafadmin"', - '"sh -c /bin/sh -c ''date +%%s''"' - ] - -# This list allows for easy additions to the set of commands allowed -# to run shells in containers without having to without having to copy -# and override the entire run shell in container macro. Once -# https://github.com/draios/falco/issues/255 is fixed this will be a -# bit easier, as someone could append of any of the existing lists. 
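Until then, a local rules file can simply redefine the list below (empty by default), which the Run shell untrusted rule already consults; the binary name here is hypothetical:

- list: user_known_shell_spawn_binaries
  items: [my-build-wrapper]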
-- list: user_known_shell_spawn_binaries - items: [] - -# This macro allows for easy additions to the set of commands allowed -# to run shells in containers without having to override the entire -# rule. Its default value is an expression that always is false, which -# becomes true when the "not ..." in the rule is applied. -- macro: user_shell_container_exclusions - condition: (never_true) - -- macro: login_doing_dns_lookup - condition: (proc.name=login and fd.l4proto=udp and fd.sport=53) - -# sockfamily ip is to exclude certain processes (like 'groups') that communicate on unix-domain sockets -# systemd can listen on ports to launch things like sshd on demand -- rule: System procs network activity - desc: any network activity performed by system binaries that are not expected to send or receive any network traffic - condition: > - (fd.sockfamily = ip and system_procs) - and (inbound_outbound) - and not proc.name in (systemd, hostid) - and not login_doing_dns_lookup - output: > - Known system binary sent/received network traffic - (user=%user.name command=%proc.cmdline connection=%fd.name) - priority: NOTICE - tags: [network] - -- list: openvpn_udp_ports - items: [1194, 1197, 1198, 8080, 9201] - -- list: l2tp_udp_ports - items: [500, 1701, 4500, 10000] - -- list: statsd_ports - items: [8125] - -- list: ntp_ports - items: [123] - -# Some applications will connect a udp socket to an address only to -# test connectivity. Assuming the udp connect works, they will follow -# up with a tcp connect that actually sends/receives data. -# -# With that in mind, we listed a few commonly seen ports here to avoid -# some false positives. In addition, we make the main rule opt-in, so -# it's disabled by default. - -- list: test_connect_ports - items: [0, 9, 80, 3306] - -- macro: do_unexpected_udp_check - condition: (never_true) - -- list: expected_udp_ports - items: [53, openvpn_udp_ports, l2tp_udp_ports, statsd_ports, ntp_ports, test_connect_ports] - -- macro: expected_udp_traffic - condition: fd.port in (expected_udp_ports) - -- rule: Unexpected UDP Traffic - desc: UDP traffic not on port 53 (DNS) or other commonly used ports - condition: (inbound_outbound) and do_unexpected_udp_check and fd.l4proto=udp and not expected_udp_traffic - output: > - Unexpected UDP Traffic Seen - (user=%user.name command=%proc.cmdline connection=%fd.name proto=%fd.l4proto evt=%evt.type %evt.args) - priority: NOTICE - tags: [network] - -# With the current restriction on system calls handled by falco -# (e.g. excluding read/write/sendto/recvfrom/etc, this rule won't -# trigger). -# - rule: Ssh error in syslog -# desc: any ssh errors (failed logins, disconnects, ...) sent to syslog -# condition: syslog and ssh_error_message and evt.dir = < -# output: "sshd sent error message to syslog (error=%evt.buffer)" -# priority: WARNING - -- macro: somebody_becoming_themself - condition: ((user.name=nobody and evt.arg.uid=nobody) or - (user.name=www-data and evt.arg.uid=www-data) or - (user.name=_apt and evt.arg.uid=_apt) or - (user.name=postfix and evt.arg.uid=postfix) or - (user.name=pki-agent and evt.arg.uid=pki-agent) or - (user.name=pki-acme and evt.arg.uid=pki-acme) or - (user.name=nfsnobody and evt.arg.uid=nfsnobody) or - (user.name=postgres and evt.arg.uid=postgres)) - -- macro: nrpe_becoming_nagios - condition: (proc.name=nrpe and evt.arg.uid=nagios) - -# In containers, the user name might be for a uid that exists in the -# container but not on the host. (See -# https://github.com/draios/sysdig/issues/954). 
So in that case, allow -# a setuid. -- macro: known_user_in_container - condition: (container and user.name != "N/A") - -# sshd, mail programs attempt to setuid to root even when running as non-root. Excluding here to avoid meaningless FPs -- rule: Non sudo setuid - desc: > - an attempt to change users by calling setuid. sudo/su are excluded. users "root" and "nobody" - suing to itself are also excluded, as setuid calls typically involve dropping privileges. - condition: > - evt.type=setuid and evt.dir=> - and (known_user_in_container or not container) - and not user.name=root and not somebody_becoming_themself - and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries, - nomachine_binaries) - and not java_running_sdjagent - and not nrpe_becoming_nagios - output: > - Unexpected setuid call by non-sudo, non-root program (user=%user.name cur_uid=%user.uid parent=%proc.pname - command=%proc.cmdline uid=%evt.arg.uid) - priority: NOTICE - tags: [users] - -- rule: User mgmt binaries - desc: > - activity by any programs that can manage users, passwords, or permissions. sudo and su are excluded. - Activity in containers is also excluded--some containers create custom users on top - of a base linux distribution at startup. - Some innocuous commandlines that don't actually change anything are excluded. - condition: > - spawned_process and proc.name in (user_mgmt_binaries) and - not proc.name in (su, sudo, lastlog, nologin, unix_chkpwd) and not container and - not proc.pname in (cron_binaries, systemd, systemd.postins, udev.postinst, run-parts) and - not proc.cmdline startswith "passwd -S" and - not proc.cmdline startswith "useradd -D" and - not proc.cmdline startswith "systemd --version" and - not run_by_qualys and - not run_by_sumologic_securefiles and - not run_by_yum and - not run_by_ms_oms and - not run_by_google_accounts_daemon - output: > - User management binary command run outside of container - (user=%user.name command=%proc.cmdline parent=%proc.pname gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4]) - priority: NOTICE - tags: [host, users] - -- list: allowed_dev_files - items: [ - /dev/null, /dev/stdin, /dev/stdout, /dev/stderr, - /dev/random, /dev/urandom, /dev/console, /dev/kmsg - ] - -# (we may need to add additional checks against false positives, see: -# https://bugs.launchpad.net/ubuntu/+source/rkhunter/+bug/86153) -- rule: Create files below dev - desc: creating any files below /dev other than known programs that manage devices. Some rootkits hide files in /dev. - condition: > - fd.directory = /dev and - (evt.type = creat or (evt.type = open and evt.arg.flags contains O_CREAT)) - and not proc.name in (dev_creation_binaries) - and not fd.name in (allowed_dev_files) - and not fd.name startswith /dev/tty - output: "File created below /dev by untrusted program (user=%user.name command=%proc.cmdline file=%fd.name)" - priority: ERROR - tags: [filesystem] - - -# In a local/user rules file, you could override this macro to -# explicitly enumerate the container images that you want to allow -# access to EC2 metadata. In this main falco rules file, there isn't -# any way to know all the containers that should have access, so any -# container is alllowed, by repeating the "container" macro. In the -# overridden macro, the condition would look something like -# (container.image startswith vendor/container-1 or container.image -# startswith vendor/container-2 or ...) 
-- macro: ec2_metadata_containers - condition: container - -# On EC2 instances, 169.254.169.254 is a special IP used to fetch -# metadata about the instance. It may be desirable to prevent access -# to this IP from containers. -- rule: Contact EC2 Instance Metadata Service From Container - desc: Detect attempts to contact the EC2 Instance Metadata Service from a container - condition: outbound and fd.sip="169.254.169.254" and container and not ec2_metadata_containers - output: Outbound connection to EC2 instance metadata service (command=%proc.cmdline connection=%fd.name %container.info image=%container.image) - priority: NOTICE - tags: [network, aws, container] - -# In a local/user rules file, you should override this macro with the -# IP address of your k8s api server. The IP 1.2.3.4 is a placeholder -# IP that is not likely to be seen in practice. -- macro: k8s_api_server - condition: (fd.sip="1.2.3.4" and fd.sport=8080) - -# In a local/user rules file, list the container images that are -# allowed to contact the K8s API Server from within a container. This -# might cover cases where the K8s infrastructure itself is running -# within a container. -- macro: k8s_containers - condition: > - (container.image startswith gcr.io/google_containers/hyperkube-amd64 or - container.image startswith gcr.io/google_containers/kube2sky or - container.image startswith sysdig/agent or - container.image startswith sysdig/falco or - container.image startswith sysdig/sysdig) - -- rule: Contact K8S API Server From Container - desc: Detect attempts to contact the K8S API Server from a container - condition: outbound and k8s_api_server and container and not k8s_containers - output: Unexpected connection to K8s API Server from container (command=%proc.cmdline %container.info image=%container.image connection=%fd.name) - priority: NOTICE - tags: [network, k8s, container] - -# In a local/user rules file, list the container images that are -# allowed to contact NodePort services from within a container. This -# might cover cases where the K8s infrastructure itself is running -# within a container. -# -# By default, all containers are allowed to contact NodePort services. -- macro: nodeport_containers - condition: container - -- rule: Unexpected K8s NodePort Connection - desc: Detect attempts to use K8s NodePorts from a container - condition: (inbound_outbound) and fd.sport >= 30000 and fd.sport <= 32767 and container and not nodeport_containers - output: Unexpected K8s NodePort Connection (command=%proc.cmdline connection=%fd.name) - priority: NOTICE - tags: [network, k8s, container] - -# Application rules have moved to application_rules.yaml. Please look -# there if you want to enable them by adding to -# falco_rules.local.yaml. 
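The rules file deleted above repeatedly points operators at a local/user rules file for site-specific overrides of macros such as allowed_containers, ec2_metadata_containers, k8s_containers, and k8s_api_server. As a rough sketch only (the image names and the API-server address below are placeholders, not values taken from this repository), such an override file might look like:

# falco_rules.local.yaml -- illustrative sketch; image names and the
# API-server address are placeholders, not values used by this deployment.
- macro: allowed_containers
  condition: (container.image startswith vendor/container-1 or container.image startswith vendor/container-2)

- macro: ec2_metadata_containers
  condition: (container.image startswith vendor/metadata-reader)

- macro: k8s_api_server
  condition: (fd.sip="10.0.0.5" and fd.sport=6443)

Because the local rules file is read after the default rules (as the comments above note), a macro redefined there replaces the permissive default shipped with the package.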
- diff --git a/integrations/kubernetes-response-engine/deployment/falco/falco-account.yaml b/integrations/kubernetes-response-engine/deployment/falco/falco-account.yaml deleted file mode 100644 index 9d611519..00000000 --- a/integrations/kubernetes-response-engine/deployment/falco/falco-account.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: falco-account ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: falco-cluster-role -rules: - - apiGroups: ["extensions",""] - resources: ["nodes","namespaces","pods","replicationcontrollers","services","events","configmaps"] - verbs: ["get","list","watch"] - - nonResourceURLs: ["/healthz", "/healthz/*"] - verbs: ["get"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: falco-cluster-role-binding - namespace: default -subjects: - - kind: ServiceAccount - name: falco-account - namespace: default -roleRef: - kind: ClusterRole - name: falco-cluster-role - apiGroup: rbac.authorization.k8s.io diff --git a/integrations/kubernetes-response-engine/deployment/falco/falco-daemonset.yaml b/integrations/kubernetes-response-engine/deployment/falco/falco-daemonset.yaml deleted file mode 100644 index ecc4dcdb..00000000 --- a/integrations/kubernetes-response-engine/deployment/falco/falco-daemonset.yaml +++ /dev/null @@ -1,84 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: falco - labels: - name: falco-daemonset - app: demo -spec: - template: - metadata: - labels: - name: falco - app: demo - role: security - spec: - serviceAccount: falco-account - containers: - - name: falco-nats - image: sysdig/falco-nats:latest - imagePullPolicy: Always - volumeMounts: - - mountPath: /var/run/falco/ - name: shared-pipe - - name: falco - image: sysdig/falco:latest - securityContext: - privileged: true - args: [ "/usr/bin/falco", "-K", "/var/run/secrets/kubernetes.io/serviceaccount/token", "-k", "https://kubernetes", "-pk", "-U"] - volumeMounts: - - mountPath: /var/run/falco/ - name: shared-pipe - readOnly: false - - mountPath: /host/var/run/docker.sock - name: docker-socket - readOnly: true - - mountPath: /host/dev - name: dev-fs - readOnly: true - - mountPath: /host/proc - name: proc-fs - readOnly: true - - mountPath: /host/boot - name: boot-fs - readOnly: true - - mountPath: /host/lib/modules - name: lib-modules - readOnly: true - - mountPath: /host/usr - name: usr-fs - readOnly: true - - mountPath: /etc/falco - name: falco-config - initContainers: - - name: init-pipe - image: busybox - command: ['mkfifo','/var/run/falco/nats'] - volumeMounts: - - mountPath: /var/run/falco/ - name: shared-pipe - readOnly: false - volumes: - - name: shared-pipe - emptyDir: {} - - name: docker-socket - hostPath: - path: /var/run/docker.sock - - name: dev-fs - hostPath: - path: /dev - - name: proc-fs - hostPath: - path: /proc - - name: boot-fs - hostPath: - path: /boot - - name: lib-modules - hostPath: - path: /lib/modules - - name: usr-fs - hostPath: - path: /usr - - name: falco-config - configMap: - name: falco-config diff --git a/integrations/kubernetes-response-engine/deployment/falco/falco-event-generator.yaml b/integrations/kubernetes-response-engine/deployment/falco/falco-event-generator.yaml deleted file mode 100644 index 0b6500d5..00000000 --- a/integrations/kubernetes-response-engine/deployment/falco/falco-event-generator.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: 
falco-event-generator - labels: - name: falco-event-generator - app: demo -spec: - replicas: 1 - template: - metadata: - labels: - name: falco-event-generator - app: demo - spec: - containers: - - name: falco-event-generator - image: sysdig/falco-event-generator:latest
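If Falco was previously deployed from the manifests deleted above, the corresponding objects may still exist in a cluster after this change, since removing the files does not delete live resources. A minimal sketch of how one might check for leftovers (resource names are taken from the deleted manifests; --ignore-not-found suppresses errors for objects that are already gone):

# illustrative check only; resource names come from the deleted manifests above
kubectl get daemonset falco --ignore-not-found
kubectl get serviceaccount falco-account --ignore-not-found
kubectl get clusterrole falco-cluster-role --ignore-not-found
kubectl get clusterrolebinding falco-cluster-role-binding --ignore-not-found
kubectl get deployment falco-event-generator --ignore-not-found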