Compare commits

..

5 Commits

Author SHA1 Message Date
Lorenzo Fontana
1d9188a316 wip: pointer to pointer in hawk_engine rules_cb
Co-Authored-By: Leonardo Di Donato <leodidonato@gmail.com>
Signed-off-by: Lorenzo Fontana <lo@linux.com>
2020-11-04 19:06:41 +01:00
Leonardo Di Donato
5cc102545f wip
Co-authored-by: Lorenzo Fontana <lo@linux.com>
Signed-off-by: Leonardo Di Donato <leodidonato@gmail.com>
Signed-off-by: Lorenzo Fontana <lo@linux.com>
Signed-off-by: Leonardo Di Donato <leodidonato@gmail.com>
2020-11-04 12:05:07 +00:00
Leonardo Di Donato
2801c62666 new(userspace/falco): destroy rules watcher when needed
Co-authored-by: Lorenzo Fontana <lo@linux.com>
Signed-off-by: Leonardo Di Donato <leodidonato@gmail.com>
2020-10-29 11:33:12 +00:00
Leonardo Di Donato
c6cffc1f48 new(userspace): make hawk_watch_rules aware of the engine
Co-authored-by: Lorenzo Fontana <lo@linux.com>
Signed-off-by: Leonardo Di Donato <leodidonato@gmail.com>
2020-10-29 10:58:28 +00:00
Lorenzo Fontana
4894c93d5e new(userspace): initial draft for libhawk
Co-Authored-By: Leonardo Di Donato <leodidonato@gmail.com>
Signed-off-by: Lorenzo Fontana <fontanalorenz@gmail.com>
2020-10-28 15:43:27 +01:00
105 changed files with 1599 additions and 4861 deletions

View File

@@ -1,4 +0,0 @@
approvers:
- jonahjon
reviewers:
- jonahjon

View File

@@ -282,8 +282,6 @@ jobs:
- run: - run:
name: Execute integration tests name: Execute integration tests
command: /usr/bin/entrypoint test command: /usr/bin/entrypoint test
- store_test_results:
path: /build/release/integration-tests-xunit
"tests/integration-static": "tests/integration-static":
docker: docker:
- image: falcosecurity/falco-tester:latest - image: falcosecurity/falco-tester:latest
@@ -299,8 +297,6 @@ jobs:
- run: - run:
name: Execute integration tests name: Execute integration tests
command: /usr/bin/entrypoint test command: /usr/bin/entrypoint test
- store_test_results:
path: /build-static/release/integration-tests-xunit
"tests/driver-loader/integration": "tests/driver-loader/integration":
machine: machine:
image: ubuntu-1604:202004-01 image: ubuntu-1604:202004-01
@@ -452,25 +448,6 @@ jobs:
docker build --build-arg FALCO_IMAGE_TAG=master -t falcosecurity/falco-driver-loader:master docker/driver-loader docker build --build-arg FALCO_IMAGE_TAG=master -t falcosecurity/falco-driver-loader:master docker/driver-loader
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
docker push falcosecurity/falco-driver-loader:master docker push falcosecurity/falco-driver-loader:master
# Publish container images to AWS ECR Public
"publish/container-images-aws-dev":
docker:
- image: docker:stable
steps:
- attach_workspace:
at: /
- checkout
- setup_remote_docker
- run:
name: Build and publish falco to AWS
command: |
apk update
apk add --update groff less py-pip
pip install awscli
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
docker build --build-arg VERSION_BUCKET=deb-dev --build-arg FALCO_VERSION=${FALCO_VERSION} -t "public.ecr.aws/falcosecurity/falco:master" docker/falco
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
docker push "public.ecr.aws/falcosecurity/falco:master"
# Publish the packages # Publish the packages
"publish/packages": "publish/packages":
docker: docker:
@@ -537,26 +514,6 @@ jobs:
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
docker push "falcosecurity/falco-driver-loader:${CIRCLE_TAG}" docker push "falcosecurity/falco-driver-loader:${CIRCLE_TAG}"
docker push "falcosecurity/falco-driver-loader:latest" docker push "falcosecurity/falco-driver-loader:latest"
# Publish container images to AWS ECR Public
"publish/container-images-aws":
docker:
- image: docker:stable
steps:
- attach_workspace:
at: /
- checkout
- setup_remote_docker
- run:
name: Build and publish falco to AWS
command: |
apk update
apk add --update groff less py-pip
pip install awscli
docker build --build-arg VERSION_BUCKET=deb --build-arg FALCO_VERSION=${CIRCLE_TAG} -t "public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG}" docker/falco
docker tag "public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG}" public.ecr.aws/falcosecurity/falco:latest
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
docker push "public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG}"
docker push "public.ecr.aws/falcosecurity/falco:latest"
workflows: workflows:
version: 2 version: 2
build_and_test: build_and_test:
@@ -616,16 +573,7 @@ workflows:
requires: requires:
- "publish/packages-dev" - "publish/packages-dev"
- "tests/driver-loader/integration" - "tests/driver-loader/integration"
- "publish/container-images-aws-dev": - "quality/static-analysis"
context: test-infra # contains Falco AWS credentials
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- publish/docker-dev
# - "quality/static-analysis" # This is temporarly disabled: https://github.com/falcosecurity/falco/issues/1526
release: release:
jobs: jobs:
- "build/musl": - "build/musl":
@@ -668,12 +616,3 @@ workflows:
only: /.*/ only: /.*/
branches: branches:
ignore: /.*/ ignore: /.*/
- "publish/container-images-aws":
context: test-infra # contains Falco AWS credentials
requires:
- "publish/docker"
filters:
tags:
only: /.*/
branches:
ignore: /.*/

20
.github/stale.yml vendored Normal file
View File

@@ -0,0 +1,20 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 60
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
- cncf
- roadmap
- "help wanted"
# Label to use when marking an issue as stale
staleLabel: wontfix
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
Issues labeled "cncf", "roadmap" and "help wanted" will not be automatically closed.
Please refer to a maintainer to get such label added if you think this should be kept open.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

1
.gitignore vendored
View File

@@ -2,6 +2,7 @@
*~ *~
*.pyc *.pyc
test/falco_tests.yaml
test/traces-negative test/traces-negative
test/traces-positive test/traces-positive
test/traces-info test/traces-info

View File

@@ -10,7 +10,7 @@ This is a list of production adopters of Falco (in alphabetical order):
* [GitLab](https://about.gitlab.com/direction/defend/container_host_security/) - GitLab is a complete DevOps platform, delivered as a single application, fundamentally changing the way Development, Security, and Ops teams collaborate. GitLab Ultimate provides the single tool teams need to find, triage, and fix vulnerabilities in applications, services, and cloud-native environments enabling them to manage their risk. This provides them with repeatable, defensible processes that automate security and compliance policies. GitLab includes a tight integration with Falco, allowing users to defend their containerized applications from attacks while running in production. * [GitLab](https://about.gitlab.com/direction/defend/container_host_security/) - GitLab is a complete DevOps platform, delivered as a single application, fundamentally changing the way Development, Security, and Ops teams collaborate. GitLab Ultimate provides the single tool teams need to find, triage, and fix vulnerabilities in applications, services, and cloud-native environments enabling them to manage their risk. This provides them with repeatable, defensible processes that automate security and compliance policies. GitLab includes a tight integration with Falco, allowing users to defend their containerized applications from attacks while running in production.
* [League](https://league.com/ca/) - League provides health benefits management services to help employees understand and get the most from their benefits, and employers to provide effective, efficient plans. Falco is used to monitor our deployed services on Kubernetes, protecting against malicious access to containers which could lead to leaks of PHI or other sensitive data. The Falco alerts are logged in Stackdriver for grouping and further analysis. In the future, we're hoping for integrations with Prometheus and AlertManager as well. * [League](https://league.com/ca/) - League provides health benefits management services to help employees understand and get the most from their benefits, and employers to provide effective, efficient plans. Falco is used to monitor our deployed services on Kubernetes, protecting against malicious access to containerswhich could lead to leaks of PHI or other sensitive data. The Falco alerts are logged in Stackdriver for grouping and further analysis. In the future, we're hoping for integrations with Prometheus and AlertManager as well.
* [Logz.io](https://logz.io/) - Logz.io is a cloud observability platform for modern engineering teams. The Logz.io platform consists of three products — Log Management, Infrastructure Monitoring, and Cloud SIEM — that work together to unify the jobs of monitoring, troubleshooting, and security. We empower engineers to deliver better software by offering the world's most popular open source observability tools — the ELK Stack, Grafana, and Jaeger — in a single, easy to use, and powerful platform purpose-built for monitoring distributed cloud environments. Cloud SIEM supports data from multiple sources, including Falco's alerts, and offers useful rules and dashboards content to visualize and manage incidents across your systems in a unified UI. * [Logz.io](https://logz.io/) - Logz.io is a cloud observability platform for modern engineering teams. The Logz.io platform consists of three products — Log Management, Infrastructure Monitoring, and Cloud SIEM — that work together to unify the jobs of monitoring, troubleshooting, and security. We empower engineers to deliver better software by offering the world's most popular open source observability tools — the ELK Stack, Grafana, and Jaeger — in a single, easy to use, and powerful platform purpose-built for monitoring distributed cloud environments. Cloud SIEM supports data from multiple sources, including Falco's alerts, and offers useful rules and dashboards content to visualize and manage incidents across your systems in a unified UI.
* https://logz.io/blog/k8s-security-with-falco-and-cloud-siem/ * https://logz.io/blog/k8s-security-with-falco-and-cloud-siem/
@@ -26,5 +26,5 @@ This is a list of production adopters of Falco (in alphabetical order):
* [Sumo Logic](https://www.sumologic.com/) - Sumo Logic provides a SaaS based log aggregation service that provides dashboards and applications to easily identify and analyze problems in your application and infrastructure. Sumo Logic provides native integrations for many CNCF projects, such as Falco, that allows end users to easily collect Falco events and analyze Falco events on DecSecOps focused dashboards. * [Sumo Logic](https://www.sumologic.com/) - Sumo Logic provides a SaaS based log aggregation service that provides dashboards and applications to easily identify and analyze problems in your application and infrastructure. Sumo Logic provides native integrations for many CNCF projects, such as Falco, that allows end users to easily collect Falco events and analyze Falco events on DecSecOps focused dashboards.
* [Sysdig](https://www.sysdig.com/) Sysdig originally created Falco in 2016 to detect unexpected or suspicious activity using a rules engine on top of the data that comes from the sysdig kernel system call probe. Sysdig provides tooling to help with vulnerability management, compliance, detection, incident response and forensics in Cloud-native environments. Sysdig Secure has extended Falco to include: a rule library, the ability to update macros, lists & rules via the user interface and API, automated tuning of rules, and rule creation based on profiling known system behavior. On top of the basic Falco rules, Sysdig Secure implements the concept of a "Security policy" that can comprise several rules which are evaluated for a user-defined infrastructure scope like Kubernetes namespaces, OpenShift clusters, deployment workload, cloud regions etc. * [Sysdig](https://www.sysdig.com/) Sysdig originally created Falco in 2016 to detect unexpected or suspicious activity using a rules engine on top of the data that comes from the sysdig kernel system call probe. Sysdig provides tooling to help with vulnerability management, compliance, detection, incident response and forensics in Cloud-native environments. Sysdig Secure has extended falco to include: a rule library, the ability to update macros, lists & rules via the user interface and API, automated tuning of rules, and rule creation based on profiling known system behavior. On top of the basic Falco rules, Sysdig Secure implements the concept of a "Security policy" that can comprise several rules which are evaluated for a user-define infrastructure scope like Kubernetes namespaces, OpenShift clusters, deployment workload, cloud regions etc.

View File

@@ -1,79 +1,5 @@
# Change Log # Change Log
## v0.27.0
Released on 2021-01-18
### Major Changes
* new: Added falco engine version to grpc version service [[#1507](https://github.com/falcosecurity/falco/pull/1507)] - [@nibalizer](https://github.com/nibalizer)
* BREAKING CHANGE: Users who run Falco without a config file will be unable to do that any more, Falco now expects a configuration file to be passed all the times. Developers may need to adjust their processes. [[#1494](https://github.com/falcosecurity/falco/pull/1494)] - [@nibalizer](https://github.com/nibalizer)
* new: asynchronous outputs implementation, outputs channels will not block event processing anymore [[#1451](https://github.com/falcosecurity/falco/pull/1451)] - [@leogr](https://github.com/leogr)
* new: slow outputs detection [[#1451](https://github.com/falcosecurity/falco/pull/1451)] - [@leogr](https://github.com/leogr)
* new: `output_timeout` config option for slow outputs detection [[#1451](https://github.com/falcosecurity/falco/pull/1451)] - [@leogr](https://github.com/leogr)
### Minor Changes
* build: bump b64 to v2.0.0.1 [[#1441](https://github.com/falcosecurity/falco/pull/1441)] - [@fntlnz](https://github.com/fntlnz)
* rules(macro container_started): re-use `spawned_process` macro inside `container_started` macro [[#1449](https://github.com/falcosecurity/falco/pull/1449)] - [@leodido](https://github.com/leodido)
* docs: reach out documentation [[#1472](https://github.com/falcosecurity/falco/pull/1472)] - [@fntlnz](https://github.com/fntlnz)
* docs: Broken outputs.proto link [[#1493](https://github.com/falcosecurity/falco/pull/1493)] - [@deepskyblue86](https://github.com/deepskyblue86)
* docs(README.md): correct broken links [[#1506](https://github.com/falcosecurity/falco/pull/1506)] - [@leogr](https://github.com/leogr)
* docs(proposals): Exceptions handling proposal [[#1376](https://github.com/falcosecurity/falco/pull/1376)] - [@mstemm](https://github.com/mstemm)
* docs: fix a broken link of README [[#1516](https://github.com/falcosecurity/falco/pull/1516)] - [@oke-py](https://github.com/oke-py)
* docs: adding the kubernetes privileged use case to use cases [[#1484](https://github.com/falcosecurity/falco/pull/1484)] - [@fntlnz](https://github.com/fntlnz)
* rules(Mkdir binary dirs): Adds exe_running_docker_save as an exception as these rules can be triggered when a container is created. [[#1386](https://github.com/falcosecurity/falco/pull/1386)] - [@jhwbarlow](https://github.com/jhwbarlow)
* rules(Create Hidden Files): Adds exe_running_docker_save as an exception as these rules can be triggered when a container is created. [[#1386](https://github.com/falcosecurity/falco/pull/1386)] - [@jhwbarlow](https://github.com/jhwbarlow)
* docs(.circleci): welcome Jonah (Amazon) as a new Falco CI maintainer [[#1518](https://github.com/falcosecurity/falco/pull/1518)] - [@leodido](https://github.com/leodido)
* build: falcosecurity/falco:master also available on the AWS ECR Public registry [[#1512](https://github.com/falcosecurity/falco/pull/1512)] - [@leodido](https://github.com/leodido)
* build: falcosecurity/falco:latest also available on the AWS ECR Public registry [[#1512](https://github.com/falcosecurity/falco/pull/1512)] - [@leodido](https://github.com/leodido)
* update: gRPC clients can now subscribe to drop alerts via gRCP API [[#1451](https://github.com/falcosecurity/falco/pull/1451)] - [@leogr](https://github.com/leogr)
* macro(allowed_k8s_users): exclude cloud-controller-manage to avoid false positives on k3s [[#1444](https://github.com/falcosecurity/falco/pull/1444)] - [@fntlnz](https://github.com/fntlnz)
### Bug Fixes
* fix(userspace/falco): use given priority in falco_outputs::handle_msg() [[#1450](https://github.com/falcosecurity/falco/pull/1450)] - [@leogr](https://github.com/leogr)
* fix(userspace/engine): free formatters, if any [[#1447](https://github.com/falcosecurity/falco/pull/1447)] - [@leogr](https://github.com/leogr)
* fix(scripts/falco-driver-loader): lsmod usage [[#1474](https://github.com/falcosecurity/falco/pull/1474)] - [@dnwe](https://github.com/dnwe)
* fix: a bug that prevents Falco driver to be consumed by many Falco instances in some circumstances [[#1485](https://github.com/falcosecurity/falco/pull/1485)] - [@leodido](https://github.com/leodido)
* fix: set `HOST_ROOT=/host` environment variable for the `falcosecurity/falco-no-driver` container image by default [[#1492](https://github.com/falcosecurity/falco/pull/1492)] - [@leogr](https://github.com/leogr)
### Rule Changes
* rule(list user_known_change_thread_namespace_binaries): add crio and multus to the list [[#1501](https://github.com/falcosecurity/falco/pull/1501)] - [@Kaizhe](https://github.com/Kaizhe)
* rule(Container Run as Root User): new rule created [[#1500](https://github.com/falcosecurity/falco/pull/1500)] - [@Kaizhe](https://github.com/Kaizhe)
* rule(Linux Kernel Module injection detected): adds a new rule that detects when an LKM module is injected using `insmod` from a container (typically used by rootkits looking to obfuscate their behavior via kernel hooking). [[#1478](https://github.com/falcosecurity/falco/pull/1478)] - [@d1vious](https://github.com/d1vious)
* rule(macro multipath_writing_conf): create and use the macro [[#1475](https://github.com/falcosecurity/falco/pull/1475)] - [@nmarier-coveo](https://github.com/nmarier-coveo)
* rule(list falco_privileged_images): add calico/node without registry prefix to prevent false positive alerts [[#1457](https://github.com/falcosecurity/falco/pull/1457)] - [@czunker](https://github.com/czunker)
* rule(Full K8s Administrative Access): use the right list of admin users (fix) [[#1454](https://github.com/falcosecurity/falco/pull/1454)] - [@mstemm](https://github.com/mstemm)
### Non user-facing changes
* chore(cmake): remove unnecessary whitespace patch [[#1522](https://github.com/falcosecurity/falco/pull/1522)] - [@leogr](https://github.com/leogr)
* remove stale bot in favor of the new lifecycle bot [[#1490](https://github.com/falcosecurity/falco/pull/1490)] - [@leodido](https://github.com/leodido)
* chore(cmake): mark some variables as advanced [[#1496](https://github.com/falcosecurity/falco/pull/1496)] - [@deepskyblue86](https://github.com/deepskyblue86)
* chore(cmake/modules): avoid useless rebuild [[#1495](https://github.com/falcosecurity/falco/pull/1495)] - [@deepskyblue86](https://github.com/deepskyblue86)
* build: BUILD_BYPRODUCTS for civetweb [[#1489](https://github.com/falcosecurity/falco/pull/1489)] - [@fntlnz](https://github.com/fntlnz)
* build: remove duplicate item from FALCO_SOURCES [[#1480](https://github.com/falcosecurity/falco/pull/1480)] - [@leodido](https://github.com/leodido)
* build: make our integration tests report clear steps for CircleCI UI [[#1473](https://github.com/falcosecurity/falco/pull/1473)] - [@fntlnz](https://github.com/fntlnz)
* further improvements outputs impl. [[#1443](https://github.com/falcosecurity/falco/pull/1443)] - [@leogr](https://github.com/leogr)
* fix(test): make integration tests properly fail [[#1439](https://github.com/falcosecurity/falco/pull/1439)] - [@leogr](https://github.com/leogr)
* Falco outputs refactoring [[#1412](https://github.com/falcosecurity/falco/pull/1412)] - [@leogr](https://github.com/leogr)
## v0.26.2
Released on 2020-11-10
### Major Changes
* update: DRIVERS_REPO now defaults to https://download.falco.org/driver [[#1460](https://github.com/falcosecurity/falco/pull/1460)] - [@leodido](https://github.com/leodido)
## v0.26.1 ## v0.26.1
Released on 2020-10-01 Released on 2020-10-01

View File

@@ -19,15 +19,6 @@ option(BUILD_WARNINGS_AS_ERRORS "Enable building with -Wextra -Werror flags" OFF
option(MINIMAL_BUILD "Build a minimal version of Falco, containing only the engine and basic input/output (EXPERIMENTAL)" OFF) option(MINIMAL_BUILD "Build a minimal version of Falco, containing only the engine and basic input/output (EXPERIMENTAL)" OFF)
option(MUSL_OPTIMIZED_BUILD "Enable if you want a musl optimized build" OFF) option(MUSL_OPTIMIZED_BUILD "Enable if you want a musl optimized build" OFF)
# We shouldn't need to set this, see https://gitlab.kitware.com/cmake/cmake/-/issues/16419
option(EP_UPDATE_DISCONNECTED "ExternalProject update disconnected" OFF)
if (${EP_UPDATE_DISCONNECTED})
set_property(
DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
PROPERTY EP_UPDATE_DISCONNECTED TRUE)
endif()
# Elapsed time # Elapsed time
# set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_COMMAND} -E time") # TODO(fntlnz, leodido): add a flag to enable this # set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_COMMAND} -E time") # TODO(fntlnz, leodido): add a flag to enable this
@@ -69,7 +60,7 @@ if(MUSL_OPTIMIZED_BUILD)
set(MUSL_FLAGS "-static -Os") set(MUSL_FLAGS "-static -Os")
endif() endif()
set(CMAKE_COMMON_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS} ${MINIMAL_BUILD_FLAGS} ${MUSL_FLAGS}") set(CMAKE_COMMON_FLAGS "-Wall -pg -ggdb ${DRAIOS_FEATURE_FLAGS} ${MINIMAL_BUILD_FLAGS} ${MUSL_FLAGS}")
if(BUILD_WARNINGS_AS_ERRORS) if(BUILD_WARNINGS_AS_ERRORS)
set(CMAKE_SUPPRESSED_WARNINGS set(CMAKE_SUPPRESSED_WARNINGS
@@ -92,7 +83,7 @@ include(GetFalcoVersion)
set(PACKAGE_NAME "falco") set(PACKAGE_NAME "falco")
set(PROBE_NAME "falco") set(PROBE_NAME "falco")
set(PROBE_DEVICE_NAME "falco") set(PROBE_DEVICE_NAME "falco")
set(DRIVERS_REPO "https://download.falco.org/driver") set(DRIVERS_REPO "https://dl.bintray.com/falcosecurity/driver")
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
set(CMAKE_INSTALL_PREFIX set(CMAKE_INSTALL_PREFIX
/usr /usr
@@ -124,8 +115,20 @@ set(CURSES_NEED_NCURSES TRUE)
find_package(Curses REQUIRED) find_package(Curses REQUIRED)
message(STATUS "Found ncurses: include: ${CURSES_INCLUDE_DIR}, lib: ${CURSES_LIBRARIES}") message(STATUS "Found ncurses: include: ${CURSES_INCLUDE_DIR}, lib: ${CURSES_LIBRARIES}")
# b64 # libb64
include(b64)
set(B64_SRC "${PROJECT_BINARY_DIR}/b64-prefix/src/b64")
message(STATUS "Using bundled b64 in '${B64_SRC}'")
set(B64_INCLUDE "${B64_SRC}/include")
set(B64_LIB "${B64_SRC}/src/libb64.a")
ExternalProject_Add(
b64
URL "https://github.com/libb64/libb64/archive/ce864b17ea0e24a91e77c7dd3eb2d1ac4175b3f0.tar.gz"
URL_HASH "SHA256=d07173e66f435e5c77dbf81bd9313f8d0e4a3b4edd4105a62f4f8132ba932811"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
INSTALL_COMMAND "")
# yaml-cpp # yaml-cpp
include(yaml-cpp) include(yaml-cpp)
@@ -139,16 +142,52 @@ if(NOT MINIMAL_BUILD)
endif() endif()
# LuaJIT # LuaJIT
include(luajit) set(LUAJIT_SRC "${PROJECT_BINARY_DIR}/luajit-prefix/src/luajit/src")
message(STATUS "Using bundled LuaJIT in '${LUAJIT_SRC}'")
set(LUAJIT_INCLUDE "${LUAJIT_SRC}")
set(LUAJIT_LIB "${LUAJIT_SRC}/libluajit.a")
ExternalProject_Add(
luajit
URL "https://github.com/LuaJIT/LuaJIT/archive/v2.0.3.tar.gz"
URL_HASH "SHA256=8da3d984495a11ba1bce9a833ba60e18b532ca0641e7d90d97fafe85ff014baa"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
INSTALL_COMMAND "")
# Lpeg # Lpeg
include(lpeg) set(LPEG_SRC "${PROJECT_BINARY_DIR}/lpeg-prefix/src/lpeg")
set(LPEG_LIB "${PROJECT_BINARY_DIR}/lpeg-prefix/src/lpeg/build/lpeg.a")
message(STATUS "Using bundled lpeg in '${LPEG_SRC}'")
set(LPEG_DEPENDENCIES "")
list(APPEND LPEG_DEPENDENCIES "luajit")
ExternalProject_Add(
lpeg
DEPENDS ${LPEG_DEPENDENCIES}
URL "http://www.inf.puc-rio.br/~roberto/lpeg/lpeg-1.0.2.tar.gz"
URL_HASH "SHA256=48d66576051b6c78388faad09b70493093264588fcd0f258ddaab1cdd4a15ffe"
BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} "${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh" "${LPEG_SRC}/build"
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ""
INSTALL_COMMAND "")
# libyaml # libyaml
include(libyaml) include(libyaml)
# lyaml # lyaml
include(lyaml) set(LYAML_SRC "${PROJECT_BINARY_DIR}/lyaml-prefix/src/lyaml/ext/yaml")
set(LYAML_LIB "${LYAML_SRC}/.libs/yaml.a")
message(STATUS "Using bundled lyaml in '${LYAML_SRC}'")
ExternalProject_Add(
lyaml
DEPENDS luajit libyaml
URL "https://github.com/gvvaughan/lyaml/archive/release-v6.0.tar.gz"
URL_HASH "SHA256=9d7cf74d776999ff6f758c569d5202ff5da1f303c6f4229d3b41f71cd3a3e7a7"
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND ./configure --enable-static CFLAGS=-I${LIBYAML_INSTALL_DIR}/include CPPFLAGS=-I${LIBYAML_INSTALL_DIR}/include LDFLAGS=-L${LIBYAML_INSTALL_DIR}/lib LIBS=-lyaml LUA=${LUAJIT_SRC}/luajit LUA_INCLUDE=-I${LUAJIT_INCLUDE}
INSTALL_COMMAND sh -c
"cp -R ${PROJECT_BINARY_DIR}/lyaml-prefix/src/lyaml/lib/* ${PROJECT_SOURCE_DIR}/userspace/engine/lua")
# One TBB # One TBB
set(TBB_SRC "${PROJECT_BINARY_DIR}/tbb-prefix/src/tbb") set(TBB_SRC "${PROJECT_BINARY_DIR}/tbb-prefix/src/tbb")
@@ -181,7 +220,6 @@ if(NOT MINIMAL_BUILD)
COMMAND ${CMAKE_COMMAND} -E make_directory ${CIVETWEB_SRC}/install/include COMMAND ${CMAKE_COMMAND} -E make_directory ${CIVETWEB_SRC}/install/include
BUILD_IN_SOURCE 1 BUILD_IN_SOURCE 1
BUILD_COMMAND ${CMD_MAKE} COPT="-DNO_FILES" WITH_CPP=1 BUILD_COMMAND ${CMD_MAKE} COPT="-DNO_FILES" WITH_CPP=1
BUILD_BYPRODUCTS ${CIVETWEB_LIB}
INSTALL_COMMAND ${CMD_MAKE} COPT="-DNO_FILES" install-lib install-headers PREFIX=${CIVETWEB_SRC}/install "WITH_CPP=1") INSTALL_COMMAND ${CMD_MAKE} COPT="-DNO_FILES" install-lib install-headers PREFIX=${CIVETWEB_SRC}/install "WITH_CPP=1")
endif() endif()
@@ -226,7 +264,6 @@ set(FALCO_ABSOLUTE_SHARE_DIR "${CMAKE_INSTALL_PREFIX}/${FALCO_SHARE_DIR}")
set(FALCO_BIN_DIR bin) set(FALCO_BIN_DIR bin)
add_subdirectory(scripts) add_subdirectory(scripts)
add_subdirectory(userspace/libhawk)
add_subdirectory(userspace/engine) add_subdirectory(userspace/engine)
add_subdirectory(userspace/falco) add_subdirectory(userspace/falco)
add_subdirectory(tests) add_subdirectory(tests)

55
GOVERNANCE.md Normal file
View File

@@ -0,0 +1,55 @@
# Process for becoming a maintainer
* Express interest to the existing maintainers that you or your organization is interested in becoming a
maintainer. Becoming a maintainer generally means that you are going to be spending substantial
time (>25%) on Falco for the foreseeable future. You should have domain expertise and be extremely
proficient in C++. Ultimately your goal is to become a maintainer that will represent your
organization.
* We will expect you to start contributing increasingly complicated PRs, under the guidance
of the existing maintainers.
* We may ask you to do some PRs from our backlog.
* As you gain experience with the code base and our standards, we will ask you to do code reviews
for incoming PRs (i.e., all maintainers are expected to shoulder a proportional share of
community reviews).
* After a period of approximately 2-3 months of working together and making sure we see eye to eye,
the existing maintainers will confer and decide whether to grant maintainer status or not.
We make no guarantees on the length of time this will take, but 2-3 months is the approximate
goal.
## Maintainer responsibilities
* Monitor Slack (delayed response is perfectly acceptable).
* Triage GitHub issues and perform pull request reviews for other maintainers and the community.
* During GitHub issue triage, apply all applicable [labels](https://github.com/falcosecurity/falco/labels)
to each new issue. Labels are extremely useful for future issue follow up. Which labels to apply
is somewhat subjective so just use your best judgment.
* Make sure that ongoing PRs are moving forward at the right pace or closing them.
* Participate when called upon in the security releases. Note that although this should be a rare
occurrence, if a serious vulnerability is found, the process may take up to several full days of
work to implement. This reality should be taken into account when discussing time commitment
obligations with employers.
* In general continue to be willing to spend at least 25% of one's time working on Falco (~1.25
business days per week).
## When does a maintainer lose maintainer status
If a maintainer is no longer interested or cannot perform the maintainer duties listed above, they
should volunteer to be moved to emeritus status. In extreme cases this can also occur by a vote of
the maintainers per the voting process below.
# Conflict resolution and voting
In general, we prefer that technical issues and maintainer membership are amicably worked out
between the persons involved. If a dispute cannot be decided independently, the maintainers can be
called in to decide an issue. If the maintainers themselves cannot decide an issue, the issue will
be resolved by voting. The voting process is a simple majority in which each senior maintainer
receives two votes and each normal maintainer receives one vote.
# Adding new projects to the falcosecurity GitHub organization
New projects will be added to the falcosecurity organization via GitHub issue discussion in one of the
existing projects in the organization. Once sufficient discussion has taken place (~3-5 business
days but depending on the volume of conversation), the maintainers of *the project where the issue
was opened* (since different projects in the organization may have different maintainers) will
decide whether the new project should be added. See the section above on voting if the maintainers
cannot easily decide.

View File

@@ -5,9 +5,7 @@
[![Build Status](https://img.shields.io/circleci/build/github/falcosecurity/falco/master?style=for-the-badge)](https://circleci.com/gh/falcosecurity/falco) [![CII Best Practices Summary](https://img.shields.io/cii/summary/2317?label=CCI%20Best%20Practices&style=for-the-badge)](https://bestpractices.coreinfrastructure.org/projects/2317) [![GitHub](https://img.shields.io/github/license/falcosecurity/falco?style=for-the-badge)](COPYING) [![Build Status](https://img.shields.io/circleci/build/github/falcosecurity/falco/master?style=for-the-badge)](https://circleci.com/gh/falcosecurity/falco) [![CII Best Practices Summary](https://img.shields.io/cii/summary/2317?label=CCI%20Best%20Practices&style=for-the-badge)](https://bestpractices.coreinfrastructure.org/projects/2317) [![GitHub](https://img.shields.io/github/license/falcosecurity/falco?style=for-the-badge)](COPYING)
Want to talk? Join us on the [#falco](https://kubernetes.slack.com/archives/CMWH3EH32) channel in the [Kubernetes Slack](https://slack.k8s.io). #### Latest releases
### Latest releases
Read the [change log](CHANGELOG.md). Read the [change log](CHANGELOG.md).
@@ -21,27 +19,27 @@ Read the [change log](CHANGELOG.md).
The Falco Project, originally created by [Sysdig](https://sysdig.com), is an incubating [CNCF](https://cncf.io) open source cloud native runtime security tool. The Falco Project, originally created by [Sysdig](https://sysdig.com), is an incubating [CNCF](https://cncf.io) open source cloud native runtime security tool.
Falco makes it easy to consume kernel events, and enrich those events with information from Kubernetes and the rest of the cloud native stack. Falco makes it easy to consume kernel events, and enrich those events with information from Kubernetes and the rest of the cloud native stack.
Falco has a rich set of security rules specifically built for Kubernetes, Linux, and cloud-native. Falco has a rich rule set of security rules specifically built for Kubernetes, Linux, and cloud-native.
If a rule is violated in a system, Falco will send an alert notifying the user of the violation and its severity. If a rule is violated in a system, Falco will send an alert notifying the user of the violation and its severity.
### Installing Falco ### Installing Falco
If you would like to run Falco in **production** please adhere to the [official installation guide](https://falco.org/docs/getting-started/installation/). If you would like to run Falco in **production** please adhere to the [official installation guide](https://falco.org/docs/installation/).
##### Kubernetes ##### Kubernetes
| Tool | Link | Note | | Tool | Link | Note |
|----------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------| |----------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| Helm | [Chart Repository](https://github.com/falcosecurity/charts/tree/master/falco#introduction) | The Falco community offers regular helm chart releases. | | Helm | [Chart Repository](https://github.com/falcosecurity/charts/tree/master/falco#introduction) | The Falco community offers regular helm chart releases. |
| Minikube | [Tutorial](https://falco.org/docs/getting-started/third-party/#minikube) | The Falco driver has been baked into minikube for easy deployment. | | Minikube | [Tutorial](https://falco.org/docs/third-party/#minikube) | The Falco driver has been baked into minikube for easy deployment. |
| Kind | [Tutorial](https://falco.org/docs/getting-started/third-party/#kind) | Running Falco with kind requires a driver on the host system. | | Kind | [Tutorial](https://falco.org/docs/third-party/#kind) | Running Falco with kind requires a driver on the host system. |
| GKE | [Tutorial](https://falco.org/docs/getting-started/third-party/#gke) | We suggest using the eBPF driver for running Falco on GKE. | | GKE | [Tutorial](https://falco.org/docs/third-party/#gke) | We suggest using the eBPF driver for running Falco on GKE. |
### Developing ### Developing
Falco is designed to be extensible such that it can be built into cloud-native applications and infrastructure. Falco is designed to be extensible such that it can be built into cloud-native applications and infrastructure.
Falco has a [gRPC](https://falco.org/docs/grpc/) endpoint and an API defined in [protobuf](https://github.com/falcosecurity/falco/blob/master/userspace/falco/outputs.proto). Falco has a [gRPC](https://falco.org/docs/grpc/) endpoint and an API defined in [protobuf](https://github.com/falcosecurity/falco/blob/update-readme/userspace/falco/outputs.proto).
The Falco Project supports various SDKs for this endpoint. The Falco Project supports various SDKs for this endpoint.
##### SDKs ##### SDKs
@@ -65,7 +63,6 @@ For example, Falco can easily detect incidents including but not limited to:
- Unexpected read of a sensitive file, such as `/etc/shadow`. - Unexpected read of a sensitive file, such as `/etc/shadow`.
- A non-device file is written to `/dev`. - A non-device file is written to `/dev`.
- A standard system binary, such as `ls`, is making an outbound network connection. - A standard system binary, such as `ls`, is making an outbound network connection.
- A privileged pod is started in a Kubernetes cluster.
### Documentation ### Documentation
@@ -75,13 +72,6 @@ The [Official Documentation](https://falco.org/docs/) is the best resource to le
To get involved with The Falco Project please visit [the community repository](https://github.com/falcosecurity/community) to find more. To get involved with The Falco Project please visit [the community repository](https://github.com/falcosecurity/community) to find more.
How to reach out?
- Join the #falco channel on the [Kubernetes Slack](https://slack.k8s.io)
- [Join the Falco mailing list](https://lists.cncf.io/g/cncf-falco-dev)
- [Read the Falco documentation](https://falco.org/docs/)
### Contributing ### Contributing
See the [CONTRIBUTING.md](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md). See the [CONTRIBUTING.md](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md).
@@ -104,4 +94,4 @@ Falco is licensed to you under the [Apache 2.0](./COPYING) open source license.
[3]: https://dl.bintray.com/falcosecurity/deb-dev/stable [3]: https://dl.bintray.com/falcosecurity/deb-dev/stable
[4]: https://dl.bintray.com/falcosecurity/deb/stable [4]: https://dl.bintray.com/falcosecurity/deb/stable
[5]: https://dl.bintray.com/falcosecurity/bin-dev/x86_64 [5]: https://dl.bintray.com/falcosecurity/bin-dev/x86_64
[6]: https://dl.bintray.com/falcosecurity/bin/x86_64 [6]: https://dl.bintray.com/falcosecurity/bin/x86_64

View File

@@ -28,8 +28,8 @@ Before cutting a release we need to do some homework in the Falco repository. Th
- Double-check if any hard-coded version number is present in the code, it should be not present anywhere: - Double-check if any hard-coded version number is present in the code, it should be not present anywhere:
- If any, manually correct it then open an issue to automate version number bumping later - If any, manually correct it then open an issue to automate version number bumping later
- Versions table in the `README.md` updates itself automatically - Versions table in the `README.md` update itself automatically
- Generate the change log https://github.com/leodido/rn2md: - Generate the change log https://github.com/leodido/rn2md, or https://fs.fntlnz.wtf/falco/milestones-changelog.txt for the lazy people (it updates every 5 minutes)
- If you review timeout errors with `rn2md` try to generate an GitHub Oauth access token and use `-t` - If you review timeout errors with `rn2md` try to generate an GitHub Oauth access token and use `-t`
- Add the latest changes on top the previous `CHANGELOG.md` - Add the latest changes on top the previous `CHANGELOG.md`
- Submit a PR with the above modifications - Submit a PR with the above modifications
@@ -69,12 +69,13 @@ Now assume `x.y.z` is the new version.
| deb | [![deb](https://img.shields.io/badge/Falco-x.y.z-%2300aec7?style=flat-square)](https://dl.bintray.com/falcosecurity/deb/stable/falco-x.y.z-x86_64.deb) | | deb | [![deb](https://img.shields.io/badge/Falco-x.y.z-%2300aec7?style=flat-square)](https://dl.bintray.com/falcosecurity/deb/stable/falco-x.y.z-x86_64.deb) |
| tgz | [![tgz](https://img.shields.io/badge/Falco-x.y.z-%2300aec7?style=flat-square)](https://dl.bintray.com/falcosecurity/bin/x86_64/falco-x.y.z-x86_64.deb) | | tgz | [![tgz](https://img.shields.io/badge/Falco-x.y.z-%2300aec7?style=flat-square)](https://dl.bintray.com/falcosecurity/bin/x86_64/falco-x.y.z-x86_64.deb) |
| Images | | Images |
| --------------------------------------------------------------------------- | | --------------------------------------------------------------- |
| `docker pull docker.io/falcosecurity/falco:x.y.z` | | `docker pull docker.io/falcosecurity/falco:_tag_` |
| `docker pull public.ecr.aws/falcosecurity/falco:x.y.z` | | `docker pull docker.io/falcosecurity/falco-driver-loader:_tag_` |
| `docker pull docker.io/falcosecurity/falco-driver-loader:x.y.z` | | `docker pull docker.io/falcosecurity/falco-no-driver:_tag_` |
| `docker pull docker.io/falcosecurity/falco-no-driver:x.y.z` |
<!-- Copy the relevant part of the changelog here -->
### Statistics ### Statistics

View File

@@ -30,15 +30,9 @@ if(NOT CPACK_GENERATOR)
endif() endif()
message(STATUS "Using package generators: ${CPACK_GENERATOR}") message(STATUS "Using package generators: ${CPACK_GENERATOR}")
message(STATUS "Package architecture: ${CMAKE_SYSTEM_PROCESSOR}")
set(CPACK_DEBIAN_PACKAGE_SECTION "utils")
if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64") set(CPACK_DEBIAN_PACKAGE_SECTION "utils")
set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "amd64") set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "amd64")
endif()
if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "arm64")
endif()
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.falco.org") set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.falco.org")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "dkms (>= 2.1.0.0)") set(CPACK_DEBIAN_PACKAGE_DEPENDS "dkms (>= 2.1.0.0)")
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA

View File

@@ -10,7 +10,6 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License. # specific language governing permissions and limitations under the License.
# #
mark_as_advanced(OPENSSL_BINARY)
if(NOT USE_BUNDLED_DEPS) if(NOT USE_BUNDLED_DEPS)
find_package(OpenSSL REQUIRED) find_package(OpenSSL REQUIRED)
message(STATUS "Found openssl: include: ${OPENSSL_INCLUDE_DIR}, lib: ${OPENSSL_LIBRARIES}") message(STATUS "Found openssl: include: ${OPENSSL_INCLUDE_DIR}, lib: ${OPENSSL_LIBRARIES}")
@@ -21,8 +20,6 @@ if(NOT USE_BUNDLED_DEPS)
message(STATUS "Found openssl: binary: ${OPENSSL_BINARY}") message(STATUS "Found openssl: binary: ${OPENSSL_BINARY}")
endif() endif()
else() else()
mark_as_advanced(OPENSSL_BUNDLE_DIR OPENSSL_INSTALL_DIR OPENSSL_INCLUDE_DIR
OPENSSL_LIBRARY_SSL OPENSSL_LIBRARY_CRYPTO)
set(OPENSSL_BUNDLE_DIR "${PROJECT_BINARY_DIR}/openssl-prefix/src/openssl") set(OPENSSL_BUNDLE_DIR "${PROJECT_BINARY_DIR}/openssl-prefix/src/openssl")
set(OPENSSL_INSTALL_DIR "${OPENSSL_BUNDLE_DIR}/target") set(OPENSSL_INSTALL_DIR "${OPENSSL_BUNDLE_DIR}/target")
set(OPENSSL_INCLUDE_DIR "${PROJECT_BINARY_DIR}/openssl-prefix/src/openssl/include") set(OPENSSL_INCLUDE_DIR "${PROJECT_BINARY_DIR}/openssl-prefix/src/openssl/include")

View File

@@ -1,27 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
set(B64_SRC "${PROJECT_BINARY_DIR}/b64-prefix/src/b64")
message(STATUS "Using bundled b64 in '${B64_SRC}'")
set(B64_INCLUDE "${B64_SRC}/include")
set(B64_LIB "${B64_SRC}/src/libb64.a")
externalproject_add(
b64
URL "https://github.com/libb64/libb64/archive/ce864b17ea0e24a91e77c7dd3eb2d1ac4175b3f0.tar.gz"
URL_HASH "SHA256=d07173e66f435e5c77dbf81bd9313f8d0e4a3b4edd4105a62f4f8132ba932811"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
BUILD_BYPRODUCTS ${B64_LIB}
INSTALL_COMMAND ""
)

View File

@@ -22,7 +22,6 @@ if(NOT USE_BUNDLED_DEPS)
endif() endif()
# c-ares # c-ares
mark_as_advanced(CARES_INCLUDE CARES_LIB)
find_path(CARES_INCLUDE NAMES ares.h) find_path(CARES_INCLUDE NAMES ares.h)
find_library(CARES_LIB NAMES libcares.so) find_library(CARES_LIB NAMES libcares.so)
if(CARES_INCLUDE AND CARES_LIB) if(CARES_INCLUDE AND CARES_LIB)
@@ -32,7 +31,6 @@ if(NOT USE_BUNDLED_DEPS)
endif() endif()
# protobuf # protobuf
mark_as_advanced(PROTOC PROTOBUF_INCLUDE PROTOBUF_LIB)
find_program(PROTOC NAMES protoc) find_program(PROTOC NAMES protoc)
find_path(PROTOBUF_INCLUDE NAMES google/protobuf/message.h) find_path(PROTOBUF_INCLUDE NAMES google/protobuf/message.h)
find_library(PROTOBUF_LIB NAMES libprotobuf.so) find_library(PROTOBUF_LIB NAMES libprotobuf.so)
@@ -45,7 +43,6 @@ if(NOT USE_BUNDLED_DEPS)
endif() endif()
# gpr # gpr
mark_as_advanced(GPR_LIB)
find_library(GPR_LIB NAMES gpr) find_library(GPR_LIB NAMES gpr)
if(GPR_LIB) if(GPR_LIB)
@@ -55,16 +52,12 @@ if(NOT USE_BUNDLED_DEPS)
endif() endif()
# gRPC todo(fntlnz, leodido): check that gRPC version is greater or equal than 1.8.0 # gRPC todo(fntlnz, leodido): check that gRPC version is greater or equal than 1.8.0
mark_as_advanced(GRPC_INCLUDE GRPC_SRC
GRPC_LIB GRPC_LIBS_ABSOLUTE GRPCPP_LIB GRPC_CPP_PLUGIN)
find_path(GRPCXX_INCLUDE NAMES grpc++/grpc++.h) find_path(GRPCXX_INCLUDE NAMES grpc++/grpc++.h)
if(GRPCXX_INCLUDE) if(GRPCXX_INCLUDE)
set(GRPC_INCLUDE ${GRPCXX_INCLUDE}) set(GRPC_INCLUDE ${GRPCXX_INCLUDE})
unset(GRPCXX_INCLUDE CACHE)
else() else()
find_path(GRPCPP_INCLUDE NAMES grpcpp/grpcpp.h) find_path(GRPCPP_INCLUDE NAMES grpcpp/grpcpp.h)
set(GRPC_INCLUDE ${GRPCPP_INCLUDE}) set(GRPC_INCLUDE ${GRPCPP_INCLUDE})
unset(GRPCPP_INCLUDE CACHE)
add_definitions(-DGRPC_INCLUDE_IS_GRPCPP=1) add_definitions(-DGRPC_INCLUDE_IS_GRPCPP=1)
endif() endif()
find_library(GRPC_LIB NAMES grpc) find_library(GRPC_LIB NAMES grpc)
@@ -122,7 +115,7 @@ else()
grpc grpc
DEPENDS openssl DEPENDS openssl
GIT_REPOSITORY https://github.com/grpc/grpc.git GIT_REPOSITORY https://github.com/grpc/grpc.git
GIT_TAG v1.32.0 GIT_TAG v1.31.1
GIT_SUBMODULES "third_party/protobuf third_party/zlib third_party/cares/cares third_party/abseil-cpp third_party/re2" GIT_SUBMODULES "third_party/protobuf third_party/zlib third_party/cares/cares third_party/abseil-cpp third_party/re2"
BUILD_IN_SOURCE 1 BUILD_IN_SOURCE 1
BUILD_BYPRODUCTS ${GRPC_LIB} ${GRPCPP_LIB} BUILD_BYPRODUCTS ${GRPC_LIB} ${GRPCPP_LIB}

View File

@@ -10,7 +10,6 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License. # specific language governing permissions and limitations under the License.
# #
mark_as_advanced(JQ_INCLUDE JQ_LIB)
if (NOT USE_BUNDLED_DEPS) if (NOT USE_BUNDLED_DEPS)
find_path(JQ_INCLUDE jq.h PATH_SUFFIXES jq) find_path(JQ_INCLUDE jq.h PATH_SUFFIXES jq)
find_library(JQ_LIB NAMES jq) find_library(JQ_LIB NAMES jq)

View File

@@ -15,13 +15,12 @@ set(LIBYAML_SRC "${PROJECT_BINARY_DIR}/libyaml-prefix/src/libyaml")
set(LIBYAML_INSTALL_DIR "${LIBYAML_SRC}/target") set(LIBYAML_INSTALL_DIR "${LIBYAML_SRC}/target")
message(STATUS "Using bundled libyaml in '${LIBYAML_SRC}'") message(STATUS "Using bundled libyaml in '${LIBYAML_SRC}'")
set(LIBYAML_LIB "${LIBYAML_SRC}/src/.libs/libyaml.a") set(LIBYAML_LIB "${LIBYAML_SRC}/src/.libs/libyaml.a")
externalproject_add( ExternalProject_Add(
libyaml libyaml
URL "https://github.com/yaml/libyaml/releases/download/0.2.5/yaml-0.2.5.tar.gz" URL "https://github.com/yaml/libyaml/releases/download/0.2.5/yaml-0.2.5.tar.gz"
URL_HASH "SHA256=c642ae9b75fee120b2d96c712538bd2cf283228d2337df2cf2988e3c02678ef4" URL_HASH "SHA256=c642ae9b75fee120b2d96c712538bd2cf283228d2337df2cf2988e3c02678ef4"
CONFIGURE_COMMAND ./configure --prefix=${LIBYAML_INSTALL_DIR} CFLAGS=-fPIC CPPFLAGS=-fPIC --enable-static=true --enable-shared=false CONFIGURE_COMMAND ./configure --prefix=${LIBYAML_INSTALL_DIR} CFLAGS=-fPIC CPPFLAGS=-fPIC --enable-static=true --enable-shared=false
BUILD_COMMAND ${CMD_MAKE} BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1 BUILD_IN_SOURCE 1
BUILD_BYPRODUCTS ${LIBYAML_LIB} INSTALL_COMMAND ${CMD_MAKE} install)
INSTALL_COMMAND ${CMD_MAKE} install
)

View File

@@ -1,28 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
set(LPEG_SRC "${PROJECT_BINARY_DIR}/lpeg-prefix/src/lpeg")
set(LPEG_LIB "${PROJECT_BINARY_DIR}/lpeg-prefix/src/lpeg/build/lpeg.a")
message(STATUS "Using bundled lpeg in '${LPEG_SRC}'")
set(LPEG_DEPENDENCIES "")
list(APPEND LPEG_DEPENDENCIES "luajit")
ExternalProject_Add(
lpeg
DEPENDS ${LPEG_DEPENDENCIES}
URL "http://www.inf.puc-rio.br/~roberto/lpeg/lpeg-1.0.2.tar.gz"
URL_HASH "SHA256=48d66576051b6c78388faad09b70493093264588fcd0f258ddaab1cdd4a15ffe"
BUILD_COMMAND LUA_INCLUDE=${LUAJIT_INCLUDE} "${PROJECT_SOURCE_DIR}/scripts/build-lpeg.sh" "${LPEG_SRC}/build"
BUILD_IN_SOURCE 1
BUILD_BYPRODUCTS ${LPEG_LIB}
CONFIGURE_COMMAND ""
INSTALL_COMMAND "")

View File

@@ -1,27 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
set(LUAJIT_SRC "${PROJECT_BINARY_DIR}/luajit-prefix/src/luajit/src")
message(STATUS "Using bundled LuaJIT in '${LUAJIT_SRC}'")
set(LUAJIT_INCLUDE "${LUAJIT_SRC}")
set(LUAJIT_LIB "${LUAJIT_SRC}/libluajit.a")
externalproject_add(
luajit
GIT_REPOSITORY "https://github.com/LuaJIT/LuaJIT"
GIT_TAG "1d8b747c161db457e032a023ebbff511f5de5ec2"
CONFIGURE_COMMAND ""
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
BUILD_BYPRODUCTS ${LUAJIT_LIB}
INSTALL_COMMAND ""
)

View File

@@ -1,28 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
set(LYAML_SRC "${PROJECT_BINARY_DIR}/lyaml-prefix/src/lyaml/ext/yaml")
set(LYAML_LIB "${LYAML_SRC}/.libs/yaml.a")
message(STATUS "Using bundled lyaml in '${LYAML_SRC}'")
externalproject_add(
lyaml
DEPENDS luajit libyaml
URL "https://github.com/gvvaughan/lyaml/archive/release-v6.0.tar.gz"
URL_HASH "SHA256=9d7cf74d776999ff6f758c569d5202ff5da1f303c6f4229d3b41f71cd3a3e7a7"
BUILD_COMMAND ${CMD_MAKE}
BUILD_IN_SOURCE 1
BUILD_BYPRODUCTS ${LYAML_LIB}
CONFIGURE_COMMAND ./configure --enable-static CFLAGS=-I${LIBYAML_INSTALL_DIR}/include CPPFLAGS=-I${LIBYAML_INSTALL_DIR}/include LDFLAGS=-L${LIBYAML_INSTALL_DIR}/lib LIBS=-lyaml LUA=${LUAJIT_SRC}/luajit LUA_INCLUDE=-I${LUAJIT_INCLUDE}
INSTALL_COMMAND sh -c
"cp -R ${PROJECT_BINARY_DIR}/lyaml-prefix/src/lyaml/lib/* ${PROJECT_SOURCE_DIR}/userspace/engine/lua"
)

View File

@@ -3,7 +3,6 @@ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/static-analysis-reports)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/static-analysis-reports/cppcheck) file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/static-analysis-reports/cppcheck)
# cppcheck # cppcheck
mark_as_advanced(CPPCHECK CPPCHECK_HTMLREPORT)
find_program(CPPCHECK cppcheck) find_program(CPPCHECK cppcheck)
find_program(CPPCHECK_HTMLREPORT cppcheck-htmlreport) find_program(CPPCHECK_HTMLREPORT cppcheck-htmlreport)

View File

@@ -1,5 +1,5 @@
# #
# Copyright (C) 2020 The Falco Authors. # Copyright (C) 2019 The Falco Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at # the License. You may obtain a copy of the License at
@@ -25,4 +25,4 @@ ExternalProject_Add(
BUILD_COMMAND "" BUILD_COMMAND ""
INSTALL_COMMAND "" INSTALL_COMMAND ""
TEST_COMMAND "" TEST_COMMAND ""
PATCH_COMMAND patch -p1 -i ${CMAKE_CURRENT_SOURCE_DIR}/patch/libscap.patch && patch -p1 -i ${CMAKE_CURRENT_SOURCE_DIR}/patch/luajit.patch) PATCH_COMMAND patch -p1 -i ${CMAKE_CURRENT_SOURCE_DIR}/patch/libscap.patch)

View File

@@ -1,8 +1,8 @@
diff --git a/userspace/libscap/scap.c b/userspace/libscap/scap.c diff --git a/userspace/libscap/scap.c b/userspace/libscap/scap.c
index 6f51588e..5f9ea84e 100644 index e9faea51..a1b3b501 100644
--- a/userspace/libscap/scap.c --- a/userspace/libscap/scap.c
+++ b/userspace/libscap/scap.c +++ b/userspace/libscap/scap.c
@@ -55,7 +55,7 @@ limitations under the License. @@ -52,7 +52,7 @@ limitations under the License.
//#define NDEBUG //#define NDEBUG
#include <assert.h> #include <assert.h>
@@ -11,16 +11,7 @@ index 6f51588e..5f9ea84e 100644
// //
// Probe version string size // Probe version string size
@@ -114,7 +114,7 @@ scap_t* scap_open_udig_int(char *error, int32_t *rc, @@ -171,7 +171,7 @@ scap_t* scap_open_live_int(char *error, int32_t *rc,
static uint32_t get_max_consumers()
{
uint32_t max;
- FILE *pfile = fopen("/sys/module/" PROBE_DEVICE_NAME "_probe/parameters/max_consumers", "r");
+ FILE *pfile = fopen("/sys/module/" PROBE_DEVICE_NAME "/parameters/max_consumers", "r");
if(pfile != NULL)
{
int w = fscanf(pfile, "%"PRIu32, &max);
@@ -186,7 +186,7 @@ scap_t* scap_open_live_int(char *error, int32_t *rc,
return NULL; return NULL;
} }
@@ -29,16 +20,7 @@ index 6f51588e..5f9ea84e 100644
bpf_probe = buf; bpf_probe = buf;
} }
} }
@@ -344,7 +344,7 @@ scap_t* scap_open_live_int(char *error, int32_t *rc, @@ -1808,7 +1808,7 @@ int32_t scap_disable_dynamic_snaplen(scap_t* handle)
else if(errno == EBUSY)
{
uint32_t curr_max_consumers = get_max_consumers();
- snprintf(error, SCAP_LASTERR_SIZE, "Too many sysdig instances attached to device %s. Current value for /sys/module/" PROBE_DEVICE_NAME "_probe/parameters/max_consumers is '%"PRIu32"'.", filename, curr_max_consumers);
+ snprintf(error, SCAP_LASTERR_SIZE, "Too many Falco instances attached to device %s. Current value for /sys/module/" PROBE_DEVICE_NAME "/parameters/max_consumers is '%"PRIu32"'.", filename, curr_max_consumers);
}
else
{
@@ -2175,7 +2175,7 @@ int32_t scap_disable_dynamic_snaplen(scap_t* handle)
const char* scap_get_host_root() const char* scap_get_host_root()
{ {

View File

@@ -1,57 +0,0 @@
diff --git a/userspace/libsinsp/chisel.cpp b/userspace/libsinsp/chisel.cpp
index 0a6e3cf8..0c2e255a 100644
--- a/userspace/libsinsp/chisel.cpp
+++ b/userspace/libsinsp/chisel.cpp
@@ -98,7 +98,7 @@ void lua_stackdump(lua_State *L)
// Lua callbacks
///////////////////////////////////////////////////////////////////////////////
#ifdef HAS_LUA_CHISELS
-const static struct luaL_reg ll_sysdig [] =
+const static struct luaL_Reg ll_sysdig [] =
{
{"set_filter", &lua_cbacks::set_global_filter},
{"set_snaplen", &lua_cbacks::set_snaplen},
@@ -134,7 +134,7 @@ const static struct luaL_reg ll_sysdig [] =
{NULL,NULL}
};
-const static struct luaL_reg ll_chisel [] =
+const static struct luaL_Reg ll_chisel [] =
{
{"request_field", &lua_cbacks::request_field},
{"set_filter", &lua_cbacks::set_filter},
@@ -146,7 +146,7 @@ const static struct luaL_reg ll_chisel [] =
{NULL,NULL}
};
-const static struct luaL_reg ll_evt [] =
+const static struct luaL_Reg ll_evt [] =
{
{"field", &lua_cbacks::field},
{"get_num", &lua_cbacks::get_num},
diff --git a/userspace/libsinsp/lua_parser.cpp b/userspace/libsinsp/lua_parser.cpp
index 0e26617d..78810d96 100644
--- a/userspace/libsinsp/lua_parser.cpp
+++ b/userspace/libsinsp/lua_parser.cpp
@@ -32,7 +32,7 @@ extern "C" {
#include "lauxlib.h"
}
-const static struct luaL_reg ll_filter [] =
+const static struct luaL_Reg ll_filter [] =
{
{"rel_expr", &lua_parser_cbacks::rel_expr},
{"bool_op", &lua_parser_cbacks::bool_op},
diff --git a/userspace/libsinsp/lua_parser_api.cpp b/userspace/libsinsp/lua_parser_api.cpp
index c89e9126..c3d8008a 100644
--- a/userspace/libsinsp/lua_parser_api.cpp
+++ b/userspace/libsinsp/lua_parser_api.cpp
@@ -266,7 +266,7 @@ int lua_parser_cbacks::rel_expr(lua_State *ls)
string err = "Got non-table as in-expression operand\n";
throw sinsp_exception("parser API error");
}
- int n = luaL_getn(ls, 4); /* get size of table */
+ int n = lua_objlen (ls, 4); /* get size of table */
for (i=1; i<=n; i++)
{
lua_rawgeti(ls, 4, i);

View File

@@ -29,8 +29,8 @@ file(MAKE_DIRECTORY ${SYSDIG_CMAKE_WORKING_DIR})
# default below In case you want to test against another sysdig version just pass the variable - ie., `cmake # default below In case you want to test against another sysdig version just pass the variable - ie., `cmake
# -DSYSDIG_VERSION=dev ..` # -DSYSDIG_VERSION=dev ..`
if(NOT SYSDIG_VERSION) if(NOT SYSDIG_VERSION)
set(SYSDIG_VERSION "5c0b863ddade7a45568c0ac97d037422c9efb750") set(SYSDIG_VERSION "2aa88dcf6243982697811df4c1b484bcbe9488a2")
set(SYSDIG_CHECKSUM "SHA256=9de717b3a4b611ea6df56afee05171860167112f74bb7717b394bcc88ac843cd") set(SYSDIG_CHECKSUM "SHA256=a737077543a6f3473ab306b424bcf7385d788149829ed1538252661b0f20d0f6")
endif() endif()
set(PROBE_VERSION "${SYSDIG_VERSION}") set(PROBE_VERSION "${SYSDIG_VERSION}")
@@ -57,7 +57,6 @@ add_subdirectory("${SYSDIG_SOURCE_DIR}/driver" "${PROJECT_BINARY_DIR}/driver")
# Add libscap directory # Add libscap directory
add_definitions(-D_GNU_SOURCE) add_definitions(-D_GNU_SOURCE)
add_definitions(-DHAS_CAPTURE) add_definitions(-DHAS_CAPTURE)
add_definitions(-DNOCURSESUI)
if(MUSL_OPTIMIZED_BUILD) if(MUSL_OPTIMIZED_BUILD)
add_definitions(-DMUSL_OPTIMIZED) add_definitions(-DMUSL_OPTIMIZED)
endif() endif()

View File

@@ -10,7 +10,6 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License. # specific language governing permissions and limitations under the License.
# #
mark_as_advanced(YAMLCPP_INCLUDE_DIR YAMLCPP_LIB)
if(NOT USE_BUNDLED_DEPS) if(NOT USE_BUNDLED_DEPS)
find_path(YAMLCPP_INCLUDE_DIR NAMES yaml-cpp/yaml.h) find_path(YAMLCPP_INCLUDE_DIR NAMES yaml-cpp/yaml.h)
find_library(YAMLCPP_LIB NAMES yaml-cpp) find_library(YAMLCPP_LIB NAMES yaml-cpp)

View File

@@ -3,7 +3,7 @@ FROM falcosecurity/falco:${FALCO_IMAGE_TAG}
LABEL maintainer="cncf-falco-dev@lists.cncf.io" LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL usage="docker run -i -t --privileged -v /root/.falco:/root/.falco -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro -v /etc:/host/etc:ro --name NAME IMAGE" LABEL usage="docker run -i -t -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro --name NAME IMAGE"
ENV HOST_ROOT /host ENV HOST_ROOT /host
ENV HOME /root ENV HOME /root

View File

@@ -2,7 +2,7 @@ FROM debian:stable
LABEL maintainer="cncf-falco-dev@lists.cncf.io" LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL usage="docker run -i -t --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro -v /etc:/host/etc --name NAME IMAGE" LABEL usage="docker run -i -t -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro --name NAME IMAGE"
ARG FALCO_VERSION=latest ARG FALCO_VERSION=latest
ARG VERSION_BUCKET=deb ARG VERSION_BUCKET=deb

View File

@@ -1,5 +1,7 @@
FROM ubuntu:18.04 as ubuntu FROM ubuntu:18.04 as ubuntu
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
ARG FALCO_VERSION ARG FALCO_VERSION
ARG VERSION_BUCKET=bin ARG VERSION_BUCKET=bin
@@ -20,14 +22,6 @@ RUN sed -e 's/time_format_iso_8601: false/time_format_iso_8601: true/' < /falco/
FROM scratch FROM scratch
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL usage="docker run -i -t --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro --name NAME IMAGE"
# NOTE: for the "least privileged" use case, please refer to the official documentation
ENV HOST_ROOT /host
ENV HOME /root
COPY --from=ubuntu /falco / COPY --from=ubuntu /falco /
CMD ["/usr/bin/falco", "-o", "time_format_iso_8601=true"] CMD ["/usr/bin/falco", "-o", "time_format_iso_8601=true"]

View File

@@ -84,23 +84,6 @@ syscall_event_drops:
rate: .03333 rate: .03333
max_burst: 10 max_burst: 10
# Falco continuously monitors outputs performance. When an output channel does not allow
# to deliver an alert within a given deadline, an error is reported indicating
# which output is blocking notifications.
# The timeout error will be reported to the log according to the above log_* settings.
# Note that the notification will not be discarded from the output queue; thus,
# output channels may indefinitely remain blocked.
# An output timeout error indeed indicate a misconfiguration issue or I/O problems
# that cannot be recovered by Falco and should be fixed by the user.
#
# The "output_timeout" value specifies the duration in milliseconds to wait before
# considering the deadline exceed.
#
# With a 2000ms default, the notification consumer can block the Falco output
# for up to 2 seconds without reaching the timeout.
output_timeout: 2000
# A throttling mechanism implemented as a token bucket limits the # A throttling mechanism implemented as a token bucket limits the
# rate of falco notifications. This throttling is controlled by the following configuration # rate of falco notifications. This throttling is controlled by the following configuration
# options: # options:
@@ -215,14 +198,3 @@ grpc:
# Make sure to have a consumer for them or leave this disabled. # Make sure to have a consumer for them or leave this disabled.
grpc_output: grpc_output:
enabled: false enabled: false
# todo(fntlnz): provide a default implementation
# so that users can avoid to input this configuration
# if they don't need to change the default Falco behavior
#extensions:
# - myextension.so
# Rules provider
# Specify a non-default provider.
# Default value is "internal"
rules_provider: internal

View File

@@ -1,240 +0,0 @@
# Proposal for First Class Structured Exceptions in Falco Rules
## Summary
## Motivation
Almost all Falco Rules have cases where the behavior detected by the
rule should be allowed. For example, The rule Write Below Binary Dir
has exceptions for specific programs that are known to write below
these directories as a part of software installation/management:
```yaml
- rule: Write below binary dir
desc: an attempt to write to any file below a set of binary directories
condition: >
bin_dir and evt.dir = < and open_write
and not package_mgmt_procs
and not exe_running_docker_save
and not python_running_get_pip
and not python_running_ms_oms
and not user_known_write_below_binary_dir_activities
...
```
In most cases, these exceptions are expressed as concatenations to the original rule's condition. For example, looking at the macro package_mgmt_procs:
```yaml
- macro: package_mgmt_procs
condition: proc.name in (package_mgmt_binaries)
```
The result is appending `and not proc.name in (package_mgmt_binaries)` to the condition of the rule.
A more extreme case of this is the write_below_etc macro used by Write below etc rule. It has tens of exceptions:
```
...
and not sed_temporary_file
and not exe_running_docker_save
and not ansible_running_python
and not python_running_denyhosts
and not fluentd_writing_conf_files
and not user_known_write_etc_conditions
and not run_by_centrify
and not run_by_adclient
and not qualys_writing_conf_files
and not git_writing_nssdb
...
```
The exceptions all generally follow the same structure--naming a program and a directory prefix below /etc where that program is allowed to write files.
### Using Appends/Overwrites to Customize Rules
An important way to customize rules and macros is to use `append: true` to add to them, or `append: false` to define a new rule/macro, overwriting the original rule/macro. Here's an example from Update Package Repository:
```yaml
- list: package_mgmt_binaries
items: [rpm_binaries, deb_binaries, update-alternat, gem, pip, pip3, sane-utils.post, alternatives, chef-client, apk, snapd]
- macro: package_mgmt_procs
condition: proc.name in (package_mgmt_binaries)
- macro: user_known_update_package_registry
condition: (never_true)
- rule: Update Package Repository
desc: Detect package repositories get updated
condition: >
((open_write and access_repositories) or (modify and modify_repositories))
and not package_mgmt_procs
and not exe_running_docker_save
and not user_known_update_package_registry
```
If someone wanted to add additional exceptions to this rule, they could add the following to the user_rules file:
```yaml
- list: package_mgmt_binaries
items: [puppet]
append: true
- macro: package_mgmt_procs
condition: and not proc.pname=chef
append: true
- macro: user_known_update_package_registry
condition: (proc.name in (npm))
append: false
```
This adds 3 different exceptions:
* an additional binary to package_mgmt_binaries (because append is true),
* adds to package_mgmt_procs, adding an exception for programs spawned by chef (because append is true)
* overrides the macro user_known_update_package_registry to add an exception for npm (because append is false).
### Problems with Appends/Overrides to Define Exceptions
Although the concepts of macros and lists in condition fields, combined with appending to lists/conditions in macros/rules, is very general purpose, it can be unwieldy:
* Appending to conditions can result in incorrect behavior, unless the original condition has its logical operators set up properly with parentheses. For example:
```yaml
rule: my_rule
condition: (evt.type=open and (fd.name=/tmp/foo or fd.name=/tmp/bar))
rule: my_rule
condition: or fd.name=/tmp/baz
append: true
```
Results in unintended behavior. It will match any fd related event where the name is /tmp/baz, when the intent was probably to add /tmp/baz as an additional opened file.
* A good convention many rules use is to have a clause "and not user_known_xxxx" built into the condition field. However, it's not in all rules and its use is a bit haphazard.
* Appends and overrides can get confusing if you try to apply them multiple times. For example:
```yaml
macro: allowed_files
condition: fd.name=/tmp/foo
...
macro: allowed_files
condition: and fd.name=/tmp/bar
append: true
```
If someone wanted to override the original behavior of allowed_files, they would have to use `append: false` in a third definition of allowed_files, but this would result in losing the append: true override.
## Solution: Exceptions as first class objects
To address some of these problems, we will add the notion of Exceptions as top level objects alongside Rules, Macros, and Lists. A rule that supports exceptions must define a new key `exceptions` in the rule. The exceptions key is a list of identifier plus list of tuples of filtercheck fields. Here's an example:
```yaml
- rule: Write below binary dir
desc: an attempt to write to any file below a set of binary directories
condition: >
bin_dir and evt.dir = < and open_write
and not package_mgmt_procs
and not exe_running_docker_save
and not python_running_get_pip
and not python_running_ms_oms
and not user_known_write_below_binary_dir_activities
exceptions:
- name: proc_writer
fields: [proc.name, fd.directory]
- name: container_writer
fields: [container.image.repository, fd.directory]
comps: [=, startswith]
- name: proc_filenames
fields: [proc.name, fd.name]
comps: [=, in]
- name: filenames
fields: fd.filename
comps: in
```
This rule defines four kinds of exceptions:
* proc_writer: uses a combination of proc.name and fd.directory
* container_writer: uses a combination of container.image.repository and fd.directory
* proc_filenames: uses a combination of process and list of filenames.
* filenames: uses a list of filenames
The specific strings "proc_writer"/"container_writer"/"proc_filenames"/"filenames" are arbitrary strings and don't have a special meaning to the rules file parser. They're only used to link together the list of field names with the list of field values that exist in the exception object.
proc_writer does not have any comps property, so the fields are directly compared to values using the = operator. container_writer does have a comps property, so each field will be compared to the corresponding exception items using the corresponding comparison operator.
proc_filenames uses the in comparison operator, so the corresponding values entry should be a list of filenames.
filenames differs from the others in that it names a single field and single comp operator. This changes how the exception condition snippet is constructed (see below).
Notice that exceptions are defined as a part of the rule. This is important because the author of the rule defines what constitutes a valid exception to the rule. In this case, an exception can consist of a process and file directory (actor and target), but not a process name only (too broad).
Exception values will most commonly be defined in rules with append: true. Here's an example:
```yaml
- list: apt_files
items: [/bin/ls, /bin/rm]
- rule: Write below binary dir
exceptions:
- name: proc_writer
values:
- [apk, /usr/lib/alpine]
- [npm, /usr/node/bin]
- name: container_writer
values:
- [docker.io/alpine, /usr/libexec/alpine]
- name: proc_filenames
values:
- [apt, apt_files]
- [rpm, [/bin/cp, /bin/pwd]]
- name: filenames
values: [python, go]
```
A rule exception applies if for a given event, the fields in a rule.exception match all of the values in some exception.item. For example, if a program `apk` writes to a file below `/usr/lib/alpine`, the rule will not trigger, even if the condition is met.
Notice that an item in a values list can be a list. This allows building exceptions with operators like "in", "pmatch", etc. that work on a list of items. The item can also be a name of an existing list. If not present, surrounding parentheses will be added.
Finally, note that the structure of the values property differs between the items where fields is a list of fields (proc_writer/container_writer/proc_filenames) and when it is a single field (filenames). This changes how the condition snippet is constructed.
### Implementation
For exception items where the fields property is a list of field names, each exception can be thought of as an implicit "and not (field1 cmp1 val1 and field2 cmp2 val2 and...)" appended to the rule's condition. For exception items where the fields property is a single field name, the exception can be thought of as an implicit "and not field cmp (val1, val2, ...)". In practice, that's how exceptions will be implemented.
When a rule is parsed, the original condition will be wrapped in an extra layer of parentheses and all exception values will be appended to the condition. For example, using the example above, the resulting condition will be:
```
(<Write below binary dir condition>) and not (
    (proc.name = apk and fd.directory = /usr/lib/alpine) or (proc.name = npm and fd.directory = /usr/node/bin) or
    (container.image.repository = docker.io/alpine and fd.directory startswith /usr/libexec/alpine) or
    (proc.name = apt and fd.name in (apt_files)) or
    (fd.filename in (python, go)))
```
The exceptions are effectively syntactic sugar that allows expressing sets of exceptions in a concise way.
### Advantages
Adding Exception objects as described here has several advantages:
* All rules will implicitly support exceptions. A rule writer doesn't need to define a user_known_xxx macro and add it to the condition.
* The rule writer has some controls on what defines a valid exception. The rule author knows best what is a good exception, and can define the fields that make up the exception.
* With this approach, it's much easier to add and manage multiple sets of exceptions from multiple sources. You're just combining lists of tuples of filtercheck field values.
## Backwards compatibility
To take advantage of these new features, users will need to upgrade Falco to a version that supports exception objects and exception keys in rules. For the most part, however, the rules file structure is unchanged.
This approach does not remove the ability to append to exceptions nor the existing use of user_xxx macros to define exceptions to rules. It only provides an additional way to express exceptions. Hopefully, we can migrate existing exceptions to use this approach, but there isn't any plan to make wholesale rules changes as a part of this.
This approach is for the most part backwards compatible with older Falco releases. To implement exceptions, we'll add a preprocessing element to rule parsing. The main Falco engine is unchanged.
However, there are a few changes we'll have to make to Falco rules file parsing:
* Currently, Falco will reject files containing anything other than rule/macro/list top-level objects. As a result, `exception` objects would be rejected. We'll probably want to make a one-time change to Falco to allow arbitrary top level objects.
* Similarly, Falco will reject rule objects with exception keys. We'll also probably want to change Falco to allow unknown keys inside rule/macro/list/exception objects.

View File

@@ -1,9 +1,5 @@
# Falco Drivers Storage S3 # Falco Drivers Storage S3
Supersedes: [20200818-artifacts-storage.md#drivers](20200818-artifacts-storage.md#drivers)
Supersedes: [20200901-artifacts-cleanup.md#drivers](20200901-artifacts-cleanup.md#drivers)
## Introduction ## Introduction
In the past days, as many people probably noticed, Bintray started rate-limiting our users, effectively preventing them from downloading any kernel module, rpm/deb package or any pre-built dependency we host there. In the past days, as many people probably noticed, Bintray started rate-limiting our users, effectively preventing them from downloading any kernel module, rpm/deb package or any pre-built dependency we host there.
@@ -45,7 +41,7 @@ Before today, we had many issues with storage even without the spike in users we
## Context on AWS ## Context on AWS
Amazon AWS, recently gave credits to the Falco project to operate some parts of the infrastructure on AWS. The CNCF is providing a sub-account we are already using for the migration of the other pieces (like Prow). Amazon AWS, recently gave credits to the Falco project to operate some parts of the infrastructure on AWS. The CNCF is providing a sub-account we are already using for the migration of the other pieces (like Prow).
## Interactions with other teams and the CNCF ## Interactions with other teams and the CNCF
@@ -59,7 +55,7 @@ We want to propose to move the drivers and the container dependencies to S3.
#### Moving means: #### Moving means:
* We create a public S3 bucket with [stats enabled](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) * We create a public S3 bucket with[ stats enabled](https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
* We attach the bucket to a cloudfront distribution behind the download.falco.org subdomain * We attach the bucket to a cloudfront distribution behind the download.falco.org subdomain
@@ -117,7 +113,7 @@ export DRIVERS_REPO=https://your-url-here
Pass it as environment variable using the docker run flag -e - for example: Pass it as environment variable using the docker run flag -e - for example:
docker run -e DRIVERS_REPO=[https://your-url-here](https://your-url-here) docker run -e DRIVERS_REPO=[https://your-url-here](https://your-url-here)
**Kubernetes** **Kubernetes**

File diff suppressed because it is too large Load Diff

View File

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# #
- required_engine_version: 8 - required_engine_version: 2
# Like always_true/always_false, but works with k8s audit events # Like always_true/always_false, but works with k8s audit events
- macro: k8s_audit_always_true - macro: k8s_audit_always_true
@@ -55,12 +55,7 @@
- rule: Disallowed K8s User - rule: Disallowed K8s User
desc: Detect any k8s operation by users outside of an allowed set of users. desc: Detect any k8s operation by users outside of an allowed set of users.
condition: kevt and non_system_user condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users)
exceptions:
- name: user_names
fields: ka.user.name
comps: in
values: [allowed_k8s_users]
output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -127,10 +122,6 @@
desc: > desc: >
Detect an attempt to start a pod with a container image outside of a list of allowed images. Detect an attempt to start a pod with a container image outside of a list of allowed images.
condition: kevt and pod and kcreate and not allowed_k8s_containers condition: kevt and pod and kcreate and not allowed_k8s_containers
exceptions:
- name: image_repos
fields: ka.req.pod.containers.image.repository
comps: in
output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -139,12 +130,7 @@
- rule: Create Privileged Pod - rule: Create Privileged Pod
desc: > desc: >
Detect an attempt to start a pod with a privileged container Detect an attempt to start a pod with a privileged container
condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images)
exceptions:
- name: image_repos
fields: ka.req.pod.containers.image.repository
comps: in
values: [falco_privileged_images]
output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -158,12 +144,7 @@
desc: > desc: >
Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc). Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc).
Exceptions are made for known trusted images. Exceptions are made for known trusted images.
condition: kevt and pod and kcreate and sensitive_vol_mount condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images)
exceptions:
- name: image_repos
fields: ka.req.pod.containers.image.repository
comps: in
values: [falco_sensitive_mount_images]
output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes]) output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -172,12 +153,7 @@
# Corresponds to K8s CIS Benchmark 1.7.4 # Corresponds to K8s CIS Benchmark 1.7.4
- rule: Create HostNetwork Pod - rule: Create HostNetwork Pod
desc: Detect an attempt to start a pod using the host network. desc: Detect an attempt to start a pod using the host network.
condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images)
exceptions:
- name: image_repos
fields: ka.req.pod.containers.image.repository
comps: in
values: [falco_hostnetwork_images]
output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -190,9 +166,6 @@
desc: > desc: >
Detect an attempt to start a service with a NodePort service type Detect an attempt to start a service with a NodePort service type
condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service
exceptions:
- name: services
fields: [ka.target.namespace, ka.target.name]
output: NodePort Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports) output: NodePort Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace ports=%ka.req.service.ports)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -211,9 +184,6 @@
desc: > desc: >
Detect creating/modifying a configmap containing a private credential (aws key, password, etc.) Detect creating/modifying a configmap containing a private credential (aws key, password, etc.)
condition: kevt and configmap and kmodify and contains_private_credentials condition: kevt and configmap and kmodify and contains_private_credentials
exceptions:
- name: configmaps
fields: [ka.target.namespace, ka.req.configmap.name]
output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name config=%ka.req.configmap.obj) output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb configmap=%ka.req.configmap.name config=%ka.req.configmap.obj)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -224,10 +194,6 @@
desc: > desc: >
Detect any request made by the anonymous user that was allowed Detect any request made by the anonymous user that was allowed
condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint
exceptions:
- name: user_names
fields: ka.user.name
comps: in
output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)) output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason))
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -241,10 +207,6 @@
# events to be stateful, so it could know if a container named in an # events to be stateful, so it could know if a container named in an
# attach request was created privileged or not. For now, we have a # attach request was created privileged or not. For now, we have a
# less severe rule that detects attaches/execs to any pod. # less severe rule that detects attaches/execs to any pod.
#
# For the same reason, you can't use things like image names/prefixes,
# as the event that creates the pod (which has the images) is a
# separate event than the actual exec/attach to the pod.
- macro: user_known_exec_pod_activities - macro: user_known_exec_pod_activities
condition: (k8s_audit_never_true) condition: (k8s_audit_never_true)
@@ -253,10 +215,6 @@
desc: > desc: >
Detect any attempt to attach/exec to a pod Detect any attempt to attach/exec to a pod
condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities
exceptions:
- name: user_names
fields: ka.user.name
comps: in
output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command]) output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command])
priority: NOTICE priority: NOTICE
source: k8s_audit source: k8s_audit
@@ -266,14 +224,10 @@
condition: (k8s_audit_never_true) condition: (k8s_audit_never_true)
# Only works when feature gate EphemeralContainers is enabled # Only works when feature gate EphemeralContainers is enabled
# Definining empty exceptions just to avoid warnings. There isn't any
# great exception for this kind of object, as you'd expect the images
# to vary wildly.
- rule: EphemeralContainers Created - rule: EphemeralContainers Created
desc: > desc: >
Detect any ephemeral container created Detect any ephemeral container created
condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities
exceptions:
output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image]) output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])
priority: NOTICE priority: NOTICE
source: k8s_audit source: k8s_audit
@@ -285,12 +239,7 @@
- rule: Create Disallowed Namespace - rule: Create Disallowed Namespace
desc: Detect any attempt to create a namespace outside of a set of known namespaces desc: Detect any attempt to create a namespace outside of a set of known namespaces
condition: kevt and namespace and kcreate condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)
exceptions:
- name: services
fields: ka.target.name
comps: in
values: [allowed_namespaces]
output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name) output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -330,16 +279,15 @@
k8s_image_list k8s_image_list
] ]
- macro: allowed_kube_namespace_pods
condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or
ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list))
# Detect any new pod created in the kube-system namespace # Detect any new pod created in the kube-system namespace
- rule: Pod Created in Kube Namespace - rule: Pod Created in Kube Namespace
desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces
condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods
output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
exceptions:
- name: images
fields: ka.req.pod.containers.image.repository
comps: in
values: [user_allowed_kube_namespace_image_list, allowed_kube_namespace_image_list]
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
tags: [k8s] tags: [k8s]
@@ -354,9 +302,6 @@
- rule: Service Account Created in Kube Namespace - rule: Service Account Created in Kube Namespace
desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces
condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa
exceptions:
- name: accounts
fields: [ka.target.namespace, ka.target.name]
output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace) output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -369,9 +314,6 @@
desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system
condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and
not ka.target.name in (system:coredns, system:managed-certificate-controller) not ka.target.name in (system:coredns, system:managed-certificate-controller)
exceptions:
- name: roles
fields: [ka.target.namespace, ka.target.name]
output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name ns=%ka.target.namespace action=%ka.verb) output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name ns=%ka.target.namespace action=%ka.verb)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -382,10 +324,6 @@
- rule: Attach to cluster-admin Role - rule: Attach to cluster-admin Role
desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin
exceptions:
- name: subjects
fields: ka.req.binding.subjects
comps: in
output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects) output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -394,10 +332,6 @@
- rule: ClusterRole With Wildcard Created - rule: ClusterRole With Wildcard Created
desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs
condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*")) condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*"))
exceptions:
- name: roles
fields: ka.target.name
comps: in
output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -410,10 +344,6 @@
- rule: ClusterRole With Write Privileges Created - rule: ClusterRole With Write Privileges Created
desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions
condition: kevt and (role or clusterrole) and kcreate and writable_verbs condition: kevt and (role or clusterrole) and kcreate and writable_verbs
exceptions:
- name: roles
fields: ka.target.name
comps: in
output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)
priority: NOTICE priority: NOTICE
source: k8s_audit source: k8s_audit
@@ -422,10 +352,6 @@
- rule: ClusterRole With Pod Exec Created - rule: ClusterRole With Pod Exec Created
desc: Detect any attempt to create a Role/ClusterRole that can exec to pods desc: Detect any attempt to create a Role/ClusterRole that can exec to pods
condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec") condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec")
exceptions:
- name: roles
fields: ka.target.name
comps: in
output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules) output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -437,16 +363,12 @@
- macro: consider_activity_events - macro: consider_activity_events
condition: (k8s_audit_always_true) condition: (k8s_audit_always_true)
# Activity events don't have exceptions. They do define an empty
# exceptions property just to avoid warnings when loading rules.
- macro: kactivity - macro: kactivity
condition: (kevt and consider_activity_events) condition: (kevt and consider_activity_events)
- rule: K8s Deployment Created - rule: K8s Deployment Created
desc: Detect any attempt to create a deployment desc: Detect any attempt to create a deployment
condition: (kactivity and kcreate and deployment and response_successful) condition: (kactivity and kcreate and deployment and response_successful)
exceptions:
output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -455,7 +377,6 @@
- rule: K8s Deployment Deleted - rule: K8s Deployment Deleted
desc: Detect any attempt to delete a deployment desc: Detect any attempt to delete a deployment
condition: (kactivity and kdelete and deployment and response_successful) condition: (kactivity and kdelete and deployment and response_successful)
exceptions:
output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -464,7 +385,6 @@
- rule: K8s Service Created - rule: K8s Service Created
desc: Detect any attempt to create a service desc: Detect any attempt to create a service
condition: (kactivity and kcreate and service and response_successful) condition: (kactivity and kcreate and service and response_successful)
exceptions:
output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -473,7 +393,6 @@
- rule: K8s Service Deleted - rule: K8s Service Deleted
desc: Detect any attempt to delete a service desc: Detect any attempt to delete a service
condition: (kactivity and kdelete and service and response_successful) condition: (kactivity and kdelete and service and response_successful)
exceptions:
output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -482,7 +401,6 @@
- rule: K8s ConfigMap Created - rule: K8s ConfigMap Created
desc: Detect any attempt to create a configmap desc: Detect any attempt to create a configmap
condition: (kactivity and kcreate and configmap and response_successful) condition: (kactivity and kcreate and configmap and response_successful)
exceptions:
output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -491,7 +409,6 @@
- rule: K8s ConfigMap Deleted - rule: K8s ConfigMap Deleted
desc: Detect any attempt to delete a configmap desc: Detect any attempt to delete a configmap
condition: (kactivity and kdelete and configmap and response_successful) condition: (kactivity and kdelete and configmap and response_successful)
exceptions:
output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -500,7 +417,6 @@
- rule: K8s Namespace Created - rule: K8s Namespace Created
desc: Detect any attempt to create a namespace desc: Detect any attempt to create a namespace
condition: (kactivity and kcreate and namespace and response_successful) condition: (kactivity and kcreate and namespace and response_successful)
exceptions:
output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -509,7 +425,6 @@
- rule: K8s Namespace Deleted - rule: K8s Namespace Deleted
desc: Detect any attempt to delete a namespace desc: Detect any attempt to delete a namespace
condition: (kactivity and non_system_user and kdelete and namespace and response_successful) condition: (kactivity and non_system_user and kdelete and namespace and response_successful)
exceptions:
output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -518,7 +433,6 @@
- rule: K8s Serviceaccount Created - rule: K8s Serviceaccount Created
desc: Detect any attempt to create a service account desc: Detect any attempt to create a service account
condition: (kactivity and kcreate and serviceaccount and response_successful) condition: (kactivity and kcreate and serviceaccount and response_successful)
exceptions:
output: K8s Serviceaccount Created (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Serviceaccount Created (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -527,7 +441,6 @@
- rule: K8s Serviceaccount Deleted - rule: K8s Serviceaccount Deleted
desc: Detect any attempt to delete a service account desc: Detect any attempt to delete a service account
condition: (kactivity and kdelete and serviceaccount and response_successful) condition: (kactivity and kdelete and serviceaccount and response_successful)
exceptions:
output: K8s Serviceaccount Deleted (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Serviceaccount Deleted (user=%ka.user.name user=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -536,7 +449,6 @@
- rule: K8s Role/Clusterrole Created - rule: K8s Role/Clusterrole Created
desc: Detect any attempt to create a cluster role/role desc: Detect any attempt to create a cluster role/role
condition: (kactivity and kcreate and (clusterrole or role) and response_successful) condition: (kactivity and kcreate and (clusterrole or role) and response_successful)
exceptions:
output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -545,7 +457,6 @@
- rule: K8s Role/Clusterrole Deleted - rule: K8s Role/Clusterrole Deleted
desc: Detect any attempt to delete a cluster role/role desc: Detect any attempt to delete a cluster role/role
condition: (kactivity and kdelete and (clusterrole or role) and response_successful) condition: (kactivity and kdelete and (clusterrole or role) and response_successful)
exceptions:
output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -554,7 +465,6 @@
- rule: K8s Role/Clusterrolebinding Created - rule: K8s Role/Clusterrolebinding Created
desc: Detect any attempt to create a clusterrolebinding desc: Detect any attempt to create a clusterrolebinding
condition: (kactivity and kcreate and clusterrolebinding and response_successful) condition: (kactivity and kcreate and clusterrolebinding and response_successful)
exceptions:
output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -563,7 +473,6 @@
- rule: K8s Role/Clusterrolebinding Deleted - rule: K8s Role/Clusterrolebinding Deleted
desc: Detect any attempt to delete a clusterrolebinding desc: Detect any attempt to delete a clusterrolebinding
condition: (kactivity and kdelete and clusterrolebinding and response_successful) condition: (kactivity and kdelete and clusterrolebinding and response_successful)
exceptions:
output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -572,7 +481,6 @@
- rule: K8s Secret Created - rule: K8s Secret Created
desc: Detect any attempt to create a secret. Service account tokens are excluded. desc: Detect any attempt to create a secret. Service account tokens are excluded.
condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful) condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
exceptions:
output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -581,7 +489,6 @@
- rule: K8s Secret Deleted - rule: K8s Secret Deleted
desc: Detect any attempt to delete a secret Service account tokens are excluded. desc: Detect any attempt to delete a secret Service account tokens are excluded.
condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful) condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
exceptions:
output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
priority: INFO priority: INFO
source: k8s_audit source: k8s_audit
@@ -600,7 +507,6 @@
- rule: All K8s Audit Events - rule: All K8s Audit Events
desc: Match all K8s Audit Events desc: Match all K8s Audit Events
condition: kall condition: kall
exceptions:
output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj) output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj)
priority: DEBUG priority: DEBUG
source: k8s_audit source: k8s_audit
@@ -615,11 +521,11 @@
- list: full_admin_k8s_users - list: full_admin_k8s_users
items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"] items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"]
# This rules detect an operation triggered by an user name that is # This rules detect an operation triggered by an user name that is
# included in the list of those that are default administrators upon # included in the list of those that are default administrators upon
# cluster creation. This may signify a permission setting too broader. # cluster creation. This may signify a permission setting too broader.
# As we can't check for role of the user on a general ka.* event, this # As we can't check for role of the user on a general ka.* event, this
# may or may not be an administrator. Customize the full_admin_k8s_users # may or may not be an administrator. Customize the full_admin_k8s_users
# list to your needs, and activate at your discrection. # list to your needs, and activate at your discrection.
# # How to test: # # How to test:
@@ -629,14 +535,10 @@
- rule: Full K8s Administrative Access - rule: Full K8s Administrative Access
desc: Detect any k8s operation by a user name that may be an administrator with full access. desc: Detect any k8s operation by a user name that may be an administrator with full access.
condition: > condition: >
kevt kevt
and non_system_user and non_system_user
and ka.user.name in (full_admin_k8s_users) and ka.user.name in (admin_k8s_users)
and not allowed_full_admin_users and not allowed_full_admin_users
exceptions:
- name: user_names
fields: ka.user.name
comps: in
output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit
@@ -670,13 +572,10 @@
desc: Detect any attempt to create an ingress without TLS certification. desc: Detect any attempt to create an ingress without TLS certification.
condition: > condition: >
(kactivity and kcreate and ingress and response_successful and not ingress_tls) (kactivity and kcreate and ingress and response_successful and not ingress_tls)
exceptions:
- name: ingresses
fields: [ka.target.namespace, ka.target.name]
output: > output: >
K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name
namespace=%ka.target.namespace) namespace=%ka.target.namespace)
source: k8s_audit source: k8s_audit
priority: WARNING priority: WARNING
tags: [k8s, network] tags: [k8s, network]
@@ -699,15 +598,11 @@
desc: > desc: >
Detect a node successfully joined the cluster outside of the list of allowed nodes. Detect a node successfully joined the cluster outside of the list of allowed nodes.
condition: > condition: >
kevt and node kevt and node
and kcreate and kcreate
and response_successful and response_successful
and not allow_all_k8s_nodes and not allow_all_k8s_nodes
exceptions: and not ka.target.name in (allowed_k8s_nodes)
- name: nodes
fields: ka.target.name
comps: in
values: [allowed_k8s_nodes]
output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name) output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name)
priority: ERROR priority: ERROR
source: k8s_audit source: k8s_audit
@@ -717,15 +612,11 @@
desc: > desc: >
Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes. Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes.
condition: > condition: >
kevt and node kevt and node
and kcreate and kcreate
and not response_successful and not response_successful
and not allow_all_k8s_nodes and not allow_all_k8s_nodes
exceptions: and not ka.target.name in (allowed_k8s_nodes)
- name: nodes
fields: ka.target.name
comps: in
values: [allowed_k8s_nodes]
output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason) output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason)
priority: WARNING priority: WARNING
source: k8s_audit source: k8s_audit

View File

@@ -220,7 +220,7 @@ load_kernel_module() {
rmmod "${DRIVER_NAME}" 2>/dev/null rmmod "${DRIVER_NAME}" 2>/dev/null
WAIT_TIME=0 WAIT_TIME=0
KMOD_NAME=$(echo "${DRIVER_NAME}" | tr "-" "_") KMOD_NAME=$(echo "${DRIVER_NAME}" | tr "-" "_")
while lsmod | cut -d' ' -f1 | grep -qx "${KMOD_NAME}" && [ $WAIT_TIME -lt "${MAX_RMMOD_WAIT}" ]; do while lsmod | grep "${KMOD_NAME}" > /dev/null 2>&1 && [ $WAIT_TIME -lt "${MAX_RMMOD_WAIT}" ]; do
if rmmod "${DRIVER_NAME}" 2>/dev/null; then if rmmod "${DRIVER_NAME}" 2>/dev/null; then
echo "* Unloading ${DRIVER_NAME} module succeeded after ${WAIT_TIME}s" echo "* Unloading ${DRIVER_NAME} module succeeded after ${WAIT_TIME}s"
break break
@@ -232,7 +232,7 @@ load_kernel_module() {
sleep 1 sleep 1
done done
if lsmod | cut -d' ' -f1 | grep -qx "${KMOD_NAME}" > /dev/null 2>&1; then if lsmod | grep "${KMOD_NAME}" > /dev/null 2>&1; then
echo "* ${DRIVER_NAME} module seems to still be loaded, hoping the best" echo "* ${DRIVER_NAME} module seems to still be loaded, hoping the best"
exit 0 exit 0
fi fi

View File

@@ -675,8 +675,7 @@ class FalcoTest(Test):
self.check_rules_warnings(res) self.check_rules_warnings(res)
if len(self.rules_events) > 0: if len(self.rules_events) > 0:
self.check_rules_events(res) self.check_rules_events(res)
if len(self.validate_rules_file) == 0: self.check_detections(res)
self.check_detections(res)
if len(self.detect_counts) > 0: if len(self.detect_counts) > 0:
self.check_detections_by_rule(res) self.check_detections_by_rule(res)
self.check_json_output(res) self.check_json_output(res)

View File

@@ -262,7 +262,6 @@ trace_files: !mux
invalid_not_yaml: invalid_not_yaml:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Rules content is not yaml Rules content is not yaml
--- ---
This is not yaml This is not yaml
@@ -274,7 +273,6 @@ trace_files: !mux
invalid_not_array: invalid_not_array:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Rules content is not yaml array of objects Rules content is not yaml array of objects
--- ---
foo: bar foo: bar
@@ -286,7 +284,6 @@ trace_files: !mux
invalid_array_item_not_object: invalid_array_item_not_object:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Unexpected element of type string. Each element should be a yaml associative array. Unexpected element of type string. Each element should be a yaml associative array.
--- ---
- foo - foo
@@ -295,10 +292,20 @@ trace_files: !mux
- rules/invalid_array_item_not_object.yaml - rules/invalid_array_item_not_object.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
invalid_unexpected object:
exit_status: 1
stdout_is: |+
Unknown rule object: {foo="bar"}
---
- foo: bar
---
validate_rules_file:
- rules/invalid_unexpected_object.yaml
trace_file: trace_files/cat_write.scap
invalid_engine_version_not_number: invalid_engine_version_not_number:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Value of required_engine_version must be a number Value of required_engine_version must be a number
--- ---
- required_engine_version: not-a-number - required_engine_version: not-a-number
@@ -310,7 +317,6 @@ trace_files: !mux
invalid_yaml_parse_error: invalid_yaml_parse_error:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
mapping values are not allowed in this context mapping values are not allowed in this context
--- ---
this : is : not : yaml this : is : not : yaml
@@ -322,7 +328,6 @@ trace_files: !mux
invalid_list_without_items: invalid_list_without_items:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
List must have property items List must have property items
--- ---
- list: bad_list - list: bad_list
@@ -335,7 +340,6 @@ trace_files: !mux
invalid_macro_without_condition: invalid_macro_without_condition:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Macro must have property condition Macro must have property condition
--- ---
- macro: bad_macro - macro: bad_macro
@@ -348,7 +352,6 @@ trace_files: !mux
invalid_rule_without_output: invalid_rule_without_output:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Rule must have property output Rule must have property output
--- ---
- rule: no output rule - rule: no output rule
@@ -356,8 +359,6 @@ trace_files: !mux
condition: evt.type=fork condition: evt.type=fork
priority: INFO priority: INFO
--- ---
1 warnings:
Rule no output rule: consider adding an exceptions property to define supported exceptions fields
validate_rules_file: validate_rules_file:
- rules/invalid_rule_without_output.yaml - rules/invalid_rule_without_output.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
@@ -365,8 +366,7 @@ trace_files: !mux
invalid_append_rule_without_condition: invalid_append_rule_without_condition:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors: Rule must have property condition
Rule must have exceptions or condition property
--- ---
- rule: no condition rule - rule: no condition rule
append: true append: true
@@ -378,7 +378,6 @@ trace_files: !mux
invalid_append_macro_dangling: invalid_append_macro_dangling:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Macro dangling append has 'append' key but no macro by that name already exists Macro dangling append has 'append' key but no macro by that name already exists
--- ---
- macro: dangling append - macro: dangling append
@@ -392,7 +391,6 @@ trace_files: !mux
invalid_list_append_dangling: invalid_list_append_dangling:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
List my_list has 'append' key but no list by that name already exists List my_list has 'append' key but no list by that name already exists
--- ---
- list: my_list - list: my_list
@@ -406,15 +404,12 @@ trace_files: !mux
invalid_rule_append_dangling: invalid_rule_append_dangling:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Rule my_rule has 'append' key but no rule by that name already exists Rule my_rule has 'append' key but no rule by that name already exists
--- ---
- rule: my_rule - rule: my_rule
condition: evt.type=open condition: evt.type=open
append: true append: true
--- ---
1 warnings:
Rule my_rule: consider adding an exceptions property to define supported exceptions fields
validate_rules_file: validate_rules_file:
- rules/rule_append_failure.yaml - rules/rule_append_failure.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
@@ -423,8 +418,7 @@ trace_files: !mux
exit_status: 1 exit_status: 1
stdout_contains: |+ stdout_contains: |+
.*invalid_base_macro.yaml: Ok .*invalid_base_macro.yaml: Ok
.*invalid_overwrite_macro.yaml: 1 errors: .*invalid_overwrite_macro.yaml: Compilation error when compiling "foo": Undefined macro 'foo' used in filter.
Compilation error when compiling "foo": Undefined macro 'foo' used in filter.
--- ---
- macro: some macro - macro: some macro
condition: foo condition: foo
@@ -439,8 +433,7 @@ trace_files: !mux
exit_status: 1 exit_status: 1
stdout_contains: |+ stdout_contains: |+
.*invalid_base_macro.yaml: Ok .*invalid_base_macro.yaml: Ok
.*invalid_append_macro.yaml: 1 errors: .*invalid_append_macro.yaml: Compilation error when compiling "evt.type=execve foo": 17: syntax error, unexpected 'foo', expecting 'or', 'and'
Compilation error when compiling "evt.type=execve foo": 17: syntax error, unexpected 'foo', expecting 'or', 'and'
--- ---
- macro: some macro - macro: some macro
condition: evt.type=execve condition: evt.type=execve
@@ -457,7 +450,6 @@ trace_files: !mux
invalid_overwrite_macro_multiple_docs: invalid_overwrite_macro_multiple_docs:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Compilation error when compiling "foo": Undefined macro 'foo' used in filter. Compilation error when compiling "foo": Undefined macro 'foo' used in filter.
--- ---
- macro: some macro - macro: some macro
@@ -471,7 +463,6 @@ trace_files: !mux
invalid_append_macro_multiple_docs: invalid_append_macro_multiple_docs:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Compilation error when compiling "evt.type=execve foo": 17: syntax error, unexpected 'foo', expecting 'or', 'and' Compilation error when compiling "evt.type=execve foo": 17: syntax error, unexpected 'foo', expecting 'or', 'and'
--- ---
- macro: some macro - macro: some macro
@@ -489,8 +480,7 @@ trace_files: !mux
exit_status: 1 exit_status: 1
stdout_contains: |+ stdout_contains: |+
.*invalid_base_rule.yaml: Ok .*invalid_base_rule.yaml: Ok
.*invalid_overwrite_rule.yaml: 1 errors: .*invalid_overwrite_rule.yaml: Undefined macro 'bar' used in filter.
Undefined macro 'bar' used in filter.
--- ---
- rule: some rule - rule: some rule
desc: some desc desc: some desc
@@ -508,8 +498,7 @@ trace_files: !mux
exit_status: 1 exit_status: 1
stdout_contains: |+ stdout_contains: |+
.*invalid_base_rule.yaml: Ok .*invalid_base_rule.yaml: Ok
.*invalid_append_rule.yaml: 1 errors: .*invalid_append_rule.yaml: Compilation error when compiling "evt.type=open bar": 15: syntax error, unexpected 'bar', expecting 'or', 'and'
Compilation error when compiling "evt.type=open bar": 15: syntax error, unexpected 'bar', expecting 'or', 'and'
--- ---
- rule: some rule - rule: some rule
desc: some desc desc: some desc
@@ -532,7 +521,6 @@ trace_files: !mux
invalid_overwrite_rule_multiple_docs: invalid_overwrite_rule_multiple_docs:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Undefined macro 'bar' used in filter. Undefined macro 'bar' used in filter.
--- ---
- rule: some rule - rule: some rule
@@ -542,9 +530,6 @@ trace_files: !mux
priority: INFO priority: INFO
append: false append: false
--- ---
2 warnings:
Rule some rule: consider adding an exceptions property to define supported exceptions fields
Rule some rule: consider adding an exceptions property to define supported exceptions fields
validate_rules_file: validate_rules_file:
- rules/invalid_overwrite_rule_multiple_docs.yaml - rules/invalid_overwrite_rule_multiple_docs.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
@@ -567,9 +552,6 @@ trace_files: !mux
priority: INFO priority: INFO
append: true append: true
--- ---
2 warnings:
Rule some rule: consider adding an exceptions property to define supported exceptions fields
Rule some rule: consider adding an exceptions property to define supported exceptions fields
validate_rules_file: validate_rules_file:
- rules/invalid_append_rule_multiple_docs.yaml - rules/invalid_append_rule_multiple_docs.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
@@ -577,7 +559,6 @@ trace_files: !mux
invalid_missing_rule_name: invalid_missing_rule_name:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Rule name is empty Rule name is empty
--- ---
- rule: - rule:
@@ -592,7 +573,6 @@ trace_files: !mux
invalid_missing_list_name: invalid_missing_list_name:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
List name is empty List name is empty
--- ---
- list: - list:
@@ -605,7 +585,6 @@ trace_files: !mux
invalid_missing_macro_name: invalid_missing_macro_name:
exit_status: 1 exit_status: 1
stdout_is: |+ stdout_is: |+
1 errors:
Macro name is empty Macro name is empty
--- ---
- macro: - macro:
@@ -617,19 +596,8 @@ trace_files: !mux
invalid_rule_output: invalid_rule_output:
exit_status: 1 exit_status: 1
stdout_is: |+ stderr_contains: "Runtime error: Error loading rules:.* Invalid output format 'An open was seen %not_a_real_field': 'invalid formatting token not_a_real_field'. Exiting."
1 errors: rules_file:
Invalid output format 'An open was seen %not_a_real_field': 'invalid formatting token not_a_real_field'
---
- rule: rule_with_invalid_output
desc: A rule with an invalid output field
condition: evt.type=open
output: "An open was seen %not_a_real_field"
priority: WARNING
---
1 warnings:
Rule rule_with_invalid_output: consider adding an exceptions property to define supported exceptions fields
validate_rules_file:
- rules/invalid_rule_output.yaml - rules/invalid_rule_output.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
@@ -1149,7 +1117,7 @@ trace_files: !mux
skip_unknown_noevt: skip_unknown_noevt:
detect: False detect: False
stdout_contains: Skipping rule "Contains Unknown Event And Skipping". contains unknown filter proc.nobody stdout_contains: Skipping rule "Contains Unknown Event And Skipping" that contains unknown filter proc.nobody
rules_file: rules_file:
- rules/skip_unknown_evt.yaml - rules/skip_unknown_evt.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
@@ -1162,33 +1130,14 @@ trace_files: !mux
skip_unknown_error: skip_unknown_error:
exit_status: 1 exit_status: 1
stderr_contains: |+ stderr_contains: Rule "Contains Unknown Event And Not Skipping" contains unknown filter proc.nobody. Exiting.
Could not load rules file.*skip_unknown_error.yaml: 1 errors:
rule "Contains Unknown Event And Not Skipping". contains unknown filter proc.nobody
---
- rule: Contains Unknown Event And Not Skipping
desc: Contains an unknown event
condition: proc.nobody=cat
output: Never
skip-if-unknown-filter: false
priority: INFO
---
rules_file: rules_file:
- rules/skip_unknown_error.yaml - rules/skip_unknown_error.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap
skip_unknown_unspec_error: skip_unknown_unspec_error:
exit_status: 1 exit_status: 1
stderr_contains: |+ stderr_contains: Rule "Contains Unknown Event And Unspecified" contains unknown filter proc.nobody. Exiting.
Could not load rules file .*skip_unknown_unspec.yaml: 1 errors:
rule "Contains Unknown Event And Unspecified". contains unknown filter proc.nobody
---
- rule: Contains Unknown Event And Unspecified
desc: Contains an unknown event
condition: proc.nobody=cat
output: Never
priority: INFO
---
rules_file: rules_file:
- rules/skip_unknown_unspec.yaml - rules/skip_unknown_unspec.yaml
trace_file: trace_files/cat_write.scap trace_file: trace_files/cat_write.scap

View File

@@ -1,323 +0,0 @@
#
# Copyright (C) 2016-2020 The Falco Authors..
#
# This file is part of falco.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
trace_files: !mux
rule_exception_no_fields:
exit_status: 1
stdout_is: |+
1 errors:
Rule exception item ex1: must have fields property with a list of fields
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
priority: error
---
validate_rules_file:
- rules/exceptions/item_no_fields.yaml
trace_file: trace_files/cat_write.scap
rule_exception_no_name:
exit_status: 1
stdout_is: |+
1 errors:
Rule exception item must have name property
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- fields: [proc.name, fd.filename]
priority: error
---
validate_rules_file:
- rules/exceptions/item_no_name.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_no_name:
exit_status: 1
stdout_is: |+
1 errors:
Rule exception item must have name property
---
- rule: My Rule
exceptions:
- values:
- [nginx, /tmp/foo]
append: true
---
validate_rules_file:
- rules/exceptions/append_item_no_name.yaml
trace_file: trace_files/cat_write.scap
rule_exception_unknown_fields:
exit_status: 1
stdout_is: |+
1 errors:
Rule exception item ex1: field name not.exist is not a supported filter field
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [not.exist]
priority: error
---
validate_rules_file:
- rules/exceptions/item_unknown_fields.yaml
trace_file: trace_files/cat_write.scap
rule_exception_comps_fields_len_mismatch:
exit_status: 1
stdout_is: |+
1 errors:
Rule exception item ex1: fields and comps lists must have equal length
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
comps: [=]
priority: error
---
validate_rules_file:
- rules/exceptions/item_comps_fields_len_mismatch.yaml
trace_file: trace_files/cat_write.scap
rule_exception_unknown_comp:
exit_status: 1
stdout_is: |+
1 errors:
Rule exception item ex1: comparison operator no-comp is not a supported comparison operator
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
comps: [=, no-comp]
priority: error
---
validate_rules_file:
- rules/exceptions/item_unknown_comp.yaml
trace_file: trace_files/cat_write.scap
rule_exception_fields_values_len_mismatch:
exit_status: 1
stdout_is: |+
1 errors:
Exception item ex1: fields and values lists must have equal length
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
values:
- [nginx]
priority: error
---
validate_rules_file:
- rules/exceptions/item_fields_values_len_mismatch.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_fields_values_len_mismatch:
exit_status: 1
stdout_is: |+
1 errors:
Exception item ex1: fields and values lists must have equal length
---
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
priority: error
- rule: My Rule
exceptions:
- name: ex1
values:
- [nginx]
append: true
---
validate_rules_file:
- rules/exceptions/append_item_fields_values_len_mismatch.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_item_not_in_rule:
exit_status: 0
stderr_contains: |+
1 warnings:
Rule My Rule with append=true: no set of fields matching name ex2
validate_rules_file:
- rules/exceptions/append_item_not_in_rule.yaml
trace_file: trace_files/cat_write.scap
rule_without_exception:
exit_status: 0
stderr_contains: |+
1 warnings:
Rule My Rule: consider adding an exceptions property to define supported exceptions fields
validate_rules_file:
- rules/exceptions/rule_without_exception.yaml
trace_file: trace_files/cat_write.scap
rule_exception_no_values:
detect: True
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_no_values.yaml
trace_file: trace_files/cat_write.scap
rule_exception_one_value:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_one_value.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_one_value:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_append_one_value.yaml
trace_file: trace_files/cat_write.scap
rule_exception_second_value:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_second_value.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_second_value:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_append_second_value.yaml
trace_file: trace_files/cat_write.scap
rule_exception_second_item:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_second_item.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_second_item:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_append_second_item.yaml
trace_file: trace_files/cat_write.scap
rule_exception_third_item:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_third_item.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_third_item:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_append_third_item.yaml
trace_file: trace_files/cat_write.scap
rule_exception_quoted:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_quoted.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_multiple_values:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_append_multiple.yaml
trace_file: trace_files/cat_write.scap
rule_exception_comp:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_comp.yaml
trace_file: trace_files/cat_write.scap
rule_exception_append_comp:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_append_comp.yaml
trace_file: trace_files/cat_write.scap
rule_exception_values_listref:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_values_listref.yaml
trace_file: trace_files/cat_write.scap
rule_exception_values_listref_noparens:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_values_listref_noparens.yaml
trace_file: trace_files/cat_write.scap
rule_exception_values_list:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_values_list.yaml
trace_file: trace_files/cat_write.scap
rule_exception_single_field:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_single_field.yaml
trace_file: trace_files/cat_write.scap
rule_exception_single_field_append:
detect: False
detect_level: WARNING
rules_file:
- rules/exceptions/rule_exception_single_field_append.yaml
trace_file: trace_files/cat_write.scap

View File

@@ -1,31 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
priority: error
- rule: My Rule
exceptions:
- name: ex1
values:
- [nginx]
append: true

View File

@@ -1,30 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
priority: error
- rule: My Rule
exceptions:
- values:
- [nginx, /tmp/foo]
append: true

View File

@@ -1,31 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
priority: error
- rule: My Rule
exceptions:
- name: ex2
values:
- [apache, /tmp]
append: true

View File

@@ -1,25 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
comps: [=]
priority: error

View File

@@ -1,26 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
values:
- [nginx]
priority: error

View File

@@ -1,23 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
priority: error

View File

@@ -1,23 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- fields: [proc.name, fd.filename]
priority: error

View File

@@ -1,25 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [proc.name, fd.filename]
comps: [=, no-comp]
priority: error

View File

@@ -1,24 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
exceptions:
- name: ex1
fields: [not.exist]
priority: error

View File

@@ -1,38 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_contains
fields: [proc.name]
comps: [contains]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name_contains
values:
- [cat]
append: true

View File

@@ -1,42 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name
values:
- [not-cat]
append: true
- rule: Open From Cat
exceptions:
- name: proc_name
values:
- [cat]
append: true

View File

@@ -1,37 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
values:
- [cat]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name
values:
- [cat]
append: true

View File

@@ -1,41 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name
values:
- [not-cat]
- name: proc_name_cmdline
values:
- [cat, "cat /dev/null"]
- name: proc_name_cmdline_pname
values:
- [not-cat, "cat /dev/null", bash]
append: true

View File

@@ -1,36 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name_cmdline
values:
- [not-cat, not-cat]
- [cat, "cat /dev/null"]
append: true

View File

@@ -1,41 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name
values:
- [not-cat]
- name: proc_name_cmdline
values:
- [not-cat, "cat /dev/null"]
- name: proc_name_cmdline_pname
values:
- [cat, "cat /dev/null", bash]
append: true

View File

@@ -1,34 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_contains
fields: [proc.name]
comps: [contains]
values:
- [cat]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING

View File

@@ -1,28 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING

View File

@@ -1,30 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
values:
- [cat]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING

View File

@@ -1,36 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_name_cmdline
values:
- [not-cat, not-cat]
- [cat, '"cat /dev/null"']
append: true

View File

@@ -1,34 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
values:
- [not-cat]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
values:
- [cat, "cat /dev/null"]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
values:
- [not-cat, "cat /dev/null", bash]
priority: WARNING

View File

@@ -1,32 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
values:
- [not-cat, not-cat]
- [cat, "cat /dev/null"]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
priority: WARNING

View File

@@ -1,30 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_cmdline
fields: proc.cmdline
comps: in
values:
- cat /dev/zero
- "cat /dev/null"
priority: WARNING

View File

@@ -1,37 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_cmdline
fields: proc.cmdline
comps: in
values:
- cat /dev/zero
priority: WARNING
- rule: Open From Cat
exceptions:
- name: proc_cmdline
values:
- "cat /dev/null"
append: true

View File

@@ -1,34 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name
fields: [proc.name]
values:
- [not-cat]
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
values:
- [not-cat, "cat /dev/null"]
- name: proc_name_cmdline_pname
fields: [proc.name, proc.cmdline, proc.pname]
values:
- [cat, "cat /dev/null", bash]
priority: WARNING

View File

@@ -1,29 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
comps: [=, in]
values:
- [cat, [cat /dev/zero, "cat /dev/null"]]
priority: WARNING

View File

@@ -1,32 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- list: cat_cmdlines
items: [cat /dev/zero, "cat /dev/null"]
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
comps: [=, in]
values:
- [cat, (cat_cmdlines)]
priority: WARNING

View File

@@ -1,32 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- list: cat_cmdlines
items: [cat /dev/zero, "cat /dev/null"]
- rule: Open From Cat
desc: A process named cat does an open
condition: evt.type=open and proc.name=cat
output: "An open was seen (command=%proc.cmdline)"
exceptions:
- name: proc_name_cmdline
fields: [proc.name, proc.cmdline]
comps: [=, in]
values:
- [cat, cat_cmdlines]
priority: WARNING

View File

@@ -1,21 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
- rule: My Rule
desc: Some desc
condition: evt.type=open and proc.name=cat
output: Some output
priority: error

View File

@@ -0,0 +1 @@
- foo: bar

View File

@@ -98,18 +98,14 @@ function run_tests() {
# as we're watching the return status when running avocado. # as we're watching the return status when running avocado.
set +e set +e
TEST_RC=0 TEST_RC=0
suites=($SCRIPTDIR/falco_traces.yaml $SCRIPTDIR/falco_tests.yaml $SCRIPTDIR/falco_k8s_audit_tests.yaml $SCRIPTDIR/falco_tests_psp.yaml $SCRIPTDIR/falco_tests_exceptions.yaml) suites=($SCRIPTDIR/falco_traces.yaml $SCRIPTDIR/falco_tests.yaml $SCRIPTDIR/falco_k8s_audit_tests.yaml $SCRIPTDIR/falco_tests_psp.yaml)
if [ "$SKIP_PACKAGES_TESTS" = false ] ; then if [ "$SKIP_PACKAGES_TESTS" = false ] ; then
suites+=($SCRIPTDIR/falco_tests_package.yaml) suites+=($SCRIPTDIR/falco_tests_package.yaml)
fi fi
XUNIT_DIR="${OPT_BUILD_DIR}/integration-tests-xunit"
mkdir -p "${XUNIT_DIR}"
for mult in "${suites[@]}"; do for mult in "${suites[@]}"; do
XUNIT_FILE_NAME="${XUNIT_DIR}/$(basename "${mult}").xml" CMD="avocado run --mux-yaml $mult --job-results-dir $SCRIPTDIR/job-results -- $SCRIPTDIR/falco_test.py"
CMD="avocado run --xunit ${XUNIT_FILE_NAME} --mux-yaml $mult --job-results-dir $SCRIPTDIR/job-results -- $SCRIPTDIR/falco_test.py"
echo "Running $CMD" echo "Running $CMD"
BUILD_DIR=${OPT_BUILD_DIR} $CMD BUILD_DIR=${OPT_BUILD_DIR} $CMD
RC=$? RC=$?

View File

@@ -38,7 +38,8 @@ if(MINIMAL_BUILD)
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp" "${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp"
"${SYSDIG_SOURCE_DIR}/userspace/libscap" "${SYSDIG_SOURCE_DIR}/userspace/libscap"
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp" "${SYSDIG_SOURCE_DIR}/userspace/libsinsp"
"${PROJECT_BINARY_DIR}/userspace/engine") "${PROJECT_BINARY_DIR}/userspace/engine"
"${PROJECT_SOURCE_DIR}/userspace/libhawk")
else() else()
target_include_directories( target_include_directories(
falco_engine falco_engine
@@ -51,11 +52,17 @@ else()
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp" "${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp"
"${SYSDIG_SOURCE_DIR}/userspace/libscap" "${SYSDIG_SOURCE_DIR}/userspace/libscap"
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp" "${SYSDIG_SOURCE_DIR}/userspace/libsinsp"
"${PROJECT_BINARY_DIR}/userspace/engine") "${PROJECT_BINARY_DIR}/userspace/engine"
"${PROJECT_SOURCE_DIR}/userspace/libhawk")
endif() endif()
target_link_libraries(falco_engine "${FALCO_SINSP_LIBRARY}" "${LPEG_LIB}" "${LYAML_LIB}" "${LIBYAML_LIB}") target_link_libraries(falco_engine "${FALCO_SINSP_LIBRARY}" "${LPEG_LIB}" "${LYAML_LIB}" "${LIBYAML_LIB}")
if(DEFINED LIBHAWK_LIBRARIES)
message(STATUS "Using externally provided libhawk implementations: ${LIBHAWK_LIBRARIES}")
target_link_libraries(falco_engine ${LIBHAWK_LIBRARIES})
endif()
configure_file(config_falco_engine.h.in config_falco_engine.h) configure_file(config_falco_engine.h.in config_falco_engine.h)
if(DEFINED FALCO_COMPONENT) if(DEFINED FALCO_COMPONENT)

View File

@@ -26,7 +26,8 @@ limitations under the License.
#include "formats.h" #include "formats.h"
extern "C" { extern "C"
{
#include "lpeg.h" #include "lpeg.h"
#include "lyaml.h" #include "lyaml.h"
} }
@@ -34,7 +35,6 @@ extern "C" {
#include "utils.h" #include "utils.h"
#include "banned.h" // This raises a compilation error when certain functions are used #include "banned.h" // This raises a compilation error when certain functions are used
string lua_on_event = "on_event"; string lua_on_event = "on_event";
string lua_print_stats = "print_stats"; string lua_print_stats = "print_stats";
@@ -42,25 +42,24 @@ using namespace std;
nlohmann::json::json_pointer falco_engine::k8s_audit_time = "/stageTimestamp"_json_pointer; nlohmann::json::json_pointer falco_engine::k8s_audit_time = "/stageTimestamp"_json_pointer;
falco_engine::falco_engine(bool seed_rng, const std::string& alternate_lua_dir) falco_engine::falco_engine(bool seed_rng, const std::string &alternate_lua_dir):
: m_rules(NULL), m_next_ruleset_id(0), m_rules(NULL), m_next_ruleset_id(0),
m_min_priority(falco_common::PRIORITY_DEBUG), m_min_priority(falco_common::PRIORITY_DEBUG),
m_alternate_lua_dir(alternate_lua_dir), m_sampling_ratio(1), m_sampling_multiplier(0),
m_sampling_ratio(1), m_sampling_multiplier(0), m_replace_container_info(false)
m_replace_container_info(false)
{ {
luaopen_lpeg(m_ls); luaopen_lpeg(m_ls);
luaopen_yaml(m_ls); luaopen_yaml(m_ls);
m_alternate_lua_dir = alternate_lua_dir;
falco_common::init(m_lua_main_filename.c_str(), alternate_lua_dir.c_str()); falco_common::init(m_lua_main_filename.c_str(), alternate_lua_dir.c_str());
falco_rules::init(m_ls); falco_rules::init(m_ls);
m_sinsp_rules.reset(new falco_sinsp_ruleset()); clear_filters();
m_k8s_audit_rules.reset(new falco_ruleset());
if(seed_rng) if(seed_rng)
{ {
srandom((unsigned) getpid()); srandom((unsigned)getpid());
} }
m_default_ruleset_id = find_ruleset_id(m_default_ruleset); m_default_ruleset_id = find_ruleset_id(m_default_ruleset);
@@ -69,46 +68,26 @@ falco_engine::falco_engine(bool seed_rng, const std::string& alternate_lua_dir)
m_json_factory = make_shared<json_event_filter_factory>(); m_json_factory = make_shared<json_event_filter_factory>();
} }
falco_engine::falco_engine(const falco_engine &orig_engine)
: m_rules(NULL), m_next_ruleset_id(0),
m_min_priority(falco_common::PRIORITY_DEBUG),
m_sampling_ratio(1), m_sampling_multiplier(0),
m_replace_container_info(false)
{
luaopen_lpeg(m_ls);
luaopen_yaml(m_ls);
m_alternate_lua_dir = orig_engine.m_alternate_lua_dir;
falco_common::init(m_lua_main_filename.c_str(), m_alternate_lua_dir.c_str());
falco_rules::init(m_ls);
m_sinsp_rules.reset(new falco_sinsp_ruleset());
m_k8s_audit_rules.reset(new falco_ruleset());
m_default_ruleset_id = find_ruleset_id(m_default_ruleset);
// Create this now so we can potentially list filters and exit
m_json_factory = make_shared<json_event_filter_factory>();
set_inspector(orig_engine.m_inspector);
std::string extra = orig_engine.m_extra;
set_extra(extra, orig_engine.m_replace_container_info);
set_min_priority(orig_engine.m_min_priority);
set_sampling_multiplier(orig_engine.m_sampling_multiplier);
set_sampling_ratio(orig_engine.m_sampling_ratio);
}
falco_engine::~falco_engine() falco_engine::~falco_engine()
{ {
if (m_rules) if(m_rules)
{ {
delete m_rules; delete m_rules;
} }
} }
falco_engine *falco_engine::clone()
{
auto engine = new falco_engine(true, m_alternate_lua_dir);
engine->set_inspector(m_inspector);
engine->set_extra(m_extra, m_replace_container_info);
engine->set_min_priority(m_min_priority);
return engine;
}
uint32_t falco_engine::engine_version() uint32_t falco_engine::engine_version()
{ {
return (uint32_t) FALCO_ENGINE_VERSION; return (uint32_t)FALCO_ENGINE_VERSION;
} }
#define DESCRIPTION_TEXT_START 16 #define DESCRIPTION_TEXT_START 16
@@ -174,17 +153,28 @@ void falco_engine::list_fields(bool names_only)
} }
} }
void falco_engine::load_rules(const string &rules_content, bool verbose, bool all_events) void falco_engine::load_rules_file(const string &rules_filename, bool verbose, bool all_events)
{ {
uint64_t dummy; ifstream is;
return load_rules(rules_content, verbose, all_events, dummy); is.open(rules_filename);
if(!is.is_open())
{
throw falco_exception("Could not open rules filename " +
rules_filename + " " +
"for reading");
}
string rules_content((istreambuf_iterator<char>(is)),
istreambuf_iterator<char>());
load_rules(rules_content, verbose, all_events);
} }
void falco_engine::load_rules(const string &rules_content, bool verbose, bool all_events, uint64_t &required_engine_version) void falco_engine::load_rules(const string &rules_content, bool verbose, bool all_events)
{ {
// The engine must have been given an inspector by now. // The engine must have been given an inspector by now.
if(! m_inspector) if(!m_inspector)
{ {
throw falco_exception("No inspector provided"); throw falco_exception("No inspector provided");
} }
@@ -196,45 +186,52 @@ void falco_engine::load_rules(const string &rules_content, bool verbose, bool al
if(!m_rules) if(!m_rules)
{ {
m_rules = new falco_rules(m_inspector, // Note that falco_formats is added to the lua state used by the falco engine only.
this, // Within the engine, only formats.
m_ls); // Formatter is used, so we can unconditionally set json_output to false.
bool json_output = false;
bool json_include_output_property = false;
falco_formats::init(m_inspector, this, m_ls, json_output, json_include_output_property);
m_rules = new falco_rules(m_inspector, this, m_ls);
} }
// Note that falco_formats is added to the lua state used
// by the falco engine only. Within the engine, only
// formats.formatter is used, so we can unconditionally set
// json_output to false.
bool json_output = false;
bool json_include_output_property = false;
falco_formats::init(m_inspector, this, m_ls, json_output, json_include_output_property);
m_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info, m_min_priority, required_engine_version);
}
void falco_engine::load_rules_file(const string &rules_filename, bool verbose, bool all_events)
{
uint64_t dummy; uint64_t dummy;
// m_sinsp_rules.reset(new falco_sinsp_ruleset());
// m_k8s_audit_rules.reset(new falco_ruleset());
m_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info, m_min_priority, dummy);
return load_rules_file(rules_filename, verbose, all_events, dummy); m_is_ready = true;
return;
//
// auto local_rules = new falco_rules(m_inspector, this, m_ls);
// try
// {
// uint64_t dummy;
// local_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info, m_min_priority, dummy);
// // m_rules = local_rules
// // std::atomic<falco_rules *> lore(m_rules);
// // std::atomic_exchange(&lore, local_rules);
// // SCHEDULE LOCAL_RULES AS NEXT RULESET
// }
// catch(const falco_exception &e)
// {
// // todo
// printf("IGNORE BECAUSE OF ERROR LOADING RULESET!\n");
// }
} }
void falco_engine::load_rules_file(const string &rules_filename, bool verbose, bool all_events, uint64_t &required_engine_version) // // todo(fntlnz): not sure we want this in falco_engine
// void falco_engine::watch_rules(bool verbose, bool all_events)
// {
// hawk_watch_rules((hawk_watch_rules_cb)rules_cb, reinterpret_cast<hawk_engine *>(this));
// }
bool falco_engine::is_ready()
{ {
ifstream is; return m_is_ready;
is.open(rules_filename);
if (!is.is_open())
{
throw falco_exception("Could not open rules filename " +
rules_filename + " " +
"for reading");
}
string rules_content((istreambuf_iterator<char>(is)),
istreambuf_iterator<char>());
load_rules(rules_content, verbose, all_events, required_engine_version);
} }
void falco_engine::enable_rule(const string &substring, bool enabled, const string &ruleset) void falco_engine::enable_rule(const string &substring, bool enabled, const string &ruleset)
@@ -302,7 +299,7 @@ uint64_t falco_engine::num_rules_for_ruleset(const std::string &ruleset)
uint16_t ruleset_id = find_ruleset_id(ruleset); uint16_t ruleset_id = find_ruleset_id(ruleset);
return m_sinsp_rules->num_rules_for_ruleset(ruleset_id) + return m_sinsp_rules->num_rules_for_ruleset(ruleset_id) +
m_k8s_audit_rules->num_rules_for_ruleset(ruleset_id); m_k8s_audit_rules->num_rules_for_ruleset(ruleset_id);
} }
void falco_engine::evttypes_for_ruleset(std::vector<bool> &evttypes, const std::string &ruleset) void falco_engine::evttypes_for_ruleset(std::vector<bool> &evttypes, const std::string &ruleset)
@@ -332,15 +329,38 @@ unique_ptr<falco_engine::rule_result> falco_engine::process_sinsp_event(sinsp_ev
} }
unique_ptr<struct rule_result> res(new rule_result()); unique_ptr<struct rule_result> res(new rule_result());
res->source = "syscall";
populate_rule_result(res, ev); std::lock_guard<std::mutex> guard(m_ls_semaphore);
lua_getglobal(m_ls, lua_on_event.c_str());
if(lua_isfunction(m_ls, -1))
{
lua_pushnumber(m_ls, ev->get_check_id());
if(lua_pcall(m_ls, 1, 3, 0) != 0)
{
const char *lerr = lua_tostring(m_ls, -1);
string err = "Error invoking function output: " + string(lerr);
throw falco_exception(err);
}
res->evt = ev;
const char *p = lua_tostring(m_ls, -3);
res->rule = p;
res->source = "syscall";
res->priority_num = (falco_common::priority_type)lua_tonumber(m_ls, -2);
res->format = lua_tostring(m_ls, -1);
lua_pop(m_ls, 3);
}
else
{
throw falco_exception("No function " + lua_on_event + " found in lua compiler module");
}
return res; return res;
} }
unique_ptr<falco_engine::rule_result> falco_engine::process_sinsp_event(sinsp_evt *ev) unique_ptr<falco_engine::rule_result> falco_engine::process_sinsp_event(sinsp_evt *ev)
{ {
// todo(leodido, fntlnz) > pass the last ruleset id
return process_sinsp_event(ev, m_default_ruleset_id); return process_sinsp_event(ev, m_default_ruleset_id);
} }
@@ -352,56 +372,39 @@ unique_ptr<falco_engine::rule_result> falco_engine::process_k8s_audit_event(json
} }
// All k8s audit events have the single tag "1". // All k8s audit events have the single tag "1".
if(!m_k8s_audit_rules->run((gen_event *) ev, 1, ruleset_id)) if(!m_k8s_audit_rules->run((gen_event *)ev, 1, ruleset_id))
{ {
return unique_ptr<struct rule_result>(); return unique_ptr<struct rule_result>();
} }
unique_ptr<struct rule_result> res(new rule_result()); unique_ptr<struct rule_result> res(new rule_result());
res->source = "k8s_audit";
populate_rule_result(res, ev);
return res;
}
void falco_engine::populate_rule_result(unique_ptr<struct rule_result> &res, gen_event *ev)
{
std::lock_guard<std::mutex> guard(m_ls_semaphore); std::lock_guard<std::mutex> guard(m_ls_semaphore);
lua_getglobal(m_ls, lua_on_event.c_str()); lua_getglobal(m_ls, lua_on_event.c_str());
if(lua_isfunction(m_ls, -1)) if(lua_isfunction(m_ls, -1))
{ {
lua_pushnumber(m_ls, ev->get_check_id()); lua_pushnumber(m_ls, ev->get_check_id());
if(lua_pcall(m_ls, 1, 4, 0) != 0) if(lua_pcall(m_ls, 1, 3, 0) != 0)
{ {
const char* lerr = lua_tostring(m_ls, -1); const char *lerr = lua_tostring(m_ls, -1);
string err = "Error invoking function output: " + string(lerr); string err = "Error invoking function output: " + string(lerr);
throw falco_exception(err); throw falco_exception(err);
} }
const char *p = lua_tostring(m_ls, -4);
res->rule = p;
res->evt = ev; res->evt = ev;
res->priority_num = (falco_common::priority_type) lua_tonumber(m_ls, -3); const char *p = lua_tostring(m_ls, -3);
res->format = lua_tostring(m_ls, -2); res->rule = p;
res->source = "k8s_audit";
// Exception fields are passed back as a table res->priority_num = (falco_common::priority_type)lua_tonumber(m_ls, -2);
lua_pushnil(m_ls); /* first key */ res->format = lua_tostring(m_ls, -1);
while (lua_next(m_ls, -2) != 0) { lua_pop(m_ls, 3);
// key is at index -2, value is at index
// -1. We want the keys.
res->exception_fields.insert(luaL_checkstring(m_ls, -2));
// Remove value, keep key for next iteration
lua_pop(m_ls, 1);
}
lua_pop(m_ls, 4);
} }
else else
{ {
throw falco_exception("No function " + lua_on_event + " found in lua compiler module"); throw falco_exception("No function " + lua_on_event + " found in lua compiler module");
} }
return res;
} }
bool falco_engine::parse_k8s_audit_json(nlohmann::json &j, std::list<json_event> &evts, bool top) bool falco_engine::parse_k8s_audit_json(nlohmann::json &j, std::list<json_event> &evts, bool top)
@@ -418,7 +421,7 @@ bool falco_engine::parse_k8s_audit_json(nlohmann::json &j, std::list<json_event>
{ {
// Note we only handle a single top level array, to // Note we only handle a single top level array, to
// avoid excessive recursion. // avoid excessive recursion.
if(! parse_k8s_audit_json(item, evts, false)) if(!parse_k8s_audit_json(item, evts, false))
{ {
return false; return false;
} }
@@ -496,7 +499,7 @@ void falco_engine::print_stats()
{ {
if(lua_pcall(m_ls, 0, 0, 0) != 0) if(lua_pcall(m_ls, 0, 0, 0) != 0)
{ {
const char* lerr = lua_tostring(m_ls, -1); const char *lerr = lua_tostring(m_ls, -1);
string err = "Error invoking function print_stats: " + string(lerr); string err = "Error invoking function print_stats: " + string(lerr);
throw falco_exception(err); throw falco_exception(err);
} }
@@ -505,21 +508,20 @@ void falco_engine::print_stats()
{ {
throw falco_exception("No function " + lua_print_stats + " found in lua rule loader module"); throw falco_exception("No function " + lua_print_stats + " found in lua rule loader module");
} }
} }
void falco_engine::add_sinsp_filter(string &rule, void falco_engine::add_sinsp_filter(string &rule,
set<uint32_t> &evttypes, set<uint32_t> &evttypes,
set<uint32_t> &syscalls, set<uint32_t> &syscalls,
set<string> &tags, set<string> &tags,
sinsp_filter* filter) sinsp_filter *filter)
{ {
m_sinsp_rules->add(rule, evttypes, syscalls, tags, filter); m_sinsp_rules->add(rule, evttypes, syscalls, tags, filter);
} }
void falco_engine::add_k8s_audit_filter(string &rule, void falco_engine::add_k8s_audit_filter(string &rule,
set<string> &tags, set<string> &tags,
json_event_filter* filter) json_event_filter *filter)
{ {
// All k8s audit events have a single tag "1". // All k8s audit events have a single tag "1".
std::set<uint32_t> event_tags = {1}; std::set<uint32_t> event_tags = {1};
@@ -561,8 +563,8 @@ inline bool falco_engine::should_drop_evt()
return false; return false;
} }
double coin = (random() * (1.0/RAND_MAX)); double coin = (random() * (1.0 / RAND_MAX));
return (coin >= (1.0/(m_sampling_multiplier * m_sampling_ratio))); return (coin >= (1.0 / (m_sampling_multiplier * m_sampling_ratio)));
} }
sinsp_filter_factory &falco_engine::sinsp_factory() sinsp_filter_factory &falco_engine::sinsp_factory()

View File

@@ -38,6 +38,11 @@ limitations under the License.
#include "config_falco_engine.h" #include "config_falco_engine.h"
#include "falco_common.h" #include "falco_common.h"
extern "C"
{
#include "hawk.h"
}
// //
// This class acts as the primary interface between a program and the // This class acts as the primary interface between a program and the
// falco rules engine. Falco outputs (writing to files/syslog/etc) are // falco rules engine. Falco outputs (writing to files/syslog/etc) are
@@ -47,10 +52,12 @@ limitations under the License.
class falco_engine : public falco_common class falco_engine : public falco_common
{ {
public: public:
explicit falco_engine(bool seed_rng=true, const std::string& alternate_lua_dir=FALCO_ENGINE_SOURCE_LUA_DIR); falco_engine(bool seed_rng = true, const std::string &alternate_lua_dir = FALCO_ENGINE_SOURCE_LUA_DIR);
falco_engine(const falco_engine &orig_engine);
virtual ~falco_engine(); virtual ~falco_engine();
falco_engine(const falco_engine &rhs);
falco_engine *clone();
// A given engine has a version which identifies the fields // A given engine has a version which identifies the fields
// and rules file format it supports. This version will change // and rules file format it supports. This version will change
// any time the code that handles rules files, expression // any time the code that handles rules files, expression
@@ -58,7 +65,7 @@ public:
static uint32_t engine_version(); static uint32_t engine_version();
// Print to stdout (using printf) a description of each field supported by this engine. // Print to stdout (using printf) a description of each field supported by this engine.
void list_fields(bool names_only=false); void list_fields(bool names_only = false);
// //
// Load rules either directly or from a filename. // Load rules either directly or from a filename.
@@ -66,12 +73,8 @@ public:
void load_rules_file(const std::string &rules_filename, bool verbose, bool all_events); void load_rules_file(const std::string &rules_filename, bool verbose, bool all_events);
void load_rules(const std::string &rules_content, bool verbose, bool all_events); void load_rules(const std::string &rules_content, bool verbose, bool all_events);
// // Watch and live-reload rules using an external ABI interface provided by libhawk
// Identical to above, but also returns the required engine version for the file/content. void watch_rules(bool verbose, bool all_events);
// (If no required engine version is specified, returns 0).
//
void load_rules_file(const std::string &rules_filename, bool verbose, bool all_events, uint64_t &required_engine_version);
void load_rules(const std::string &rules_content, bool verbose, bool all_events, uint64_t &required_engine_version);
// //
// Enable/Disable any rules matching the provided substring. // Enable/Disable any rules matching the provided substring.
@@ -86,7 +89,6 @@ public:
// Wrapper that assumes the default ruleset // Wrapper that assumes the default ruleset
void enable_rule(const std::string &substring, bool enabled); void enable_rule(const std::string &substring, bool enabled);
// Like enable_rule, but the rule name must be an exact match. // Like enable_rule, but the rule name must be an exact match.
void enable_rule_exact(const std::string &rule_name, bool enabled, const std::string &ruleset); void enable_rule_exact(const std::string &rule_name, bool enabled, const std::string &ruleset);
@@ -155,13 +157,13 @@ public:
// **Methods Related to k8s audit log events, which are // **Methods Related to k8s audit log events, which are
// **represented as json objects. // **represented as json objects.
struct rule_result { struct rule_result
{
gen_event *evt; gen_event *evt;
std::string rule; std::string rule;
std::string source; std::string source;
falco_common::priority_type priority_num; falco_common::priority_type priority_num;
std::string format; std::string format;
std::set<std::string> exception_fields;
}; };
// //
@@ -172,7 +174,7 @@ public:
// Returns true if the json object was recognized as a k8s // Returns true if the json object was recognized as a k8s
// audit event(s), false otherwise. // audit event(s), false otherwise.
// //
bool parse_k8s_audit_json(nlohmann::json &j, std::list<json_event> &evts, bool top=true); bool parse_k8s_audit_json(nlohmann::json &j, std::list<json_event> &evts, bool top = true);
// //
// Given an event, check it against the set of rules in the // Given an event, check it against the set of rules in the
@@ -197,7 +199,7 @@ public:
// //
void add_k8s_audit_filter(std::string &rule, void add_k8s_audit_filter(std::string &rule,
std::set<std::string> &tags, std::set<std::string> &tags,
json_event_filter* filter); json_event_filter *filter);
// **Methods Related to Sinsp Events e.g system calls // **Methods Related to Sinsp Events e.g system calls
// //
@@ -238,13 +240,14 @@ public:
std::set<uint32_t> &evttypes, std::set<uint32_t> &evttypes,
std::set<uint32_t> &syscalls, std::set<uint32_t> &syscalls,
std::set<std::string> &tags, std::set<std::string> &tags,
sinsp_filter* filter); sinsp_filter *filter);
sinsp_filter_factory &sinsp_factory(); sinsp_filter_factory &sinsp_factory();
json_event_filter_factory &json_factory(); json_event_filter_factory &json_factory();
private: bool is_ready();
private:
static nlohmann::json::json_pointer k8s_audit_time; static nlohmann::json::json_pointer k8s_audit_time;
// //
@@ -264,7 +267,6 @@ private:
std::unique_ptr<falco_sinsp_ruleset> m_sinsp_rules; std::unique_ptr<falco_sinsp_ruleset> m_sinsp_rules;
std::unique_ptr<falco_ruleset> m_k8s_audit_rules; std::unique_ptr<falco_ruleset> m_k8s_audit_rules;
void populate_rule_result(unique_ptr<struct rule_result> &res, gen_event *ev);
std::string m_alternate_lua_dir; std::string m_alternate_lua_dir;
// //
@@ -296,5 +298,6 @@ private:
std::string m_extra; std::string m_extra;
bool m_replace_container_info; bool m_replace_container_info;
};
bool m_is_ready = false;
};

View File

@@ -16,7 +16,7 @@ limitations under the License.
// The version of rules/filter fields/etc supported by this falco // The version of rules/filter fields/etc supported by this falco
// engine. // engine.
#define FALCO_ENGINE_VERSION (8) #define FALCO_ENGINE_VERSION (7)
// This is the result of running "falco --list -N | sha256sum" and // This is the result of running "falco --list -N | sha256sum" and
// represents the fields supported by this version of falco. It's used // represents the fields supported by this version of falco. It's used

View File

@@ -26,7 +26,7 @@ bool falco_formats::s_json_output = false;
bool falco_formats::s_json_include_output_property = true; bool falco_formats::s_json_include_output_property = true;
std::unique_ptr<sinsp_evt_formatter_cache> falco_formats::s_formatters = NULL; std::unique_ptr<sinsp_evt_formatter_cache> falco_formats::s_formatters = NULL;
const static struct luaL_Reg ll_falco[] = const static struct luaL_reg ll_falco[] =
{ {
{"formatter", &falco_formats::lua_formatter}, {"formatter", &falco_formats::lua_formatter},
{"free_formatter", &falco_formats::lua_free_formatter}, {"free_formatter", &falco_formats::lua_free_formatter},
@@ -60,32 +60,25 @@ int falco_formats::lua_formatter(lua_State *ls)
{ {
sinsp_evt_formatter *formatter; sinsp_evt_formatter *formatter;
formatter = new sinsp_evt_formatter(s_inspector, format); formatter = new sinsp_evt_formatter(s_inspector, format);
lua_pushnil(ls);
lua_pushlightuserdata(ls, formatter); lua_pushlightuserdata(ls, formatter);
} }
else else
{ {
json_event_formatter *formatter; json_event_formatter *formatter;
formatter = new json_event_formatter(s_engine->json_factory(), format); formatter = new json_event_formatter(s_engine->json_factory(), format);
lua_pushnil(ls);
lua_pushlightuserdata(ls, formatter); lua_pushlightuserdata(ls, formatter);
} }
} }
catch(exception &e) catch(sinsp_exception &e)
{ {
std::ostringstream os; luaL_error(ls, "Invalid output format '%s': '%s'", format.c_str(), e.what());
}
os << "Invalid output format '" catch(falco_exception &e)
<< format {
<< "': '" luaL_error(ls, "Invalid output format '%s': '%s'", format.c_str(), e.what());
<< e.what()
<< "'";
lua_pushstring(ls, os.str().c_str());
lua_pushnil(ls);
} }
return 2; return 1;
} }
int falco_formats::lua_free_formatter(lua_State *ls) int falco_formats::lua_free_formatter(lua_State *ls)

View File

@@ -126,31 +126,6 @@ function set_output(output_format, state)
end end
end end
-- This should be keep in sync with parser.lua
defined_comp_operators = {
["="]=1,
["=="] = 1,
["!="] = 1,
["<="] = 1,
[">="] = 1,
["<"] = 1,
[">"] = 1,
["contains"] = 1,
["icontains"] = 1,
["glob"] = 1,
["startswith"] = 1,
["endswith"] = 1,
["in"] = 1,
["intersects"] = 1,
["pmatch"] = 1
}
defined_list_comp_operators = {
["in"] = 1,
["intersects"] = 1,
["pmatch"] = 1
}
-- Note that the rules_by_name and rules_by_idx refer to the same rule -- Note that the rules_by_name and rules_by_idx refer to the same rule
-- object. The by_name index is used for things like describing rules, -- object. The by_name index is used for things like describing rules,
-- and the by_idx index is used to map the relational node index back -- and the by_idx index is used to map the relational node index back
@@ -278,126 +253,19 @@ function get_lines(rules_lines, row, num_lines)
return ret return ret
end end
function quote_item(item)
-- Add quotes if the string contains spaces and doesn't start/end
-- w/ quotes
if string.find(item, " ") then
if string.sub(item, 1, 1) ~= "'" and string.sub(item, 1, 1) ~= '"' then
item = "\""..item.."\""
end
end
return item
end
function paren_item(item)
if string.sub(item, 1, 1) ~= "(" then
item = "("..item..")"
end
return item
end
function build_error(rules_lines, row, num_lines, err) function build_error(rules_lines, row, num_lines, err)
local ret = err.."\n---\n"..get_lines(rules_lines, row, num_lines).."---" local ret = err.."\n---\n"..get_lines(rules_lines, row, num_lines).."---"
return {ret} return ret
end end
function build_error_with_context(ctx, err) function build_error_with_context(ctx, err)
local ret = err.."\n---\n"..ctx.."---" local ret = err.."\n---\n"..ctx.."---"
return {ret} return ret
end end
function validate_exception_item_multi_fields(eitem, context)
local name = eitem['name']
local fields = eitem['fields']
local values = eitem['values']
local comps = eitem['comps']
if comps == nil then
comps = {}
for c=1,#fields do
table.insert(comps, "=")
end
eitem['comps'] = comps
else
if #fields ~= #comps then
return false, build_error_with_context(context, "Rule exception item "..name..": fields and comps lists must have equal length"), warnings
end
end
for k, fname in ipairs(fields) do
if not is_defined_filter(fname) then
return false, build_error_with_context(context, "Rule exception item "..name..": field name "..fname.." is not a supported filter field"), warnings
end
end
for k, comp in ipairs(comps) do
if defined_comp_operators[comp] == nil then
return false, build_error_with_context(context, "Rule exception item "..name..": comparison operator "..comp.." is not a supported comparison operator"), warnings
end
end
end
function validate_exception_item_single_field(eitem, context)
local name = eitem['name']
local fields = eitem['fields']
local values = eitem['values']
local comps = eitem['comps']
if comps == nil then
eitem['comps'] = "in"
comps = eitem['comps']
else
if type(fields) ~= "string" or type(comps) ~= "string" then
return false, build_error_with_context(context, "Rule exception item "..name..": fields and comps must both be strings"), warnings
end
end
if not is_defined_filter(fields) then
return false, build_error_with_context(context, "Rule exception item "..name..": field name "..fields.." is not a supported filter field"), warnings
end
if defined_comp_operators[comps] == nil then
return false, build_error_with_context(context, "Rule exception item "..name..": comparison operator "..comps.." is not a supported comparison operator"), warnings
end
end
function is_defined_filter(filter)
if defined_noarg_filters[filter] ~= nil then
return true
else
bracket_idx = string.find(filter, "[", 1, true)
if bracket_idx ~= nil then
subfilter = string.sub(filter, 1, bracket_idx-1)
if defined_arg_filters[subfilter] ~= nil then
return true
end
end
dot_idx = string.find(filter, ".", 1, true)
while dot_idx ~= nil do
subfilter = string.sub(filter, 1, dot_idx-1)
if defined_arg_filters[subfilter] ~= nil then
return true
end
dot_idx = string.find(filter, ".", dot_idx+1, true)
end
end
return false
end
function load_rules_doc(rules_mgr, doc, load_state) function load_rules_doc(rules_mgr, doc, load_state)
local warnings = {}
-- Iterate over yaml list. In this pass, all we're doing is -- Iterate over yaml list. In this pass, all we're doing is
-- populating the set of rules, macros, and lists. We're not -- populating the set of rules, macros, and lists. We're not
-- expanding/compiling anything yet. All that will happen in a -- expanding/compiling anything yet. All that will happen in a
@@ -411,7 +279,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
load_state.indices[load_state.cur_item_idx]) load_state.indices[load_state.cur_item_idx])
if (not (type(v) == "table")) then if (not (type(v) == "table")) then
return false, build_error_with_context(context, "Unexpected element of type " ..type(v)..". Each element should be a yaml associative array."), warnings return false, build_error_with_context(context, "Unexpected element of type " ..type(v)..". Each element should be a yaml associative array.")
end end
v['context'] = context v['context'] = context
@@ -423,13 +291,13 @@ function load_rules_doc(rules_mgr, doc, load_state)
end end
if falco_rules.engine_version(rules_mgr) < v['required_engine_version'] then if falco_rules.engine_version(rules_mgr) < v['required_engine_version'] then
return false, build_error_with_context(v['context'], "Rules require engine version "..v['required_engine_version']..", but engine version is "..falco_rules.engine_version(rules_mgr)), warnings return false, build_error_with_context(v['context'], "Rules require engine version "..v['required_engine_version']..", but engine version is "..falco_rules.engine_version(rules_mgr))
end end
elseif (v['macro']) then elseif (v['macro']) then
if (v['macro'] == nil or type(v['macro']) == "table") then if (v['macro'] == nil or type(v['macro']) == "table") then
return false, build_error_with_context(v['context'], "Macro name is empty"), warnings return false, build_error_with_context(v['context'], "Macro name is empty")
end end
if v['source'] == nil then if v['source'] == nil then
@@ -442,7 +310,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
for j, field in ipairs({'condition'}) do for j, field in ipairs({'condition'}) do
if (v[field] == nil) then if (v[field] == nil) then
return false, build_error_with_context(v['context'], "Macro must have property "..field), warnings return false, build_error_with_context(v['context'], "Macro must have property "..field)
end end
end end
@@ -455,7 +323,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
if append then if append then
if state.macros_by_name[v['macro']] == nil then if state.macros_by_name[v['macro']] == nil then
return false, build_error_with_context(v['context'], "Macro " ..v['macro'].. " has 'append' key but no macro by that name already exists"), warnings return false, build_error_with_context(v['context'], "Macro " ..v['macro'].. " has 'append' key but no macro by that name already exists")
end end
state.macros_by_name[v['macro']]['condition'] = state.macros_by_name[v['macro']]['condition'] .. " " .. v['condition'] state.macros_by_name[v['macro']]['condition'] = state.macros_by_name[v['macro']]['condition'] .. " " .. v['condition']
@@ -470,7 +338,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
elseif (v['list']) then elseif (v['list']) then
if (v['list'] == nil or type(v['list']) == "table") then if (v['list'] == nil or type(v['list']) == "table") then
return false, build_error_with_context(v['context'], "List name is empty"), warnings return false, build_error_with_context(v['context'], "List name is empty")
end end
if state.lists_by_name[v['list']] == nil then if state.lists_by_name[v['list']] == nil then
@@ -479,7 +347,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
for j, field in ipairs({'items'}) do for j, field in ipairs({'items'}) do
if (v[field] == nil) then if (v[field] == nil) then
return false, build_error_with_context(v['context'], "List must have property "..field), warnings return false, build_error_with_context(v['context'], "List must have property "..field)
end end
end end
@@ -492,7 +360,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
if append then if append then
if state.lists_by_name[v['list']] == nil then if state.lists_by_name[v['list']] == nil then
return false, build_error_with_context(v['context'], "List " ..v['list'].. " has 'append' key but no list by that name already exists"), warnings return false, build_error_with_context(v['context'], "List " ..v['list'].. " has 'append' key but no list by that name already exists")
end end
for j, elem in ipairs(v['items']) do for j, elem in ipairs(v['items']) do
@@ -505,11 +373,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
elseif (v['rule']) then elseif (v['rule']) then
if (v['rule'] == nil or type(v['rule']) == "table") then if (v['rule'] == nil or type(v['rule']) == "table") then
return false, build_error_with_context(v['context'], "Rule name is empty"), warnings return false, build_error_with_context(v['context'], "Rule name is empty")
end
if (v['condition'] == nil and v['exceptions'] == nil) then
return false, build_error_with_context(v['context'], "Rule must have exceptions or condition property"), warnings
end end
-- By default, if a rule's condition refers to an unknown -- By default, if a rule's condition refers to an unknown
@@ -522,13 +386,6 @@ function load_rules_doc(rules_mgr, doc, load_state)
v['source'] = "syscall" v['source'] = "syscall"
end end
-- Add an empty exceptions property to the rule if not
-- defined, but add a warning about defining one
if v['exceptions'] == nil then
warnings[#warnings + 1] = "Rule "..v['rule']..": consider adding an exceptions property to define supported exceptions fields"
v['exceptions'] = {}
end
-- Possibly append to the condition field of an existing rule -- Possibly append to the condition field of an existing rule
append = false append = false
@@ -536,95 +393,21 @@ function load_rules_doc(rules_mgr, doc, load_state)
append = v['append'] append = v['append']
end end
-- Validate the contents of the rule exception if append then
if next(v['exceptions']) ~= nil then
-- This validation only applies if append=false. append=true validation is handled below -- For append rules, all you need is the condition
if append == false then for j, field in ipairs({'condition'}) do
if (v[field] == nil) then
for _, eitem in ipairs(v['exceptions']) do return false, build_error_with_context(v['context'], "Rule must have property "..field)
if eitem['name'] == nil then
return false, build_error_with_context(v['context'], "Rule exception item must have name property"), warnings
end
if eitem['fields'] == nil then
return false, build_error_with_context(v['context'], "Rule exception item "..eitem['name']..": must have fields property with a list of fields"), warnings
end
if eitem['values'] == nil then
-- An empty values array is okay
eitem['values'] = {}
end
-- Different handling if the fields property is a single item vs a list
local valid, err
if type(eitem['fields']) == "table" then
valid, err = validate_exception_item_multi_fields(eitem, v['context'])
else
valid, err = validate_exception_item_single_field(eitem, v['context'])
end
if valid == false then
return valid, err
end
end end
end end
end
if append then
if state.rules_by_name[v['rule']] == nil then if state.rules_by_name[v['rule']] == nil then
if state.skipped_rules_by_name[v['rule']] == nil then if state.skipped_rules_by_name[v['rule']] == nil then
return false, build_error_with_context(v['context'], "Rule " ..v['rule'].. " has 'append' key but no rule by that name already exists"), warnings return false, build_error_with_context(v['context'], "Rule " ..v['rule'].. " has 'append' key but no rule by that name already exists")
end end
else else
state.rules_by_name[v['rule']]['condition'] = state.rules_by_name[v['rule']]['condition'] .. " " .. v['condition']
if next(v['exceptions']) ~= nil then
for _, eitem in ipairs(v['exceptions']) do
local name = eitem['name']
local fields = eitem['fields']
local comps = eitem['comps']
if name == nil then
return false, build_error_with_context(v['context'], "Rule exception item must have name property"), warnings
end
-- You can't append exception fields or comps to a rule
if fields ~= nil then
return false, build_error_with_context(v['context'], "Can not append exception fields to existing rule, only values"), warnings
end
if comps ~= nil then
return false, build_error_with_context(v['context'], "Can not append exception comps to existing rule, only values"), warnings
end
-- You can append values. They are added to the
-- corresponding name, if it exists. If no
-- exception with that name exists, add a
-- warning.
if eitem['values'] ~= nil then
local found=false
for _, reitem in ipairs(state.rules_by_name[v['rule']]['exceptions']) do
if reitem['name'] == eitem['name'] then
found=true
for _, values in ipairs(eitem['values']) do
reitem['values'][#reitem['values'] + 1] = values
end
end
end
if found == false then
warnings[#warnings + 1] = "Rule "..v['rule'].." with append=true: no set of fields matching name "..eitem['name']
end
end
end
end
if v['condition'] ~= nil then
state.rules_by_name[v['rule']]['condition'] = state.rules_by_name[v['rule']]['condition'] .. " " .. v['condition']
end
-- Add the current object to the context of the base rule -- Add the current object to the context of the base rule
state.rules_by_name[v['rule']]['context'] = state.rules_by_name[v['rule']]['context'].."\n"..v['context'] state.rules_by_name[v['rule']]['context'] = state.rules_by_name[v['rule']]['context'].."\n"..v['context']
@@ -634,7 +417,7 @@ function load_rules_doc(rules_mgr, doc, load_state)
for j, field in ipairs({'condition', 'output', 'desc', 'priority'}) do for j, field in ipairs({'condition', 'output', 'desc', 'priority'}) do
if (v[field] == nil) then if (v[field] == nil) then
return false, build_error_with_context(v['context'], "Rule must have property "..field), warnings return false, build_error_with_context(v['context'], "Rule must have property "..field)
end end
end end
@@ -663,116 +446,16 @@ function load_rules_doc(rules_mgr, doc, load_state)
end end
end end
else else
-- Remove the context from the table, so the table is exactly what was parsed
local context = v['context'] local context = v['context']
v['context'] = nil
arr = build_error_with_context(context, "Unknown top level object: "..table.tostring(v)) return false, build_error_with_context(context, "Unknown rule object: "..table.tostring(v))
warnings[#warnings + 1] = arr[1]
end end
end end
return true, {}, warnings return true, ""
end end
-- cond and not ((proc.name=apk and fd.directory=/usr/lib/alpine) or (proc.name=npm and fd.directory=/usr/node/bin) or ...)
-- Populates exfields with all fields used
function build_exception_condition_string_multi_fields(eitem, exfields)
local fields = eitem['fields']
local comps = eitem['comps']
local icond = "("
for i, values in ipairs(eitem['values']) do
if #fields ~= #values then
return nil, "Exception item "..eitem['name']..": fields and values lists must have equal length"
end
if icond ~= "(" then
icond=icond.." or "
end
icond=icond.."("
for k=1,#fields do
if k > 1 then
icond=icond.." and "
end
local ival = values[k]
local istr = ""
-- If ival is a table, express it as (titem1, titem2, etc)
if type(ival) == "table" then
istr = "("
for _, item in ipairs(ival) do
if istr ~= "(" then
istr = istr..", "
end
istr = istr..quote_item(item)
end
istr = istr..")"
else
-- If the corresponding operator is one that works on lists, possibly add surrounding parentheses.
if defined_list_comp_operators[comps[k]] then
istr = paren_item(ival)
else
-- Quote the value if not already quoted
istr = quote_item(ival)
end
end
icond = icond..fields[k].." "..comps[k].." "..istr
exfields[fields[k]] = true
end
icond=icond..")"
end
icond = icond..")"
-- Don't return a trivially empty condition string
if icond == "()" then
icond = ""
end
return icond, nil
end
function build_exception_condition_string_single_field(eitem, exfields)
local icond = ""
for i, value in ipairs(eitem['values']) do
if type(value) ~= "string" then
return "", "Expected values array for item "..eitem['name'].." to contain a list of strings"
end
if icond == "" then
icond = "("..eitem['fields'].." "..eitem['comps'].." ("
else
icond = icond..", "
end
exfields[eitem['fields']] = true
icond = icond..quote_item(value)
end
if icond ~= "" then
icond = icond.."))"
end
return icond, nil
end
-- Returns:
-- - Load Result: bool
-- - required engine version. will be nil when load result is false
-- - List of Errors
-- - List of Warnings
function load_rules(sinsp_lua_parser, function load_rules(sinsp_lua_parser,
json_lua_parser, json_lua_parser,
rules_content, rules_content,
@@ -783,8 +466,6 @@ function load_rules(sinsp_lua_parser,
replace_container_info, replace_container_info,
min_priority) min_priority)
local warnings = {}
local load_state = {lines={}, indices={}, cur_item_idx=0, min_priority=min_priority, required_engine_version=0} local load_state = {lines={}, indices={}, cur_item_idx=0, min_priority=min_priority, required_engine_version=0}
load_state.lines, load_state.indices = split_lines(rules_content) load_state.lines, load_state.indices = split_lines(rules_content)
@@ -806,42 +487,36 @@ function load_rules(sinsp_lua_parser,
row = tonumber(row) row = tonumber(row)
col = tonumber(col) col = tonumber(col)
return false, nil, build_error(load_state.lines, row, 3, docs), warnings return false, build_error(load_state.lines, row, 3, docs)
end end
if docs == nil then if docs == nil then
-- An empty rules file is acceptable -- An empty rules file is acceptable
return true, load_state.required_engine_version, {}, warnings return true, load_state.required_engine_version
end end
if type(docs) ~= "table" then if type(docs) ~= "table" then
return false, nil, build_error(load_state.lines, 1, 1, "Rules content is not yaml"), warnings return false, build_error(load_state.lines, 1, 1, "Rules content is not yaml")
end end
for docidx, doc in ipairs(docs) do for docidx, doc in ipairs(docs) do
if type(doc) ~= "table" then if type(doc) ~= "table" then
return false, nil, build_error(load_state.lines, 1, 1, "Rules content is not yaml"), warnings return false, build_error(load_state.lines, 1, 1, "Rules content is not yaml")
end end
-- Look for non-numeric indices--implies that document is not array -- Look for non-numeric indices--implies that document is not array
-- of objects. -- of objects.
for key, val in pairs(doc) do for key, val in pairs(doc) do
if type(key) ~= "number" then if type(key) ~= "number" then
return false, nil, build_error(load_state.lines, 1, 1, "Rules content is not yaml array of objects"), warnings return false, build_error(load_state.lines, 1, 1, "Rules content is not yaml array of objects")
end end
end end
res, errors, doc_warnings = load_rules_doc(rules_mgr, doc, load_state) res, errstr = load_rules_doc(rules_mgr, doc, load_state)
if (doc_warnings ~= nil) then
for idx, warning in pairs(doc_warnings) do
table.insert(warnings, warning)
end
end
if not res then if not res then
return res, nil, errors, warnings return res, errstr
end end
end end
@@ -863,9 +538,8 @@ function load_rules(sinsp_lua_parser,
-- the items and expand any references to the items in the list -- the items and expand any references to the items in the list
for i, item in ipairs(v['items']) do for i, item in ipairs(v['items']) do
if (state.lists[item] == nil) then if (state.lists[item] == nil) then
items[#items+1] = quote_item(item) items[#items+1] = item
else else
state.lists[item].used = true
for i, exp_item in ipairs(state.lists[item].items) do for i, exp_item in ipairs(state.lists[item].items) do
items[#items+1] = exp_item items[#items+1] = exp_item
end end
@@ -882,7 +556,7 @@ function load_rules(sinsp_lua_parser,
local status, ast = compiler.compile_macro(v['condition'], state.macros, state.lists) local status, ast = compiler.compile_macro(v['condition'], state.macros, state.lists)
if status == false then if status == false then
return false, nil, build_error_with_context(v['context'], ast), warnings return false, build_error_with_context(v['context'], ast)
end end
if v['source'] == "syscall" then if v['source'] == "syscall" then
@@ -898,48 +572,16 @@ function load_rules(sinsp_lua_parser,
local v = state.rules_by_name[name] local v = state.rules_by_name[name]
local econd = ""
local exfields = {}
-- Turn exceptions into condition strings and add them to each
-- rule's condition
for _, eitem in ipairs(v['exceptions']) do
local icond, err
if type(eitem['fields']) == "table" then
icond, err = build_exception_condition_string_multi_fields(eitem, exfields)
else
icond, err = build_exception_condition_string_single_field(eitem, exfields)
end
if err ~= nil then
return false, nil, build_error_with_context(v['context'], err), warnings
end
if icond ~= "" then
econd = econd.." and not "..icond
end
end
state.rules_by_name[name]['exception_fields'] = exfields
if econd ~= "" then
state.rules_by_name[name]['compile_condition'] = "("..state.rules_by_name[name]['condition']..") "..econd
else
state.rules_by_name[name]['compile_condition'] = state.rules_by_name[name]['condition']
end
warn_evttypes = true warn_evttypes = true
if v['warn_evttypes'] ~= nil then if v['warn_evttypes'] ~= nil then
warn_evttypes = v['warn_evttypes'] warn_evttypes = v['warn_evttypes']
end end
local status, filter_ast, filters = compiler.compile_filter(v['rule'], v['compile_condition'], local status, filter_ast, filters = compiler.compile_filter(v['rule'], v['condition'],
state.macros, state.lists) state.macros, state.lists)
if status == false then if status == false then
return false, nil, build_error_with_context(v['context'], filter_ast), warnings return false, build_error_with_context(v['context'], filter_ast)
end end
local evtttypes = {} local evtttypes = {}
@@ -950,22 +592,52 @@ function load_rules(sinsp_lua_parser,
sinsp_rule_utils.check_for_ignored_syscalls_events(filter_ast, 'rule', v['rule']) sinsp_rule_utils.check_for_ignored_syscalls_events(filter_ast, 'rule', v['rule'])
end end
evttypes, syscallnums = sinsp_rule_utils.get_evttypes_syscalls(name, filter_ast, v['compile_condition'], warn_evttypes, verbose) evttypes, syscallnums = sinsp_rule_utils.get_evttypes_syscalls(name, filter_ast, v['condition'], warn_evttypes, verbose)
end end
-- If a filter in the rule doesn't exist, either skip the rule -- If a filter in the rule doesn't exist, either skip the rule
-- or raise an error, depending on the value of -- or raise an error, depending on the value of
-- skip-if-unknown-filter. -- skip-if-unknown-filter.
for filter, _ in pairs(filters) do for filter, _ in pairs(filters) do
if not is_defined_filter(filter) then found = false
msg = "rule \""..v['rule'].."\": contains unknown filter "..filter
warnings[#warnings + 1] = msg
if not v['skip-if-unknown-filter'] then if defined_noarg_filters[filter] ~= nil then
return false, nil, build_error_with_context(v['context'], msg), warnings found = true
else else
print("Skipping "..msg) bracket_idx = string.find(filter, "[", 1, true)
if bracket_idx ~= nil then
subfilter = string.sub(filter, 1, bracket_idx-1)
if defined_arg_filters[subfilter] ~= nil then
found = true
end
end
if not found then
dot_idx = string.find(filter, ".", 1, true)
while dot_idx ~= nil do
subfilter = string.sub(filter, 1, dot_idx-1)
if defined_arg_filters[subfilter] ~= nil then
found = true
break
end
dot_idx = string.find(filter, ".", dot_idx+1, true)
end
end
end
if not found then
if v['skip-if-unknown-filter'] then
if verbose then
print("Skipping rule \""..v['rule'].."\" that contains unknown filter "..filter)
end
goto next_rule goto next_rule
else
error("Rule \""..v['rule'].."\" contains unknown filter "..filter)
end end
end end
end end
@@ -1044,37 +716,33 @@ function load_rules(sinsp_lua_parser,
-- Ensure that the output field is properly formatted by -- Ensure that the output field is properly formatted by
-- creating a formatter from it. Any error will be thrown -- creating a formatter from it. Any error will be thrown
-- up to the top level. -- up to the top level.
local err, formatter = formats.formatter(v['source'], v['output']) formatter = formats.formatter(v['source'], v['output'])
if err == nil then formats.free_formatter(v['source'], formatter)
formats.free_formatter(v['source'], formatter)
else
return false, nil, build_error_with_context(v['context'], err), warnings
end
else else
return false, nil, build_error_with_context(v['context'], "Unexpected type in load_rule: "..filter_ast.type), warnings return false, build_error_with_context(v['context'], "Unexpected type in load_rule: "..filter_ast.type)
end end
::next_rule:: ::next_rule::
end end
-- Print info on any dangling lists or macros that were not used anywhere if verbose then
for name, macro in pairs(state.macros) do -- Print info on any dangling lists or macros that were not used anywhere
if macro.used == false then for name, macro in pairs(state.macros) do
msg = "macro "..name.." not refered to by any rule/macro" if macro.used == false then
warnings[#warnings + 1] = msg print("Warning: macro "..name.." not refered to by any rule/macro")
end
end end
end
for name, list in pairs(state.lists) do for name, list in pairs(state.lists) do
if list.used == false then if list.used == false then
msg = "list "..name.." not refered to by any rule/macro/list" print("Warning: list "..name.." not refered to by any rule/macro/list")
warnings[#warnings + 1] = msg end
end end
end end
io.flush() io.flush()
return true, load_state.required_engine_version, {}, warnings return true, load_state.required_engine_version
end end
local rule_fmt = "%-50s %s" local rule_fmt = "%-50s %s"
@@ -1151,14 +819,7 @@ function on_event(rule_id)
-- Prefix output with '*' so formatting is permissive -- Prefix output with '*' so formatting is permissive
output = "*"..rule.output output = "*"..rule.output
-- Also return all fields from all exceptions return rule.rule, rule.priority_num, output
combined_rule = state.rules_by_name[rule.rule]
if combined_rule == nil then
error ("rule_loader.on_event(): could not find rule by name: ", rule.rule)
end
return rule.rule, rule.priority_num, output, combined_rule.exception_fields
end end
function print_stats() function print_stats()

View File

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
#include <sstream>
#include "rules.h" #include "rules.h"
#include "logger.h"
extern "C" { extern "C" {
#include "lua.h" #include "lua.h"
@@ -27,14 +26,15 @@ extern "C" {
#include "falco_engine.h" #include "falco_engine.h"
#include "banned.h" // This raises a compilation error when certain functions are used #include "banned.h" // This raises a compilation error when certain functions are used
const static struct luaL_Reg ll_falco_rules[] = const static struct luaL_reg ll_falco_rules [] =
{ {
{"clear_filters", &falco_rules::clear_filters}, {"clear_filters", &falco_rules::clear_filters},
{"add_filter", &falco_rules::add_filter}, {"add_filter", &falco_rules::add_filter},
{"add_k8s_audit_filter", &falco_rules::add_k8s_audit_filter}, {"add_k8s_audit_filter", &falco_rules::add_k8s_audit_filter},
{"enable_rule", &falco_rules::enable_rule}, {"enable_rule", &falco_rules::enable_rule},
{"engine_version", &falco_rules::engine_version}, {"engine_version", &falco_rules::engine_version},
{NULL, NULL}}; {NULL,NULL}
};
falco_rules::falco_rules(sinsp* inspector, falco_rules::falco_rules(sinsp* inspector,
falco_engine *engine, falco_engine *engine,
@@ -219,31 +219,6 @@ int falco_rules::engine_version(lua_State *ls)
return 1; return 1;
} }
static std::list<std::string> get_lua_table_values(lua_State *ls, int idx)
{
std::list<std::string> ret;
if (lua_isnil(ls, idx)) {
return ret;
}
lua_pushnil(ls); /* first key */
while (lua_next(ls, idx-1) != 0) {
// key is at index -2, value is at index
// -1. We want the values.
if (! lua_isstring(ls, -1)) {
std::string err = "Non-string value in table of strings";
throw falco_exception(err);
}
ret.push_back(string(lua_tostring(ls, -1)));
// Remove value, keep key for next iteration
lua_pop(ls, 1);
}
return ret;
}
void falco_rules::load_rules(const string &rules_content, void falco_rules::load_rules(const string &rules_content,
bool verbose, bool all_events, bool verbose, bool all_events,
string &extra, bool replace_container_info, string &extra, bool replace_container_info,
@@ -449,7 +424,7 @@ void falco_rules::load_rules(const string &rules_content,
lua_pushstring(m_ls, extra.c_str()); lua_pushstring(m_ls, extra.c_str());
lua_pushboolean(m_ls, (replace_container_info ? 1 : 0)); lua_pushboolean(m_ls, (replace_container_info ? 1 : 0));
lua_pushnumber(m_ls, min_priority); lua_pushnumber(m_ls, min_priority);
if(lua_pcall(m_ls, 9, 4, 0) != 0) if(lua_pcall(m_ls, 9, 2, 0) != 0)
{ {
const char* lerr = lua_tostring(m_ls, -1); const char* lerr = lua_tostring(m_ls, -1);
@@ -458,49 +433,20 @@ void falco_rules::load_rules(const string &rules_content,
throw falco_exception(err); throw falco_exception(err);
} }
// Returns: // Either returns (true, required_engine_version), or (false, error string)
// Load result: bool bool successful = lua_toboolean(m_ls, -2);
// required engine version: will be nil when load result is false
// array of errors
// array of warnings
bool successful = lua_toboolean(m_ls, -4);
required_engine_version = lua_tonumber(m_ls, -3);
std::list<std::string> errors = get_lua_table_values(m_ls, -2);
std::list<std::string> warnings = get_lua_table_values(m_ls, -1);
// Concatenate errors/warnings if(successful)
std::ostringstream os;
if (errors.size() > 0)
{ {
os << errors.size() << " errors:" << std::endl; required_engine_version = lua_tonumber(m_ls, -1);
for(auto err : errors)
{
os << err << std::endl;
}
} }
else
if (warnings.size() > 0)
{ {
os << warnings.size() << " warnings:" << std::endl; std::string err = lua_tostring(m_ls, -1);
for(auto warn : warnings) throw falco_exception(err);
{
os << warn << std::endl;
}
} }
if(!successful) lua_pop(m_ls, 2);
{
throw falco_exception(os.str());
}
if (verbose && os.str() != "") {
// We don't really have a logging callback
// from the falco engine, but this would be a
// good place to use it.
fprintf(stderr, "When reading rules content: %s", os.str().c_str());
}
lua_pop(m_ls, 4);
} else { } else {
throw falco_exception("No function " + m_lua_load_rules + " found in lua rule module"); throw falco_exception("No function " + m_lua_load_rules + " found in lua rule module");

View File

@@ -1,5 +1,5 @@
# #
# Copyright (C) 2020 The Falco Authors. # Copyright (C) 2019 The Falco Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at # the License. You may obtain a copy of the License at
@@ -11,10 +11,39 @@
# specific language governing permissions and limitations under the License. # specific language governing permissions and limitations under the License.
# #
configure_file(config_falco.h.in config_falco.h) configure_file("${SYSDIG_SOURCE_DIR}/userspace/sysdig/config_sysdig.h.in" config_sysdig.h)
set( if(NOT MINIMAL_BUILD)
FALCO_SOURCES add_custom_command(
OUTPUT
${CMAKE_CURRENT_BINARY_DIR}/version.grpc.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/version.grpc.pb.h
${CMAKE_CURRENT_BINARY_DIR}/version.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/version.pb.h
${CMAKE_CURRENT_BINARY_DIR}/outputs.grpc.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/outputs.grpc.pb.h
${CMAKE_CURRENT_BINARY_DIR}/outputs.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/outputs.pb.h
${CMAKE_CURRENT_BINARY_DIR}/schema.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/schema.pb.h
COMMENT "Generate gRPC API"
# Falco gRPC Version API
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/version.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --cpp_out=. ${CMAKE_CURRENT_SOURCE_DIR}/version.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --grpc_out=. --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN}
${CMAKE_CURRENT_SOURCE_DIR}/version.proto
# Falco gRPC Outputs API
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/outputs.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --cpp_out=. ${CMAKE_CURRENT_SOURCE_DIR}/outputs.proto
${CMAKE_CURRENT_SOURCE_DIR}/schema.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --grpc_out=. --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN}
${CMAKE_CURRENT_SOURCE_DIR}/outputs.proto
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif()
if(MINIMAL_BUILD)
add_executable(
falco
configuration.cpp configuration.cpp
logger.cpp logger.cpp
falco_outputs.cpp falco_outputs.cpp
@@ -25,78 +54,84 @@ set(
event_drops.cpp event_drops.cpp
statsfilewriter.cpp statsfilewriter.cpp
falco.cpp falco.cpp
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp/fields_info.cpp" "${SYSDIG_SOURCE_DIR}/userspace/sysdig/fields_info.cpp")
) else()
add_executable(
set( falco
FALCO_INCLUDE_DIRECTORIES configuration.cpp
"${LIBHAWK_INCLUDE_DIRECTORY}" logger.cpp
"${PROJECT_SOURCE_DIR}/userspace/engine" falco_outputs.cpp
"${PROJECT_BINARY_DIR}/userspace/falco" outputs_file.cpp
"${PROJECT_BINARY_DIR}/driver/src"
"${STRING_VIEW_LITE_INCLUDE}"
"${YAMLCPP_INCLUDE_DIR}"
"${CMAKE_CURRENT_BINARY_DIR}"
"${DRAIOS_DEPENDENCIES_DIR}/yaml-${DRAIOS_YAML_VERSION}/target/include"
)
set(
FALCO_DEPENDENCIES
string-view-lite
libyaml
b64
luajit
lpeg
lyaml
)
set(
FALCO_LIBRARIES
falco_engine
libhawk
sinsp
"${LIBYAML_LIB}"
"${YAMLCPP_LIB}"
)
if(USE_BUNDLED_DEPS)
list(APPEND FALCO_DEPENDENCIES yamlcpp)
endif()
if(NOT MINIMAL_BUILD)
list(
APPEND FALCO_SOURCES
outputs_grpc.cpp outputs_grpc.cpp
outputs_http.cpp outputs_http.cpp
outputs_program.cpp
outputs_stdout.cpp
outputs_syslog.cpp
event_drops.cpp
statsfilewriter.cpp
falco.cpp
"${SYSDIG_SOURCE_DIR}/userspace/sysdig/fields_info.cpp"
webserver.cpp webserver.cpp
grpc_context.cpp grpc_context.cpp
grpc_server_impl.cpp grpc_server_impl.cpp
grpc_request_context.cpp grpc_request_context.cpp
grpc_server.cpp grpc_server.cpp
grpc_context.cpp
grpc_server_impl.cpp
${CMAKE_CURRENT_BINARY_DIR}/version.grpc.pb.cc ${CMAKE_CURRENT_BINARY_DIR}/version.grpc.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/version.pb.cc ${CMAKE_CURRENT_BINARY_DIR}/version.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/outputs.grpc.pb.cc ${CMAKE_CURRENT_BINARY_DIR}/outputs.grpc.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/outputs.pb.cc ${CMAKE_CURRENT_BINARY_DIR}/outputs.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/schema.pb.cc ${CMAKE_CURRENT_BINARY_DIR}/schema.pb.cc)
)
list( add_dependencies(falco civetweb)
APPEND FALCO_INCLUDE_DIRECTORIES endif()
"${CIVETWEB_INCLUDE_DIR}"
"${OPENSSL_INCLUDE_DIR}"
"${GRPC_INCLUDE}"
"${GRPCPP_INCLUDE}"
"${PROTOBUF_INCLUDE}"
)
list(APPEND FALCO_DEPENDENCIES civetweb) add_dependencies(falco string-view-lite)
list( if(USE_BUNDLED_DEPS)
APPEND FALCO_LIBRARIES add_dependencies(falco yamlcpp)
endif()
if(MINIMAL_BUILD)
target_include_directories(
falco
PUBLIC
"${SYSDIG_SOURCE_DIR}/userspace/sysdig"
"${PROJECT_SOURCE_DIR}/userspace/engine"
"${PROJECT_BINARY_DIR}/userspace/falco"
"${PROJECT_BINARY_DIR}/driver/src"
"${STRING_VIEW_LITE_INCLUDE}"
"${YAMLCPP_INCLUDE_DIR}"
"${CMAKE_CURRENT_BINARY_DIR}"
"${DRAIOS_DEPENDENCIES_DIR}/yaml-${DRAIOS_YAML_VERSION}/target/include")
target_link_libraries(
falco
falco_engine
sinsp
"${LIBYAML_LIB}"
"${YAMLCPP_LIB}")
else()
target_include_directories(
falco
PUBLIC
"${SYSDIG_SOURCE_DIR}/userspace/sysdig"
"${PROJECT_SOURCE_DIR}/userspace/engine"
"${PROJECT_BINARY_DIR}/userspace/falco"
"${PROJECT_BINARY_DIR}/driver/src"
"${STRING_VIEW_LITE_INCLUDE}"
"${YAMLCPP_INCLUDE_DIR}"
"${CIVETWEB_INCLUDE_DIR}"
"${OPENSSL_INCLUDE_DIR}"
"${GRPC_INCLUDE}"
"${GRPCPP_INCLUDE}"
"${PROTOBUF_INCLUDE}"
"${CMAKE_CURRENT_BINARY_DIR}"
"${DRAIOS_DEPENDENCIES_DIR}/yaml-${DRAIOS_YAML_VERSION}/target/include")
target_link_libraries(
falco
falco_engine
sinsp
"${GPR_LIB}" "${GPR_LIB}"
"${GRPC_LIB}" "${GRPC_LIB}"
"${GRPCPP_LIB}" "${GRPCPP_LIB}"
@@ -105,68 +140,19 @@ if(NOT MINIMAL_BUILD)
"${OPENSSL_LIBRARY_CRYPTO}" "${OPENSSL_LIBRARY_CRYPTO}"
"${LIBYAML_LIB}" "${LIBYAML_LIB}"
"${YAMLCPP_LIB}" "${YAMLCPP_LIB}"
"${CIVETWEB_LIB}" "${CIVETWEB_LIB}")
)
endif() endif()
add_executable( configure_file(config_falco.h.in config_falco.h)
falco
${FALCO_SOURCES}
)
add_dependencies(falco ${FALCO_DEPENDENCIES})
target_link_libraries(
falco
${FALCO_LIBRARIES}
)
target_include_directories(
falco
PUBLIC
${FALCO_INCLUDE_DIRECTORIES}
)
if(NOT MINIMAL_BUILD) if(NOT MINIMAL_BUILD)
# todo(fntlnz): restore this before merge, after the command for compare is refactored # add_custom_command(
# to work with the new way the engine is passed around # TARGET falco
# add_custom_command( # COMMAND bash ${CMAKE_CURRENT_SOURCE_DIR}/verify_engine_fields.sh ${CMAKE_SOURCE_DIR}
# TARGET falco # WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
# COMMAND bash ${CMAKE_CURRENT_SOURCE_DIR}/verify_engine_fields.sh ${CMAKE_SOURCE_DIR} # COMMENT "Comparing engine fields checksum in falco_engine.h to actual fields")
# WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
# COMMENT "Comparing engine fields checksum in falco_engine.h to actual fields"
# )
else() else()
message(STATUS "Skipping engine fields checksum when building the minimal Falco.") MESSAGE(STATUS "Skipping engine fields checksum when building the minimal Falco.")
endif()
if(NOT MINIMAL_BUILD)
add_custom_command(
OUTPUT
${CMAKE_CURRENT_BINARY_DIR}/version.grpc.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/version.grpc.pb.h
${CMAKE_CURRENT_BINARY_DIR}/version.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/version.pb.h
${CMAKE_CURRENT_BINARY_DIR}/outputs.grpc.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/outputs.grpc.pb.h
${CMAKE_CURRENT_BINARY_DIR}/outputs.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/outputs.pb.h
${CMAKE_CURRENT_BINARY_DIR}/schema.pb.cc
${CMAKE_CURRENT_BINARY_DIR}/schema.pb.h
COMMENT "Generate gRPC API"
# Falco gRPC Version API
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/version.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --cpp_out=. ${CMAKE_CURRENT_SOURCE_DIR}/version.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --grpc_out=. --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN}
${CMAKE_CURRENT_SOURCE_DIR}/version.proto
# Falco gRPC Outputs API
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/outputs.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --cpp_out=. ${CMAKE_CURRENT_SOURCE_DIR}/outputs.proto
${CMAKE_CURRENT_SOURCE_DIR}/schema.proto
COMMAND ${PROTOC} -I ${CMAKE_CURRENT_SOURCE_DIR} --grpc_out=. --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN}
${CMAKE_CURRENT_SOURCE_DIR}/outputs.proto
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
endif() endif()
# strip the Falco binary when releasing using musl # strip the Falco binary when releasing using musl
@@ -175,8 +161,7 @@ if(MUSL_OPTIMIZED_BUILD AND CMAKE_BUILD_TYPE STREQUAL "release")
TARGET falco TARGET falco
POST_BUILD POST_BUILD
COMMAND ${CMAKE_STRIP} --strip-unneeded falco COMMAND ${CMAKE_STRIP} --strip-unneeded falco
COMMENT "Strip the Falco binary when releasing the musl build" COMMENT "Strip the Falco binary when releasing the musl build")
)
endif() endif()
install(TARGETS falco DESTINATION ${FALCO_BIN_DIR}) install(TARGETS falco DESTINATION ${FALCO_BIN_DIR})

View File

@@ -47,6 +47,16 @@ falco_configuration::~falco_configuration()
} }
} }
// If we don't have a configuration file, we just use stdout output and all other defaults
void falco_configuration::init(list<string> &cmdline_options)
{
init_cmdline_options(cmdline_options);
falco::outputs::config stdout_output;
stdout_output.name = "stdout";
m_outputs.push_back(stdout_output);
}
void falco_configuration::init(string conf_filename, list<string> &cmdline_options) void falco_configuration::init(string conf_filename, list<string> &cmdline_options)
{ {
string m_config_file = conf_filename; string m_config_file = conf_filename;
@@ -58,16 +68,6 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
m_config->get_sequence<list<string>>(rules_files, string("rules_file")); m_config->get_sequence<list<string>>(rules_files, string("rules_file"));
for(auto &file : rules_files)
{
// Here, we only include files that exist
struct stat buffer;
if(stat(file.c_str(), &buffer) == 0)
{
read_rules_file_directory(file, m_rules_filenames);
}
}
m_json_output = m_config->get_scalar<bool>("json_output", false); m_json_output = m_config->get_scalar<bool>("json_output", false);
m_json_include_output_property = m_config->get_scalar<bool>("json_include_output_property", true); m_json_include_output_property = m_config->get_scalar<bool>("json_include_output_property", true);
@@ -137,11 +137,6 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
m_outputs.push_back(http_output); m_outputs.push_back(http_output);
} }
// extension related configuration
m_config->get_sequence<list<string>>(m_extensions_filenames , string("extensions"));
m_rules_provider = m_config->get_scalar<string>("rules_provider", "internal");
// gRPC related configuration
m_grpc_enabled = m_config->get_scalar<bool>("grpc", "enabled", false); m_grpc_enabled = m_config->get_scalar<bool>("grpc", "enabled", false);
m_grpc_bind_address = m_config->get_scalar<string>("grpc", "bind_address", "0.0.0.0:5060"); m_grpc_bind_address = m_config->get_scalar<string>("grpc", "bind_address", "0.0.0.0:5060");
m_grpc_threadiness = m_config->get_scalar<uint32_t>("grpc", "threadiness", 0); m_grpc_threadiness = m_config->get_scalar<uint32_t>("grpc", "threadiness", 0);
@@ -171,8 +166,6 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
falco_logger::set_level(m_log_level); falco_logger::set_level(m_log_level);
m_output_timeout = m_config->get_scalar<uint32_t>("output_timeout", 2000);
m_notifications_rate = m_config->get_scalar<uint32_t>("outputs", "rate", 1); m_notifications_rate = m_config->get_scalar<uint32_t>("outputs", "rate", 1);
m_notifications_max_burst = m_config->get_scalar<uint32_t>("outputs", "max_burst", 1000); m_notifications_max_burst = m_config->get_scalar<uint32_t>("outputs", "max_burst", 1000);
@@ -239,69 +232,6 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
m_syscall_evt_simulate_drops = m_config->get_scalar<bool>("syscall_event_drops", "simulate_drops", false); m_syscall_evt_simulate_drops = m_config->get_scalar<bool>("syscall_event_drops", "simulate_drops", false);
} }
void falco_configuration::read_rules_file_directory(const string &path, list<string> &rules_filenames)
{
struct stat st;
int rc = stat(path.c_str(), &st);
if(rc != 0)
{
std::cerr << "Could not get info on rules file " << path << ": " << strerror(errno) << std::endl;
exit(-1);
}
if(st.st_mode & S_IFDIR)
{
// It's a directory. Read the contents, sort
// alphabetically, and add every path to
// rules_filenames
vector<string> dir_filenames;
DIR *dir = opendir(path.c_str());
if(!dir)
{
std::cerr << "Could not get read contents of directory " << path << ": " << strerror(errno) << std::endl;
exit(-1);
}
for(struct dirent *ent = readdir(dir); ent; ent = readdir(dir))
{
string efile = path + "/" + ent->d_name;
rc = stat(efile.c_str(), &st);
if(rc != 0)
{
std::cerr << "Could not get info on rules file " << efile << ": " << strerror(errno) << std::endl;
exit(-1);
}
if(st.st_mode & S_IFREG)
{
dir_filenames.push_back(efile);
}
}
closedir(dir);
std::sort(dir_filenames.begin(),
dir_filenames.end());
for(string &ent : dir_filenames)
{
rules_filenames.push_back(ent);
}
}
else
{
// Assume it's a file and just add to
// rules_filenames. If it can't be opened/etc that
// will be reported later..
rules_filenames.push_back(path);
}
}
static bool split(const string &str, char delim, pair<string, string> &parts) static bool split(const string &str, char delim, pair<string, string> &parts)
{ {

View File

@@ -190,9 +190,6 @@ public:
void init(std::string conf_filename, std::list<std::string>& cmdline_options); void init(std::string conf_filename, std::list<std::string>& cmdline_options);
void init(std::list<std::string>& cmdline_options); void init(std::list<std::string>& cmdline_options);
static void read_rules_file_directory(const string& path, list<string>& rules_filenames);
std::list<std::string> m_rules_filenames;
bool m_json_output; bool m_json_output;
bool m_json_include_output_property; bool m_json_include_output_property;
std::string m_log_level; std::string m_log_level;
@@ -204,7 +201,6 @@ public:
bool m_buffered_outputs; bool m_buffered_outputs;
bool m_time_format_iso_8601; bool m_time_format_iso_8601;
uint32_t m_output_timeout;
bool m_grpc_enabled; bool m_grpc_enabled;
uint32_t m_grpc_threadiness; uint32_t m_grpc_threadiness;
@@ -222,9 +218,6 @@ public:
double m_syscall_evt_drop_rate; double m_syscall_evt_drop_rate;
double m_syscall_evt_drop_max_burst; double m_syscall_evt_drop_max_burst;
std::list<std::string> m_extensions_filenames;
std::string m_rules_provider;
// Only used for testing // Only used for testing
bool m_syscall_evt_simulate_drops; bool m_syscall_evt_simulate_drops;

View File

@@ -31,15 +31,15 @@ limitations under the License.
#include <unistd.h> #include <unistd.h>
#include <getopt.h> #include <getopt.h>
#include <condition_variable> #include <condition_variable>
#include <tuple>
#include <sinsp.h> #include <sinsp.h>
#include "logger.h" #include "logger.h"
#include "utils.h" #include "utils.h"
#include "fields_info.h" #include "chisel.h"
#include "sysdig.h"
#include "lifecycle.h"
#include "library.h"
#include "event_drops.h" #include "event_drops.h"
#include "configuration.h" #include "configuration.h"
#include "falco_engine.h" #include "falco_engine.h"
@@ -58,16 +58,9 @@ bool g_reopen_outputs = false;
bool g_restart = false; bool g_restart = false;
bool g_daemonized = false; bool g_daemonized = false;
// g_engine is the current loaded Falco engine std::mutex engine_ready;
std::atomic<falco_engine *> g_engine; std::condition_variable engine_cv;
bool is_engine_ready = false;
// g_engine_transaction is the Falco engine that is
// being modified under a transaction started by a libhawk plugin
// This engine might become the current g_engine if the transaction is committed
std::atomic<falco_engine *> g_engine_transaction;
// g_engine_blueprint is the engine we use as a template to create new engines
falco_engine *g_engine_blueprint;
// //
// Helper functions // Helper functions
@@ -92,101 +85,99 @@ static void restart_falco(int signal)
// //
static void usage() static void usage()
{ {
printf( printf(
"Falco version: " FALCO_VERSION "\n" "Falco version: " FALCO_VERSION "\n"
"Usage: falco [options]\n\n" "Usage: falco [options]\n\n"
"Options:\n" "Options:\n"
" -h, --help Print this page\n" " -h, --help Print this page\n"
" -c Configuration file (default " FALCO_SOURCE_CONF_FILE ", " FALCO_INSTALL_CONF_FILE ")\n" " -c Configuration file (default " FALCO_SOURCE_CONF_FILE ", " FALCO_INSTALL_CONF_FILE ")\n"
" -A Monitor all events, including those with EF_DROP_SIMPLE_CONS flag.\n" " -A Monitor all events, including those with EF_DROP_SIMPLE_CONS flag.\n"
" --alternate-lua-dir <path> Specify an alternate path for loading Falco lua files\n" " --alternate-lua-dir <path> Specify an alternate path for loading Falco lua files\n"
" -b, --print-base64 Print data buffers in base64.\n" " -b, --print-base64 Print data buffers in base64.\n"
" This is useful for encoding binary data that needs to be used over media designed to.\n" " This is useful for encoding binary data that needs to be used over media designed to.\n"
" --cri <path> Path to CRI socket for container metadata.\n" " --cri <path> Path to CRI socket for container metadata.\n"
" Use the specified socket to fetch data from a CRI-compatible runtime.\n" " Use the specified socket to fetch data from a CRI-compatible runtime.\n"
" -d, --daemon Run as a daemon.\n" " -d, --daemon Run as a daemon.\n"
" --disable-cri-async Disable asynchronous CRI metadata fetching.\n" " --disable-cri-async Disable asynchronous CRI metadata fetching.\n"
" This is useful to let the input event wait for the container metadata fetch\n" " This is useful to let the input event wait for the container metadata fetch\n"
" to finish before moving forward. Async fetching, in some environments leads\n" " to finish before moving forward. Async fetching, in some environments leads\n"
" to empty fields for container metadata when the fetch is not fast enough to be\n" " to empty fields for container metadata when the fetch is not fast enough to be\n"
" completed asynchronously. This can have a performance penalty on your environment\n" " completed asynchronously. This can have a performance penalty on your environment\n"
" depending on the number of containers and the frequency at which they are created/started/stopped\n" " depending on the number of containers and the frequency at which they are created/started/stopped\n"
" --disable-source <event_source>\n" " --disable-source <event_source>\n"
" Disable a specific event source.\n" " Disable a specific event source.\n"
" Available event sources are: syscall, k8s_audit.\n" " Available event sources are: syscall, k8s_audit.\n"
" It can be passed multiple times.\n" " It can be passed multiple times.\n"
" Can not disable both the event sources.\n" " Can not disable both the event sources.\n"
" -D <substring> Disable any rules with names having the substring <substring>. Can be specified multiple times.\n" " -D <substring> Disable any rules with names having the substring <substring>. Can be specified multiple times.\n"
" Can not be specified with -t.\n" " Can not be specified with -t.\n"
" -e <events_file> Read the events from <events_file> (in .scap format for sinsp events, or jsonl for\n" " -e <events_file> Read the events from <events_file> (in .scap format for sinsp events, or jsonl for\n"
" k8s audit events) instead of tapping into live.\n" " k8s audit events) instead of tapping into live.\n"
#ifndef MINIMAL_BUILD #ifndef MINIMAL_BUILD
" -k <url>, --k8s-api <url>\n" " -k <url>, --k8s-api <url>\n"
" Enable Kubernetes support by connecting to the API server specified as argument.\n" " Enable Kubernetes support by connecting to the API server specified as argument.\n"
" E.g. \"http://admin:password@127.0.0.1:8080\".\n" " E.g. \"http://admin:password@127.0.0.1:8080\".\n"
" The API server can also be specified via the environment variable FALCO_K8S_API.\n" " The API server can also be specified via the environment variable FALCO_K8S_API.\n"
" -K <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>], --k8s-api-cert <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>]\n" " -K <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>], --k8s-api-cert <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>]\n"
" Use the provided files names to authenticate user and (optionally) verify the K8S API server identity.\n" " Use the provided files names to authenticate user and (optionally) verify the K8S API server identity.\n"
" Each entry must specify full (absolute, or relative to the current directory) path to the respective file.\n" " Each entry must specify full (absolute, or relative to the current directory) path to the respective file.\n"
" Private key password is optional (needed only if key is password protected).\n" " Private key password is optional (needed only if key is password protected).\n"
" CA certificate is optional. For all files, only PEM file format is supported. \n" " CA certificate is optional. For all files, only PEM file format is supported. \n"
" Specifying CA certificate only is obsoleted - when single entry is provided \n" " Specifying CA certificate only is obsoleted - when single entry is provided \n"
" for this option, it will be interpreted as the name of a file containing bearer token.\n" " for this option, it will be interpreted as the name of a file containing bearer token.\n"
" Note that the format of this command-line option prohibits use of files whose names contain\n" " Note that the format of this command-line option prohibits use of files whose names contain\n"
" ':' or '#' characters in the file name.\n" " ':' or '#' characters in the file name.\n"
#endif #endif
" -L Show the name and description of all rules and exit.\n" " -L Show the name and description of all rules and exit.\n"
" -l <rule> Show the name and description of the rule with name <rule> and exit.\n" " -l <rule> Show the name and description of the rule with name <rule> and exit.\n"
" --list [<source>] List all defined fields. If <source> is provided, only list those fields for\n" " --list [<source>] List all defined fields. If <source> is provided, only list those fields for\n"
" the source <source>. Current values for <source> are \"syscall\", \"k8s_audit\"\n" " the source <source>. Current values for <source> are \"syscall\", \"k8s_audit\"\n"
#ifndef MINIMAL_BUILD #ifndef MINIMAL_BUILD
" -m <url[,marathon_url]>, --mesos-api <url[,marathon_url]>\n" " -m <url[,marathon_url]>, --mesos-api <url[,marathon_url]>\n"
" Enable Mesos support by connecting to the API server\n" " Enable Mesos support by connecting to the API server\n"
" specified as argument. E.g. \"http://admin:password@127.0.0.1:5050\".\n" " specified as argument. E.g. \"http://admin:password@127.0.0.1:5050\".\n"
" Marathon url is optional and defaults to Mesos address, port 8080.\n" " Marathon url is optional and defaults to Mesos address, port 8080.\n"
" The API servers can also be specified via the environment variable FALCO_MESOS_API.\n" " The API servers can also be specified via the environment variable FALCO_MESOS_API.\n"
#endif #endif
" -M <num_seconds> Stop collecting after <num_seconds> reached.\n" " -M <num_seconds> Stop collecting after <num_seconds> reached.\n"
" -N When used with --list, only print field names.\n" " -N When used with --list, only print field names.\n"
" -o, --option <key>=<val> Set the value of option <key> to <val>. Overrides values in configuration file.\n" " -o, --option <key>=<val> Set the value of option <key> to <val>. Overrides values in configuration file.\n"
" <key> can be a two-part <key>.<subkey>\n" " <key> can be a two-part <key>.<subkey>\n"
" -p <output_format>, --print <output_format>\n" " -p <output_format>, --print <output_format>\n"
" Add additional information to each falco notification's output.\n" " Add additional information to each falco notification's output.\n"
" With -pc or -pcontainer will use a container-friendly format.\n" " With -pc or -pcontainer will use a container-friendly format.\n"
" With -pk or -pkubernetes will use a kubernetes-friendly format.\n" " With -pk or -pkubernetes will use a kubernetes-friendly format.\n"
" With -pm or -pmesos will use a mesos-friendly format.\n" " With -pm or -pmesos will use a mesos-friendly format.\n"
" Additionally, specifying -pc/-pk/-pm will change the interpretation\n" " Additionally, specifying -pc/-pk/-pm will change the interpretation\n"
" of %%container.info in rule output fields.\n" " of %%container.info in rule output fields.\n"
" -P, --pidfile <pid_file> When run as a daemon, write pid to specified file\n" " -P, --pidfile <pid_file> When run as a daemon, write pid to specified file\n"
" -r <rules_file> Rules file/directory (defaults to value set in configuration file, or /etc/falco_rules.yaml).\n" " -s <stats_file> If specified, append statistics related to Falco's reading/processing of events\n"
" Can be specified multiple times to read from multiple files/directories.\n" " to this file (only useful in live mode).\n"
" -s <stats_file> If specified, append statistics related to Falco's reading/processing of events\n" " --stats-interval <msec> When using -s <stats_file>, write statistics every <msec> ms.\n"
" to this file (only useful in live mode).\n" " This uses signals, so don't recommend intervals below 200 ms.\n"
" --stats-interval <msec> When using -s <stats_file>, write statistics every <msec> ms.\n" " Defaults to 5000 (5 seconds).\n"
" This uses signals, so don't recommend intervals below 200 ms.\n" " -S <len>, --snaplen <len>\n"
" Defaults to 5000 (5 seconds).\n" " Capture the first <len> bytes of each I/O buffer.\n"
" -S <len>, --snaplen <len>\n" " By default, the first 80 bytes are captured. Use this\n"
" Capture the first <len> bytes of each I/O buffer.\n" " option with caution, it can generate huge trace files.\n"
" By default, the first 80 bytes are captured. Use this\n" " --support Print support information including version, etc. and exit.\n"
" option with caution, it can generate huge trace files.\n" " -T <tag> Disable any rules with a tag=<tag>. Can be specified multiple times.\n"
" --support Print support information including version, rules files used, etc. and exit.\n" " Can not be specified with -t.\n"
" -T <tag> Disable any rules with a tag=<tag>. Can be specified multiple times.\n" " -t <tag> Only run those rules with a tag=<tag>. Can be specified multiple times.\n"
" Can not be specified with -t.\n" " Can not be specified with -T/-D.\n"
" -t <tag> Only run those rules with a tag=<tag>. Can be specified multiple times.\n" " -U,--unbuffered Turn off output buffering to configured outputs.\n"
" Can not be specified with -T/-D.\n" " This causes every single line emitted by falco to be flushed,\n"
" -U,--unbuffered Turn off output buffering to configured outputs.\n" " which generates higher CPU usage but is useful when piping those outputs\n"
" This causes every single line emitted by falco to be flushed,\n" " into another process or into a script.\n"
" which generates higher CPU usage but is useful when piping those outputs\n" " -u, --userspace Parse events from userspace.\n"
" into another process or into a script.\n" " To be used in conjunction with the ptrace(2) based driver (pdig).\n"
" -u, --userspace Parse events from userspace.\n" " -V, --validate <rules_file> Read the contents of the specified rules(s) file and exit.\n"
" To be used in conjunction with the ptrace(2) based driver (pdig).\n" " Can be specified multiple times to validate multiple files.\n"
" -V, --validate <rules_file> Read the contents of the specified rules(s) file and exit.\n" " -v Verbose output.\n"
" Can be specified multiple times to validate multiple files.\n" " --version Print version number.\n"
" -v Verbose output.\n" "\n");
" --version Print version number.\n"
"\n"
);
} }
static void display_fatal_err(const string &msg) static void display_fatal_err(const string &msg)
{ {
falco_logger::log(LOG_ERR, msg); falco_logger::log(LOG_ERR, msg);
@@ -247,7 +238,7 @@ static std::string read_file(std::string filename)
// //
// Event processing loop // Event processing loop
// //
uint64_t do_inspect(falco_outputs *outputs, uint64_t do_inspect(falco_engine **engine, falco_outputs *outputs,
sinsp *inspector, sinsp *inspector,
falco_configuration &config, falco_configuration &config,
syscall_evt_drop_mgr &sdropmgr, syscall_evt_drop_mgr &sdropmgr,
@@ -257,6 +248,7 @@ uint64_t do_inspect(falco_outputs *outputs,
bool all_events, bool all_events,
int &result) int &result)
{ {
uint64_t num_evts = 0; uint64_t num_evts = 0;
int32_t rc; int32_t rc;
sinsp_evt *ev; sinsp_evt *ev;
@@ -280,20 +272,21 @@ uint64_t do_inspect(falco_outputs *outputs,
} }
} }
falco_engine *current_engine = g_engine.exchange(nullptr);
// If we didn't get a set of rules yet from the rules plugin, we load
// an engine with an empty ruleset to let Falco do the processing without blocking
// the driver.
if(current_engine == nullptr)
{ {
current_engine = new falco_engine((const falco_engine)*g_engine_blueprint); // wait for the first engine to be ready
current_engine->load_rules("", false, false); std::unique_lock<std::mutex> lk(engine_ready);
engine_cv.wait(lk, [] { return is_engine_ready; });
} }
//
// Loop through the events
//
std::atomic<falco_engine *> e;
falco_engine *engine_to_use = nullptr;
e.compare_exchange_strong(engine_to_use, *engine);
while(1) while(1)
{ {
rc = inspector->next(&ev); rc = inspector->next(&ev);
writer.handle(); writer.handle();
@@ -355,14 +348,17 @@ uint64_t do_inspect(falco_outputs *outputs,
continue; continue;
} }
auto engine_replacement = g_engine.exchange(nullptr); // As the inspector has no filter at its level, all
if(engine_replacement != nullptr) // events are returned here. Pass them to the Falco
// engine, which will match the event against the set
// of rules. If a match is found, pass the event to
// the outputs.
bool engine_cmp_res = e.compare_exchange_strong(engine_to_use, *engine);
if(engine_cmp_res == false)
{ {
delete current_engine; falco_logger::log(LOG_INFO, "Using new engine with new ruleset\n");
current_engine = engine_replacement;
falco_logger::log(LOG_DEBUG, "falco_engine replacement found and swapped");
} }
unique_ptr<falco_engine::rule_result> res = current_engine->process_sinsp_event(ev); unique_ptr<falco_engine::rule_result> res = e.load()->process_sinsp_event(ev);
if(res) if(res)
{ {
outputs->handle_event(res->evt, res->rule, res->source, res->priority_num, res->format); outputs->handle_event(res->evt, res->rule, res->source, res->priority_num, res->format);
@@ -432,56 +428,22 @@ static void list_source_fields(falco_engine *engine, bool verbose, bool names_on
} }
} }
static void rules_insert_cb(char *rules_content) static void rules_cb(char *rules_content, hawk_engine *engine)
{ {
try falco_engine *engine_replacement = (*reinterpret_cast<falco_engine **>(engine))->clone();
{ engine_replacement->load_rules(rules_content, false, true);
auto engine = g_engine_transaction.load();
if(engine == nullptr)
{
// todo: inform the caller about this error, maybe stderr and return code?
falco_logger::log(LOG_ERR, std::string("can't insert rules, no transaction in progress"));
return;
}
engine->load_rules(rules_content, false, true);
g_engine_transaction.store(engine);
}
catch(const falco_exception &e)
{
// todo: inform the caller about this error, maybe stderr and return code?
falco_logger::log(LOG_WARNING, std::string("rules load failed: ") + e.what());
return;
}
}
static void rules_begin_cb() *engine = std::ref(engine_replacement);
{
if(g_engine_transaction.load() != nullptr)
{
// todo: inform the caller about this error, maybe stderr and return code?
falco_logger::log(LOG_ERR, std::string("a transaction is already in progress"));
return;
}
auto engine_replacement = new falco_engine((const falco_engine)*g_engine_blueprint);
g_engine_transaction.store(engine_replacement);
}
static void rules_commit_cb() // This mutex is only needed for the first synchronization
{ // it can be discarded the second time rules_cb is needed
auto engine = g_engine_transaction.load(); // since the main engine loop is already started.
if(engine == nullptr) if(!is_engine_ready)
{ {
// todo: inform the caller about this error, maybe stderr and return code? std::lock_guard<std::mutex> lk(engine_ready);
falco_logger::log(LOG_ERR, std::string("can't commit rules, no transaction in progress")); is_engine_ready = true;
return; engine_cv.notify_all();
} }
delete g_engine.exchange(g_engine_transaction.load());
g_engine_transaction.store(nullptr);
}
static void rules_rollback_cb()
{
g_engine_transaction.store(nullptr);
} }
// //
@@ -492,6 +454,7 @@ int falco_init(int argc, char **argv)
int result = EXIT_SUCCESS; int result = EXIT_SUCCESS;
sinsp *inspector = NULL; sinsp *inspector = NULL;
sinsp_evt::param_fmt event_buffer_format = sinsp_evt::PF_NORMAL; sinsp_evt::param_fmt event_buffer_format = sinsp_evt::PF_NORMAL;
falco_engine *engine_blueprint;
std::thread watchrules_thread; std::thread watchrules_thread;
falco_outputs *outputs = NULL; falco_outputs *outputs = NULL;
syscall_evt_drop_mgr sdropmgr; syscall_evt_drop_mgr sdropmgr;
@@ -592,7 +555,7 @@ int falco_init(int argc, char **argv)
// Parse the args // Parse the args
// //
while((op = getopt_long(argc, argv, while((op = getopt_long(argc, argv,
"hc:AbdD:e:F:ik:K:Ll:m:M:No:P:p:r:S:s:T:t:UuvV:w:", "hc:AbdD:e:F:ik:K:Ll:m:M:No:P:p:S:s:T:t:UuvV:w:",
long_options, &long_index)) != -1) long_options, &long_index)) != -1)
{ {
switch(op) switch(op)
@@ -686,9 +649,6 @@ int falco_init(int argc, char **argv)
replace_container_info = false; replace_container_info = false;
} }
break; break;
case 'r':
falco_configuration::read_rules_file_directory(string(optarg), rules_filenames);
break;
case 'S': case 'S':
snaplen = atoi(optarg); snaplen = atoi(optarg);
break; break;
@@ -807,16 +767,15 @@ int falco_init(int argc, char **argv)
return EXIT_SUCCESS; return EXIT_SUCCESS;
} }
auto initial_engine = new falco_engine(true, alternate_lua_dir); engine_blueprint = new falco_engine(true, alternate_lua_dir);
initial_engine->set_inspector(inspector); engine_blueprint->set_inspector(inspector);
initial_engine->set_extra(output_format, replace_container_info); engine_blueprint->set_extra(output_format, replace_container_info);
g_engine_blueprint = initial_engine;
if(list_flds) // if(list_flds)
{ // {
// list_source_fields(engine, verbose, names_only, list_flds_source); // list_source_fields(engine, verbose, names_only, list_flds_source);
return EXIT_SUCCESS; // return EXIT_SUCCESS;
} // }
if(disable_sources.size() > 0) if(disable_sources.size() > 0)
{ {
@@ -838,6 +797,8 @@ int falco_init(int argc, char **argv)
} }
} }
outputs = new falco_outputs();
// Some combinations of arguments are not allowed. // Some combinations of arguments are not allowed.
if(daemon && pidfilename == "") if(daemon && pidfilename == "")
{ {
@@ -869,7 +830,7 @@ int falco_init(int argc, char **argv)
} }
else else
{ {
throw std::invalid_argument("You must create a config file at " FALCO_SOURCE_CONF_FILE ", " FALCO_INSTALL_CONF_FILE " or by passing -c\n"); conf_filename = "";
} }
} }
} }
@@ -892,7 +853,7 @@ int falco_init(int argc, char **argv)
// } // }
// catch(falco_exception &e) // catch(falco_exception &e)
// { // {
// printf("%s%s", prefix.c_str(), e.what()); // printf("%s%s\n", prefix.c_str(), e.what());
// throw; // throw;
// } // }
// printf("%sOk\n", prefix.c_str()); // printf("%sOk\n", prefix.c_str());
@@ -913,123 +874,30 @@ int falco_init(int argc, char **argv)
} }
else else
{ {
throw std::runtime_error("Could not find configuration file at " + conf_filename); config.init(cmdline_options);
falco_logger::set_time_format_iso_8601(config.m_time_format_iso_8601);
// log after config init because config determines where logs go
falco_logger::log(LOG_INFO, "Falco version " + std::string(FALCO_VERSION) + " (driver version " + std::string(DRIVER_VERSION) + ")\n");
falco_logger::log(LOG_INFO, "Falco initialized. No configuration file found, proceeding with defaults\n");
} }
for(auto extension : config.m_extensions_filenames) engine_blueprint->set_min_priority(config.m_min_priority);
{
auto lib = new libhawk::library(extension);
lib->load();
}
libhawk::lifecycle::start();
if(rules_filenames.size())
{
config.m_rules_filenames = rules_filenames;
}
g_engine_blueprint->set_min_priority(config.m_min_priority);
if(buffered_cmdline) if(buffered_cmdline)
{ {
config.m_buffered_outputs = buffered_outputs; config.m_buffered_outputs = buffered_outputs;
} }
if(config.m_rules_filenames.size() == 0) hawk_init();
{
// throw std::invalid_argument("You must specify at least one rules file/directory via -r or a rules_file entry in falco.yaml");
}
falco_logger::log(LOG_DEBUG, "Configured rules filenames:\n");
for(auto filename : config.m_rules_filenames)
{
falco_logger::log(LOG_DEBUG, string(" ") + filename + "\n");
}
for(auto filename : config.m_rules_filenames)
{
falco_logger::log(LOG_INFO, "Loading rules from file " + filename + ":\n");
uint64_t required_engine_version;
// engine->load_rules_file(filename, verbose, all_events, required_engine_version);
required_engine_versions[filename] = required_engine_version;
}
// You can't both disable and enable rules
if((disabled_rule_substrings.size() + disabled_rule_tags.size() > 0) &&
enabled_rule_tags.size() > 0)
{
throw std::invalid_argument("You can not specify both disabled (-D/-T) and enabled (-t) rules");
}
for(auto substring : disabled_rule_substrings)
{
falco_logger::log(LOG_INFO, "Disabling rules matching substring: " + substring + "\n");
// engine->enable_rule(substring, false);
}
if(disabled_rule_tags.size() > 0)
{
for(auto tag : disabled_rule_tags)
{
falco_logger::log(LOG_INFO, "Disabling rules with tag: " + tag + "\n");
}
// engine->enable_rule_by_tag(disabled_rule_tags, false);
}
if(enabled_rule_tags.size() > 0)
{
// Since we only want to enable specific
// rules, first disable all rules.
// engine->enable_rule(all_rules, false);
for(auto tag : enabled_rule_tags)
{
falco_logger::log(LOG_INFO, "Enabling rules with tag: " + tag + "\n");
}
// engine->enable_rule_by_tag(enabled_rule_tags, true);
}
watchrules_thread = std::thread([&] { watchrules_thread = std::thread([&] {
libhawk::lifecycle::watch_rules( // todo: pass verbose, and all_events
(hawk_rules_begin_cb)rules_begin_cb, hawk_watch_rules((hawk_watch_rules_cb)rules_cb, reinterpret_cast<hawk_engine *>(&engine_blueprint));
(hawk_rules_insert_cb)rules_insert_cb,
(hawk_rules_commit_cb)rules_commit_cb,
(hawk_rules_rollback_cb)rules_rollback_cb,
config.m_rules_provider);
}); });
falco_logger::log(LOG_INFO, "DOPO\n"); // todo(fntlnz): make this a callback to watch_rules?
// This needs to be done for every load
// if(config.m_rules_filenames.size() == 0) //
// {
// throw std::invalid_argument("You must specify at least one rules file/directory via -r or a rules_file entry in falco.yaml");
// }
// falco_logger::log(LOG_DEBUG, "Configured rules filenames:\n");
// for (auto filename : config.m_rules_filenames)
// {
// falco_logger::log(LOG_DEBUG, string(" ") + filename + "\n");
// }
// for (auto filename : config.m_rules_filenames)
// {
// falco_logger::log(LOG_INFO, "Loading rules from file " + filename + ":\n");
// uint64_t required_engine_version;
// try {
// engine->load_rules_file(filename, verbose, all_events, required_engine_version);
// }
// catch(falco_exception &e)
// {
// std::string prefix = "Could not load rules file " + filename + ": ";
// throw falco_exception(prefix + e.what());
// }
// required_engine_versions[filename] = required_engine_version;
// }
// // You can't both disable and enable rules // // You can't both disable and enable rules
// if((disabled_rule_substrings.size() + disabled_rule_tags.size() > 0) && // if((disabled_rule_substrings.size() + disabled_rule_tags.size() > 0) &&
// enabled_rule_tags.size() > 0) { // enabled_rule_tags.size() > 0) {
@@ -1092,17 +960,7 @@ int falco_init(int argc, char **argv)
support["system_info"]["machine"] = sysinfo.machine; support["system_info"]["machine"] = sysinfo.machine;
support["cmdline"] = cmdline; support["cmdline"] = cmdline;
support["config"] = read_file(conf_filename); support["config"] = read_file(conf_filename);
support["rules_files"] = nlohmann::json::array(); support["rules_source"] = "external"; // todo(fntlnz): do we want to let libhawk pass an identifier and maybe more dump info for this?
for(auto filename : config.m_rules_filenames)
{
nlohmann::json finfo;
finfo["name"] = filename;
nlohmann::json variant;
variant["required_engine_version"] = required_engine_versions[filename];
variant["content"] = read_file(filename);
finfo["variants"].push_back(variant);
support["rules_files"].push_back(finfo);
}
printf("%s\n", support.dump().c_str()); printf("%s\n", support.dump().c_str());
goto exit; goto exit;
} }
@@ -1124,6 +982,13 @@ int falco_init(int argc, char **argv)
hostname = c_hostname; hostname = c_hostname;
} }
outputs->init(config.m_json_output,
config.m_json_include_output_property,
config.m_notifications_rate, config.m_notifications_max_burst,
config.m_buffered_outputs,
config.m_time_format_iso_8601,
hostname);
if(!all_events) if(!all_events)
{ {
inspector->set_drop_event_flags(EF_DROP_SIMPLE_CONS); inspector->set_drop_event_flags(EF_DROP_SIMPLE_CONS);
@@ -1143,6 +1008,11 @@ int falco_init(int argc, char **argv)
inspector->set_hostname_and_port_resolution_mode(false); inspector->set_hostname_and_port_resolution_mode(false);
for(auto output : config.m_outputs)
{
outputs->add_output(output);
}
if(signal(SIGINT, signal_callback) == SIG_ERR) if(signal(SIGINT, signal_callback) == SIG_ERR)
{ {
fprintf(stderr, "An error occurred while setting SIGINT signal handler.\n"); fprintf(stderr, "An error occurred while setting SIGINT signal handler.\n");
@@ -1234,21 +1104,6 @@ int falco_init(int argc, char **argv)
g_daemonized = true; g_daemonized = true;
} }
outputs = new falco_outputs();
outputs->init(config.m_json_output,
config.m_json_include_output_property,
config.m_output_timeout,
config.m_notifications_rate, config.m_notifications_max_burst,
config.m_buffered_outputs,
config.m_time_format_iso_8601,
hostname);
for(auto output : config.m_outputs)
{
outputs->add_output(output);
}
if(trace_filename.size()) if(trace_filename.size())
{ {
// Try to open the trace file as a sysdig // Try to open the trace file as a sysdig
@@ -1432,7 +1287,7 @@ int falco_init(int argc, char **argv)
{ {
// std::string ssl_option = (config.m_webserver_ssl_enabled ? " (SSL)" : ""); // std::string ssl_option = (config.m_webserver_ssl_enabled ? " (SSL)" : "");
// falco_logger::log(LOG_INFO, "Starting internal webserver, listening on port " + to_string(config.m_webserver_listen_port) + ssl_option + "\n"); // falco_logger::log(LOG_INFO, "Starting internal webserver, listening on port " + to_string(config.m_webserver_listen_port) + ssl_option + "\n");
// webserver.init(&config, engine, outputs); // webserver.init(&config, engine_future.get().get(), outputs);
// webserver.start(); // webserver.start();
} }
@@ -1458,7 +1313,7 @@ int falco_init(int argc, char **argv)
if(!trace_filename.empty() && !trace_is_scap) if(!trace_filename.empty() && !trace_is_scap)
{ {
#ifndef MINIMAL_BUILD #ifndef MINIMAL_BUILD
// read_k8s_audit_trace_file(engine, // read_k8s_audit_trace_file(engine.get(),
// outputs, // outputs,
// trace_filename); // trace_filename);
#endif #endif
@@ -1467,7 +1322,7 @@ int falco_init(int argc, char **argv)
{ {
uint64_t num_evts; uint64_t num_evts;
num_evts = do_inspect(outputs, num_evts = do_inspect(&engine_blueprint, outputs,
inspector, inspector,
config, config,
sdropmgr, sdropmgr,
@@ -1505,10 +1360,9 @@ int falco_init(int argc, char **argv)
inspector->close(); inspector->close();
// engine->print_stats(); // engine->print_stats();
sdropmgr.print_stats(); sdropmgr.print_stats();
libhawk::lifecycle::stop();
if(watchrules_thread.joinable()) if(watchrules_thread.joinable())
{ {
hawk_destroy();
watchrules_thread.join(); watchrules_thread.join();
} }
#ifndef MINIMAL_BUILD #ifndef MINIMAL_BUILD
@@ -1523,13 +1377,14 @@ int falco_init(int argc, char **argv)
catch(exception &e) catch(exception &e)
{ {
display_fatal_err("Runtime error: " + string(e.what()) + ". Exiting.\n"); display_fatal_err("Runtime error: " + string(e.what()) + ". Exiting.\n");
libhawk::lifecycle::stop();
if(watchrules_thread.joinable())
{
watchrules_thread.join();
}
result = EXIT_FAILURE; result = EXIT_FAILURE;
if(watchrules_thread.joinable())
{
hawk_destroy();
watchrules_thread.join();
}
#ifndef MINIMAL_BUILD #ifndef MINIMAL_BUILD
webserver.stop(); webserver.stop();
if(grpc_server_thread.joinable()) if(grpc_server_thread.joinable())
@@ -1543,7 +1398,9 @@ int falco_init(int argc, char **argv)
exit: exit:
delete inspector; delete inspector;
delete engine_blueprint;
delete outputs; delete outputs;
return result; return result;
} }

View File

@@ -24,7 +24,6 @@ limitations under the License.
#include "formats.h" #include "formats.h"
#include "logger.h" #include "logger.h"
#include "watchdog.h"
#include "outputs_file.h" #include "outputs_file.h"
#include "outputs_program.h" #include "outputs_program.h"
@@ -52,26 +51,18 @@ falco_outputs::~falco_outputs()
{ {
if(m_initialized) if(m_initialized)
{ {
this->stop_worker(); for(auto it = m_outputs.cbegin(); it != m_outputs.cend(); ++it)
for(auto o : m_outputs)
{ {
delete o; (*it)->cleanup();
} }
} }
} }
void falco_outputs::init(bool json_output, void falco_outputs::init(bool json_output,
bool json_include_output_property, bool json_include_output_property,
uint32_t timeout, uint32_t rate, uint32_t max_burst, bool buffered,
uint32_t rate, uint32_t max_burst, bool buffered, bool time_format_iso_8601, string hostname)
bool time_format_iso_8601, std::string hostname)
{ {
// Cannot be initialized more than one time.
if(m_initialized)
{
throw falco_exception("falco_outputs already initialized");
}
m_json_output = json_output; m_json_output = json_output;
// Note that falco_formats is already initialized by the engine, // Note that falco_formats is already initialized by the engine,
@@ -80,29 +71,17 @@ void falco_outputs::init(bool json_output,
falco_formats::s_json_output = json_output; falco_formats::s_json_output = json_output;
falco_formats::s_json_include_output_property = json_include_output_property; falco_formats::s_json_include_output_property = json_include_output_property;
m_timeout = std::chrono::milliseconds(timeout);
m_notifications_tb.init(rate, max_burst); m_notifications_tb.init(rate, max_burst);
m_buffered = buffered; m_buffered = buffered;
m_time_format_iso_8601 = time_format_iso_8601; m_time_format_iso_8601 = time_format_iso_8601;
m_hostname = hostname; m_hostname = hostname;
m_worker_thread = std::thread(&falco_outputs::worker, this);
m_initialized = true; m_initialized = true;
} }
// This function has to be called after init() since some configuration settings
// need to be passed to the output plugins. Then, although the worker has started,
// the worker is still on hold, waiting for a message.
// Thus it is still safe to call add_output() before any message has been enqueued.
void falco_outputs::add_output(falco::outputs::config oc) void falco_outputs::add_output(falco::outputs::config oc)
{ {
if(!m_initialized)
{
throw falco_exception("cannot add output: falco_outputs not initialized yet");
}
falco::outputs::abstract_output *oo; falco::outputs::abstract_output *oo;
@@ -150,12 +129,6 @@ void falco_outputs::handle_event(gen_event *evt, string &rule, string &source,
return; return;
} }
falco_outputs::ctrl_msg cmsg = {};
cmsg.ts = evt->get_ts();
cmsg.priority = priority;
cmsg.source = source;
cmsg.rule = rule;
string sformat; string sformat;
if(source == "syscall") if(source == "syscall")
{ {
@@ -190,38 +163,35 @@ void falco_outputs::handle_event(gen_event *evt, string &rule, string &source,
sformat += " " + format; sformat += " " + format;
} }
cmsg.msg = falco_formats::format_event(evt, rule, source, falco_common::priority_names[priority], sformat); string msg;
cmsg.fields = falco_formats::resolve_tokens(evt, source, sformat); msg = falco_formats::format_event(evt, rule, source, falco_common::priority_names[priority], sformat);
cmsg.type = ctrl_msg_type::CTRL_MSG_OUTPUT; for(auto it = m_outputs.cbegin(); it != m_outputs.cend(); ++it)
m_queue.push(cmsg); {
(*it)->output_event(evt, rule, source, priority, sformat, msg);
}
} }
void falco_outputs::handle_msg(uint64_t ts, void falco_outputs::handle_msg(uint64_t now,
falco_common::priority_type priority, falco_common::priority_type priority,
std::string &msg, std::string &msg,
std::string &rule, std::string &rule,
std::map<std::string, std::string> &output_fields) std::map<std::string, std::string> &output_fields)
{ {
falco_outputs::ctrl_msg cmsg = {}; std::string full_msg;
cmsg.ts = ts;
cmsg.priority = priority;
cmsg.source = "internal";
cmsg.rule = rule;
cmsg.fields = output_fields;
if(m_json_output) if(m_json_output)
{ {
nlohmann::json jmsg; nlohmann::json jmsg;
// Convert the time-as-nanoseconds to a more json-friendly ISO8601. // Convert the time-as-nanoseconds to a more json-friendly ISO8601.
time_t evttime = ts / 1000000000; time_t evttime = now / 1000000000;
char time_sec[20]; // sizeof "YYYY-MM-DDTHH:MM:SS" char time_sec[20]; // sizeof "YYYY-MM-DDTHH:MM:SS"
char time_ns[12]; // sizeof ".sssssssssZ" char time_ns[12]; // sizeof ".sssssssssZ"
string iso8601evttime; string iso8601evttime;
strftime(time_sec, sizeof(time_sec), "%FT%T", gmtime(&evttime)); strftime(time_sec, sizeof(time_sec), "%FT%T", gmtime(&evttime));
snprintf(time_ns, sizeof(time_ns), ".%09luZ", ts % 1000000000); snprintf(time_ns, sizeof(time_ns), ".%09luZ", now % 1000000000);
iso8601evttime = time_sec; iso8601evttime = time_sec;
iso8601evttime += time_ns; iso8601evttime += time_ns;
@@ -231,15 +201,15 @@ void falco_outputs::handle_msg(uint64_t ts,
jmsg["time"] = iso8601evttime; jmsg["time"] = iso8601evttime;
jmsg["output_fields"] = output_fields; jmsg["output_fields"] = output_fields;
cmsg.msg = jmsg.dump(); full_msg = jmsg.dump();
} }
else else
{ {
std::string timestr; std::string timestr;
bool first = true; bool first = true;
sinsp_utils::ts_to_string(ts, &timestr, false, true); sinsp_utils::ts_to_string(now, &timestr, false, true);
cmsg.msg = timestr + ": " + falco_common::priority_names[priority] + " " + msg + " ("; full_msg = timestr + ": " + falco_common::priority_names[priority] + " " + msg + " (";
for(auto &pair : output_fields) for(auto &pair : output_fields)
{ {
if(first) if(first)
@@ -248,95 +218,23 @@ void falco_outputs::handle_msg(uint64_t ts,
} }
else else
{ {
cmsg.msg += " "; full_msg += " ";
} }
cmsg.msg += pair.first + "=" + pair.second; full_msg += pair.first + "=" + pair.second;
} }
cmsg.msg += ")"; full_msg += ")";
} }
cmsg.type = ctrl_msg_type::CTRL_MSG_OUTPUT; for(auto it = m_outputs.cbegin(); it != m_outputs.cend(); ++it)
m_queue.push(cmsg); {
} (*it)->output_msg(priority, full_msg);
}
void falco_outputs::cleanup_outputs()
{
this->push(falco_outputs::ctrl_msg_type::CTRL_MSG_CLEANUP);
} }
void falco_outputs::reopen_outputs() void falco_outputs::reopen_outputs()
{ {
this->push(falco_outputs::ctrl_msg_type::CTRL_MSG_REOPEN); for(auto it = m_outputs.cbegin(); it != m_outputs.cend(); ++it)
}
void falco_outputs::stop_worker()
{
watchdog<void *> wd;
wd.start([&](void *) -> void {
falco_logger::log(LOG_NOTICE, "output channels still blocked, discarding all remaining notifications\n");
m_queue.clear();
this->push(falco_outputs::ctrl_msg_type::CTRL_MSG_STOP);
});
wd.set_timeout(m_timeout, nullptr);
this->push(falco_outputs::ctrl_msg_type::CTRL_MSG_STOP);
if(m_worker_thread.joinable())
{ {
m_worker_thread.join(); (*it)->reopen();
} }
} }
inline void falco_outputs::push(ctrl_msg_type cmt)
{
falco_outputs::ctrl_msg cmsg = {};
cmsg.type = cmt;
m_queue.push(cmsg);
}
// todo(leogr,leodido): this function is not supposed to throw exceptions, and with "noexcept",
// the program is terminated if that occurs. Although that's the wanted behavior,
// we still need to improve the error reporting since some inner functions can throw exceptions.
void falco_outputs::worker() noexcept
{
watchdog<std::string> wd;
wd.start([&](std::string payload) -> void {
falco_logger::log(LOG_CRIT, "\"" + payload + "\" output timeout, all output channels are blocked\n");
});
auto timeout = m_timeout;
falco_outputs::ctrl_msg cmsg;
do
{
// Block until a message becomes available.
m_queue.pop(cmsg);
for(const auto o : m_outputs)
{
wd.set_timeout(timeout, o->get_name());
try
{
switch(cmsg.type)
{
case ctrl_msg_type::CTRL_MSG_OUTPUT:
o->output(&cmsg);
break;
case ctrl_msg_type::CTRL_MSG_CLEANUP:
case ctrl_msg_type::CTRL_MSG_STOP:
o->cleanup();
break;
case ctrl_msg_type::CTRL_MSG_REOPEN:
o->reopen();
break;
default:
falco_logger::log(LOG_DEBUG, "Outputs worker received an unknown message type\n");
}
}
catch(const exception &e)
{
falco_logger::log(LOG_ERR, o->get_name() + ": " + string(e.what()) + "\n");
}
}
wd.cancel_timeout();
} while(cmsg.type != ctrl_msg_type::CTRL_MSG_STOP);
}

View File

@@ -25,7 +25,6 @@ limitations under the License.
#include "token_bucket.h" #include "token_bucket.h"
#include "falco_engine.h" #include "falco_engine.h"
#include "outputs.h" #include "outputs.h"
#include "tbb/concurrent_queue.h"
// //
// This class acts as the primary interface between a program and the // This class acts as the primary interface between a program and the
@@ -40,25 +39,25 @@ public:
void init(bool json_output, void init(bool json_output,
bool json_include_output_property, bool json_include_output_property,
uint32_t timeout,
uint32_t rate, uint32_t max_burst, bool buffered, uint32_t rate, uint32_t max_burst, bool buffered,
bool time_format_iso_8601, std::string hostname); bool time_format_iso_8601, std::string hostname);
void add_output(falco::outputs::config oc); void add_output(falco::outputs::config oc);
// Format then send the event to all configured outputs (`evt` is an event that has matched some rule). //
// evt is an event that has matched some rule. Pass the event
// to all configured outputs.
//
void handle_event(gen_event *evt, std::string &rule, std::string &source, void handle_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format); falco_common::priority_type priority, std::string &format);
// Format then send a generic message to all outputs. Not necessarily associated with any event. // Send a generic message to all outputs. Not necessarily associated with any event.
void handle_msg(uint64_t now, void handle_msg(uint64_t now,
falco_common::priority_type priority, falco_common::priority_type priority,
std::string &msg, std::string &msg,
std::string &rule, std::string &rule,
std::map<std::string, std::string> &output_fields); std::map<std::string, std::string> &output_fields);
void cleanup_outputs();
void reopen_outputs(); void reopen_outputs();
private: private:
@@ -72,28 +71,5 @@ private:
bool m_buffered; bool m_buffered;
bool m_json_output; bool m_json_output;
bool m_time_format_iso_8601; bool m_time_format_iso_8601;
std::chrono::milliseconds m_timeout;
std::string m_hostname; std::string m_hostname;
enum ctrl_msg_type
{
CTRL_MSG_STOP = 0,
CTRL_MSG_OUTPUT = 1,
CTRL_MSG_CLEANUP = 2,
CTRL_MSG_REOPEN = 3,
};
struct ctrl_msg : falco::outputs::message
{
ctrl_msg_type type;
};
typedef tbb::concurrent_bounded_queue<ctrl_msg> falco_outputs_cbq;
falco_outputs_cbq m_queue;
std::thread m_worker_thread;
inline void push(ctrl_msg_type cmt);
void worker() noexcept;
void stop_worker();
}; };

View File

@@ -15,7 +15,6 @@ limitations under the License.
*/ */
#include "config_falco.h" #include "config_falco.h"
#include "falco_engine_version.h"
#include "grpc_server_impl.h" #include "grpc_server_impl.h"
#include "grpc_queue.h" #include "grpc_queue.h"
#include "logger.h" #include "logger.h"
@@ -76,9 +75,6 @@ void falco::grpc::server_impl::version(const context& ctx, const version::reques
auto& version = *res.mutable_version(); auto& version = *res.mutable_version();
version = FALCO_VERSION; version = FALCO_VERSION;
res.set_engine_version(FALCO_ENGINE_VERSION);
res.set_engine_fields_checksum(FALCO_FIELDS_CHECKSUM);
res.set_major(FALCO_VERSION_MAJOR); res.set_major(FALCO_VERSION_MAJOR);
res.set_minor(FALCO_VERSION_MINOR); res.set_minor(FALCO_VERSION_MINOR);
res.set_patch(FALCO_VERSION_PATCH); res.set_patch(FALCO_VERSION_PATCH);

View File

@@ -37,21 +37,6 @@ struct config
std::map<std::string, std::string> options; std::map<std::string, std::string> options;
}; };
//
// The message to be outputted. It can either refer to:
// - an event that has matched some rule,
// - or a generic message (e.g., a drop alert).
//
struct message
{
uint64_t ts;
falco_common::priority_type priority;
std::string msg;
std::string rule;
std::string source;
map<std::string, std::string> fields;
};
// //
// This class acts as the primary interface for implementing // This class acts as the primary interface for implementing
// a Falco output class. // a Falco output class.
@@ -60,8 +45,6 @@ struct message
class abstract_output class abstract_output
{ {
public: public:
virtual ~abstract_output() {}
void init(config oc, bool buffered, std::string hostname) void init(config oc, bool buffered, std::string hostname)
{ {
m_oc = oc; m_oc = oc;
@@ -69,19 +52,15 @@ public:
m_hostname = hostname; m_hostname = hostname;
} }
// Return the output's name as per its configuration. // Output an event that has matched some rule.
const std::string get_name() const virtual void output_event(gen_event *evt, std::string &rule, std::string &source,
{ falco_common::priority_type priority, std::string &format, std::string &msg) = 0;
return m_oc.name;
}
// Output a message. // Output a generic message. Not necessarily associated with any event.
virtual void output(const message *msg) = 0; virtual void output_msg(falco_common::priority_type priority, std::string &msg) = 0;
// Possibly close the output and open it again.
virtual void reopen() {} virtual void reopen() {}
// Possibly flush the output.
virtual void cleanup() {} virtual void cleanup() {}
protected: protected:

View File

@@ -31,10 +31,16 @@ void falco::outputs::output_file::open_file()
} }
} }
void falco::outputs::output_file::output(const message *msg) void falco::outputs::output_file::output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg)
{
output_msg(priority, msg);
}
void falco::outputs::output_file::output_msg(falco_common::priority_type priority, std::string &msg)
{ {
open_file(); open_file();
m_outfile << msg->msg + "\n"; m_outfile << msg + "\n";
if(m_oc.options["keep_alive"] != "true") if(m_oc.options["keep_alive"] != "true")
{ {

View File

@@ -27,7 +27,10 @@ namespace outputs
class output_file : public abstract_output class output_file : public abstract_output
{ {
void output(const message *msg); void output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg);
void output_msg(falco_common::priority_type priority, std::string &msg);
void cleanup(); void cleanup();

View File

@@ -21,41 +21,44 @@ limitations under the License.
#include "formats.h" #include "formats.h"
#include "banned.h" // This raises a compilation error when certain functions are used #include "banned.h" // This raises a compilation error when certain functions are used
void falco::outputs::output_grpc::output(const message *msg) void falco::outputs::output_grpc::output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format,
std::string &msg)
{ {
falco::outputs::response grpc_res; falco::outputs::response grpc_res;
// time // time
auto timestamp = grpc_res.mutable_time(); auto timestamp = grpc_res.mutable_time();
*timestamp = google::protobuf::util::TimeUtil::NanosecondsToTimestamp(msg->ts); *timestamp = google::protobuf::util::TimeUtil::NanosecondsToTimestamp(evt->get_ts());
// rule // rule
auto r = grpc_res.mutable_rule(); auto r = grpc_res.mutable_rule();
*r = msg->rule; *r = rule;
// source // source
falco::schema::source s = falco::schema::source::SYSCALL; falco::schema::source s = falco::schema::source::SYSCALL;
if(!falco::schema::source_Parse(msg->source, &s)) if(!falco::schema::source_Parse(source, &s))
{ {
throw falco_exception("Unknown source passed to output_grpc::output()"); throw falco_exception("Unknown source passed to output_grpc::output_event()");
} }
grpc_res.set_source(s); grpc_res.set_source(s);
// priority // priority
falco::schema::priority p = falco::schema::priority::EMERGENCY; falco::schema::priority p = falco::schema::priority::EMERGENCY;
if(!falco::schema::priority_Parse(falco_common::priority_names[msg->priority], &p)) if(!falco::schema::priority_Parse(falco_common::priority_names[priority], &p))
{ {
throw falco_exception("Unknown priority passed to output_grpc::output()"); throw falco_exception("Unknown priority passed to output_grpc::output_event()");
} }
grpc_res.set_priority(p); grpc_res.set_priority(p);
// output // output
auto output = grpc_res.mutable_output(); auto output = grpc_res.mutable_output();
*output = msg->msg; *output = msg;
// output fields // output fields
auto &fields = *grpc_res.mutable_output_fields(); auto &fields = *grpc_res.mutable_output_fields();
for(const auto &kv : msg->fields) auto resolvedTkns = falco_formats::resolve_tokens(evt, source, format);
for(const auto &kv : resolvedTkns)
{ {
fields[kv.first] = kv.second; fields[kv.first] = kv.second;
} }
@@ -65,4 +68,9 @@ void falco::outputs::output_grpc::output(const message *msg)
*host = m_hostname; *host = m_hostname;
falco::grpc::queue::get().push(grpc_res); falco::grpc::queue::get().push(grpc_res);
}
void falco::outputs::output_grpc::output_msg(falco_common::priority_type priority, std::string &msg)
{
// todo(fntlnz, leodido, leogr) > gRPC does not support subscribing to dropped events yet
} }

View File

@@ -25,7 +25,10 @@ namespace outputs
class output_grpc : public abstract_output class output_grpc : public abstract_output
{ {
void output(const message *msg); void output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg);
void output_msg(falco_common::priority_type priority, std::string &msg);
}; };
} // namespace outputs } // namespace outputs

View File

@@ -18,7 +18,13 @@ limitations under the License.
#include "logger.h" #include "logger.h"
#include "banned.h" // This raises a compilation error when certain functions are used #include "banned.h" // This raises a compilation error when certain functions are used
void falco::outputs::output_http::output(const message *msg) void falco::outputs::output_http::output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg)
{
output_msg(priority, msg);
}
void falco::outputs::output_http::output_msg(falco_common::priority_type priority, std::string &msg)
{ {
CURL *curl = NULL; CURL *curl = NULL;
CURLcode res = CURLE_FAILED_INIT; CURLcode res = CURLE_FAILED_INIT;
@@ -31,7 +37,7 @@ void falco::outputs::output_http::output(const message *msg)
slist1 = curl_slist_append(slist1, "Content-Type: application/json"); slist1 = curl_slist_append(slist1, "Content-Type: application/json");
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist1); curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist1);
curl_easy_setopt(curl, CURLOPT_URL, m_oc.options["url"].c_str()); curl_easy_setopt(curl, CURLOPT_URL, m_oc.options["url"].c_str());
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, msg->msg.c_str()); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, msg.c_str());
curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, -1L); curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, -1L);
res = curl_easy_perform(curl); res = curl_easy_perform(curl);

View File

@@ -25,7 +25,10 @@ namespace outputs
class output_http : public abstract_output class output_http : public abstract_output
{ {
void output(const message *msg); void output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg);
void output_msg(falco_common::priority_type priority, std::string &msg);
}; };
} // namespace outputs } // namespace outputs

View File

@@ -31,11 +31,17 @@ void falco::outputs::output_program::open_pfile()
} }
} }
void falco::outputs::output_program::output(const message *msg) void falco::outputs::output_program::output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg)
{
output_msg(priority, msg);
}
void falco::outputs::output_program::output_msg(falco_common::priority_type priority, std::string &msg)
{ {
open_pfile(); open_pfile();
fprintf(m_pfile, "%s\n", msg->msg.c_str()); fprintf(m_pfile, "%s\n", msg.c_str());
if(m_oc.options["keep_alive"] != "true") if(m_oc.options["keep_alive"] != "true")
{ {

View File

@@ -25,7 +25,10 @@ namespace outputs
class output_program : public abstract_output class output_program : public abstract_output
{ {
void output(const message *msg); void output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg);
void output_msg(falco_common::priority_type priority, std::string &msg);
void cleanup(); void cleanup();

View File

@@ -18,10 +18,16 @@ limitations under the License.
#include <iostream> #include <iostream>
#include "banned.h" // This raises a compilation error when certain functions are used #include "banned.h" // This raises a compilation error when certain functions are used
void falco::outputs::output_stdout::output(const message *msg) void falco::outputs::output_stdout::output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg)
{
output_msg(priority, msg);
}
void falco::outputs::output_stdout::output_msg(falco_common::priority_type priority, std::string &msg)
{ {
// //
// By default, the stdout stream is fully buffered or line buffered // By default, the stdout stream is fully buffered or line buffered
// (if the stream can be determined to refer to an interactive device, e.g. in a TTY). // (if the stream can be determined to refer to an interactive device, e.g. in a TTY).
// Just enable automatic flushing when unbuffered output is desired. // Just enable automatic flushing when unbuffered output is desired.
// Note that it is set every time since other writings to the stdout can disable it. // Note that it is set every time since other writings to the stdout can disable it.
@@ -30,7 +36,7 @@ void falco::outputs::output_stdout::output(const message *msg)
{ {
std::cout << std::unitbuf; std::cout << std::unitbuf;
} }
std::cout << msg->msg + "\n"; std::cout << msg + "\n";
} }
void falco::outputs::output_stdout::cleanup() void falco::outputs::output_stdout::cleanup()

View File

@@ -25,7 +25,10 @@ namespace outputs
class output_stdout : public abstract_output class output_stdout : public abstract_output
{ {
void output(const message *msg); void output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg);
void output_msg(falco_common::priority_type priority, std::string &msg);
void cleanup(); void cleanup();
}; };

View File

@@ -18,8 +18,14 @@ limitations under the License.
#include <syslog.h> #include <syslog.h>
#include "banned.h" // This raises a compilation error when certain functions are used #include "banned.h" // This raises a compilation error when certain functions are used
void falco::outputs::output_syslog::output(const message *msg) void falco::outputs::output_syslog::output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg)
{
output_msg(priority, msg);
}
void falco::outputs::output_syslog::output_msg(falco_common::priority_type priority, std::string &msg)
{ {
// Syslog output should not have any trailing newline // Syslog output should not have any trailing newline
::syslog(msg->priority, "%s", msg->msg.c_str()); ::syslog(priority, "%s", msg.c_str());
} }

View File

@@ -25,7 +25,10 @@ namespace outputs
class output_syslog : public abstract_output class output_syslog : public abstract_output
{ {
void output(const message *msg); void output_event(gen_event *evt, std::string &rule, std::string &source,
falco_common::priority_type priority, std::string &format, std::string &msg);
void output_msg(falco_common::priority_type priority, std::string &msg);
}; };
} // namespace outputs } // namespace outputs

View File

@@ -57,7 +57,4 @@ enum source {
k8s_audit = 1; k8s_audit = 1;
K8s_audit = 1; K8s_audit = 1;
K8S_audit = 1; K8S_audit = 1;
INTERNAL = 2;
internal = 2;
Internal = 2;
} }

View File

@@ -36,14 +36,10 @@ message request
// its parts as per semver 2.0 specification (https://semver.org). // its parts as per semver 2.0 specification (https://semver.org).
message response message response
{ {
// falco version
string version = 1; string version = 1;
uint32 major = 2; uint32 major = 2;
uint32 minor = 3; uint32 minor = 3;
uint32 patch = 4; uint32 patch = 4;
string prerelease = 5; string prerelease = 5;
string build = 6; string build = 6;
// falco engine version }
uint32 engine_version = 7;
string engine_fields_checksum = 8;
}

View File

@@ -1,96 +0,0 @@
/*
Copyright (C) 2020 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <chrono>
#include <thread>
#include <functional>
#include <atomic>
// watchdog<_T>: fires a user-supplied callback on a background thread when a
// deadline set via set_timeout() expires. A single pending timeout (deadline +
// payload of type _T) is published to the worker through an atomic pointer;
// the latest set_timeout()/cancel_timeout() call wins. The worker polls at
// `resolution` granularity, so firing accuracy is bounded by that resolution.
template<typename _T>
class watchdog
{
public:
	watchdog():
		m_timeout(nullptr),
		m_is_running(false)
	{
	}

	~watchdog()
	{
		stop();
	}

	// Start (or restart) the worker thread. `cb` is invoked on the worker
	// thread with the payload of the expired timeout. Restarting first
	// stops any previous worker.
	void start(std::function<void(_T)> cb,
		   std::chrono::milliseconds resolution = std::chrono::milliseconds(100))
	{
		stop();
		m_is_running.store(true, std::memory_order_release);
		m_thread = std::thread([this, cb, resolution]() {
			const auto no_deadline = time_point{};
			timeout_data curr;
			while(m_is_running.load(std::memory_order_acquire))
			{
				// acq_rel (was release): the acquire side pairs with the
				// producer's release so the timeout_data contents written
				// before the exchange are visible here before we read/delete.
				auto t = m_timeout.exchange(nullptr, std::memory_order_acq_rel);
				if(t)
				{
					curr = *t;
					delete t;
				}
				if(curr.deadline != no_deadline && curr.deadline < std::chrono::steady_clock::now())
				{
					cb(curr.payload);
					curr.deadline = no_deadline;
				}
				std::this_thread::sleep_for(resolution);
			}
		});
	}

	// Stop the worker thread (if any) and reap any pending timeout.
	void stop()
	{
		if(m_is_running.load(std::memory_order_acquire))
		{
			m_is_running.store(false, std::memory_order_release);
			if(m_thread.joinable())
			{
				m_thread.join();
			}
		}
		// Always reap a pending timeout, even when the worker never ran or a
		// timeout was published after a previous stop(): fixes a leak in the
		// original, which only freed it when m_is_running was true.
		delete m_timeout.exchange(nullptr, std::memory_order_acq_rel);
	}

	// Arm (or re-arm) the watchdog: the callback fires with `payload` once
	// `timeout` has elapsed, unless cancelled or superseded first.
	// NOTE(review): declared noexcept but `new` can throw bad_alloc; kept
	// as-is to preserve the original interface.
	inline void set_timeout(std::chrono::milliseconds timeout, _T payload) noexcept
	{
		delete m_timeout.exchange(new timeout_data{std::chrono::steady_clock::now() + timeout, payload}, std::memory_order_acq_rel);
	}

	// Disarm the watchdog. A default-constructed timeout_data carries the
	// "no deadline" sentinel; publishing it also clears a deadline the
	// worker may have already consumed into its local state (exchanging
	// nullptr instead would not).
	inline void cancel_timeout() noexcept
	{
		delete m_timeout.exchange(new timeout_data, std::memory_order_acq_rel);
	}

private:
	typedef std::chrono::time_point<std::chrono::steady_clock> time_point;

	struct timeout_data
	{
		time_point deadline;  // time_point{} means "not armed"
		_T payload;
	};

	std::atomic<timeout_data *> m_timeout; // pending timeout, owned until exchanged
	std::atomic<bool> m_is_running;
	std::thread m_thread;
};

View File

@@ -1,37 +0,0 @@
#
# Copyright (C) 2020 The Falco Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# NOTE(review): CheckSymbolExists does not appear to be used in this fragment —
# confirm it is needed, otherwise drop the include.
include(CheckSymbolExists)
# Sources compiled into the static libhawk library.
set(
LIBHAWK_SOURCES
lifecycle.cpp
library.cpp
)
# Public header(s) installed for extension authors to build against.
set(
LIBHAWK_PUBLIC_INCLUDES
hawk.h
)
# Export the include directory to the parent scope so other targets
# (e.g. the falco binary) can add it to their include paths.
set(LIBHAWK_INCLUDE_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PARENT_SCOPE)
add_library(libhawk STATIC ${LIBHAWK_SOURCES})
# Keep the plugin_registry symbol visible in the final executable so that
# dlopen()'d extension libraries can resolve it at load time.
target_link_options(libhawk PUBLIC "LINKER:--export-dynamic-symbol=plugin_registry")
#todo: we want to provide a default version of the libhawk plugin functions
# we need to manage the situation where the user only provides parts of it and not others
# Install the public headers (including the generated export header) where
# extension authors can find them.
install(
FILES ${LIBHAWK_PUBLIC_INCLUDES}
${PROJECT_BINARY_DIR}/userspace/libhawk/libhawk_export.h
DESTINATION "${FALCO_SHARE_DIR}"
)

View File

@@ -1,143 +0,0 @@
# Libhawk
Libhawk is a plugin system that can be used to enrich Falco
functionalities via external, user-defined libraries.
## Glossary:
- library: a bundle (e.g: an ELF shared library) containing one or more plugins
- plugin: a hawk plugin. Libraries can register one or more plugins using the `HAWK_REGISTER_PLUGIN` macro
- plugin function: a specific function inside the plugin definition of each plugin. `hawk_init`, `hawk_destroy`
- extension: the user-facing term for a library that contains one or more plugins.
## Plugin definitions and lifecycle
Plugins are all loaded when Falco starts.
Falco provides a default plugin for the main functionalities.
### hawk_init
On start, the `hawk_init` function of every plugin is called.
You can use that function to create any resource you might need
for your plugin's lifecycle.
### hawk_destroy
When Falco is stopped, the `hawk_destroy` function gets called.
Implementors have the last chance to free any resources here.
### hawk_watch_rules
`hawk_watch_rules` implements a transactional interface for updating rules.
Its signature takes four arguments, one for each state of the transaction.
An implementation looks like this
```C
void hawk_watch_rules(hawk_rules_begin_cb begin_cb,
hawk_rules_insert_cb insert_cb,
hawk_rules_commit_cb commit_cb,
hawk_rules_rollback_cb rollback_cb)
{
printf("starting rules transaction\n");
begin_cb(); // start the rules loading transaction
printf("insert rules\n");
insert_cb(""); // todo: pass the rules as a string here, this is empty
insert_cb(""); // you can do this as many times you want
commit_cb(); // commit rules
printf("rules committed");
}
```
As you can see, we have a `begin_cb` that tells the Falco engine to start the transaction to load rules.
Then we have an `insert_cb`, which takes Falco rules as a YAML string; it can be called as many times as you want.
Finally, we can either commit the transaction with `commit_cb` or roll it back with `rollback_cb`.
**Important note**: `hawk_watch_rules` gets called in a thread by Falco.
This means that it is not blocking and executing in parallel with the rest of Falco.
Practically, you can implement things like a for loop to update rules **live** from a database or an external resource.
After you load the extension, you will need to change the `rules_provider` configuration in `falco.yaml` to the
name you gave to the extension you are writing if you want to use the watch rules implementation you just wrote.
<a name="extension-loading"></a>
## Extension Loading
To tell falco to load a library containing one or more plugins
you have to add the path to the shared object into the `extensions`
configuration in `falco.yaml`:
The path can be either absolute, relative or specified into the `ldconfig` search path.
See `/etc/ld.so.conf` for reference.
examples:
```
extensions:
- ./mylocalextension.so
- myextension.so
- /usr/share/falco/extensions/kubernetes.so
```
TODO: when shipping Falco with this feature, we probably want to ship a ld config file to allow dynamic
loading from `/usr/share/falco/extensions` for example.
## Plugin configuration
TODO
This can be explained once this feature is developed.
## Plugin example
A plugin can define one or more definitions.
Here's an example of plugin that is registered and defines
`hawk_init`, `hawk_destroy` and `hawk_watch_rules`
```c
#include "hawk.h"
#include <stdio.h>
void hawk_init() { printf("hawk_example init!\n"); }
void hawk_destroy() { printf("hawk example destroy\n"); }
// note: this function gets called in a thread.
// this means that it is non blocking for the rest of falco.
// You can start your own lifecycle here to fetch rules from
// the outside and begin/commit as many transactions you want in a loop.
void hawk_watch_rules(hawk_rules_begin_cb begin_cb,
hawk_rules_insert_cb insert_cb,
hawk_rules_commit_cb commit_cb,
hawk_rules_rollback_cb rollback_cb)
{
printf("starting rules transaction\n");
begin_cb(); // start the rules loading transaction
printf("insert rules\n");
insert_cb(""); // todo: pass the rules as a string here, this is empty
insert_cb(""); // you can do this as many times you want
commit_cb(); // commit rules
printf("rules committed");
}
hawk_plugin_definition plugin_definition = {
.hawk_init = &hawk_init,
.hawk_destroy = &hawk_destroy,
.hawk_watch_rules = &hawk_watch_rules,
};
HAWK_REGISTER_PLUGIN(hawk_example_c, plugin_definition)
```
To compile the plugin, save it in a file `plugin.c` and then:
```bash
FALCO=/source/falco
gcc -o libhawk.so -fPIC -shared -I$FALCO/userspace/libhawk plugin.c
```
Remember to change the `FALCO` variable to point to where you have the Falco sources.
This should produce a shared object called `libhawk.so`; you can now use this library to load the plugin in Falco.
See the [Extension loading](#extension-loading) section.

View File

@@ -1,44 +0,0 @@
#pragma once

// Removed duplicate #include <stdexcept> present in the original.
#include <stdexcept>
#include <string>

namespace libhawk
{

// Base class for every error raised by libhawk.
class hawk_exception : public std::runtime_error
{
public:
	hawk_exception(const std::string& message):
		std::runtime_error(message) {}
};

// Error attributable to a specific plugin; prefixes the message with the
// plugin name.
class hawk_plugin_exception : public hawk_exception
{
public:
	hawk_plugin_exception(const std::string& plugin_name, const std::string& message):
		hawk_exception("plugin: " + plugin_name + ", error: " + message) {}
};

// Generic library-level error (a library bundles one or more plugins).
class hawk_library_exception : public hawk_exception
{
public:
	hawk_library_exception(const std::string& message):
		hawk_exception(message) {}
};

// Raised when loading a shared library fails (e.g. dlopen error).
class hawk_library_load_exception : public hawk_library_exception
{
public:
	hawk_library_load_exception(const std::string& library_name, const std::string& message):
		hawk_library_exception("library loading error, library: " + library_name + " error: " + message) {}
};

// Raised when unloading a shared library fails (e.g. dlclose error).
class hawk_library_unload_exception : public hawk_library_exception
{
public:
	hawk_library_unload_exception(const std::string& library_name, const std::string& message):
		hawk_library_exception("library unloading error, library: " + library_name + " error: " + message) {}
};

} // namespace libhawk

Some files were not shown because too many files have changed in this diff Show More