Compare commits

..

1 Commit

Author SHA1 Message Date
Andrea Terzolo
9a1824343c tmp
Signed-off-by: Andrea Terzolo <andrea.terzolo@polito.it>
2023-03-09 18:43:24 +01:00
95 changed files with 1767 additions and 4334 deletions

View File

@@ -39,8 +39,7 @@ jobs:
- run:
name: Build Falco packages 🏗️
command: |
FALCO_VERSION=$(cat /tmp/source-arm64/falco/skeleton-build/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
DOCKER_BUILDKIT=1 docker build -f /tmp/source-arm64/falco/docker/builder/modern-falco-builder.Dockerfile --output type=local,dest=/tmp --build-arg CMAKE_OPTIONS="-DCMAKE_BUILD_TYPE=Release -DUSE_BUNDLED_DEPS=On -DFALCO_ETC_DIR=/etc/falco -DBUILD_FALCO_MODERN_BPF=ON -DMODERN_BPF_SKEL_DIR=/source/skeleton-build/skel_dir -DBUILD_DRIVER=Off -DBUILD_BPF=Off -DFALCO_VERSION=${FALCO_VERSION}" --build-arg DEST_BUILD_DIR=/build-arm64/release /tmp/source-arm64/falco
DOCKER_BUILDKIT=1 docker build -f /tmp/source-arm64/falco/docker/builder/modern-falco-builder.Dockerfile --output type=local,dest=/tmp --build-arg CMAKE_OPTIONS="-DCMAKE_BUILD_TYPE=Release -DUSE_BUNDLED_DEPS=On -DFALCO_ETC_DIR=/etc/falco -DBUILD_FALCO_MODERN_BPF=ON -DMODERN_BPF_SKEL_DIR=/source/skeleton-build/skel_dir -DBUILD_DRIVER=Off -DBUILD_BPF=Off" --build-arg DEST_BUILD_DIR=/build-arm64/release /tmp/source-arm64/falco
- store_artifacts:
path: /tmp/packages
@@ -136,8 +135,7 @@ jobs:
- run:
name: Build Falco packages 🏗️
command: |
FALCO_VERSION=$(cat /tmp/source/falco/skeleton-build/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
DOCKER_BUILDKIT=1 docker build -f /tmp/source/falco/docker/builder/modern-falco-builder.Dockerfile --output type=local,dest=/tmp --build-arg CMAKE_OPTIONS="-DCMAKE_BUILD_TYPE=Release -DUSE_BUNDLED_DEPS=On -DFALCO_ETC_DIR=/etc/falco -DBUILD_FALCO_MODERN_BPF=ON -DMODERN_BPF_SKEL_DIR=/source/skeleton-build/skel_dir -DBUILD_DRIVER=Off -DBUILD_BPF=Off -DFALCO_VERSION=${FALCO_VERSION}" --build-arg DEST_BUILD_DIR=/build/release /tmp/source/falco
DOCKER_BUILDKIT=1 docker build -f /tmp/source/falco/docker/builder/modern-falco-builder.Dockerfile --output type=local,dest=/tmp --build-arg CMAKE_OPTIONS="-DCMAKE_BUILD_TYPE=Release -DUSE_BUNDLED_DEPS=On -DFALCO_ETC_DIR=/etc/falco -DBUILD_FALCO_MODERN_BPF=ON -DMODERN_BPF_SKEL_DIR=/source/skeleton-build/skel_dir -DBUILD_DRIVER=Off -DBUILD_BPF=Off" --build-arg DEST_BUILD_DIR=/build/release /tmp/source/falco
- store_artifacts:
path: /tmp/packages
@@ -211,6 +209,533 @@ jobs:
name: Execute driver-loader integration tests
command: /tmp/ws/source/falco/test/driver-loader/run_test.sh /tmp/ws/build/release/
# Sign rpm packages
"rpm-sign":
docker:
- image: docker.io/centos:7
steps:
- attach_workspace:
at: /
- run:
name: Install rpmsign
command: |
yum update -y
yum install rpm-sign expect which -y
- run:
name: Prepare
command: |
echo "%_signature gpg" > ~/.rpmmacros
echo "%_gpg_name Falcosecurity Package Signing" >> ~/.rpmmacros
echo "%__gpg_sign_cmd %{__gpg} --force-v3-sigs --batch --no-armor --passphrase-fd 3 --no-secmem-warning -u \"%{_gpg_name}\" -sb --digest-algo sha256 %{__plaintext_filename}" >> ~/.rpmmacros
cat > ~/sign <<EOF
#!/usr/bin/expect -f
spawn rpmsign --addsign {*}\$argv
expect -exact "Enter pass phrase: "
send -- "\n"
expect eof
EOF
chmod +x ~/sign
echo $GPG_KEY | base64 -d | gpg --import
- run:
name: Sign rpm x86_64
command: |
cd /build/release/
~/sign *.rpm
rpm --qf %{SIGPGP:pgpsig} -qp *.rpm | grep SHA256
- run:
name: Sign rpm arm64
command: |
cd /build-arm64/release/
~/sign *.rpm
rpm --qf %{SIGPGP:pgpsig} -qp *.rpm | grep SHA256
- persist_to_workspace:
root: /
paths:
- build/release/*.rpm
- build-arm64/release/*.rpm
# Publish the dev packages
"publish-packages-dev":
docker:
- image: docker.io/centos:7
steps:
- attach_workspace:
at: /
- run:
name: Setup
command: |
yum install epel-release -y
yum update -y
yum install createrepo gpg python python-pip -y
pip install awscli==1.19.47
echo $GPG_KEY | base64 -d | gpg --import
- run:
name: Publish rpm-dev
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
/source/falco/scripts/publish-rpm -f /build/release/falco-${FALCO_VERSION}-x86_64.rpm -f /build-arm64/release/falco-${FALCO_VERSION}-aarch64.rpm -r rpm-dev
- run:
name: Publish bin-dev
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
/source/falco/scripts/publish-bin -f /build/release/falco-${FALCO_VERSION}-x86_64.tar.gz -r bin-dev -a x86_64
/source/falco/scripts/publish-bin -f /build-arm64/release/falco-${FALCO_VERSION}-aarch64.tar.gz -r bin-dev -a aarch64
- run:
name: Publish bin-static-dev
command: |
FALCO_VERSION=$(cat /build-static/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
cp -f /build-static/release/falco-${FALCO_VERSION}-x86_64.tar.gz /build-static/release/falco-${FALCO_VERSION}-static-x86_64.tar.gz
/source/falco/scripts/publish-bin -f /build-static/release/falco-${FALCO_VERSION}-static-x86_64.tar.gz -r bin-dev -a x86_64
"publish-packages-deb-dev":
docker:
- image: docker.io/debian:stable
steps:
- attach_workspace:
at: /
- run:
name: Setup
command: |
apt update -y
apt-get install apt-utils bzip2 gpg python python3-pip -y
pip install awscli
echo $GPG_KEY | base64 -d | gpg --import
- run:
name: Publish deb-dev
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
/source/falco/scripts/publish-deb -f /build/release/falco-${FALCO_VERSION}-x86_64.deb -f /build-arm64/release/falco-${FALCO_VERSION}-aarch64.deb -r deb-dev
"build-docker-dev":
docker:
- image: alpine:3.16
steps:
- attach_workspace:
at: /
- setup_remote_docker:
version: 20.10.12
docker_layer_caching: true
- run:
name: Install deps
command: |
apk update
apk add make bash git docker docker-cli-buildx py3-pip
pip install awscli
- run:
name: Login to registries
command: |
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
- run:
name: Build and publish no-driver-dev
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
cd /source/falco
docker buildx build --push --build-arg VERSION_BUCKET=bin-dev --build-arg FALCO_VERSION=${FALCO_VERSION} \
-t falcosecurity/falco-no-driver:x86_64-master \
-t falcosecurity/falco:x86_64-master-slim \
-t public.ecr.aws/falcosecurity/falco-no-driver:x86_64-master \
-t public.ecr.aws/falcosecurity/falco:x86_64-master-slim \
docker/no-driver
- run:
name: Build and publish falco-dev
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
cd /source/falco
docker buildx build --push --build-arg VERSION_BUCKET=deb-dev --build-arg FALCO_VERSION=${FALCO_VERSION} \
-t falcosecurity/falco:x86_64-master \
-t public.ecr.aws/falcosecurity/falco:x86_64-master \
docker/falco
- run:
name: Build and publish falco-driver-loader-dev
command: |
cd /source/falco
docker buildx build --push --build-arg FALCO_IMAGE_TAG=x86_64-master \
-t falcosecurity/falco-driver-loader:x86_64-master \
-t public.ecr.aws/falcosecurity/falco-driver-loader:x86_64-master \
docker/driver-loader
"build-docker-dev-arm64":
machine:
enabled: true
image: ubuntu-2004:202101-01
docker_layer_caching: true
resource_class: arm.medium
steps:
- attach_workspace:
at: /tmp
- run:
name: Install deps
command: |
sudo apt update
sudo apt install groff less python3-pip
pip install awscli
- run:
name: Login to registries
command: |
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
- run:
name: Build and publish no-driver-dev
command: |
FALCO_VERSION=$(cat /tmp/build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
cd /tmp/source-arm64/falco
docker buildx build --push --build-arg VERSION_BUCKET=bin-dev --build-arg FALCO_VERSION=${FALCO_VERSION} \
-t falcosecurity/falco-no-driver:aarch64-master \
-t falcosecurity/falco:aarch64-master-slim \
-t public.ecr.aws/falcosecurity/falco-no-driver:aarch64-master \
-t public.ecr.aws/falcosecurity/falco:aarch64-master-slim \
docker/no-driver
- run:
name: Build and publish falco-dev
command: |
FALCO_VERSION=$(cat /tmp/build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
cd /tmp/source-arm64/falco
docker buildx build --push --build-arg VERSION_BUCKET=deb-dev --build-arg FALCO_VERSION=${FALCO_VERSION} \
-t falcosecurity/falco:aarch64-master \
-t public.ecr.aws/falcosecurity/falco:aarch64-master \
docker/falco
- run:
name: Build and publish falco-driver-loader-dev
command: |
cd /tmp/source-arm64/falco
docker buildx build --push --build-arg FALCO_IMAGE_TAG=aarch64-master \
-t falcosecurity/falco-driver-loader:aarch64-master \
-t public.ecr.aws/falcosecurity/falco-driver-loader:aarch64-master \
docker/driver-loader
# Publish docker packages
"publish-docker-dev":
docker:
- image: cimg/base:stable
user: root
steps:
- setup_remote_docker:
version: 20.10.12
- run:
name: Install deps
command: |
sudo apt update
sudo apt install groff less python3-pip
pip install awscli
- run:
name: Login to registries
command: |
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
- run:
name: Upload no-driver-dev manifest to registries
command: |
docker manifest create falcosecurity/falco-no-driver:master \
falcosecurity/falco-no-driver:aarch64-master \
falcosecurity/falco-no-driver:x86_64-master
docker manifest push falcosecurity/falco-no-driver:master
docker manifest create falcosecurity/falco:master-slim \
falcosecurity/falco:aarch64-master-slim \
falcosecurity/falco:x86_64-master-slim
docker manifest push falcosecurity/falco:master-slim
docker manifest create public.ecr.aws/falcosecurity/falco-no-driver:master \
public.ecr.aws/falcosecurity/falco-no-driver:aarch64-master \
public.ecr.aws/falcosecurity/falco-no-driver:x86_64-master
docker manifest push public.ecr.aws/falcosecurity/falco-no-driver:master
docker manifest create public.ecr.aws/falcosecurity/falco:master-slim \
public.ecr.aws/falcosecurity/falco:aarch64-master-slim \
public.ecr.aws/falcosecurity/falco:x86_64-master-slim
docker manifest push public.ecr.aws/falcosecurity/falco:master-slim
- run:
name: Upload falco-dev manifest to registries
command: |
docker manifest create falcosecurity/falco:master \
falcosecurity/falco:aarch64-master \
falcosecurity/falco:x86_64-master
docker manifest push falcosecurity/falco:master
docker manifest create public.ecr.aws/falcosecurity/falco:master \
public.ecr.aws/falcosecurity/falco:aarch64-master \
public.ecr.aws/falcosecurity/falco:x86_64-master
docker manifest push public.ecr.aws/falcosecurity/falco:master
- run:
name: Upload falco-driver-loader-dev manifest to registries
command: |
docker manifest create falcosecurity/falco-driver-loader:master \
falcosecurity/falco-driver-loader:aarch64-master \
falcosecurity/falco-driver-loader:x86_64-master
docker manifest push falcosecurity/falco-driver-loader:master
docker manifest create public.ecr.aws/falcosecurity/falco-driver-loader:master \
public.ecr.aws/falcosecurity/falco-driver-loader:aarch64-master \
public.ecr.aws/falcosecurity/falco-driver-loader:x86_64-master
docker manifest push public.ecr.aws/falcosecurity/falco-driver-loader:master
# Publish the packages
"publish-packages":
docker:
- image: docker.io/centos:7
steps:
- attach_workspace:
at: /
- run:
name: Setup
command: |
yum install epel-release -y
yum update -y
yum install createrepo gpg python python-pip -y
pip install awscli==1.19.47
echo $GPG_KEY | base64 -d | gpg --import
- run:
name: Publish rpm
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
/source/falco/scripts/publish-rpm -f /build/release/falco-${FALCO_VERSION}-x86_64.rpm -f /build-arm64/release/falco-${FALCO_VERSION}-aarch64.rpm -r rpm
- run:
name: Publish bin
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
/source/falco/scripts/publish-bin -f /build/release/falco-${FALCO_VERSION}-x86_64.tar.gz -r bin -a x86_64
/source/falco/scripts/publish-bin -f /build-arm64/release/falco-${FALCO_VERSION}-aarch64.tar.gz -r bin -a aarch64
- run:
name: Publish bin-static
command: |
FALCO_VERSION=$(cat /build-static/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
cp -f /build-static/release/falco-${FALCO_VERSION}-x86_64.tar.gz /build-static/release/falco-${FALCO_VERSION}-static-x86_64.tar.gz
/source/falco/scripts/publish-bin -f /build-static/release/falco-${FALCO_VERSION}-static-x86_64.tar.gz -r bin -a x86_64
"publish-packages-deb":
docker:
- image: docker.io/debian:stable
steps:
- attach_workspace:
at: /
- run:
name: Setup
command: |
apt update -y
apt-get install apt-utils bzip2 gpg python python3-pip -y
pip install awscli
echo $GPG_KEY | base64 -d | gpg --import
- run:
name: Publish deb
command: |
FALCO_VERSION=$(cat /build/release/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
/source/falco/scripts/publish-deb -f /build/release/falco-${FALCO_VERSION}-x86_64.deb -f /build-arm64/release/falco-${FALCO_VERSION}-aarch64.deb -r deb
"build-docker":
docker:
- image: alpine:3.16
steps:
- attach_workspace:
at: /
- setup_remote_docker:
version: 20.10.12
docker_layer_caching: true
- run:
name: Install deps
command: |
apk update
apk add make bash git docker docker-cli-buildx py3-pip
pip install awscli
- run:
name: Login to registries
command: |
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
- run:
name: Build and publish no-driver
command: |
cd /source/falco
docker buildx build --push --build-arg VERSION_BUCKET=bin --build-arg FALCO_VERSION=${CIRCLE_TAG} \
-t "falcosecurity/falco-no-driver:x86_64-${CIRCLE_TAG}" \
-t falcosecurity/falco-no-driver:x86_64-latest \
-t "falcosecurity/falco:x86_64-${CIRCLE_TAG}-slim" \
-t "falcosecurity/falco:x86_64-latest-slim" \
-t "public.ecr.aws/falcosecurity/falco-no-driver:x86_64-${CIRCLE_TAG}" \
-t "public.ecr.aws/falcosecurity/falco-no-driver:x86_64-latest" \
-t "public.ecr.aws/falcosecurity/falco:x86_64-${CIRCLE_TAG}-slim" \
-t "public.ecr.aws/falcosecurity/falco:x86_64-latest-slim" \
docker/no-driver
- run:
name: Build and publish falco
command: |
cd /source/falco
docker buildx build --push --build-arg VERSION_BUCKET=deb --build-arg FALCO_VERSION=${CIRCLE_TAG} \
-t "falcosecurity/falco:x86_64-${CIRCLE_TAG}" \
-t "falcosecurity/falco:x86_64-latest" \
-t "public.ecr.aws/falcosecurity/falco:x86_64-${CIRCLE_TAG}" \
-t "public.ecr.aws/falcosecurity/falco:x86_64-latest" \
docker/falco
- run:
name: Build and publish falco-driver-loader
command: |
cd /source/falco
docker buildx build --push --build-arg FALCO_IMAGE_TAG=x86_64-${CIRCLE_TAG} \
-t "falcosecurity/falco-driver-loader:x86_64-${CIRCLE_TAG}" \
-t "falcosecurity/falco-driver-loader:x86_64-latest" \
-t "public.ecr.aws/falcosecurity/falco-driver-loader:x86_64-${CIRCLE_TAG}" \
-t "public.ecr.aws/falcosecurity/falco-driver-loader:x86_64-latest" \
docker/driver-loader
"build-docker-arm64":
machine:
enabled: true
image: ubuntu-2004:202101-01
docker_layer_caching: true
resource_class: arm.medium
steps:
- attach_workspace:
at: /tmp
- run:
name: Install deps
command: |
sudo apt update
sudo apt install groff less python3-pip
pip install awscli
- run:
name: Login to registries
command: |
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
- run:
name: Build and publish no-driver
command: |
cd /tmp/source-arm64/falco
docker buildx build --push --build-arg VERSION_BUCKET=bin --build-arg FALCO_VERSION=${CIRCLE_TAG} \
-t falcosecurity/falco-no-driver:aarch64-${CIRCLE_TAG} \
-t falcosecurity/falco-no-driver:aarch64-latest \
-t falcosecurity/falco:aarch64-${CIRCLE_TAG}-slim \
-t "falcosecurity/falco:aarch64-latest-slim" \
-t public.ecr.aws/falcosecurity/falco-no-driver:aarch64-${CIRCLE_TAG} \
-t "public.ecr.aws/falcosecurity/falco-no-driver:aarch64-latest" \
-t public.ecr.aws/falcosecurity/falco:aarch64-${CIRCLE_TAG}-slim \
-t "public.ecr.aws/falcosecurity/falco:aarch64-latest-slim" \
docker/no-driver
- run:
name: Build and publish falco
command: |
cd /tmp/source-arm64/falco
docker buildx build --push --build-arg VERSION_BUCKET=deb --build-arg FALCO_VERSION=${CIRCLE_TAG} \
-t "falcosecurity/falco:aarch64-${CIRCLE_TAG}" \
-t "falcosecurity/falco:aarch64-latest" \
-t "public.ecr.aws/falcosecurity/falco:aarch64-${CIRCLE_TAG}" \
-t "public.ecr.aws/falcosecurity/falco:aarch64-latest" \
docker/falco
- run:
name: Build and publish falco-driver-loader
command: |
cd /tmp/source-arm64/falco
docker buildx build --push --build-arg FALCO_IMAGE_TAG=aarch64-${CIRCLE_TAG} \
-t "falcosecurity/falco-driver-loader:aarch64-${CIRCLE_TAG}" \
-t "falcosecurity/falco-driver-loader:aarch64-latest" \
-t "public.ecr.aws/falcosecurity/falco-driver-loader:aarch64-${CIRCLE_TAG}" \
-t "public.ecr.aws/falcosecurity/falco-driver-loader:aarch64-latest" \
docker/driver-loader
# Publish docker packages
"publish-docker":
docker:
- image: cimg/base:stable
user: root
steps:
- setup_remote_docker:
version: 20.10.12
- run:
name: Install deps
command: |
sudo apt update
sudo apt install groff less python3-pip
pip install awscli
- run:
name: Login to registries
command: |
echo ${DOCKERHUB_SECRET} | docker login -u ${DOCKERHUB_USER} --password-stdin
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/falcosecurity
- run:
name: Upload no-driver manifest to registries
command: |
docker manifest create falcosecurity/falco-no-driver:${CIRCLE_TAG} \
falcosecurity/falco-no-driver:aarch64-${CIRCLE_TAG} \
falcosecurity/falco-no-driver:x86_64-${CIRCLE_TAG}
docker manifest push falcosecurity/falco-no-driver:${CIRCLE_TAG}
docker manifest create falcosecurity/falco-no-driver:latest \
falcosecurity/falco-no-driver:aarch64-latest \
falcosecurity/falco-no-driver:x86_64-latest
docker manifest push falcosecurity/falco-no-driver:latest
docker manifest create falcosecurity/falco:${CIRCLE_TAG}-slim \
falcosecurity/falco:aarch64-${CIRCLE_TAG}-slim \
falcosecurity/falco:x86_64-${CIRCLE_TAG}-slim
docker manifest push falcosecurity/falco:${CIRCLE_TAG}-slim
docker manifest create falcosecurity/falco:latest-slim \
falcosecurity/falco:aarch64-latest-slim \
falcosecurity/falco:x86_64-latest-slim
docker manifest push falcosecurity/falco:latest-slim
docker manifest create public.ecr.aws/falcosecurity/falco-no-driver:${CIRCLE_TAG} \
public.ecr.aws/falcosecurity/falco-no-driver:aarch64-${CIRCLE_TAG} \
public.ecr.aws/falcosecurity/falco-no-driver:x86_64-${CIRCLE_TAG}
docker manifest push public.ecr.aws/falcosecurity/falco-no-driver:${CIRCLE_TAG}
docker manifest create public.ecr.aws/falcosecurity/falco-no-driver:latest \
public.ecr.aws/falcosecurity/falco-no-driver:aarch64-latest \
public.ecr.aws/falcosecurity/falco-no-driver:x86_64-latest
docker manifest push public.ecr.aws/falcosecurity/falco-no-driver:latest
docker manifest create public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG}-slim \
public.ecr.aws/falcosecurity/falco:aarch64-${CIRCLE_TAG}-slim \
public.ecr.aws/falcosecurity/falco:x86_64-${CIRCLE_TAG}-slim
docker manifest push public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG}-slim
docker manifest create public.ecr.aws/falcosecurity/falco:latest-slim \
public.ecr.aws/falcosecurity/falco:aarch64-latest-slim \
public.ecr.aws/falcosecurity/falco:x86_64-latest-slim
docker manifest push public.ecr.aws/falcosecurity/falco:latest-slim
- run:
name: Upload falco manifest to registries
command: |
docker manifest create falcosecurity/falco:${CIRCLE_TAG} \
falcosecurity/falco:aarch64-${CIRCLE_TAG} \
falcosecurity/falco:x86_64-${CIRCLE_TAG}
docker manifest push falcosecurity/falco:${CIRCLE_TAG}
docker manifest create falcosecurity/falco:latest \
falcosecurity/falco:aarch64-latest \
falcosecurity/falco:x86_64-latest
docker manifest push falcosecurity/falco:latest
docker manifest create public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG} \
public.ecr.aws/falcosecurity/falco:aarch64-${CIRCLE_TAG} \
public.ecr.aws/falcosecurity/falco:x86_64-${CIRCLE_TAG}
docker manifest push public.ecr.aws/falcosecurity/falco:${CIRCLE_TAG}
docker manifest create public.ecr.aws/falcosecurity/falco:latest \
public.ecr.aws/falcosecurity/falco:aarch64-latest \
public.ecr.aws/falcosecurity/falco:x86_64-latest
docker manifest push public.ecr.aws/falcosecurity/falco:latest
- run:
name: Upload falco-driver-loader manifest to registries
command: |
docker manifest create falcosecurity/falco-driver-loader:${CIRCLE_TAG} \
falcosecurity/falco-driver-loader:aarch64-${CIRCLE_TAG} \
falcosecurity/falco-driver-loader:x86_64-${CIRCLE_TAG}
docker manifest push falcosecurity/falco-driver-loader:${CIRCLE_TAG}
docker manifest create falcosecurity/falco-driver-loader:latest \
falcosecurity/falco-driver-loader:aarch64-latest \
falcosecurity/falco-driver-loader:x86_64-latest
docker manifest push falcosecurity/falco-driver-loader:latest
docker manifest create public.ecr.aws/falcosecurity/falco-driver-loader:${CIRCLE_TAG} \
public.ecr.aws/falcosecurity/falco-driver-loader:aarch64-${CIRCLE_TAG} \
public.ecr.aws/falcosecurity/falco-driver-loader:x86_64-${CIRCLE_TAG}
docker manifest push public.ecr.aws/falcosecurity/falco-driver-loader:${CIRCLE_TAG}
docker manifest create public.ecr.aws/falcosecurity/falco-driver-loader:latest \
public.ecr.aws/falcosecurity/falco-driver-loader:aarch64-latest \
public.ecr.aws/falcosecurity/falco-driver-loader:x86_64-latest
docker manifest push public.ecr.aws/falcosecurity/falco-driver-loader:latest
workflows:
version: 2.1
build_and_test:
@@ -230,3 +755,166 @@ workflows:
- "tests-driver-loader-integration":
requires:
- "build-centos7"
- "rpm-sign":
context: falco
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- "tests-integration"
- "tests-integration-arm64"
- "publish-packages-dev":
context:
- falco
- test-infra
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- "rpm-sign"
- "tests-integration-static"
- "publish-packages-deb-dev":
context:
- falco
- test-infra
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- "tests-integration"
- "tests-integration-arm64"
- "build-docker-dev":
context:
- falco
- test-infra
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- "publish-packages-dev"
- "publish-packages-deb-dev"
- "tests-driver-loader-integration"
- "build-docker-dev-arm64":
context:
- falco
- test-infra
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- "publish-packages-dev"
- "publish-packages-deb-dev"
- "tests-driver-loader-integration"
- "publish-docker-dev":
context:
- falco
- test-infra
filters:
tags:
ignore: /.*/
branches:
only: master
requires:
- "build-docker-dev"
- "build-docker-dev-arm64"
# - "quality/static-analysis" # This is temporarily disabled: https://github.com/falcosecurity/falco/issues/1526
release:
jobs:
- "build-musl":
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "build-centos7":
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "build-arm64":
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "rpm-sign":
context: falco
requires:
- "build-centos7"
- "build-arm64"
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "publish-packages":
context:
- falco
- test-infra
requires:
- "build-musl"
- "rpm-sign"
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "publish-packages-deb":
context:
- falco
- test-infra
requires:
- "build-centos7"
- "build-arm64"
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "build-docker":
context:
- falco
- test-infra
requires:
- "publish-packages"
- "publish-packages-deb"
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "build-docker-arm64":
context:
- falco
- test-infra
requires:
- "publish-packages"
- "publish-packages-deb"
filters:
tags:
only: /.*/
branches:
ignore: /.*/
- "publish-docker":
context:
- falco
- test-infra
requires:
- "build-docker"
- "build-docker-arm64"
filters:
tags:
only: /.*/
branches:
ignore: /.*/

View File

@@ -2,14 +2,10 @@ name: CI Build
on:
pull_request:
branches: [master]
push:
branches: [master]
workflow_dispatch:
# Checks if any concurrent jobs under the same pull request or branch are being executed
# NOTE: this will cancel every workflow that is being ran against a PR as group is just the github ref (without the workflow name)
concurrency:
group: ${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-minimal:
runs-on: ubuntu-20.04
@@ -64,7 +60,7 @@ jobs:
run: |
mkdir build
pushd build
cmake -DBUILD_BPF=On -DCMAKE_BUILD_TYPE=Release -DBUILD_FALCO_UNIT_TESTS=On ..
cmake -DBUILD_BPF=On -DBUILD_FALCO_UNIT_TESTS=On ..
popd
- name: Build
@@ -98,7 +94,7 @@ jobs:
run: |
mkdir build
pushd build
cmake -DCMAKE_BUILD_TYPE=Debug -DBUILD_BPF=On -DBUILD_FALCO_UNIT_TESTS=On ..
cmake -DCMAKE_BUILD_TYPE=debug -DBUILD_BPF=On -DBUILD_FALCO_UNIT_TESTS=On ..
popd
- name: Build

View File

@@ -1,93 +0,0 @@
name: Dev Packages and Docker images
on:
push:
branches: [master]
# Checks if any concurrent jobs is running for master CI and eventually cancel it
concurrency:
group: ci-master
cancel-in-progress: true
jobs:
# We need to use an ubuntu-latest to fetch Falco version because
# Falco version is computed by some cmake scripts that do git sorceries
# to get the current version.
# But centos7 jobs have a git version too old and actions/checkout does not
# fully clone the repo, but uses http rest api instead.
fetch-version:
runs-on: ubuntu-latest
# Map the job outputs to step outputs
outputs:
version: ${{ steps.store_version.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install build dependencies
run: |
sudo apt update
sudo apt install -y cmake build-essential
- name: Configure project
run: |
mkdir build && cd build
cmake -DUSE_BUNDLED_DEPS=On ..
- name: Load and store Falco version output
id: store_version
run: |
FALCO_VERSION=$(cat build/userspace/falco/config_falco.h | grep 'FALCO_VERSION ' | cut -d' ' -f3 | sed -e 's/^"//' -e 's/"$//')
echo "version=${FALCO_VERSION}" >> $GITHUB_OUTPUT
build-dev-packages:
needs: [fetch-version]
uses: ./.github/workflows/reusable_build_packages.yaml
with:
arch: x86_64
version: ${{ needs.fetch-version.outputs.version }}
secrets: inherit
build-dev-packages-arm64:
needs: [fetch-version]
uses: ./.github/workflows/reusable_build_packages.yaml
with:
arch: aarch64
version: ${{ needs.fetch-version.outputs.version }}
secrets: inherit
publish-dev-packages:
needs: [fetch-version, build-dev-packages, build-dev-packages-arm64]
uses: ./.github/workflows/reusable_publish_packages.yaml
with:
bucket_suffix: '-dev'
version: ${{ needs.fetch-version.outputs.version }}
secrets: inherit
build-dev-docker:
needs: [fetch-version, publish-dev-packages]
uses: ./.github/workflows/reusable_build_docker.yaml
with:
arch: x86_64
bucket_suffix: '-dev'
version: ${{ needs.fetch-version.outputs.version }}
tag: master
secrets: inherit
build-dev-docker-arm64:
needs: [fetch-version, publish-dev-packages]
uses: ./.github/workflows/reusable_build_docker.yaml
with:
arch: aarch64
bucket_suffix: '-dev'
version: ${{ needs.fetch-version.outputs.version }}
tag: master
secrets: inherit
publish-dev-docker:
needs: [fetch-version, build-dev-docker, build-dev-docker-arm64]
uses: ./.github/workflows/reusable_publish_docker.yaml
with:
tag: master
secrets: inherit

View File

@@ -1,105 +0,0 @@
name: Release Packages and Docker images
on:
release:
types: [published]
# Checks if any concurrent jobs is running for release CI and eventually cancel it.
concurrency:
group: ci-release
cancel-in-progress: true
jobs:
release-settings:
runs-on: ubuntu-latest
outputs:
is_latest: ${{ steps.get_settings.outputs.is_latest }}
bucket_suffix: ${{ steps.get_settings.outputs.bucket_suffix }}
steps:
- name: Get latest release
uses: rez0n/actions-github-release@v2.0
id: latest_release
env:
token: ${{ secrets.GITHUB_TOKEN }}
repository: ${{ github.repository }}
type: "stable"
- name: Get settings for this release
id: get_settings
shell: python
run: |
import os
import re
import sys
semver_no_meta = '''^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?$'''
tag_name = '${{ github.event.release.tag_name }}'
is_valid_version = re.match(semver_no_meta, tag_name) is not None
if not is_valid_version:
print(f'Release version {tag_name} is not a valid full or pre-release. See RELEASE.md for more information.')
sys.exit(1)
is_prerelease = '-' in tag_name
# Safeguard: you need to both set "latest" in GH and not have suffixes to overwrite latest
is_latest = '${{ steps.latest_release.outputs.release }}' == tag_name and not is_prerelease
bucket_suffix = '-dev' if is_prerelease else ''
with open(os.environ['GITHUB_OUTPUT'], 'a') as ofp:
print(f'is_latest={is_latest}'.lower(), file=ofp)
print(f'bucket_suffix={bucket_suffix}', file=ofp)
build-packages:
needs: [release-settings]
uses: ./.github/workflows/reusable_build_packages.yaml
with:
arch: x86_64
version: ${{ github.event.release.tag_name }}
secrets: inherit
build-packages-arm64:
needs: [release-settings]
uses: ./.github/workflows/reusable_build_packages.yaml
with:
arch: aarch64
version: ${{ github.event.release.tag_name }}
secrets: inherit
publish-packages:
needs: [release-settings, build-packages, build-packages-arm64]
uses: ./.github/workflows/reusable_publish_packages.yaml
with:
bucket_suffix: ${{ needs.release-settings.outputs.bucket_suffix }}
version: ${{ github.event.release.tag_name }}
secrets: inherit
# Both build-docker and its arm64 counterpart require build-packages because they use its output
build-docker:
needs: [release-settings, build-packages, publish-packages]
uses: ./.github/workflows/reusable_build_docker.yaml
with:
arch: x86_64
bucket_suffix: ${{ needs.release-settings.outputs.bucket_suffix }}
version: ${{ github.event.release.tag_name }}
tag: ${{ github.event.release.tag_name }}
secrets: inherit
build-docker-arm64:
needs: [release-settings, build-packages, publish-packages]
uses: ./.github/workflows/reusable_build_docker.yaml
with:
arch: aarch64
bucket_suffix: ${{ needs.release-settings.outputs.bucket_suffix }}
version: ${{ github.event.release.tag_name }}
tag: ${{ github.event.release.tag_name }}
secrets: inherit
publish-docker:
needs: [release-settings, build-docker, build-docker-arm64]
uses: ./.github/workflows/reusable_publish_docker.yaml
secrets: inherit
with:
is_latest: ${{ needs.release-settings.outputs.is_latest == 'true' }}
tag: ${{ github.event.release.tag_name }}
sign: true

View File

@@ -1,73 +0,0 @@
# This is a reusable workflow used by master and release CI
on:
  workflow_call:
    inputs:
      arch:
        description: x86_64 or aarch64
        required: true
        type: string
      bucket_suffix:
        description: bucket suffix for packages
        required: false
        default: ''
        type: string
      version:
        description: The Falco version to use when building images
        required: true
        type: string
      tag:
        description: The tag to use (e.g. "master" or "0.35.0")
        required: true
        type: string

# Here we just build all docker images as tarballs,
# then we upload all the tarballs to be later downloaded by reusable_publish_docker workflow.
# In this way, we don't need to publish any arch specific image,
# and this "build" workflow is actually only building images.
jobs:
  build-docker:
    # See https://github.com/actions/runner/issues/409#issuecomment-1158849936
    runs-on: ${{ (inputs.arch == 'aarch64' && fromJSON('[ "self-hosted", "linux", "ARM64" ]')) || 'ubuntu-latest' }}
    env:
      # Docker's TARGETARCH naming (amd64/arm64) differs from Falco's (x86_64/aarch64).
      TARGETARCH: ${{ (inputs.arch == 'aarch64' && 'arm64') || 'amd64' }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # Each image is tagged <arch>-<tag> and saved as a tarball artifact;
      # the publish workflow later loads the tarballs and builds the multiarch manifests.
      - name: Build no-driver image
        run: |
          cd ${{ github.workspace }}/docker/no-driver/
          docker build -t docker.io/falcosecurity/falco-no-driver:${{ inputs.arch }}-${{ inputs.tag }} \
            --build-arg VERSION_BUCKET=bin${{ inputs.bucket_suffix }} \
            --build-arg FALCO_VERSION=${{ inputs.version }} \
            --build-arg TARGETARCH=${TARGETARCH} \
            .
          docker save docker.io/falcosecurity/falco-no-driver:${{ inputs.arch }}-${{ inputs.tag }} --output /tmp/falco-no-driver-${{ inputs.arch }}.tar

      - name: Build falco image
        run: |
          cd ${{ github.workspace }}/docker/falco/
          docker build -t docker.io/falcosecurity/falco:${{ inputs.arch }}-${{ inputs.tag }} \
            --build-arg VERSION_BUCKET=deb${{ inputs.bucket_suffix }} \
            --build-arg FALCO_VERSION=${{ inputs.version }} \
            --build-arg TARGETARCH=${TARGETARCH} \
            .
          docker save docker.io/falcosecurity/falco:${{ inputs.arch }}-${{ inputs.tag }} --output /tmp/falco-${{ inputs.arch }}.tar

      # driver-loader is built FROM the falco image built above, hence FALCO_IMAGE_TAG.
      - name: Build falco-driver-loader image
        run: |
          cd ${{ github.workspace }}/docker/driver-loader/
          docker build -t docker.io/falcosecurity/falco-driver-loader:${{ inputs.arch }}-${{ inputs.tag }} \
            --build-arg FALCO_IMAGE_TAG=${{ inputs.arch }}-${{ inputs.tag }} \
            --build-arg TARGETARCH=${TARGETARCH} \
            .
          docker save docker.io/falcosecurity/falco-driver-loader:${{ inputs.arch }}-${{ inputs.tag }} --output /tmp/falco-driver-loader-${{ inputs.arch }}.tar

      - name: Upload images tarballs
        uses: actions/upload-artifact@v3
        with:
          name: falco-images
          path: /tmp/falco-*.tar

View File

@@ -1,160 +0,0 @@
# This is a reusable workflow used by master and release CI
on:
  workflow_call:
    inputs:
      arch:
        description: x86_64 or aarch64
        required: true
        type: string
      version:
        description: The Falco version to use when building packages
        required: true
        type: string

jobs:
  # Build the modern BPF skeleton on Fedora (recent bpftool/clang),
  # then hand it to the centos:7 package build below via an artifact.
  build-modern-bpf-skeleton:
    # See https://github.com/actions/runner/issues/409#issuecomment-1158849936
    runs-on: ${{ (inputs.arch == 'aarch64' && fromJSON('[ "self-hosted", "linux", "ARM64" ]')) || 'ubuntu-latest' }}
    container: fedora:latest
    steps:
      # Always install deps before invoking checkout action, to properly perform a full clone.
      - name: Install build dependencies
        run: |
          dnf install -y bpftool ca-certificates cmake make automake gcc gcc-c++ kernel-devel clang git pkg-config autoconf automake libbpf-devel

      - name: Checkout
        uses: actions/checkout@v3

      - name: Build modern BPF skeleton
        run: |
          mkdir skeleton-build && cd skeleton-build
          cmake -DUSE_BUNDLED_DEPS=ON -DBUILD_FALCO_MODERN_BPF=ON -DCREATE_TEST_TARGETS=Off -DFALCO_VERSION=${{ inputs.version }} ..
          make ProbeSkeleton -j6

      - name: Upload skeleton
        uses: actions/upload-artifact@v3
        with:
          name: bpf_probe_${{ inputs.arch }}.skel.h
          path: skeleton-build/skel_dir/bpf_probe.skel.h

  # Build rpm/deb/tar.gz packages on centos:7 for wide glibc compatibility.
  build-packages:
    # See https://github.com/actions/runner/issues/409#issuecomment-1158849936
    runs-on: ${{ (inputs.arch == 'aarch64' && fromJSON('[ "self-hosted", "linux", "ARM64" ]')) || 'ubuntu-latest' }}
    needs: [build-modern-bpf-skeleton]
    container: centos:7
    steps:
      # Always install deps before invoking checkout action, to properly perform a full clone.
      - name: Install build dependencies
        run: |
          yum -y install centos-release-scl
          yum -y install devtoolset-9-gcc devtoolset-9-gcc-c++
          source /opt/rh/devtoolset-9/enable
          yum install -y wget git make m4 rpm-build

      - name: Checkout
        uses: actions/checkout@v3

      - name: Download skeleton
        uses: actions/download-artifact@v3
        with:
          name: bpf_probe_${{ inputs.arch }}.skel.h
          path: /tmp

      # centos:7 ships an ancient cmake; install a modern one manually.
      - name: Install updated cmake
        run: |
          curl -L -o /tmp/cmake.tar.gz https://github.com/Kitware/CMake/releases/download/v3.22.5/cmake-3.22.5-linux-$(uname -m).tar.gz
          gzip -d /tmp/cmake.tar.gz
          tar -xpf /tmp/cmake.tar --directory=/tmp
          cp -R /tmp/cmake-3.22.5-linux-$(uname -m)/* /usr
          rm -rf /tmp/cmake-3.22.5-linux-$(uname -m)

      - name: Prepare project
        run: |
          mkdir build && cd build
          source /opt/rh/devtoolset-9/enable
          cmake \
            -DCMAKE_BUILD_TYPE=Release \
            -DUSE_BUNDLED_DEPS=On \
            -DFALCO_ETC_DIR=/etc/falco \
            -DBUILD_FALCO_MODERN_BPF=ON \
            -DMODERN_BPF_SKEL_DIR=/tmp \
            -DBUILD_DRIVER=Off \
            -DBUILD_BPF=Off \
            -DFALCO_VERSION=${{ inputs.version }} \
            ..

      - name: Build project
        run: |
          cd build
          source /opt/rh/devtoolset-9/enable
          make falco -j6

      - name: Build packages
        run: |
          cd build
          source /opt/rh/devtoolset-9/enable
          make package

      - name: Upload Falco tar.gz package
        uses: actions/upload-artifact@v3
        with:
          name: falco-${{ inputs.version }}-${{ inputs.arch }}.tar.gz
          path: |
            ${{ github.workspace }}/build/falco-*.tar.gz

      - name: Upload Falco deb package
        uses: actions/upload-artifact@v3
        with:
          name: falco-${{ inputs.version }}-${{ inputs.arch }}.deb
          path: |
            ${{ github.workspace }}/build/falco-*.deb

      - name: Upload Falco rpm package
        uses: actions/upload-artifact@v3
        with:
          name: falco-${{ inputs.version }}-${{ inputs.arch }}.rpm
          path: |
            ${{ github.workspace }}/build/falco-*.rpm

  # Fully static (musl) build; produces the "-static" tarball.
  build-musl-package:
    # x86_64 only for now
    if: ${{ inputs.arch == 'x86_64' }}
    runs-on: ubuntu-latest
    container: alpine:3.17
    steps:
      # Always install deps before invoking checkout action, to properly perform a full clone.
      - name: Install build dependencies
        run: |
          apk add g++ gcc cmake make git bash perl linux-headers autoconf automake m4 libtool elfutils-dev libelf-static patch binutils bpftool clang

      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Prepare project
        run: |
          mkdir build && cd build
          cmake -DCPACK_GENERATOR=TGZ -DBUILD_BPF=Off -DBUILD_DRIVER=Off -DCMAKE_BUILD_TYPE=Release -DUSE_BUNDLED_DEPS=On -DUSE_BUNDLED_LIBELF=Off -DBUILD_LIBSCAP_MODERN_BPF=ON -DMUSL_OPTIMIZED_BUILD=On -DFALCO_ETC_DIR=/etc/falco -DFALCO_VERSION=${{ inputs.version }} ../

      - name: Build project
        run: |
          cd build
          make -j6 all

      - name: Build packages
        run: |
          cd build
          make -j6 package

      # Distinguish the musl tarball from the glibc one built above.
      - name: Rename static package
        run: |
          cd build
          mv falco-${{ inputs.version }}-x86_64.tar.gz falco-${{ inputs.version }}-static-x86_64.tar.gz

      - name: Upload Falco static package
        uses: actions/upload-artifact@v3
        with:
          name: falco-${{ inputs.version }}-static-x86_64.tar.gz
          path: |
            ${{ github.workspace }}/build/falco-${{ inputs.version }}-static-x86_64.tar.gz

View File

@@ -1,144 +0,0 @@
# This is a reusable workflow used by master and release CI
on:
  workflow_call:
    inputs:
      tag:
        description: The tag to push
        required: true
        type: string
      is_latest:
        description: Update the latest tag with the new image
        required: false
        type: boolean
        default: false
      sign:
        description: Add signature with cosign
        required: false
        type: boolean
        default: false

# id-token: write is needed for cosign keyless signing and the AWS OIDC role.
permissions:
  id-token: write
  contents: read

jobs:
  publish-docker:
    runs-on: ubuntu-latest
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # Tarballs produced by reusable_build_docker for both arches.
      - name: Download images tarballs
        uses: actions/download-artifact@v3
        with:
          name: falco-images
          path: /tmp/falco-images

      - name: Load all images
        run: |
          for img in /tmp/falco-images/falco-*.tar; do docker load --input $img; done

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_SECRET }}

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: "arn:aws:iam::292999226676:role/github_actions-falco-ecr"
          aws-region: us-east-1 # The region must be set to us-east-1 in order to access ECR Public.

      - name: Login to Amazon ECR
        id: login-ecr-public
        uses: aws-actions/amazon-ecr-login@2f9f10ea3fa2eed41ac443fee8bfbd059af2d0a4 # v1.6.0
        with:
          registry-type: public

      - name: Setup Crane
        uses: imjasonh/setup-crane@v0.3
        with:
          version: v0.15.1

      # We're pushing the arch-specific manifests to Docker Hub so that we'll be able to easily create the index/multiarch later
      - name: Push arch-specific images to Docker Hub
        run: |
          docker push docker.io/falcosecurity/falco-no-driver:aarch64-${{ inputs.tag }}
          docker push docker.io/falcosecurity/falco-no-driver:x86_64-${{ inputs.tag }}
          docker push docker.io/falcosecurity/falco:aarch64-${{ inputs.tag }}
          docker push docker.io/falcosecurity/falco:x86_64-${{ inputs.tag }}
          docker push docker.io/falcosecurity/falco-driver-loader:aarch64-${{ inputs.tag }}
          docker push docker.io/falcosecurity/falco-driver-loader:x86_64-${{ inputs.tag }}

      - name: Create no-driver manifest on Docker Hub
        uses: Noelware/docker-manifest-action@0.3.1
        with:
          inputs: docker.io/falcosecurity/falco-no-driver:${{ inputs.tag }}
          images: docker.io/falcosecurity/falco-no-driver:aarch64-${{ inputs.tag }},docker.io/falcosecurity/falco-no-driver:x86_64-${{ inputs.tag }}
          push: true

      # falco:<tag>-slim is an alias of falco-no-driver:<tag>.
      - name: Tag slim manifest on Docker Hub
        run: |
          crane copy docker.io/falcosecurity/falco-no-driver:${{ inputs.tag }} docker.io/falcosecurity/falco:${{ inputs.tag }}-slim

      - name: Create falco manifest on Docker Hub
        uses: Noelware/docker-manifest-action@0.3.1
        with:
          inputs: docker.io/falcosecurity/falco:${{ inputs.tag }}
          images: docker.io/falcosecurity/falco:aarch64-${{ inputs.tag }},docker.io/falcosecurity/falco:x86_64-${{ inputs.tag }}
          push: true

      - name: Create falco-driver-loader manifest on Docker Hub
        uses: Noelware/docker-manifest-action@0.3.1
        with:
          inputs: docker.io/falcosecurity/falco-driver-loader:${{ inputs.tag }}
          images: docker.io/falcosecurity/falco-driver-loader:aarch64-${{ inputs.tag }},docker.io/falcosecurity/falco-driver-loader:x86_64-${{ inputs.tag }}
          push: true

      # Digests are taken from the multiarch manifests so cosign signs the index, not a single arch.
      - name: Get Digests for images
        id: digests
        run: |
          echo "falco-no-driver=$(crane digest docker.io/falcosecurity/falco-no-driver:${{ inputs.tag }})" >> $GITHUB_OUTPUT
          echo "falco=$(crane digest docker.io/falcosecurity/falco:${{ inputs.tag }})" >> $GITHUB_OUTPUT
          echo "falco-driver-loader=$(crane digest docker.io/falcosecurity/falco-driver-loader:${{ inputs.tag }})" >> $GITHUB_OUTPUT

      - name: Publish images to ECR
        run: |
          crane copy docker.io/falcosecurity/falco-no-driver:${{ inputs.tag }} public.ecr.aws/falcosecurity/falco-no-driver:${{ inputs.tag }}
          crane copy docker.io/falcosecurity/falco:${{ inputs.tag }} public.ecr.aws/falcosecurity/falco:${{ inputs.tag }}
          crane copy docker.io/falcosecurity/falco-driver-loader:${{ inputs.tag }} public.ecr.aws/falcosecurity/falco-driver-loader:${{ inputs.tag }}
          crane copy public.ecr.aws/falcosecurity/falco-no-driver:${{ inputs.tag }} public.ecr.aws/falcosecurity/falco:${{ inputs.tag }}-slim

      - name: Tag latest on Docker Hub and ECR
        if: inputs.is_latest
        run: |
          crane tag docker.io/falcosecurity/falco-no-driver:${{ inputs.tag }} latest
          crane tag docker.io/falcosecurity/falco:${{ inputs.tag }} latest
          crane tag docker.io/falcosecurity/falco-driver-loader:${{ inputs.tag }} latest
          crane tag docker.io/falcosecurity/falco:${{ inputs.tag }}-slim latest-slim
          crane tag public.ecr.aws/falcosecurity/falco-no-driver:${{ inputs.tag }} latest
          crane tag public.ecr.aws/falcosecurity/falco:${{ inputs.tag }} latest
          crane tag public.ecr.aws/falcosecurity/falco-driver-loader:${{ inputs.tag }} latest
          crane tag public.ecr.aws/falcosecurity/falco:${{ inputs.tag }}-slim latest-slim

      - name: Setup Cosign
        if: inputs.sign
        uses: sigstore/cosign-installer@main
        with:
          cosign-release: v2.0.2

      # Keyless signing: COSIGN_YES auto-confirms, the OIDC token comes from id-token: write.
      - name: Sign images with cosign
        if: inputs.sign
        env:
          COSIGN_EXPERIMENTAL: "true"
          COSIGN_YES: "true"
        run: |
          cosign sign docker.io/falcosecurity/falco-no-driver@${{ steps.digests.outputs.falco-no-driver }}
          cosign sign docker.io/falcosecurity/falco@${{ steps.digests.outputs.falco }}
          cosign sign docker.io/falcosecurity/falco-driver-loader@${{ steps.digests.outputs.falco-driver-loader }}
          cosign sign public.ecr.aws/falcosecurity/falco-no-driver@${{ steps.digests.outputs.falco-no-driver }}
          cosign sign public.ecr.aws/falcosecurity/falco@${{ steps.digests.outputs.falco }}
          cosign sign public.ecr.aws/falcosecurity/falco-driver-loader@${{ steps.digests.outputs.falco-driver-loader }}

View File

@@ -1,150 +0,0 @@
# This is a reusable workflow used by master and release CI
on:
  workflow_call:
    inputs:
      version:
        description: The Falco version to use when publishing packages
        required: true
        type: string
      bucket_suffix:
        description: bucket suffix for packages
        required: false
        default: ''
        type: string

# id-token: write is required for the AWS OIDC role assumption below.
permissions:
  id-token: write
  contents: read

env:
  AWS_S3_REGION: eu-west-1
  AWS_CLOUDFRONT_DIST_ID: E1CQNPFWRXLGQD

jobs:
  # Sign and publish rpm + binary tarballs (rpm tooling needs centos).
  publish-packages:
    runs-on: ubuntu-latest
    container: docker.io/centos:7
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Install dependencies
        run: |
          yum install epel-release -y
          yum update -y
          yum install rpm-sign expect which createrepo gpg python python-pip -y
          pip install awscli==1.19.47

      # Configure AWS role; see https://github.com/falcosecurity/test-infra/pull/1102
      # Note: master CI can only push dev packages as we have 2 different roles for master and release.
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: "arn:aws:iam::292999226676:role/github_actions-falco${{ inputs.bucket_suffix }}-s3"
          aws-region: ${{ env.AWS_S3_REGION }}

      - name: Download RPM x86_64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-x86_64.rpm
          path: /tmp/falco-rpm

      - name: Download RPM aarch64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-aarch64.rpm
          path: /tmp/falco-rpm

      - name: Download binary x86_64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-x86_64.tar.gz
          path: /tmp/falco-bin

      - name: Download binary aarch64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-aarch64.tar.gz
          path: /tmp/falco-bin

      - name: Download static binary x86_64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-static-x86_64.tar.gz
          path: /tmp/falco-bin-static

      - name: Import gpg key
        env:
          GPG_KEY: ${{ secrets.GPG_KEY }}
        run: printenv GPG_KEY | gpg --import -

      # rpmsign prompts for a passphrase even with an empty one, so drive it with expect.
      # FIX(review): dropped the stray trailing "'" that followed %{__plaintext_filename}
      # in the %__gpg_sign_cmd macro — it was unbalanced and would leak into the gpg argv.
      - name: Sign rpms
        run: |
          echo "%_signature gpg" > ~/.rpmmacros
          echo "%_gpg_name Falcosecurity Package Signing" >> ~/.rpmmacros
          echo "%__gpg_sign_cmd %{__gpg} --force-v3-sigs --batch --no-armor --passphrase-fd 3 --no-secmem-warning -u \"%{_gpg_name}\" -sb --digest-algo sha256 %{__plaintext_filename}" >> ~/.rpmmacros
          cat > ~/sign <<EOF
          #!/usr/bin/expect -f
          spawn rpmsign --addsign {*}\$argv
          expect -exact "Enter pass phrase: "
          send -- "\n"
          expect eof
          EOF
          chmod +x ~/sign
          ~/sign /tmp/falco-rpm/falco-*.rpm
          rpm --qf %{SIGPGP:pgpsig} -qp /tmp/falco-rpm/falco-*.rpm | grep SHA256

      - name: Publish rpm
        run: |
          ./scripts/publish-rpm -f /tmp/falco-rpm/falco-${{ inputs.version }}-x86_64.rpm -f /tmp/falco-rpm/falco-${{ inputs.version }}-aarch64.rpm -r rpm${{ inputs.bucket_suffix }}

      - name: Publish bin
        run: |
          ./scripts/publish-bin -f /tmp/falco-bin/falco-${{ inputs.version }}-x86_64.tar.gz -r bin${{ inputs.bucket_suffix }} -a x86_64
          ./scripts/publish-bin -f /tmp/falco-bin/falco-${{ inputs.version }}-aarch64.tar.gz -r bin${{ inputs.bucket_suffix }} -a aarch64

      - name: Publish static
        run: |
          ./scripts/publish-bin -f /tmp/falco-bin-static/falco-${{ inputs.version }}-static-x86_64.tar.gz -r bin${{ inputs.bucket_suffix }} -a x86_64

  # Publish deb packages from a debian container (needs apt tooling).
  publish-packages-deb:
    runs-on: ubuntu-latest
    container: docker.io/debian:stable
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      # NOTE(review): the 'python' package has no install candidate on recent
      # debian:stable images (bookworm and later) — likely needs python3; confirm.
      - name: Install dependencies
        run: |
          apt update -y
          apt-get install apt-utils bzip2 gpg python python3-pip -y
          pip install awscli

      # Configure AWS role; see https://github.com/falcosecurity/test-infra/pull/1102
      # Note: master CI can only push dev packages as we have 2 different roles for master and release.
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: "arn:aws:iam::292999226676:role/github_actions-falco${{ inputs.bucket_suffix }}-s3"
          aws-region: ${{ env.AWS_S3_REGION }}

      - name: Download deb x86_64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-x86_64.deb
          path: /tmp/falco-deb

      - name: Download deb aarch64
        uses: actions/download-artifact@v3
        with:
          name: falco-${{ inputs.version }}-aarch64.deb
          path: /tmp/falco-deb

      - name: Import gpg key
        env:
          GPG_KEY: ${{ secrets.GPG_KEY }}
        run: printenv GPG_KEY | gpg --import -

      - name: Publish deb
        run: |
          ./scripts/publish-deb -f /tmp/falco-deb/falco-${{ inputs.version }}-x86_64.deb -f /tmp/falco-deb/falco-${{ inputs.version }}-aarch64.deb -r deb${{ inputs.bucket_suffix }}

View File

@@ -44,8 +44,6 @@ if (${EP_UPDATE_DISCONNECTED})
PROPERTY EP_UPDATE_DISCONNECTED TRUE)
endif()
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_EXTENSIONS OFF)
# Elapsed time
# set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CMAKE_COMMAND} -E time") # TODO(fntlnz, leodido): add a flag to enable this
@@ -109,8 +107,10 @@ if(BUILD_WARNINGS_AS_ERRORS)
set(CMAKE_COMMON_FLAGS "${CMAKE_COMMON_FLAGS} -Wextra -Werror ${CMAKE_SUPPRESSED_WARNINGS}")
endif()
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_C_FLAGS "${CMAKE_COMMON_FLAGS}")
set(CMAKE_CXX_FLAGS "-std=c++17 ${CMAKE_COMMON_FLAGS} -Wno-class-memaccess")
set(CMAKE_CXX_FLAGS "--std=c++17 ${CMAKE_COMMON_FLAGS} -Wno-class-memaccess")
set(CMAKE_C_FLAGS_DEBUG "${FALCO_EXTRA_DEBUG_FLAGS}")
set(CMAKE_CXX_FLAGS_DEBUG "${FALCO_EXTRA_DEBUG_FLAGS}")

142
README.md
View File

@@ -5,8 +5,12 @@
[![Build Status](https://img.shields.io/circleci/build/github/falcosecurity/falco/master?style=for-the-badge)](https://circleci.com/gh/falcosecurity/falco) [![CII Best Practices Summary](https://img.shields.io/cii/summary/2317?label=CCI%20Best%20Practices&style=for-the-badge)](https://bestpractices.coreinfrastructure.org/projects/2317) [![GitHub](https://img.shields.io/github/license/falcosecurity/falco?style=for-the-badge)](COPYING) [![Latest](https://img.shields.io/github/v/release/falcosecurity/falco?style=for-the-badge)](https://github.com/falcosecurity/falco/releases/latest) ![Architectures](https://img.shields.io/badge/ARCHS-x86__64%7Caarch64-blueviolet?style=for-the-badge)
Want to talk? Join us on the [#falco](https://kubernetes.slack.com/messages/falco) channel in the [Kubernetes Slack](https://slack.k8s.io).
## Latest releases
Read the [change log](CHANGELOG.md).
<!--
Badges in the following table are constructed by using the
https://img.shields.io/badge/dynamic/xml endpoint.
@@ -45,90 +49,116 @@ Notes:
-->
| | stable |
|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| rpm-x86_64 | [![rpm](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Frpm%2Ffalco-%26delimiter=aarch64)][2] |
| deb-x86_64 | [![deb](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fdeb%2Fstable%2Ffalco-%26delimiter=aarch64)][4] |
| binary-x86_64 | [![bin](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%20%22falco-%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fbin%2Fx86_64%2Ffalco-)][6] |
| rpm-aarch64 | [![rpm](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Frpm%2Ffalco-%26delimiter=x86_64)][2] |
| deb-aarch64 | [![deb](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fdeb%2Fstable%2Ffalco-%26delimiter=x86_64)][4] |
| binary-aarch64 | [![bin](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%20%22falco-%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fbin%2Faarch64%2Ffalco-)][8] |
| | development | stable |
|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| rpm-x86_64 | [![rpm-dev](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Frpm-dev%2Ffalco-%26delimiter=aarch64)][1] | [![rpm](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Frpm%2Ffalco-%26delimiter=aarch64)][2] |
| deb-x86_64 | [![deb-dev](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fdeb-dev%2Fstable%2Ffalco-%26delimiter=aarch64)][3] | [![deb](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fdeb%2Fstable%2Ffalco-%26delimiter=aarch64)][4] |
| binary-x86_64 | [![bin-dev](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%20%22falco-%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fbin-dev%2Fx86_64%2Ffalco-)][5] | [![bin](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%20%22falco-%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fbin%2Fx86_64%2Ffalco-)][6] |
| rpm-aarch64 | [![rpm-dev](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Frpm-dev%2Ffalco-%26delimiter=x86_64)][1] | [![rpm](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Frpm%2Ffalco-%26delimiter=x86_64)][2] |
| deb-aarch64 | [![deb-dev](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fdeb-dev%2Fstable%2Ffalco-%26delimiter=x86_64)][3] | [![deb](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-before%28substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%22falco-%22%29%2C%22.asc%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fdeb%2Fstable%2Ffalco-%26delimiter=x86_64)][4] |
| binary-aarch64 | [![bin-dev](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%20%22falco-%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fbin-dev%2Faarch64%2Ffalco-)][7] | [![bin](https://img.shields.io/badge/dynamic/xml?color=%2300aec7&style=flat-square&label=Falco&query=substring-after%28%28%2F%2A%5Bname%28%29%3D%27ListBucketResult%27%5D%2F%2A%5Bname%28%29%3D%27Contents%27%5D%29%5Blast%28%29%5D%2F%2A%5Bname%28%29%3D%27Key%27%5D%2C%20%22falco-%22%29&url=https%3A%2F%2Ffalco-distribution.s3-eu-west-1.amazonaws.com%2F%3Fprefix%3Dpackages%2Fbin%2Faarch64%2Ffalco-)][8] |
For comprehensive information on the latest updates and changes to the project, please refer to the [change log](CHANGELOG.md). Additionally, we have documented the [release process](RELEASE.md) for delivering new versions of Falco.
---
## Introduction to Falco
The Falco Project, originally created by [Sysdig](https://sysdig.com), is an incubating [CNCF](https://cncf.io) open source cloud native runtime security tool.
Falco makes it easy to consume kernel events, and enrich those events with information from Kubernetes and the rest of the cloud native stack.
Falco can also be extended to other data sources by using plugins.
Falco has a rich set of security rules specifically built for Kubernetes, Linux, and cloud-native.
If a rule is violated in a system, Falco will send an alert notifying the user of the violation and its severity.
[Falco](https://falco.org/), originally created by [Sysdig](https://sysdig.com), is an incubating project under the [CNCF](https://cncf.io).
## What can Falco detect?
Falco is a cloud native runtime security tool for Linux operating systems. It is designed to detect and alert on abnormal behavior and potential security threats in real-time.
Falco can detect and alert on any behavior that involves making Linux system calls.
Falco alerts can be triggered by the use of specific system calls, their arguments, and by properties of the calling process.
For example, Falco can easily detect incidents including but not limited to:
At its core, Falco is a kernel event monitoring and detection agent that captures events, such as syscalls, based on custom rules. Falco can enhance these events by integrating metadata from the container runtime and Kubernetes. The collected events can be analyzed off-host in SIEM or data lake systems.
- A shell is running inside a container or pod in Kubernetes.
- A container is running in privileged mode, or is mounting a sensitive path, such as `/proc`, from the host.
- A server process is spawning a child process of an unexpected type.
- Unexpected read of a sensitive file, such as `/etc/shadow`.
- A non-device file is written to `/dev`.
- A standard system binary, such as `ls`, is making an outbound network connection.
- A privileged pod is started in a Kubernetes cluster.
For detailed technical information and insights into the cyber threats that Falco can detect, visit the official [Falco](https://falco.org/) website.
The official Falco rules are maintained and released in [falcosecurity/rules](https://github.com/falcosecurity/rules/). That repository also contains the Falco rules inventory [document](https://github.com/falcosecurity/rules/blob/main/rules_inventory/rules_overview.md), which provides additional details around the default rules Falco ships with.
## Installing Falco
If you would like to run Falco in **production** please adhere to the [official installation guide](https://falco.org/docs/getting-started/installation/).
### Kubernetes
| Tool | Link | Note |
|----------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| Helm | [Chart Repository](https://github.com/falcosecurity/charts/tree/master/falco#introduction) | The Falco community offers regular helm chart releases. |
| Minikube | [Tutorial](https://falco.org/docs/getting-started/third-party/#minikube) | The Falco driver has been baked into minikube for easy deployment. |
| Kind | [Tutorial](https://falco.org/docs/getting-started/third-party/#kind) | Running Falco with kind requires a driver on the host system. |
| GKE | [Tutorial](https://falco.org/docs/getting-started/third-party/#gke) | We suggest using the eBPF driver for running Falco on GKE. |
## Developing
Falco is designed to be extensible such that it can be built into cloud-native applications and infrastructure.
Falco has a [gRPC](https://falco.org/docs/grpc/) endpoint and an API defined in [protobuf](https://github.com/falcosecurity/falco/blob/master/userspace/falco/outputs.proto).
The Falco Project supports various SDKs for this endpoint.
### SDKs
| Language | Repository |
|----------|---------------------------------------------------------|
| Go | [client-go](https://github.com/falcosecurity/client-go) |
## Plugins
Falco comes with a [plugin framework](https://falco.org/docs/plugins/) that extends it to potentially any cloud detection scenario. Plugins are shared libraries that conform to a documented API and allow for:
- Adding new event sources that can be used in rules;
- Adding the ability to define new fields and extract information from events.
The Falco Project maintains [various plugins](https://github.com/falcosecurity/plugins) and provides SDKs for plugin development.
## Falco Repo: Powering the Core of The Falco Project
### SDKs
This is the main Falco repository which contains the source code for building the Falco binary. By utilizing its [libraries](https://github.com/falcosecurity/libs) and the [falco.yaml](falco.yaml) configuration file, this repository forms the foundation of Falco's functionality. The Falco repository is closely interconnected with the following *core* repositories:
- [falcosecurity/libs](https://github.com/falcosecurity/libs): Falco's libraries are key to its fundamental operations, making up the greater portion of the source code of the Falco binary and providing essential features such as kernel drivers.
- [falcosecurity/rules](https://github.com/falcosecurity/rules): Contains the official ruleset for Falco, providing pre-defined detection rules for various security threats and abnormal behaviors.
- [falcosecurity/plugins](https://github.com/falcosecurity/plugins/): Falco plugins facilitate integration with external services, expand Falco's capabilities beyond syscalls and container events, and are designed to evolve with specialized functionality in future releases.
- [falcosecurity/falcoctl](https://github.com/falcosecurity/falcoctl): Command-line utility for managing and interacting with Falco.
For more information, visit the official hub of The Falco Project: [falcosecurity/evolution](https://github.com/falcosecurity/evolution). It provides valuable insights and information about the project's repositories.
## Getting Started with Falco
Carefully review and follow the [official guide and documentation](https://falco.org/docs/getting-started/).
Considerations and guidance for Falco adopters:
1. Understand dependencies: Assess the environment where you'll run Falco and consider kernel versions and architectures.
2. Define threat detection objectives: Clearly identify the threats you want to detect and evaluate Falco's strengths and limitations.
3. Consider performance and cost: Assess compute performance overhead and align with system administrators or SREs. Budget accordingly.
4. Choose build and customization approach: Decide between the open source Falco build or creating a custom build pipeline. Customize the build and deployment process as necessary, including incorporating unique tests or approaches, to ensure a resilient deployment with fast deployment cycles.
5. Integrate with output destinations: Integrate Falco with SIEM, data lake systems, or other preferred output destinations to establish a robust foundation for comprehensive data analysis and enable effective incident response workflows.
| Language | Repository |
|----------|-------------------------------------------------------------------------------|
| Go | [falcosecurity/plugin-sdk-go](https://github.com/falcosecurity/plugin-sdk-go) |
## How to Contribute
Please refer to the [contributing guide](https://github.com/falcosecurity/.github/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/falcosecurity/evolution/CODE_OF_CONDUCT.md) for more information on how to contribute.
## Documentation
The [Official Documentation](https://falco.org/docs/) is the best resource to learn about Falco.
## Join the Community
To get involved with the Falco Project please visit the [community repository](https://github.com/falcosecurity/community) to find more information and ways to get involved.
If you have any questions about Falco or contributing, do not hesitate to file an issue or contact the Falco maintainers and community members for assistance.
To get involved with The Falco Project please visit [the community repository](https://github.com/falcosecurity/community) to find more information and ways to participate.
How to reach out?
- Join the [#falco](https://kubernetes.slack.com/messages/falco) channel on the [Kubernetes Slack](https://slack.k8s.io).
- Join the [Falco mailing list](https://lists.cncf.io/g/cncf-falco-dev).
- File an [issue](https://github.com/falcosecurity/falco/issues) or make feature requests.
- Join the [#falco](https://kubernetes.slack.com/messages/falco) channel on the [Kubernetes Slack](https://slack.k8s.io)
- [Join the Falco mailing list](https://lists.cncf.io/g/cncf-falco-dev)
- [Read the Falco documentation](https://falco.org/docs/)
## Commitment to Falco's Own Security
## How to contribute
Full reports of various security audits can be found [here](./audits/).
See the [contributing guide](https://github.com/falcosecurity/.github/blob/main/CONTRIBUTING.md) and the [code of conduct](https://github.com/falcosecurity/evolution/CODE_OF_CONDUCT.md).
## Security Audit
In addition, you can refer to the [falco security](https://github.com/falcosecurity/falco/security) and [libs security](https://github.com/falcosecurity/libs/security) sections for detailed updates on security advisories and policies.
A third-party security audit was performed by Cure53; you can see the full report [here](./audits/SECURITY_AUDIT_2019_07.pdf).
To report security vulnerabilities, please follow the community process outlined in the documentation found [here](https://github.com/falcosecurity/.github/blob/main/SECURITY.md).
## What's next for Falco?
Stay updated with Falco's evolving capabilities by exploring the [Falco Roadmap](https://github.com/orgs/falcosecurity/projects/5), which provides insights into the features currently under development and planned for future releases.
## Reporting security vulnerabilities
Please report security vulnerabilities following the community process documented [here](https://github.com/falcosecurity/.github/blob/main/SECURITY.md).
## License
Falco is licensed to you under the [Apache 2.0](./COPYING) open source license.
## Project Evolution
The [falcosecurity/evolution](https://github.com/falcosecurity/evolution) repository is the official space for the community to work together, discuss ideas, and document processes. It is also a place to make decisions. Check it out to find more helpful resources.
## Resources
- [Governance](https://github.com/falcosecurity/evolution/blob/main/GOVERNANCE.md)

View File

@@ -5,22 +5,18 @@
This document provides the process to create a new Falco release. In addition, it provides information about the versioning of the Falco components. At a high level each Falco release consists of the following main components:
- Falco binary (userspace), includes `modern_bpf` driver object code (kernel space) starting with Falco 0.34.x releases
- Falco kernel driver object files, separate artifacts for `kmod` and `bpf` drivers, not applicable for `modern_bpf` driver (kernel space)
- Falco binary (userspace)
- Falco kernel driver object files (kernel space)
- Option 1: Kernel module (`.ko` files)
- Option 2: eBPF (`.o` files)
- Falco config and rules `.yaml` files (userspace)
- Falco config and primary rules `.yaml` files (userspace)
- Falco plugins (userspace - optional)
> Note: Starting with Falco 0.34.x releases, the Falco userspace binary includes the `modern_bpf` driver object code during the linking process. This integration is made possible by the CO-RE (Compile Once - Run Everywhere) feature of the modern BPF driver. CO-RE allows the driver to function on kernels that have backported BTF (BPF Type Format) support or have a kernel version >= 5.8. For the older `kmod` and `bpf` drivers, separate artifacts are released for the kernel space. This is because these drivers need to be explicitly compiled for the specific kernel release, using the exact kernel headers. This approach ensures that Falco can support a wide range of environments, including multiple kernel versions, distributions, and architectures. (see `libs` [driver - kernel version support matrix](https://github.com/falcosecurity/libs#drivers-officially-supported-architectures)).
One nice trait about releasing separate artifacts for userspace and kernel space is that Falco is amenable to supporting a large array of environments, that is, multiple kernel versions, distros and architectures (see `libs` [driver - kernel version support matrix](https://github.com/falcosecurity/libs#drivers-officially-supported-architectures)). The Falco project manages the release of both the Falco userspace binary and pre-compiled Falco kernel drivers for the most popular kernel versions and distros. The build and publish process is managed by the [test-infra](https://github.com/falcosecurity/test-infra) repo. The Falco userspace executable includes bundled dependencies, so that it can be run from anywhere.
The Falco Project manages the release of both the Falco userspace binary and pre-compiled Falco kernel drivers for the most popular kernel versions and distros. The build and publish process is managed by the [test-infra](https://github.com/falcosecurity/test-infra) repo.
The Falco project also publishes all sources for each component. In fact, sources are included in the Falco release in the same way as some plugins (k8saudit and cloudtrail) as well as the rules that are shipped together with Falco. This empowers the end user to audit the integrity of the project as well as build kernel drivers for custom kernels or not officially supported kernels / distros (see [driverkit](https://github.com/falcosecurity/driverkit) for more information). While the Falco project is deeply embedded into an ecosystem of supporting [Falco sub-projects](https://github.com/falcosecurity/evolution) that aim to make the deployment of Falco easy, user-friendly, extendible and cloud-native, core Falco is split across two repos, [falco](https://github.com/falcosecurity/falco) (this repo) and [libs](https://github.com/falcosecurity/libs). The `libs` repo contains >90% of Falco's core features and is the home of each of the kernel drivers and engines. More details are provided in the [Falco Components Versioning](#falco-components-versioning) section.
The Falco userspace executable includes bundled dependencies, so that it can be run from anywhere.
Falco publishes all sources, enabling users to audit the project's integrity and build kernel drivers for custom or unsupported kernels/distributions, specifically for non-modern BPF drivers (see [driverkit](https://github.com/falcosecurity/driverkit) for more information).
Finally, the release process follows a transparent process described in more detail in the following sections and the official [Falco guide and documentation](https://falco.org/) provide rich information around building, installing and using Falco.
Finally, the release process follows a transparent process described in more detail in the following sections and the official [Falco docs](https://falco.org/) contain rich information around building, installing and using Falco.
### Falco Binaries, Rules and Sources Artifacts - Quick Links
@@ -46,9 +42,8 @@ Alternatively Falco binaries or plugins can be downloaded from the Falco Artifac
### Falco Drivers Artifacts Repo - Quick Links
> Note: This section specifically applies to non-modern BPF drivers.
The Falco Project publishes all drivers for each release for popular kernel versions / distros and `x86_64` and `aarch64` architectures to the Falco project's managed Artifacts repo. The Artifacts repo follows standard directory level conventions. The respective driver object file is prefixed by distro and named / versioned by kernel release - `$(uname -r)`. Pre-compiled drivers are released with a [best effort](https://github.com/falcosecurity/falco/blob/master/proposals/20200818-artifacts-storage.md#notice) notice. This is because gcc (`kmod`) and clang (`bpf`) compilers sometimes fail to build the artifacts for a specific kernel version. More details around driver versioning and driver compatibility are provided in the [Falco Components Versioning](#falco-components-versioning) section. Short preview: If you use the standard Falco setup leveraging driver-loader, [driver-loader script](https://github.com/falcosecurity/falco/blob/master/scripts/falco-driver-loader) will fetch the kernel space artifact (object file) corresponding to the default `DRIVER_VERSION` Falco was shipped with.
The Falco project publishes all drivers for each release for all popular kernel versions / distros and `x86_64` and `aarch64` architectures to the Falco project managed Artifacts repo. The Artifacts repo follows standard directory level conventions. The respective driver object file is prefixed by distro and named / versioned by kernel release - `$(uname -r)`. Pre-compiled drivers are released with a [best effort](https://github.com/falcosecurity/falco/blob/master/proposals/20200818-artifacts-storage.md#notice) notice. This is because gcc (`kmod`) and clang (`bpf`) compilers or for example the eBPF verifier are not perfect. More details around driver versioning and driver compatibility are provided in the [Falco Components Versioning](#falco-components-versioning) section. Short preview: If you use the standard Falco setup leveraging driver-loader, [driver-loader script](https://github.com/falcosecurity/falco/blob/master/scripts/falco-driver-loader) will fetch the kernel space artifact (object file) corresponding to the default `DRIVER_VERSION` Falco was shipped with.
- [Falco Artifacts Repo Drivers Root](https://download.falco.org/?prefix=driver/)
- Option 1: Kernel module (`.ko` files) - all under same driver version directory
@@ -57,16 +52,16 @@ The Falco Project publishes all drivers for each release for popular kernel vers
### Timeline
Falco follows a release schedule of three times per year, with releases expected at the end of January, May, and September. Hotfix releases are issued as needed.
Falco releases are due to happen 3 times per year. Our current schedule sees a new release by the end of January, May, and September each year. Hotfix releases can happen whenever needed.
Changes and new features are organized into [milestones](https://github.com/falcosecurity/falco/milestones). The milestone corresponding to the next version represents the content that will be included in the upcoming release.
Changes and new features are grouped in [milestones](https://github.com/falcosecurity/falco/milestones), the milestone with the next version represents what is going to be released.
### Procedures
The release process is mostly automated, requiring only a few manual steps to initiate and complete.
The release process is mostly automated requiring only a few manual steps to initiate and complete it.
Moreover, we assign owners for each release (typically pairing a new person with an experienced one). Assignees and due dates for releases are proposed during the [weekly community call](https://github.com/falcosecurity/community).
Moreover, we need to assign owners for each release (usually we pair a new person with an experienced one). Assignees and the due date are proposed during the [weekly community call](https://github.com/falcosecurity/community).
At a high level each Falco release needs to follow a pre-determined sequencing of releases and build order:
@@ -74,13 +69,11 @@ At a high level each Falco release needs to follow a pre-determined sequencing o
- [4] Falco driver pre-compiled object files push to Falco's Artifacts repo
- [5] Falco userspace binary release
Assignees are responsible for creating a Falco GitHub issue to track the release tasks and monitor the progress of the release. This issue serves as a central point for communication and provides updates on the release dates. You can refer to the [Falco v0.35 release](https://github.com/falcosecurity/falco/issues/2554) or [Libs Release (0.11.0+5.0.1+driver)](https://github.com/falcosecurity/libs/issues/1092) issues as examples/templates for creating the release issue.
Finally, on the proposed due date, the assignees for the upcoming release proceed with the processes described below.
Finally, on the proposed due date the assignees for the upcoming release proceed with the processes described below.
## Pre-Release Checklist
Before proceeding with the release, make sure to complete the following preparatory steps, which can be easily done using the GitHub UI:
Prior to cutting a release the following preparatory steps should take 5 minutes using the GitHub UI.
### 1. Release notes
- Find the previous release date (`YYYY-MM-DD`) by looking at the [Falco releases](https://github.com/falcosecurity/falco/releases)
@@ -120,29 +113,26 @@ The release PR is meant to be made against the respective `release/M.m.x` branch
- Close the completed milestone as soon as the PR is merged into the release branch
- Cherry pick the PR on master too
## Publishing Pre-Releases (RCs and tagged development versions)
Core maintainers and/or the release manager can decide to publish pre-releases at any time before the final release
is live for development and testing purposes.
The prerelease tag must be formatted as `M.m.p-r`, where `r` is the prerelease version information (e.g., `0.35.0-rc1`).
To do so:
- [Draft a new release](https://github.com/falcosecurity/falco/releases/new)
- Use `M.m.p-r` both as tag version and release title.
- Check the "Set as a pre-release" checkbox and make sure "Set as the latest release" is unchecked
- It is recommended to add a brief description so that other contributors will understand the reason why the prerelease is published
- Publish the prerelease!
- The release pipeline will start automatically. Packages will be uploaded to the `-dev` bucket and container images will be tagged with the specified tag.
In order to check the status of the release pipeline click on the [GitHub Actions tab](https://github.com/falcosecurity/falco/actions?query=event%3Arelease) in the Falco repository and filter by release.
## Release
Assume `M.m.p` is the new version.
### 1. Create the release with GitHub
### 1. Create a tag
- Once the release PR has got merged both on the release branch and on master, and the master CI has done its job, git tag the new release on the release branch:
```
git pull
git checkout release/M.m.x
git tag M.m.p
git push origin M.m.p
```
> **N.B.**: do NOT use an annotated tag. For reference, see https://git-scm.com/book/en/v2/Git-Basics-Tagging
- Wait for the CI to complete
### 2. Update the GitHub release
- [Draft a new release](https://github.com/falcosecurity/falco/releases/new)
- Use `M.m.p` both as tag version and release title
@@ -186,11 +176,8 @@ Assume `M.m.p` is the new version.
```
- Finally, publish the release!
- The release pipeline will start automatically upon publication and all packages and container images will be uploaded to the stable repositories.
In order to check the status of the release pipeline click on the [GitHub Actions tab](https://github.com/falcosecurity/falco/actions?query=event%3Arelease) in the Falco repository and filter by release.
### 2. Update the meeting notes
### 3. Update the meeting notes
For each release we archive the meeting notes in git for historical purposes.
@@ -212,13 +199,13 @@ Announce the new release to the world!
## Falco Components Versioning
This section provides more details around the versioning of the components that make up Falco's core. It can also be a useful guide for the uninitiated to be more informed about Falco's source. Because `libs` makes up the greater portion of the source code of the Falco binary and is the home of each of the kernel drivers and engines, the [libs release doc](https://github.com/falcosecurity/libs/blob/master/release.md) is an excellent additional resource. In addition, the [plugins release doc](https://github.com/falcosecurity/plugins/blob/master/release.md) provides similar details around Falco's plugins. `SHA256` checksums are provided throughout Falco's source code to empower the end user to perform integrity checks. All Falco releases also contain the sources as part of the packages.
This section provides more details around the versioning of all components that make up core Falco. It can also be a useful guide for the uninitiated to be more informed about Falco's source. Because the `libs` repo contains >90% of Falco's core features and is the home of each of the kernel drivers and engines, the [libs release doc](https://github.com/falcosecurity/libs/blob/master/release.md) is an excellent additional resource. In addition, the [plugins release doc](https://github.com/falcosecurity/plugins/blob/master/release.md) provides similar details around Falco's plugins. `SHA256` checksums are provided throughout Falco's source code to empower the end user to perform integrity checks. All Falco releases also contain the sources as part of the packages.
### Falco repo (this repo)
- Falco version is a git tag (`x.y.z`), see [Procedures](#procedures) section. Note that the Falco version is a sem-ver-like schema, but not fully compatible with sem-ver.
- [FALCO_ENGINE_VERSION](https://github.com/falcosecurity/falco/blob/master/userspace/engine/falco_engine_version.h) is not sem-ver and must be bumped either when a backward incompatible change has been introduced to the rules files syntax and/or `FALCO_FIELDS_CHECKSUM` computed via `falco --list -N | sha256sum` has changed. The primary idea is that when new filter / display fields (see currently supported [Falco fields](https://falco.org/docs/rules/supported-fields/)) are introduced, a version change indicates that these fields were not available in previous engine versions. See the [rules release guidelines](https://github.com/falcosecurity/rules/blob/main/RELEASE.md#versioning-a-ruleset) to understand how this affects the versioning of Falco rules. Breaking changes introduced in the Falco engine are not necessarily tied to the drivers or libs versions. Lastly, `FALCO_ENGINE_VERSION` is typically incremented once during a Falco release cycle, while `FALCO_FIELDS_CHECKSUM` is bumped whenever necessary during the development and testing phases of the release cycle.
- During development and release preparation, libs and driver reference commits are often bumped in Falco's cmake setup ([falcosecurity-libs cmake](https://github.com/falcosecurity/falco/blob/master/cmake/modules/falcosecurity-libs.cmake#L30) and [driver cmake](https://github.com/falcosecurity/falco/blob/master/cmake/modules/driver.cmake#L29)) in order to merge new Falco features. In practice, they are mostly bumped at the same time referencing the same `libs` commit. However, for the official Falco build `FALCOSECURITY_LIBS_VERSION` flag that references the stable libs version is used (read below).
- [FALCO_ENGINE_VERSION](https://github.com/falcosecurity/falco/blob/master/userspace/engine/falco_engine_version.h) is not sem-ver and must be bumped either when a backward incompatible change has been introduced to the rules files syntax or `falco --list -N | sha256sum` has changed. Breaking changes introduced in the Falco engine are not necessarily tied to the drivers or libs versions. The primary idea behind the hash is that when new filter / display fields (see currently supported [Falco fields](https://falco.org/docs/rules/supported-fields/)) are introduced a version bump indicates that this field was not available in previous engine versions. See the [rules release guidelines](https://github.com/falcosecurity/rules/blob/main/RELEASE.md#versioning-a-ruleset) to understand how this affects the versioning of Falco rules.
- During development and release preparation, libs and driver reference commits are often bumped in Falco's cmake setup ([falcosecurity-libs cmake](https://github.com/falcosecurity/falco/blob/master/cmake/modules/falcosecurity-libs.cmake#L30) and [driver cmake](https://github.com/falcosecurity/falco/blob/master/cmake/modules/driver.cmake#L29)) in order to merge new Falco features. In practice they are mostly bumped at the same time referencing the same `libs` commit. However, for the official Falco build `FALCOSECURITY_LIBS_VERSION` flag that references the stable Libs version is used (read below).
- Similarly, Falco plugins versions are bumped in Falco's cmake setup ([plugins cmake](https://github.com/falcosecurity/falco/blob/master/cmake/modules/plugins.cmake)) and those versions are the ones used for the Falco release.
- At release time Plugin, Libs and Driver versions are compatible with Falco.
- If you use the standard Falco setup leveraging driver-loader, [driver-loader script](https://github.com/falcosecurity/falco/blob/master/scripts/falco-driver-loader) will fetch the kernel space artifact (object file) corresponding to the default `DRIVER_VERSION` Falco was shipped with (read more below under Libs).
@@ -238,7 +225,7 @@ Driver:
### Libs repo
- Libs version is a git tag (`x.y.z`) and when building Falco the libs version is set via the `FALCOSECURITY_LIBS_VERSION` flag (see above).
- The driver version is not directly linked to the userspace components of the Falco binary. This is because of the clear separation between userspace and kernel space, which adds an additional layer of complexity. To address this, the concept of a `Default driver` has been introduced, allowing for implicit declaration of compatible driver versions. For example, if the default driver version is `5.0.1+driver`, Falco works with all driver versions >= 5.0.1 and < 6.0.0. This is a consequence of how the driver version is constructed starting from the `Driver API version` and `Driver Schema version`. Driver API and Schema versions are explained in the respective [libs driver doc](https://github.com/falcosecurity/libs/blob/master/driver/README.VERSION.md) -> Falco's `driver-loader` will always fetch the default driver, therefore a Falco release is always "shipped" with the driver version corresponding to the default driver.
- Driver version itself is not directly tied to the Falco binary as opposed to the libs version being part of the source code used to compile Falco's userspace binary. This is because of the strict separation between userspace and kernel space artifacts, so things become a bit more interesting here. This is why the concept of a `Default driver` has been introduced to still implicitly declare the compatible driver versions. For example, if the default driver version is `2.0.0+driver`, Falco works with all driver versions >= 2.0.0 and < 3.0.0. This is a consequence of how the driver version is constructed starting from the `Driver API version` and `Driver Schema version`. Driver API and Schema versions are explained in the respective [libs driver doc](https://github.com/falcosecurity/libs/blob/master/driver/README.VERSION.md) -> Falco's `driver-loader` will always fetch the default driver, therefore a Falco release is always "shipped" with the driver version corresponding to the default driver.
- See [libs release doc](https://github.com/falcosecurity/libs/blob/master/release.md) for more information.
### Plugins repo

View File

@@ -3,9 +3,7 @@
# Falco Branding Guidelines
Falco is an open source security project whose brand and identity are governed by the [Cloud Native Computing Foundation](https://www.linuxfoundation.org/legal/trademark-usage).
This document describes the official branding guidelines of The Falco Project. Please see the [Falco Branding](https://falco.org/community/falco-brand/) page on our website for further details.
This document describes The Falco Project's branding guidelines, language, and message.
Content in this document can be used to publicly share about Falco.
@@ -84,7 +82,7 @@ Examples of malicious behavior include:
Falco is capable of [consuming the Kubernetes audit logs](https://kubernetes.io/docs/tasks/debug-application-cluster/falco/#use-falco-to-collect-audit-events).
By adding Kubernetes application context and Kubernetes audit logs, teams can understand who did what.
### Writing about Falco
##### Yes
@@ -124,6 +122,7 @@ Falco does not prevent unwanted behavior.
Falco however alerts when unusual behavior occurs.
This is commonly referred to as **detection** or **forensics**.
---
# Glossary

View File

@@ -91,16 +91,15 @@ function(git_get_latest_tag _var)
find_package(Git QUIET)
endif()
# We use git describe --tags `git rev-list --exclude "*.*.*-*" --tags --max-count=1`
# Note how we exclude prerelease tags (the ones with "-alphaX")
# We use git describe --tags `git rev-list --tags --max-count=1`
execute_process(COMMAND
"${GIT_EXECUTABLE}"
rev-list
--exclude "*.*.*-*"
--tags
--max-count=1
WORKING_DIRECTORY
"${CMAKE_CURRENT_SOURCE_DIR}"
COMMAND tail -n1
RESULT_VARIABLE
res
OUTPUT_VARIABLE

View File

@@ -26,8 +26,8 @@ else()
# In case you want to test against another driver version (or branch, or commit) just pass the variable -
# ie., `cmake -DDRIVER_VERSION=dev ..`
if(NOT DRIVER_VERSION)
set(DRIVER_VERSION "5.0.1+driver")
set(DRIVER_CHECKSUM "SHA256=8b197b916b6419dac8fb41807aa05d822164c7bfd2c3eef66d20d060a05a485a")
set(DRIVER_VERSION "ea32dfb4510ad6d9dcdfa6c40c0ba062dcc2bfb5")
set(DRIVER_CHECKSUM "SHA256=31cc9ed4479daf210ccefcf419bd64f8e7c475d441453db368bde72e653774b6")
endif()
# cd /path/to/build && cmake /path/to/source

View File

@@ -15,14 +15,14 @@ include(ExternalProject)
string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} FALCOCTL_SYSTEM_NAME)
set(FALCOCTL_VERSION "0.5.0")
set(FALCOCTL_VERSION "0.4.0")
if(${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "x86_64")
set(FALCOCTL_SYSTEM_PROC_GO "amd64")
set(FALCOCTL_HASH "ba82ee14ee72fe5737f1b5601e403d8a9422dfe2c467d1754eb488001eeea5f1")
set(FALCOCTL_HASH "13c88e612efe955bc014918a7af30bae28dc5ba99b2962af57e36b1b87f527f9")
else() # aarch64
set(FALCOCTL_SYSTEM_PROC_GO "arm64")
set(FALCOCTL_HASH "be145ece641d439011cc4a512d0fd2dac5974cab7399f9a7cd43f08eb43dd446")
set(FALCOCTL_HASH "0f8898853e99a2cd1b4dd6b161e8545cf20ce0e3ce79cddc539f6002257d5de5")
endif()
ExternalProject_Add(

View File

@@ -19,7 +19,7 @@ message(STATUS "Libs version: ${FALCOSECURITY_LIBS_VERSION}")
ExternalProject_Add(
falcosecurity-libs
URL "https://github.com/falcosecurity/libs/archive/${FALCOSECURITY_LIBS_VERSION}.tar.gz"
URL "https://github.com/Andreagit97/libs/archive/${FALCOSECURITY_LIBS_VERSION}.tar.gz"
URL_HASH "${FALCOSECURITY_LIBS_CHECKSUM}"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""

View File

@@ -25,17 +25,14 @@ if(FALCOSECURITY_LIBS_SOURCE_DIR)
else()
# FALCOSECURITY_LIBS_VERSION accepts a git reference (branch name, commit hash, or tag) to the falcosecurity/libs repository.
# In case you want to test against another falcosecurity/libs version (or branch, or commit) just pass the variable -
# ie., `cmake -DFALCOSECURITY_LIBS_VERSION=dev ..`
# ie., `cmake -DFALCOSECURITY_LIBS_VERSION=dev ..`
if(NOT FALCOSECURITY_LIBS_VERSION)
set(FALCOSECURITY_LIBS_VERSION "0.11.0-rc5")
set(FALCOSECURITY_LIBS_CHECKSUM "SHA256=079ab5f596a0d8af2a7f843e8159f83cb7c864331019aaed822daa737c75e9e7")
set(FALCOSECURITY_LIBS_VERSION "ea32dfb4510ad6d9dcdfa6c40c0ba062dcc2bfb5")
set(FALCOSECURITY_LIBS_CHECKSUM "SHA256=31cc9ed4479daf210ccefcf419bd64f8e7c475d441453db368bde72e653774b6")
endif()
# cd /path/to/build && cmake /path/to/source
execute_process(COMMAND "${CMAKE_COMMAND}"
-DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}"
-DFALCOSECURITY_LIBS_VERSION=${FALCOSECURITY_LIBS_VERSION}
-DFALCOSECURITY_LIBS_CHECKSUM=${FALCOSECURITY_LIBS_CHECKSUM}
execute_process(COMMAND "${CMAKE_COMMAND}" -DFALCOSECURITY_LIBS_VERSION=${FALCOSECURITY_LIBS_VERSION} -DFALCOSECURITY_LIBS_CHECKSUM=${FALCOSECURITY_LIBS_CHECKSUM}
${FALCOSECURITY_LIBS_CMAKE_SOURCE_DIR} WORKING_DIRECTORY ${FALCOSECURITY_LIBS_CMAKE_WORKING_DIR})
# cmake --build .
@@ -53,8 +50,6 @@ if(MUSL_OPTIMIZED_BUILD)
endif()
set(SCAP_HOST_ROOT_ENV_VAR_NAME "HOST_ROOT")
set(SCAP_HOSTNAME_ENV_VAR "FALCO_HOSTNAME")
set(SINSP_AGENT_CGROUP_MEM_PATH_ENV_VAR "FALCO_CGROUP_MEM_PATH")
if(NOT LIBSCAP_DIR)
set(LIBSCAP_DIR "${FALCOSECURITY_LIBS_SOURCE_DIR}")

View File

@@ -13,26 +13,22 @@
include(ExternalProject)
# 'stable' or 'dev'
set(PLUGINS_DOWNLOAD_BUCKET "dev")
string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} PLUGINS_SYSTEM_NAME)
if(NOT DEFINED PLUGINS_COMPONENT_NAME)
set(PLUGINS_COMPONENT_NAME "${CMAKE_PROJECT_NAME}-plugins")
endif()
# k8saudit
set(PLUGIN_K8S_AUDIT_VERSION "0.6.0-0.5.3-33%2B81ffddd")
set(PLUGIN_K8S_AUDIT_VERSION "0.5.0")
if(${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "x86_64")
set(PLUGIN_K8S_AUDIT_HASH "990e5c67d3b3c7cf5d30c73d73871b58767171ce7c998c1ca1d94d70c67db290")
set(PLUGIN_K8S_AUDIT_HASH "c4abb288df018940be8e548340a74d39623b69142304e01523ea189bc698bc80")
else() # aarch64
set(PLUGIN_K8S_AUDIT_HASH "c3634dfa83c8c8898811ab6b7587ea6d1c6dfffbdfa56def28cab43aaf01f88c")
set(PLUGIN_K8S_AUDIT_HASH "3bcc849d9f95a3fa519b4592d0947149e492b530fb935a3f98f098e234b7baa7")
endif()
ExternalProject_Add(
k8saudit-plugin
URL "https://download.falco.org/plugins/${PLUGINS_DOWNLOAD_BUCKET}/k8saudit-${PLUGIN_K8S_AUDIT_VERSION}-${PLUGINS_SYSTEM_NAME}-${CMAKE_HOST_SYSTEM_PROCESSOR}.tar.gz"
URL "https://download.falco.org/plugins/stable/k8saudit-${PLUGIN_K8S_AUDIT_VERSION}-${PLUGINS_SYSTEM_NAME}-${CMAKE_HOST_SYSTEM_PROCESSOR}.tar.gz"
URL_HASH "SHA256=${PLUGIN_K8S_AUDIT_HASH}"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
@@ -42,25 +38,24 @@ install(FILES "${PROJECT_BINARY_DIR}/k8saudit-plugin-prefix/src/k8saudit-plugin/
ExternalProject_Add(
k8saudit-rules
URL "https://download.falco.org/plugins/${PLUGINS_DOWNLOAD_BUCKET}/k8saudit-rules-${PLUGIN_K8S_AUDIT_VERSION}.tar.gz"
URL_HASH "SHA256=2e3214fee00a012b32402aad5198df889773fc5f86b8ab87583fbc56ae5fb78c"
URL "https://download.falco.org/plugins/stable/k8saudit-rules-${PLUGIN_K8S_AUDIT_VERSION}.tar.gz"
URL_HASH "SHA256=4383c69ba0ad63a127667c05618c37effc5297e6a7e68a1492acb0e48386540e"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND "")
install(FILES "${PROJECT_BINARY_DIR}/k8saudit-rules-prefix/src/k8saudit-rules/k8s_audit_rules.yaml" DESTINATION "${FALCO_ETC_DIR}" COMPONENT "${PLUGINS_COMPONENT_NAME}")
# cloudtrail
set(PLUGIN_CLOUDTRAIL_VERSION "0.8.0-0.7.3-33%2B81ffddd")
set(PLUGIN_CLOUDTRAIL_VERSION "0.7.0")
if(${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "x86_64")
set(PLUGIN_CLOUDTRAIL_HASH "144c297ae4285ea84b04af272f708a8b824f58bc9427a2eb91b467a6285d9e10")
set(PLUGIN_CLOUDTRAIL_HASH "85d94d8f5915804d5a30ff2f056e51de27d537f1fd1115050b4f4be6d32588cf")
else() # aarch64
set(PLUGIN_CLOUDTRAIL_HASH "19e7e8e11aaecd16442f65a265d3cd80ffb736ca4d3d8215893900fa0f04b926")
set(PLUGIN_CLOUDTRAIL_HASH "61ae471ee41e76680da9ab66f583d1ec43a2e48fbad8c157caecef56e4aa5fb7")
endif()
ExternalProject_Add(
cloudtrail-plugin
URL "https://download.falco.org/plugins/${PLUGINS_DOWNLOAD_BUCKET}/cloudtrail-${PLUGIN_CLOUDTRAIL_VERSION}-${PLUGINS_SYSTEM_NAME}-${CMAKE_HOST_SYSTEM_PROCESSOR}.tar.gz"
URL "https://download.falco.org/plugins/stable/cloudtrail-${PLUGIN_CLOUDTRAIL_VERSION}-${PLUGINS_SYSTEM_NAME}-${CMAKE_HOST_SYSTEM_PROCESSOR}.tar.gz"
URL_HASH "SHA256=${PLUGIN_CLOUDTRAIL_HASH}"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
@@ -70,25 +65,24 @@ install(FILES "${PROJECT_BINARY_DIR}/cloudtrail-plugin-prefix/src/cloudtrail-plu
ExternalProject_Add(
cloudtrail-rules
URL "https://download.falco.org/plugins/${PLUGINS_DOWNLOAD_BUCKET}/cloudtrail-rules-${PLUGIN_CLOUDTRAIL_VERSION}.tar.gz"
URL_HASH "SHA256=4f51d4bd9679f7f244c225b6fe530323f3536663da26a5b9d94d6953ed4e2cbc"
URL "https://download.falco.org/plugins/stable/cloudtrail-rules-${PLUGIN_CLOUDTRAIL_VERSION}.tar.gz"
URL_HASH "SHA256=c805be29ddc14fbffa29f7d6ee4f7e968a3bdb42da5f5483e5e6de273e8850c8"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND "")
install(FILES "${PROJECT_BINARY_DIR}/cloudtrail-rules-prefix/src/cloudtrail-rules/aws_cloudtrail_rules.yaml" DESTINATION "${FALCO_ETC_DIR}" COMPONENT "${PLUGINS_COMPONENT_NAME}")
install(FILES "${PROJECT_BINARY_DIR}/cloudtrail-rules-prefix/src/cloudtrail-rules/aws_cloudtrail_rules.yaml" DESTINATION "${FALCO_ETC_DIR}" COMPONENT "${PLUGINS_COMPONENT_NAME}")
# json
set(PLUGIN_JSON_VERSION "0.7.0-0.6.2-36%2B81ffddd")
set(PLUGIN_JSON_VERSION "0.6.0")
if(${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "x86_64")
set(PLUGIN_JSON_HASH "a9d8c595a139df5dc0cf2117127b496c94a9d3a3d0e84c1f18b3ccc9163f5f4a")
set(PLUGIN_JSON_HASH "15fb7eddd978e8bb03f05412e9446e264e4548d7423b3d724b99d6d87a8c1b27")
else() # aarch64
set(PLUGIN_JSON_HASH "7d78620395526d1e6a948cc915d1d52a343c2b637c9ac0e3892e76826fcdc2df")
set(PLUGIN_JSON_HASH "4db23f35a750e10a5b7b54c9aa469a7587705e7faa22927e941b41f3c5533e9f")
endif()
ExternalProject_Add(
json-plugin
URL "https://download.falco.org/plugins/${PLUGINS_DOWNLOAD_BUCKET}/json-${PLUGIN_JSON_VERSION}-${PLUGINS_SYSTEM_NAME}-${CMAKE_HOST_SYSTEM_PROCESSOR}.tar.gz"
URL "https://download.falco.org/plugins/stable/json-${PLUGIN_JSON_VERSION}-${PLUGINS_SYSTEM_NAME}-${CMAKE_HOST_SYSTEM_PROCESSOR}.tar.gz"
URL_HASH "SHA256=${PLUGIN_JSON_HASH}"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""

View File

@@ -3,7 +3,6 @@ FROM centos:7
LABEL name="falcosecurity/falco-builder"
LABEL usage="docker run -v $PWD/..:/source -v $PWD/build:/build falcosecurity/falco-builder cmake"
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
ARG BUILD_TYPE=release
ARG BUILD_DRIVER=OFF

View File

@@ -34,8 +34,6 @@ RUN make all -j${MAKE_JOBS}
FROM scratch AS export-stage
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
ARG DEST_BUILD_DIR="/build"
COPY --from=build-stage /build/release/falco-*.tar.gz /packages/

View File

@@ -1,8 +1,7 @@
ARG FALCO_IMAGE_TAG=latest
FROM docker.io/falcosecurity/falco:${FALCO_IMAGE_TAG}
FROM falcosecurity/falco:${FALCO_IMAGE_TAG}
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
LABEL usage="docker run -i -t --privileged -v /root/.falco:/root/.falco -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro -v /etc:/host/etc:ro --name NAME IMAGE"
@@ -11,4 +10,4 @@ ENV HOME /root
COPY ./docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
ENTRYPOINT ["/docker-entrypoint.sh"]

View File

@@ -1,7 +1,6 @@
FROM debian:buster
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
LABEL usage="docker run -i -t --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro -v /etc:/host/etc --name NAME IMAGE"

View File

@@ -2,7 +2,6 @@ FROM debian:buster
LABEL usage="docker run -i -t -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro --name NAME IMAGE"
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
ARG TARGETARCH

View File

@@ -23,13 +23,11 @@ RUN sed -e 's/time_format_iso_8601: false/time_format_iso_8601: true/' < /falco/
FROM debian:11-slim
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
LABEL usage="docker run -i -t --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro --name NAME IMAGE"
# NOTE: for the "least privileged" use case, please refer to the official documentation
RUN apt-get -y update && apt-get -y install ca-certificates curl jq \
&& apt clean -y && rm -rf /var/lib/apt/lists/*
RUN apt-get -y update && apt-get -y install ca-certificates
ENV HOST_ROOT /host
ENV HOME /root

View File

@@ -3,7 +3,6 @@ FROM fedora:31
LABEL name="falcosecurity/falco-tester"
LABEL usage="docker run -v /boot:/boot:ro -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/..:/source -v $PWD/build:/build --name <name> falcosecurity/falco-tester test"
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
ARG TARGETARCH

View File

@@ -15,7 +15,6 @@ LABEL "description"="Falco is a security policy engine that monitors system call
LABEL "io.k8s.display-name"="Falco"
LABEL "io.k8s.description"="Falco is a security policy engine that monitors system calls and cloud events, and fires alerts when security policies are violated."
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
LABEL org.opencontainers.image.source="https://github.com/falcosecurity/falco"
LABEL usage="docker run -i -t --privileged -v /var/run/docker.sock:/host/var/run/docker.sock -v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro -v /lib/modules:/host/lib/modules:ro -v /usr:/host/usr:ro -v /etc:/host/etc --name NAME IMAGE"

1241
falco.yaml

File diff suppressed because it is too large Load Diff

View File

@@ -1,100 +0,0 @@
# Falco Roadmap Management Proposal
## Summary
This document proposes the introduction of a structured process for managing Falco's roadmap and implementing related changes in our development process. The goal is to ensure the efficient execution of our roadmap objectives.
### Goals
The pillars of this proposal are:
- Define processes for release cycles and development iterations
- Provide guidelines for planning and prioritizing efforts
- Introduce regular meetings for core maintainers
- Using *GitHub Project* as the primary tool for managing *The Falco Project* roadmap
### Non-Goals
- Providing an exact set of criteria for task prioritization
- Detailing testing procedures
- Providing detailed instructions for GitHub Project usage
- Addressing hotfix releases
### Scope of this Proposal
Primarily, the roadmap targets the planning of Falco development and releases. However, given Falco's dependence on numerous components, it's inevitable that scheduling and planning activities span across multiple repositories. We anticipate that all [core repositories](https://github.com/falcosecurity/evolution#official) will be interconnected with the roadmap, making it comprehensive enough to incorporate items from all related [Falcosecurity repositories](https://github.com/falcosecurity) as necessary.
This proposal does **not apply to hotfix releases** that may happen whenever needed at the maintainers' discretion.
## Release Cycles and Development Iterations
Falco releases happen 3 times per year. Each release cycle completes, respectively, by the end of January, May, and September.
A **release cycle is a 16-week time frame** between two subsequent releases.
Using this schema, in a 52-week calendar year, we allocate 48 weeks for scheduled activities (16 weeks *x* 3 releases), leaving 4 weeks for breaks.
The 16-week release cycle is further divided into three distinct iterations:
| Iteration Name | Duration | Description |
|---------------|----------|-------------|
| Development | 8 weeks | Development phase |
| Stabilization | 4 weeks | Feature completion and bug fixing |
| Release Preparation | 4 weeks | Release preparation, testing, bug fixing, no new feature |
### Targeted Release Date
The final week of the *Release Preparation* should conclude before the *last Monday of the release month* (i.e., January/May/September). This *last Monday* is designated as the **targeted release date** (when the release is published), and the remaining part of the week is considered a break period.
### Milestones
For each release, we create a [GitHub Milestone](https://github.com/falcosecurity/falco/milestones) (whose due date must be equal to the target release date). We use the milestone to collect all items to be tentatively completed within the release.
### Alignment of Falco Components
The release schedule of the [components Falco depends on](https://github.com/falcosecurity/falco/blob/master/RELEASE.md#falco-components-versioning) needs to be synchronized to conform to these stipulations. For instance, a [falcosecurity/libs](https://github.com/falcosecurity/libs) release may be required at least one week prior to the termination of each iteration.
The maintainers are responsible for adapting those components' release schedules and procedures to release cycles and development iterations of Falco. Furthermore, all release processes must be documented and provide clear expectations regarding release dates.
## Project Roadmap
We use the [GitHub Project called *Falco Roadmap*](https://github.com/orgs/falcosecurity/projects/5) to plan and track the progress of each release cycle. The GitHub Project needs to be configured with the above mentioned iterations and break periods, compiled with actual dates. It's recommended to preconfigure the GitHub Project to accommodate the current plus the following three release cycles.
### Roadmap Planning
The roadmap serves as a strategic planning tool that outlines the goals and objectives for Falco. Its purpose is to visually represent the overall direction and timeline, enhance transparency and engage the community.
The onus is on the [Core Maintainers](https://github.com/falcosecurity/evolution/blob/main/GOVERNANCE.md#core-maintainers) to manage the roadmap. In this regard, Core Maintainers meet in **planning sessions on the first week of each calendar month**.
During these planning sessions, tasks are allocated to the current iteration or postponed to one of the following iterations. The assigned iteration indicates the projected completion date for a particular workstream.
When a session matches with the commencement of an iteration, maintainers convene to assess the planning and prioritize tasks for the iteration. The first planning session of a release cycle must define top priorities for the related release.
## Testing and Quality Assurance (QA)
Each iteration's output must include at least one Falco pre-release (or a viable development build) designated for testing and QA activities. While it's acceptable for these builds to contain unfinished features or known bugs, they must enable any community member to contribute to the testing and QA efforts.
The targeted schedule for these Testing/QA activities should be the **last week of each iteration** (or earlier during the *Release Preparation*).
Testing and Quality Assurance criteria and procedures must be defined and documented across relevant repositories.
Furthermore, given the strong reliance of Falco on [falcosecurity/libs](https://github.com/falcosecurity/libs), the above-mentioned pre-release/build for Testing/QA purposes must be based on the most recent *libs* development for the intended iteration. This means that during each iteration, a *libs* release (either pre or stable) must happen early enough to be used for this purpose.
## Next Steps and Conclusions
The Falco 0.36 release cycle, running from June to September 2023, will mark the initiation of the new process. This cycle will also serve as an experimental phase for refining the process.
Furthermore, as soon as possible, we will kick off a Working Group specifically to ensure smooth execution. This group will involve community members in assisting maintainers with roadmap management. It will provide curated feature suggestions for the roadmap, informed by community needs. This approach would facilitate the core maintainers' decisions, as they would mostly need just to review and adopt these pre-vetted recommendations, enhancing efficiency.
The Working Group's responsibilities will include (non-exhaustive list):
- Address input from the [2023-04-27 Core Maintainers meeting](https://github.com/falcosecurity/community/blob/main/meeting-notes/2023-04-27-Falco-Roadmap-Discussion.md)
- Sorting and reviewing pending issues to identify key topics for discussion and potential inclusion in the roadmap
- Establishing protocols not explicitly covered in this document
- Updating the documentation accordingly
- Supporting Core Maintainers in managing the [Falco Roadmap GitHub project](https://github.com/orgs/falcosecurity/projects/5)
- Gathering suggestions from all involved stakeholders to put forward potential enhancements
Finally, we anticipate the need for minor adjustments, which will become apparent only after an initial period of experimentation. Thus we have to intend this process to be flexible enough to adapt to emerging needs and improvements as long as the fundamental spirit of this proposal is upheld.

View File

@@ -128,38 +128,10 @@ get_target_id() {
case "${OS_ID}" in
("amzn")
case "${VERSION_ID}" in
("2")
if [[ $VERSION_ID == "2" ]]; then
TARGET_ID="amazonlinux2"
;;
("2022")
TARGET_ID="amazonlinux2022"
;;
("2023")
TARGET_ID="amazonlinux2023"
;;
(*)
else
TARGET_ID="amazonlinux"
;;
esac
;;
("debian")
# Workaround: debian kernel releases might not be the actual running kernel;
# instead, they might be the Debian kernel package
# providing the compatible kernel ABI
# See https://lists.debian.org/debian-user/2017/03/msg00485.html
# Real kernel release is embedded inside the kernel version.
# Moreover, kernel arch, when present, is attached to the former,
# therefore make sure to properly take it and attach it to the latter.
TARGET_ID=$(echo "${OS_ID}" | tr '[:upper:]' '[:lower:]')
local ARCH_extra=""
if [[ $KERNEL_RELEASE =~ -(amd64|arm64) ]];
then
ARCH_extra="-${BASH_REMATCH[1]}"
fi
if [[ $(uname -v) =~ ([0-9]+\.[0-9]+\.[0-9]+\-[0-9]+) ]];
then
KERNEL_RELEASE="${BASH_REMATCH[1]}${ARCH_extra}"
fi
;;
("ubuntu")
@@ -179,7 +151,7 @@ get_target_id() {
TARGET_ID=$(echo "${OS_ID}" | tr '[:upper:]' '[:lower:]')
;;
("minikube")
TARGET_ID=$(echo "${OS_ID}" | tr '[:upper:]' '[:lower:]')
TARGET_ID="${OS_ID}"
# Extract the minikube version. Ex. With minikube version equal to "v1.26.0-1655407986-14197" the extracted version
# will be "1.26.0"
if [[ $(cat ${HOST_ROOT}/etc/VERSION) =~ ([0-9]+(\.[0-9]+){2}) ]]; then
@@ -191,7 +163,7 @@ get_target_id() {
fi
;;
("bottlerocket")
TARGET_ID=$(echo "${OS_ID}" | tr '[:upper:]' '[:lower:]')
TARGET_ID="${OS_ID}"
# variant_id has been sourced from os-release. Get only the first variant part
if [[ -n ${VARIANT_ID} ]]; then
# take just first part (eg: VARIANT_ID=aws-k8s-1.15 -> aws)
@@ -200,11 +172,6 @@ get_target_id() {
# version_id has been sourced from os-release. Build a kernel version like: 1_1.11.0-aws
KERNEL_VERSION="1_${VERSION_ID}-${VARIANT_ID_CUT}"
;;
("talos")
TARGET_ID=$(echo "${OS_ID}" | tr '[:upper:]' '[:lower:]')
# version_id has been sourced from os-release. Build a kernel version like: 1_1.4.1
KERNEL_VERSION="1_${VERSION_ID}"
;;
(*)
TARGET_ID=$(echo "${OS_ID}" | tr '[:upper:]' '[:lower:]')
;;
@@ -265,10 +232,10 @@ load_kernel_module_compile() {
continue
fi
echo "* Trying to dkms install ${DRIVER_NAME} module with GCC ${CURRENT_GCC}"
echo "#!/usr/bin/env bash" > "${TMPDIR}/falco-dkms-make"
echo "make CC=${CURRENT_GCC} \$@" >> "${TMPDIR}/falco-dkms-make"
chmod +x "${TMPDIR}/falco-dkms-make"
if dkms install --directive="MAKE='${TMPDIR}/falco-dkms-make'" -m "${DRIVER_NAME}" -v "${DRIVER_VERSION}" -k "${KERNEL_RELEASE}" 2>/dev/null; then
echo "#!/usr/bin/env bash" > /tmp/falco-dkms-make
echo "make CC=${CURRENT_GCC} \$@" >> /tmp/falco-dkms-make
chmod +x /tmp/falco-dkms-make
if dkms install --directive="MAKE='/tmp/falco-dkms-make'" -m "${DRIVER_NAME}" -v "${DRIVER_VERSION}" -k "${KERNEL_RELEASE}" 2>/dev/null; then
echo "* ${DRIVER_NAME} module installed in dkms"
KO_FILE="/var/lib/dkms/${DRIVER_NAME}/${DRIVER_VERSION}/${KERNEL_RELEASE}/${ARCH}/module/${DRIVER_NAME}"
if [ -f "$KO_FILE.ko" ]; then
@@ -692,8 +659,6 @@ if [ -v FALCO_BPF_PROBE ]; then
DRIVER="bpf"
fi
TMPDIR=${TMPDIR:-"/tmp"}
ENABLE_COMPILE=
ENABLE_DOWNLOAD=

View File

@@ -56,7 +56,7 @@ trace_files: !mux
incompatible_extract_sources:
exit_status: 1
stderr_contains: "Plugin '.*' is loaded but unused as not compatible with any known event source"
stderr_contains: "Plugin '.*' has field extraction capability but is not compatible with any known event source"
conf_file: BUILD_DIR/test/confs/plugins/incompatible_extract_sources.yaml
rules_file:
- rules/plugins/cloudtrail_create_instances.yaml

View File

@@ -6,7 +6,7 @@ idna==2.9
pathtools==0.1.2
pbr==5.4.5
PyYAML==5.4
requests==2.31.0
requests==2.26.0
six==1.14.0
stevedore==1.32.0
urllib3==1.26.5

View File

@@ -36,35 +36,3 @@ TEST(FalcoUtils, is_unix_scheme)
char url_char[] = "unix:///falco.sock";
ASSERT_EQ(falco::utils::network::is_unix_scheme(url_char), true);
}
TEST(FalcoUtils, parse_prometheus_interval)
{
/* Test matrix around correct time conversions. */
ASSERT_EQ(falco::utils::parse_prometheus_interval("1ms"), 1UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1s"), 1000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1m"), 60000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1h"), 3600000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1d"), 86400000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1w"), 604800000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1y"), 31536000000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("300ms"), 300UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("255s"), 255000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("5m"), 300000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("15m"), 900000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("30m"), 1800000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("60m"), 3600000UL);
/* Test matrix for concatenated time interval examples. */
ASSERT_EQ(falco::utils::parse_prometheus_interval("1h3m2s1ms"), 3600000UL + 3 * 60000UL + 2 * 1000UL + 1UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1y1w1d1h1m1s1ms"), 31536000000UL + 604800000UL + 86400000UL + 3600000UL + 60000UL + 1000UL + 1UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("2h5m"), 2 * 3600000UL + 5 * 60000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("2h 5m"), 2 * 3600000UL + 5 * 60000UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("200"), 200UL);
/* Invalid, non prometheus compliant time ordering will result in 0ms. */
ASSERT_EQ(falco::utils::parse_prometheus_interval("1ms1y"), 0UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1t1y"), 0UL);
ASSERT_EQ(falco::utils::parse_prometheus_interval("1t"), 0UL);
}

View File

@@ -1,49 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless ASSERT_EQd by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <gtest/gtest.h>
#include <engine/filter_details_resolver.h>
TEST(DetailsResolver, resolve_ast)
{
std::string cond = "(spawned_process or evt.type = open) and (proc.name icontains cat or proc.name in (known_procs, ps))";
auto ast = libsinsp::filter::parser(cond).parse();
filter_details details;
details.known_macros.insert("spawned_process");
details.known_lists.insert("known_procs");
filter_details_resolver resolver;
resolver.run(ast.get(), details);
// Assert fields
ASSERT_EQ(details.fields.size(), 2);
ASSERT_NE(details.fields.find("evt.type"), details.fields.end());
ASSERT_NE(details.fields.find("proc.name"), details.fields.end());
// Assert macros
ASSERT_EQ(details.macros.size(), 1);
ASSERT_NE(details.macros.find("spawned_process"), details.macros.end());
// Assert operators
ASSERT_EQ(details.operators.size(), 3);
ASSERT_NE(details.operators.find("="), details.operators.end());
ASSERT_NE(details.operators.find("icontains"), details.operators.end());
ASSERT_NE(details.operators.find("in"), details.operators.end());
// Assert lists
ASSERT_EQ(details.lists.size(), 1);
ASSERT_NE(details.lists.find("known_procs"), details.lists.end());
}

View File

@@ -1,7 +0,0 @@
#pragma once
#include <gtest/gtest.h>
#include <falco/app/state.h>
#include <falco/app/actions/actions.h>
#define EXPECT_ACTION_OK(r) { EXPECT_TRUE(r.success); EXPECT_TRUE(r.proceed); EXPECT_EQ(r.errstr, ""); }
#define EXPECT_ACTION_FAIL(r) { EXPECT_FALSE(r.success); EXPECT_FALSE(r.proceed); EXPECT_NE(r.errstr, ""); }

View File

@@ -17,11 +17,13 @@ limitations under the License.
#include <falco_engine.h>
#include <falco/app/app.h>
#include "app_action_helpers.h"
#include <falco/app/state.h>
#include <falco/app/actions/actions.h>
#include <gtest/gtest.h>
#define ASSERT_NAMES_EQ(a, b) { \
EXPECT_EQ(_order(a).size(), _order(b).size()); \
ASSERT_EQ(_order(a).size(), _order(b).size()); \
ASSERT_EQ(_order(a), _order(b)); \
}
@@ -45,7 +47,7 @@ static std::string s_sample_ruleset = "sample-ruleset";
static std::string s_sample_source = falco_common::syscall_source;
static strset_t s_sample_filters = {
"evt.type=connect or evt.type=accept or evt.type=accept4 or evt.type=umount2",
"evt.type=connect or evt.type=accept",
"evt.type in (open, ptrace, mmap, execve, read, container)",
"evt.type in (open, execve, mprotect) and not evt.type=mprotect"};
@@ -89,7 +91,6 @@ static std::shared_ptr<falco_engine> mock_engine_from_filters(const strset_t& fi
TEST(ConfigureInterestingSets, engine_codes_syscalls_set)
{
auto engine = mock_engine_from_filters(s_sample_filters);
auto enabled_count = engine->num_rules_for_ruleset(s_sample_ruleset);
ASSERT_EQ(enabled_count, s_sample_filters.size());
@@ -98,45 +99,45 @@ TEST(ConfigureInterestingSets, engine_codes_syscalls_set)
auto rules_event_set = engine->event_codes_for_ruleset(s_sample_source);
auto rules_event_names = libsinsp::events::event_set_to_names(rules_event_set);
ASSERT_NAMES_EQ(rules_event_names, strset_t({
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "read", "container", "asyncevent"}));
"connect", "accept", "open", "ptrace", "mmap", "execve", "read", "container"}));
// test if sc code names were extracted from each rule in test ruleset.
// note, this is not supposed to contain "container", as that's an event
// not mapped through the ppm_sc_code enumerative.
auto rules_sc_set = engine->sc_codes_for_ruleset(s_sample_source);
auto rules_sc_names = libsinsp::events::sc_set_to_event_names(rules_sc_set);
auto rules_sc_names = libsinsp::events::sc_set_to_names(rules_sc_set);
ASSERT_NAMES_EQ(rules_sc_names, strset_t({
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "read"}));
"connect", "accept", "accept4", "open", "ptrace", "mmap", "execve", "read"}));
}
TEST(ConfigureInterestingSets, preconditions_postconditions)
{
falco::app::state s;
auto mock_engine = mock_engine_from_filters(s_sample_filters);
falco::app::state s1;
s1.engine = mock_engine;
s1.config = nullptr;
auto result = falco::app::actions::configure_interesting_sets(s1);
s.engine = mock_engine;
s.config = nullptr;
auto result = falco::app::actions::configure_interesting_sets(s);
ASSERT_FALSE(result.success);
ASSERT_NE(result.errstr, "");
s1.engine = nullptr;
s1.config = std::make_shared<falco_configuration>();
result = falco::app::actions::configure_interesting_sets(s1);
s.engine = nullptr;
s.config = std::make_shared<falco_configuration>();
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_FALSE(result.success);
ASSERT_NE(result.errstr, "");
s1.engine = mock_engine;
s1.config = std::make_shared<falco_configuration>();
result = falco::app::actions::configure_interesting_sets(s1);
s.engine = mock_engine;
s.config = std::make_shared<falco_configuration>();
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
auto prev_selection_size = s1.selected_sc_set.size();
result = falco::app::actions::configure_interesting_sets(s1);
auto prev_selection_size = s.selected_sc_set.size();
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
ASSERT_EQ(prev_selection_size, s1.selected_sc_set.size());
ASSERT_EQ(prev_selection_size, s.selected_sc_set.size());
}
TEST(ConfigureInterestingSets, engine_codes_nonsyscalls_set)
@@ -157,28 +158,26 @@ TEST(ConfigureInterestingSets, engine_codes_nonsyscalls_set)
// This is a good example of information loss from ppm_event_code <-> ppm_sc_code.
auto generic_names = libsinsp::events::event_set_to_names({ppm_event_code::PPME_GENERIC_E});
auto expected_names = strset_t({
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "read", "container", // ruleset
"procexit", "switch", "pluginevent", "asyncevent"}); // from non-syscall event filters
"connect", "accept", "open", "ptrace", "mmap", "execve", "read", "container", // ruleset
"procexit", "switch", "pluginevent"}); // from non-syscall event filters
expected_names.insert(generic_names.begin(), generic_names.end());
ASSERT_NAMES_EQ(rules_event_names, expected_names);
auto rules_sc_set = engine->sc_codes_for_ruleset(s_sample_source);
auto rules_sc_names = libsinsp::events::sc_set_to_event_names(rules_sc_set);
auto rules_sc_names = libsinsp::events::sc_set_to_names(rules_sc_set);
ASSERT_NAMES_EQ(rules_sc_names, strset_t({
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "read",
"procexit", "switch", "syncfs", "fanotify_init", // from generic event filters
"connect", "accept", "accept4", "open", "ptrace", "mmap", "execve", "read",
"syncfs", "fanotify_init", // from generic event filters
}));
}
TEST(ConfigureInterestingSets, selection_not_allevents)
{
falco::app::state s2;
// run app action with fake engine and without the `-A` option
s2.engine = mock_engine_from_filters(s_sample_filters);
s2.options.all_events = false;
ASSERT_EQ(s2.options.all_events, false);
auto result = falco::app::actions::configure_interesting_sets(s2);
falco::app::state s;
s.engine = mock_engine_from_filters(s_sample_filters);
s.options.all_events = false;
auto result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
@@ -186,42 +185,42 @@ TEST(ConfigureInterestingSets, selection_not_allevents)
// also check if a warning has been printed in stderr
// check that the final selected set is the one expected
ASSERT_GT(s2.selected_sc_set.size(), 1);
auto selected_sc_names = libsinsp::events::sc_set_to_event_names(s2.selected_sc_set);
ASSERT_NE(s.selected_sc_set.size(), 0);
auto selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
auto expected_sc_names = strset_t({
// note: we expect the "read" syscall to have been erased
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", // from ruleset
"connect", "accept", "open", "ptrace", "mmap", "execve", // from ruleset
"clone", "clone3", "fork", "vfork", // from sinsp state set (spawned_process)
"socket", "bind", "close" // from sinsp state set (network, files)
});
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
// check that all IO syscalls have been erased from the selection
auto ignored_set = falco::app::ignored_sc_set();
auto erased_sc_names = libsinsp::events::sc_set_to_event_names(ignored_set);
auto io_set = libsinsp::events::io_sc_set();
auto erased_sc_names = libsinsp::events::sc_set_to_names(io_set);
ASSERT_NAMES_NOCONTAIN(selected_sc_names, erased_sc_names);
// check that final selected set is exactly sinsp state + ruleset
auto rule_set = s2.engine->sc_codes_for_ruleset(s_sample_source, s_sample_ruleset);
auto rule_set = s.engine->sc_codes_for_ruleset(s_sample_source, s_sample_ruleset);
auto state_set = libsinsp::events::sinsp_state_sc_set();
for (const auto &erased : ignored_set)
for (const auto &erased : io_set)
{
rule_set.remove(erased);
state_set.remove(erased);
}
auto union_set = state_set.merge(rule_set);
auto inter_set = state_set.intersect(rule_set);
EXPECT_EQ(s2.selected_sc_set.size(), state_set.size() + rule_set.size() - inter_set.size());
ASSERT_EQ(s2.selected_sc_set, union_set);
ASSERT_EQ(s.selected_sc_set.size(), state_set.size() + rule_set.size() - inter_set.size());
ASSERT_EQ(s.selected_sc_set, union_set);
}
TEST(ConfigureInterestingSets, selection_allevents)
{
falco::app::state s3;
// run app action with fake engine and with the `-A` option
s3.engine = mock_engine_from_filters(s_sample_filters);
s3.options.all_events = true;
auto result = falco::app::actions::configure_interesting_sets(s3);
falco::app::state s;
s.engine = mock_engine_from_filters(s_sample_filters);
s.options.all_events = true;
auto result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
@@ -229,50 +228,47 @@ TEST(ConfigureInterestingSets, selection_allevents)
// also check if a warning has not been printed in stderr
// check that the final selected set is the one expected
ASSERT_GT(s3.selected_sc_set.size(), 1);
auto selected_sc_names = libsinsp::events::sc_set_to_event_names(s3.selected_sc_set);
ASSERT_NE(s.selected_sc_set.size(), 0);
auto selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
auto expected_sc_names = strset_t({
// note: we expect the "read" syscall to not be erased
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "read", // from ruleset
"connect", "accept", "open", "ptrace", "mmap", "execve", "read", // from ruleset
"clone", "clone3", "fork", "vfork", // from sinsp state set (spawned_process)
"socket", "bind", "close" // from sinsp state set (network, files)
});
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
// check that final selected set is exactly sinsp state + ruleset
auto rule_set = s3.engine->sc_codes_for_ruleset(s_sample_source, s_sample_ruleset);
auto rule_set = s.engine->sc_codes_for_ruleset(s_sample_source, s_sample_ruleset);
auto state_set = libsinsp::events::sinsp_state_sc_set();
auto union_set = state_set.merge(rule_set);
auto inter_set = state_set.intersect(rule_set);
EXPECT_EQ(s3.selected_sc_set.size(), state_set.size() + rule_set.size() - inter_set.size());
ASSERT_EQ(s3.selected_sc_set, union_set);
ASSERT_EQ(s.selected_sc_set.size(), state_set.size() + rule_set.size() - inter_set.size());
ASSERT_EQ(s.selected_sc_set, union_set);
}
TEST(ConfigureInterestingSets, selection_generic_evts)
{
falco::app::state s4;
// run app action with fake engine and without the `-A` option
s4.options.all_events = false;
falco::app::state s;
auto filters = s_sample_filters;
filters.insert(s_sample_generic_filters.begin(), s_sample_generic_filters.end());
s4.engine = mock_engine_from_filters(filters);
auto result = falco::app::actions::configure_interesting_sets(s4);
s.engine = mock_engine_from_filters(filters);
auto result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
// check that the final selected set is the one expected
ASSERT_GT(s4.selected_sc_set.size(), 1);
auto selected_sc_names = libsinsp::events::sc_set_to_event_names(s4.selected_sc_set);
ASSERT_NE(s.selected_sc_set.size(), 0);
auto selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
auto expected_sc_names = strset_t({
// note: we expect the "read" syscall to not be erased
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", // from ruleset
"connect", "accept", "open", "ptrace", "mmap", "execve", // from ruleset
"syncfs", "fanotify_init", // from ruleset (generic events)
"clone", "clone3", "fork", "vfork", // from sinsp state set (spawned_process)
"socket", "bind", "close" // from sinsp state set (network, files)
});
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
auto unexpected_sc_names = libsinsp::events::sc_set_to_event_names(falco::app::ignored_sc_set());
ASSERT_NAMES_NOCONTAIN(selected_sc_names, unexpected_sc_names);
}
// expected combinations precedence:
@@ -282,19 +278,18 @@ TEST(ConfigureInterestingSets, selection_generic_evts)
// - if `-A` is not set, events from the IO set are removed from the selected set
TEST(ConfigureInterestingSets, selection_custom_base_set)
{
falco::app::state s5;
// run app action with fake engine and without the `-A` option
s5.options.all_events = true;
s5.engine = mock_engine_from_filters(s_sample_filters);
falco::app::state s;
s.options.all_events = true;
s.engine = mock_engine_from_filters(s_sample_filters);
auto default_base_set = libsinsp::events::sinsp_state_sc_set();
// non-empty custom base set (both positive and negative)
s5.config->m_base_syscalls_repair = false;
s5.config->m_base_syscalls_custom_set = {"syncfs", "!accept"};
auto result = falco::app::actions::configure_interesting_sets(s5);
s.config->m_base_syscalls = {"syncfs", "!accept"};
auto result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
auto selected_sc_names = libsinsp::events::sc_set_to_event_names(s5.selected_sc_set);
auto selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
auto expected_sc_names = strset_t({
// note: `syncfs` has been added due to the custom base set, and `accept`
// has been remove due to the negative base set.
@@ -302,127 +297,56 @@ TEST(ConfigureInterestingSets, selection_custom_base_set)
// note: `accept` is not included even though it is matched by the rules,
// which means that the custom negation base set has precedence over the
// final selection set as a whole
// note(jasondellaluce): "accept4" should be added, however old versions
// of the ACCEPT4 event are actually named "accept" in the event table
"connect", "umount2", "open", "ptrace", "mmap", "execve", "read", "syncfs", "procexit"
"connect", "open", "ptrace", "mmap", "execve", "read", "syncfs"
});
ASSERT_NAMES_EQ(selected_sc_names, expected_sc_names);
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
// non-empty custom base set (both positive and negative with collision)
s5.config->m_base_syscalls_repair = false;
s5.config->m_base_syscalls_custom_set = {"syncfs", "accept", "!accept"};
result = falco::app::actions::configure_interesting_sets(s5);
s.config->m_base_syscalls = {"syncfs", "accept", "!accept"};
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
selected_sc_names = libsinsp::events::sc_set_to_event_names(s5.selected_sc_set);
selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
// note: in case of collision, negation has priority, so the expected
// names are the same as the case above
ASSERT_NAMES_EQ(selected_sc_names, expected_sc_names);
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
// non-empty custom base set (only positive)
s5.config->m_base_syscalls_custom_set = {"syncfs"};
result = falco::app::actions::configure_interesting_sets(s5);
s.config->m_base_syscalls = {"syncfs"};
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
selected_sc_names = libsinsp::events::sc_set_to_event_names(s5.selected_sc_set);
selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
expected_sc_names = strset_t({
// note: accept is not negated anymore
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "read", "syncfs", "procexit"
"connect", "accept", "open", "ptrace", "mmap", "execve", "read", "syncfs"
});
ASSERT_NAMES_EQ(selected_sc_names, expected_sc_names);
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
// non-empty custom base set (only negative)
s5.config->m_base_syscalls_custom_set = {"!accept"};
result = falco::app::actions::configure_interesting_sets(s5);
s.config->m_base_syscalls = {"!accept"};
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
selected_sc_names = libsinsp::events::sc_set_to_event_names(s5.selected_sc_set);
selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
expected_sc_names = unordered_set_union(
libsinsp::events::sc_set_to_event_names(default_base_set),
strset_t({ "connect", "umount2", "open", "ptrace", "mmap", "execve", "read"}));
libsinsp::events::sc_set_to_names(default_base_set),
strset_t({ "connect", "open", "ptrace", "mmap", "execve", "read"}));
expected_sc_names.erase("accept");
// note(jasondellaluce): "accept4" should be included, however old versions
// of the ACCEPT4 event are actually named "accept" in the event table
expected_sc_names.erase("accept4");
ASSERT_NAMES_EQ(selected_sc_names, expected_sc_names);
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
// non-empty custom base set (positive, without -A)
s5.options.all_events = false;
s5.config->m_base_syscalls_custom_set = {"read"};
result = falco::app::actions::configure_interesting_sets(s5);
s.options.all_events = false;
s.config->m_base_syscalls = {"read"};
result = falco::app::actions::configure_interesting_sets(s);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
selected_sc_names = libsinsp::events::sc_set_to_event_names(s5.selected_sc_set);
selected_sc_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
expected_sc_names = strset_t({
// note: read is both part of the custom base set and the rules set,
// but we expect the unset -A option to take precedence
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "procexit"
});
ASSERT_NAMES_EQ(selected_sc_names, expected_sc_names);
auto unexpected_sc_names = libsinsp::events::sc_set_to_event_names(falco::app::ignored_sc_set());
ASSERT_NAMES_NOCONTAIN(selected_sc_names, unexpected_sc_names);
}
TEST(ConfigureInterestingSets, selection_custom_base_set_repair)
{
falco::app::state s6;
// run app action with fake engine and without the `-A` option
s6.options.all_events = false;
s6.engine = mock_engine_from_filters(s_sample_filters);
// note: here we use file syscalls (e.g. open, openat) and have a custom
// positive set, so we expect syscalls such as "close" to be selected as
// repaired. Also, given that we use some network syscalls, we expect "bind"
// to be selected event if we negate it, because repairment should have
// take precedence.
s6.config->m_base_syscalls_custom_set = {"openat", "!bind"};
s6.config->m_base_syscalls_repair = true;
auto result = falco::app::actions::configure_interesting_sets(s6);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
auto selected_sc_names = libsinsp::events::sc_set_to_event_names(s6.selected_sc_set);
auto expected_sc_names = strset_t({
// note: expecting syscalls from mock rules and `sinsp_repair_state_sc_set` enforced syscalls
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "procexit", \
"bind", "socket", "clone3", "close", "setuid"
"connect", "accept", "open", "ptrace", "mmap", "execve",
});
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
auto unexpected_sc_names = libsinsp::events::sc_set_to_event_names(falco::app::ignored_sc_set());
ASSERT_NAMES_NOCONTAIN(selected_sc_names, unexpected_sc_names);
}
TEST(ConfigureInterestingSets, selection_empty_custom_base_set_repair)
{
falco::app::state s7;
// run app action with fake engine and with the `-A` option
s7.options.all_events = true;
s7.engine = mock_engine_from_filters(s_sample_filters);
// simulate empty custom set but repair option set.
s7.config->m_base_syscalls_custom_set = {};
s7.config->m_base_syscalls_repair = true;
auto result = falco::app::actions::configure_interesting_sets(s7);
auto s7_rules_set = s7.engine->sc_codes_for_ruleset(s_sample_source, s_sample_ruleset);
ASSERT_TRUE(result.success);
ASSERT_EQ(result.errstr, "");
auto selected_sc_names = libsinsp::events::sc_set_to_event_names(s7.selected_sc_set);
auto expected_sc_names = strset_t({
// note: expecting syscalls from mock rules and `sinsp_repair_state_sc_set` enforced syscalls
"connect", "accept", "accept4", "umount2", "open", "ptrace", "mmap", "execve", "procexit", \
"bind", "socket", "clone3", "close", "setuid"
});
ASSERT_NAMES_CONTAIN(selected_sc_names, expected_sc_names);
auto s7_state_set = libsinsp::events::sinsp_repair_state_sc_set(s7_rules_set);
ASSERT_EQ(s7.selected_sc_set, s7_state_set);
ASSERT_EQ(s7.selected_sc_set.size(), s7_state_set.size());
}
TEST(ConfigureInterestingSets, ignored_set_expected_size)
{
// unit test fence to make sure we don't have unexpected regressions
// in the ignored set, to be updated in the future
ASSERT_EQ(falco::app::ignored_sc_set().size(), 14);
// we don't expect to ignore any syscall in the default base set
ASSERT_EQ(falco::app::ignored_sc_set().intersect(libsinsp::events::sinsp_state_sc_set()).size(), 0);
}

View File

@@ -1,55 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless ASSERTd by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "app_action_helpers.h"
TEST(ActionConfigureSyscallBufferNum, variable_number_of_CPUs)
{
auto action = falco::app::actions::configure_syscall_buffer_num;
ssize_t online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if(online_cpus <= 0)
{
FAIL() << "cannot get the number of online CPUs from the system\n";
}
// not modern bpf engine, we do nothing
{
falco::app::state s;
s.options.modern_bpf = false;
EXPECT_ACTION_OK(action(s));
}
// modern bpf engine, with an invalid number of CPUs
// default `m_cpus_for_each_syscall_buffer` to online CPU number
{
falco::app::state s;
s.options.modern_bpf = true;
s.config->m_cpus_for_each_syscall_buffer = online_cpus + 1;
EXPECT_ACTION_OK(action(s));
EXPECT_EQ(s.config->m_cpus_for_each_syscall_buffer, online_cpus);
}
// modern bpf engine, with an valid number of CPUs
// we don't modify `m_cpus_for_each_syscall_buffer`
{
falco::app::state s;
s.options.modern_bpf = true;
s.config->m_cpus_for_each_syscall_buffer = online_cpus - 1;
EXPECT_ACTION_OK(action(s));
EXPECT_EQ(s.config->m_cpus_for_each_syscall_buffer, online_cpus - 1);
}
}

View File

@@ -14,7 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
#include "app_action_helpers.h"
#include <gtest/gtest.h>
#include <falco/app/state.h>
#include <falco/app/actions/actions.h>
#define EXPECT_ACTION_OK(r) { EXPECT_TRUE(r.success); EXPECT_TRUE(r.proceed); EXPECT_EQ(r.errstr, ""); }
#define EXPECT_ACTION_FAIL(r) { EXPECT_FALSE(r.success); EXPECT_FALSE(r.proceed); EXPECT_NE(r.errstr, ""); }
TEST(ActionSelectEventSources, pre_post_conditions)
{
@@ -39,18 +44,10 @@ TEST(ActionSelectEventSources, pre_post_conditions)
falco::app::state s;
s.loaded_sources = {"syscall", "some_source"};
EXPECT_ACTION_OK(action(s));
EXPECT_EQ(s.loaded_sources.size(), s.enabled_sources.size());
for (const auto& v : s.loaded_sources)
{
ASSERT_TRUE(s.enabled_sources.find(v) != s.enabled_sources.end());
}
s.loaded_sources.push_back("another_source");
EXPECT_EQ(s.loaded_sources, s.enabled_sources);
s.loaded_sources.insert("another_source");
EXPECT_ACTION_OK(action(s));
EXPECT_EQ(s.loaded_sources.size(), s.enabled_sources.size());
for (const auto& v : s.loaded_sources)
{
ASSERT_TRUE(s.enabled_sources.find(v) != s.enabled_sources.end());
}
EXPECT_EQ(s.loaded_sources, s.enabled_sources);
}
// enable only selected sources

View File

@@ -18,7 +18,6 @@ set(FALCO_ENGINE_SOURCE_FILES
json_evt.cpp
evttype_index_ruleset.cpp
formats.cpp
filter_details_resolver.cpp
filter_macro_resolver.cpp
filter_warning_resolver.cpp
stats_manager.cpp

View File

@@ -1,5 +1,5 @@
/*
Copyright (C) 2023 The Falco Authors.
Copyright (C) 2019 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -37,6 +37,9 @@ limitations under the License.
#undef strcat
#define strcat(a, b) BAN(strcat)
#undef strncat
#define strncat(a, b, c) BAN(strncat)
#undef strncpy
#define strncpy(a, b, c) BAN(strncpy)

View File

@@ -171,6 +171,8 @@ void evttype_index_ruleset::add(
if(rule.source == falco_common::syscall_source)
{
wrap->sc_codes = libsinsp::filter::ast::ppm_sc_codes(condition.get());
// todo(jasondellaluce): once libsinsp has its fixes, optimize this
// by using libsinsp::events::ppm_set_to_event_set(wrap->sc_codes)
wrap->event_codes = libsinsp::filter::ast::ppm_event_codes(condition.get());
}
else
@@ -178,7 +180,6 @@ void evttype_index_ruleset::add(
wrap->sc_codes = { };
wrap->event_codes = { ppm_event_code::PPME_PLUGINEVENT_E };
}
wrap->event_codes.insert(ppm_event_code::PPME_ASYNCEVENT_E);
m_filters.insert(wrap);
}
catch (const sinsp_exception& e)

View File

@@ -52,7 +52,7 @@ struct falco_exception : std::exception
namespace falco_common
{
const std::string syscall_source = sinsp_syscall_event_source_name;
const std::string syscall_source = "syscall";
// Same as numbers/indices into the above vector
enum priority_type

View File

@@ -15,21 +15,11 @@ limitations under the License.
*/
#include <cstdlib>
#ifndef _WIN32
#include <unistd.h>
#else
#include <stdlib.h>
#include <io.h>
#define srandom srand
#define random rand
#endif
#include <string>
#include <fstream>
#include <functional>
#include <utility>
#include <vector>
#include <nlohmann/json.hpp>
#include <sinsp.h>
#include <plugin.h>
@@ -45,7 +35,6 @@ limitations under the License.
#include "utils.h"
#include "banned.h" // This raises a compilation error when certain functions are used
#include "evttype_index_ruleset.h"
#include "filter_details_resolver.h"
const std::string falco_engine::s_default_ruleset = "falco-default-ruleset";
@@ -199,6 +188,11 @@ std::unique_ptr<load_result> falco_engine::load_rules(const std::string &rules_c
rule_loader::reader reader;
if (reader.read(cfg, m_rule_collector))
{
for (auto &src : m_sources)
{
src.ruleset = src.ruleset_factory->new_ruleset();
}
rule_loader::compiler compiler;
m_rules.clear();
compiler.compile(cfg, m_rule_collector, m_rules);
@@ -426,323 +420,27 @@ std::size_t falco_engine::add_source(const std::string &source,
return m_sources.insert(src, source);
}
void falco_engine::describe_rule(std::string *rule, bool json) const
void falco_engine::describe_rule(std::string *rule) const
{
if(!json)
static const char* rule_fmt = "%-50s %s\n";
fprintf(stdout, rule_fmt, "Rule", "Description");
fprintf(stdout, rule_fmt, "----", "-----------");
if (!rule)
{
static const char *rule_fmt = "%-50s %s\n";
fprintf(stdout, rule_fmt, "Rule", "Description");
fprintf(stdout, rule_fmt, "----", "-----------");
if(!rule)
for (auto &r : m_rules)
{
for(auto &r : m_rules)
{
auto str = falco::utils::wrap_text(r.description, 51, 110) + "\n";
fprintf(stdout, rule_fmt, r.name.c_str(), str.c_str());
}
auto str = falco::utils::wrap_text(r.description, 51, 110) + "\n";
fprintf(stdout, rule_fmt, r.name.c_str(), str.c_str());
}
else
{
auto r = m_rules.at(*rule);
if(r == nullptr)
{
return;
}
auto str = falco::utils::wrap_text(r->description, 51, 110) + "\n";
fprintf(stdout, rule_fmt, r->name.c_str(), str.c_str());
}
return;
}
std::unique_ptr<sinsp> insp(new sinsp());
Json::FastWriter writer;
std::string json_str;
if(!rule)
{
// In this case we build json information about
// all rules, macros and lists
Json::Value output;
// Store required engine version
auto required_engine_version = m_rule_collector.required_engine_version();
output["required_engine_version"] = std::to_string(required_engine_version.version);
// Store required plugin versions
Json::Value plugin_versions = Json::arrayValue;
auto required_plugin_versions = m_rule_collector.required_plugin_versions();
for(const auto& req : required_plugin_versions)
{
Json::Value r;
r["name"] = req.at(0).name;
r["version"] = req.at(0).version;
Json::Value alternatives = Json::arrayValue;
for(size_t i = 1; i < req.size(); i++)
{
Json::Value alternative;
alternative["name"] = req[i].name;
alternative["version"] = req[i].version;
alternatives.append(alternative);
}
r["alternatives"] = alternatives;
plugin_versions.append(r);
}
output["required_plugin_versions"] = plugin_versions;
// Store information about rules
Json::Value rules_array = Json::arrayValue;
for(const auto& r : m_rules)
{
auto ri = m_rule_collector.rules().at(r.name);
Json::Value rule;
get_json_details(r, *ri, insp.get(), rule);
// Append to rule array
rules_array.append(rule);
}
output["rules"] = rules_array;
// Store information about macros
Json::Value macros_array;
for(const auto &m : m_rule_collector.macros())
{
Json::Value macro;
get_json_details(m, macro);
macros_array.append(macro);
}
output["macros"] = macros_array;
// Store information about lists
Json::Value lists_array = Json::arrayValue;
for(const auto &l : m_rule_collector.lists())
{
Json::Value list;
get_json_details(l, list);
lists_array.append(list);
}
output["lists"] = lists_array;
json_str = writer.write(output);
}
else
{
// build json information for just the specified rule
auto ri = m_rule_collector.rules().at(*rule);
if(ri == nullptr)
{
throw falco_exception("Rule \"" + *rule + "\" is not loaded");
}
auto r = m_rules.at(ri->name);
Json::Value rule;
get_json_details(*r, *ri, insp.get(), rule);
json_str = writer.write(rule);
}
fprintf(stdout, "%s", json_str.c_str());
}
void falco_engine::get_json_details(const falco_rule &r,
const rule_loader::rule_info &ri,
sinsp *insp,
Json::Value &rule) const
{
Json::Value rule_info;
// Fill general rule information
rule_info["name"] = r.name;
rule_info["condition"] = ri.cond;
rule_info["priority"] = format_priority(r.priority, false);
rule_info["output"] = r.output;
rule_info["description"] = r.description;
rule_info["enabled"] = ri.enabled;
rule_info["source"] = r.source;
Json::Value tags = Json::arrayValue;
for(const auto &t : ri.tags)
{
tags.append(t);
}
rule_info["tags"] = tags;
rule["info"] = rule_info;
// Parse rule condition and build the AST
// Assumption: no exception because rules have already been loaded.
auto ast = libsinsp::filter::parser(ri.cond).parse();
Json::Value json_details;
get_json_details(ast.get(), json_details);
rule["details"] = json_details;
// Get fields from output string
auto fmt = create_formatter(r.source, r.output);
std::vector<std::string> out_fields;
fmt->get_field_names(out_fields);
Json::Value outputFields = Json::arrayValue;
for(const auto &of : out_fields)
{
outputFields.append(of);
}
rule["details"]["output_fields"] = outputFields;
// Get fields from exceptions
Json::Value exception_fields = Json::arrayValue;
for(const auto &f : r.exception_fields)
{
exception_fields.append(f);
}
rule["details"]["exception_fields"] = exception_fields;
// Get names and operators from exceptions
Json::Value exception_names = Json::arrayValue;
Json::Value exception_operators = Json::arrayValue;
for(const auto &e : ri.exceptions)
{
exception_names.append(e.name);
if(e.comps.is_list)
{
for(const auto& c : e.comps.items)
{
if(c.is_list)
{
// considering max two levels of lists
for(const auto& i : c.items)
{
exception_operators.append(i.item);
}
}
else
{
exception_operators.append(c.item);
}
}
}
else
{
exception_operators.append(e.comps.item);
}
}
rule["details"]["exceptions"] = exception_names;
rule["details"]["exception_operators"] = exception_operators;
if(ri.source == falco_common::syscall_source)
{
// Store event types
Json::Value events;
get_json_evt_types(ast.get(), events);
rule["details"]["events"] = events;
auto r = m_rules.at(*rule);
auto str = falco::utils::wrap_text(r->description, 51, 110) + "\n";
fprintf(stdout, rule_fmt, r->name.c_str(), str.c_str());
}
}
void falco_engine::get_json_details(const rule_loader::macro_info& m,
Json::Value& macro) const
{
Json::Value macro_info;
macro_info["name"] = m.name;
macro_info["condition"] = m.cond;
macro["info"] = macro_info;
// Assumption: no exception because rules have already been loaded.
auto ast = libsinsp::filter::parser(m.cond).parse();
Json::Value json_details;
get_json_details(ast.get(), json_details);
macro["details"] = json_details;
// Store event types
Json::Value events;
get_json_evt_types(ast.get(), events);
macro["details"]["events"] = events;
}
void falco_engine::get_json_details(const rule_loader::list_info& l,
Json::Value& list) const
{
Json::Value list_info;
list_info["name"] = l.name;
Json::Value items = Json::arrayValue;
Json::Value lists = Json::arrayValue;
for(const auto &i : l.items)
{
if(m_rule_collector.lists().at(i) != nullptr)
{
lists.append(i);
continue;
}
items.append(i);
}
list_info["items"] = items;
list["info"] = list_info;
list["details"]["lists"] = lists;
}
void falco_engine::get_json_details(libsinsp::filter::ast::expr* ast,
Json::Value& output) const
{
filter_details details;
for(const auto &m : m_rule_collector.macros())
{
details.known_macros.insert(m.name);
}
for(const auto &l : m_rule_collector.lists())
{
details.known_lists.insert(l.name);
}
// Resolve the AST details
filter_details_resolver resolver;
resolver.run(ast, details);
Json::Value macros = Json::arrayValue;
for(const auto &m : details.macros)
{
macros.append(m);
}
output["macros"] = macros;
Json::Value operators = Json::arrayValue;
for(const auto &o : details.operators)
{
operators.append(o);
}
output["operators"] = operators;
Json::Value condition_fields = Json::arrayValue;
for(const auto &f : details.fields)
{
condition_fields.append(f);
}
output["condition_fields"] = condition_fields;
Json::Value lists = Json::arrayValue;
for(const auto &l : details.lists)
{
lists.append(l);
}
output["lists"] = lists;
details.reset();
}
void falco_engine::get_json_evt_types(libsinsp::filter::ast::expr* ast,
Json::Value& output) const
{
output = Json::arrayValue;
auto evtcodes = libsinsp::filter::ast::ppm_event_codes(ast);
auto syscodes = libsinsp::filter::ast::ppm_sc_codes(ast);
auto syscodes_to_evt_names = libsinsp::events::sc_set_to_event_names(syscodes);
auto evtcodes_to_evt_names = libsinsp::events::event_set_to_names(evtcodes, false);
for (const auto& n : unordered_set_union(syscodes_to_evt_names, evtcodes_to_evt_names))
{
output.append(n);
}
}
void falco_engine::print_stats() const
{
std::string out;

View File

@@ -37,7 +37,6 @@ limitations under the License.
#include "falco_common.h"
#include "falco_source.h"
#include "falco_load_result.h"
#include "filter_details_resolver.h"
//
// This class acts as the primary interface between a program and the
@@ -124,7 +123,7 @@ public:
// Print details on the given rule. If rule is NULL, print
// details on all rules.
//
void describe_rule(std::string *rule, bool json) const;
void describe_rule(std::string *rule) const;
//
// Print statistics on how many events matched each rule.
@@ -148,7 +147,7 @@ public:
// of all output expressions. You can also choose to replace
// %container.info with the extra information or add it to the
// end of the expression. This is used in open source falco to
// add k8s/container information to outputs when
// add k8s/mesos/container information to outputs when
// available.
//
void set_extra(std::string &extra, bool replace_container_info);
@@ -299,20 +298,6 @@ private:
//
inline bool should_drop_evt() const;
// Retrieve json details from rules, macros, lists
void get_json_details(const falco_rule& r,
const rule_loader::rule_info& ri,
sinsp* insp,
Json::Value& rule) const;
void get_json_details(const rule_loader::macro_info& m,
Json::Value& macro) const;
void get_json_details(const rule_loader::list_info& l,
Json::Value& list) const;
void get_json_details(libsinsp::filter::ast::expr* ast,
Json::Value& output) const;
void get_json_evt_types(libsinsp::filter::ast::expr* ast,
Json::Value& output) const;
rule_loader::collector m_rule_collector;
indexed_vector<falco_rule> m_rules;
stats_manager m_rule_stats_manager;

View File

@@ -21,4 +21,4 @@ limitations under the License.
// This is the result of running "falco --list -N | sha256sum" and
// represents the fields supported by this version of Falco. It's used
// at build time to detect a changed set of fields.
#define FALCO_FIELDS_CHECKSUM "dd438e1713ebf8abc09a2c89da77bb43ee3886ad1ba69802595a5f18e3854550"
#define FALCO_FIELDS_CHECKSUM "f054e066bd153f851285973bb2f628462574d4679c18e1ca5dbca0585acc8a72"

View File

@@ -23,109 +23,12 @@ limitations under the License.
#include "utils.h"
#include "banned.h" // This raises a compilation error when certain functions are used
#include <re2/re2.h>
#define RGX_PROMETHEUS_TIME_DURATION "^((?P<y>[0-9]+)y)?((?P<w>[0-9]+)w)?((?P<d>[0-9]+)d)?((?P<h>[0-9]+)h)?((?P<m>[0-9]+)m)?((?P<s>[0-9]+)s)?((?P<ms>[0-9]+)ms)?$"
// using pre-compiled regex
static re2::RE2 s_rgx_prometheus_time_duration(RGX_PROMETHEUS_TIME_DURATION);
// Prometheus time durations: https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations
#define PROMETHEUS_UNIT_Y "y" ///> assuming a year has always 365d
#define PROMETHEUS_UNIT_W "w" ///> assuming a week has always 7d
#define PROMETHEUS_UNIT_D "d" ///> assuming a day has always 24h
#define PROMETHEUS_UNIT_H "h" ///> hour
#define PROMETHEUS_UNIT_M "m" ///> minute
#define PROMETHEUS_UNIT_S "s" ///> second
#define PROMETHEUS_UNIT_MS "ms" ///> millisecond
// standard time unit conversions to milliseconds
#define ONE_MS_TO_MS 1UL
#define ONE_SECOND_TO_MS 1000UL
#define ONE_MINUTE_TO_MS ONE_SECOND_TO_MS * 60UL
#define ONE_HOUR_TO_MS ONE_MINUTE_TO_MS * 60UL
#define ONE_DAY_TO_MS ONE_HOUR_TO_MS * 24UL
#define ONE_WEEK_TO_MS ONE_DAY_TO_MS * 7UL
#define ONE_YEAR_TO_MS ONE_DAY_TO_MS * 365UL
namespace falco
{
namespace utils
{
uint64_t parse_prometheus_interval(std::string interval_str)
{
uint64_t interval = 0;
/* Sanitize user input, remove possible whitespaces. */
interval_str.erase(remove_if(interval_str.begin(), interval_str.end(), isspace), interval_str.end());
if(!interval_str.empty())
{
/* Option 1: Passing interval directly in ms. Will be deprecated in the future. */
if(std::all_of(interval_str.begin(), interval_str.end(), ::isdigit))
{
/* todo: deprecate for Falco 0.36. */
interval = std::stoull(interval_str, nullptr, 0);
}
/* Option 2: Passing a Prometheus compliant time duration.
* https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations
*/
else
{
re2::StringPiece input(interval_str);
std::string args[14];
re2::RE2::Arg arg0(&args[0]);
re2::RE2::Arg arg1(&args[1]);
re2::RE2::Arg arg2(&args[2]);
re2::RE2::Arg arg3(&args[3]);
re2::RE2::Arg arg4(&args[4]);
re2::RE2::Arg arg5(&args[5]);
re2::RE2::Arg arg6(&args[6]);
re2::RE2::Arg arg7(&args[7]);
re2::RE2::Arg arg8(&args[8]);
re2::RE2::Arg arg9(&args[9]);
re2::RE2::Arg arg10(&args[10]);
re2::RE2::Arg arg11(&args[11]);
re2::RE2::Arg arg12(&args[12]);
re2::RE2::Arg arg13(&args[13]);
const re2::RE2::Arg* const matches[14] = {&arg0, &arg1, &arg2, &arg3, &arg4, &arg5, &arg6, &arg7, &arg8, &arg9, &arg10, &arg11, &arg12, &arg13};
const std::map<std::string, int>& named_groups = s_rgx_prometheus_time_duration.NamedCapturingGroups();
int num_groups = s_rgx_prometheus_time_duration.NumberOfCapturingGroups();
re2::RE2::FullMatchN(input, s_rgx_prometheus_time_duration, matches, num_groups);
static const char* all_prometheus_units[7] = {
PROMETHEUS_UNIT_Y, PROMETHEUS_UNIT_W, PROMETHEUS_UNIT_D, PROMETHEUS_UNIT_H,
PROMETHEUS_UNIT_M, PROMETHEUS_UNIT_S, PROMETHEUS_UNIT_MS };
static const uint64_t all_prometheus_time_conversions[7] = {
ONE_YEAR_TO_MS, ONE_WEEK_TO_MS, ONE_DAY_TO_MS, ONE_HOUR_TO_MS,
ONE_MINUTE_TO_MS, ONE_SECOND_TO_MS, ONE_MS_TO_MS };
for(size_t i = 0; i < sizeof(all_prometheus_units) / sizeof(const char*); i++)
{
std::string cur_interval_str;
uint64_t cur_interval = 0;
const auto &group_it = named_groups.find(all_prometheus_units[i]);
if(group_it != named_groups.end())
{
cur_interval_str = args[group_it->second - 1];
if(!cur_interval_str.empty())
{
cur_interval = std::stoull(cur_interval_str, nullptr, 0);
}
if(cur_interval > 0)
{
interval += cur_interval * all_prometheus_time_conversions[i];
}
}
}
}
}
return interval;
}
std::string wrap_text(const std::string& in, uint32_t indent, uint32_t line_len)
{
std::istringstream is(in);

View File

@@ -43,8 +43,6 @@ namespace falco
namespace utils
{
uint64_t parse_prometheus_interval(std::string interval_str);
std::string wrap_text(const std::string& in, uint32_t indent, uint32_t linelen);
void readfile(const std::string& filename, std::string& data);

View File

@@ -1,103 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "filter_details_resolver.h"
using namespace libsinsp::filter;
void filter_details::reset()
{
fields.clear();
macros.clear();
operators.clear();
lists.clear();
}
void filter_details_resolver::run(ast::expr* filter, filter_details& details)
{
visitor v(details);
filter->accept(&v);
}
void filter_details_resolver::visitor::visit(ast::and_expr* e)
{
for(size_t i = 0; i < e->children.size(); i++)
{
m_expect_macro = true;
e->children[i]->accept(this);
m_expect_macro = false;
}
}
void filter_details_resolver::visitor::visit(ast::or_expr* e)
{
for(size_t i = 0; i < e->children.size(); i++)
{
m_expect_macro = true;
e->children[i]->accept(this);
m_expect_macro = false;
}
}
void filter_details_resolver::visitor::visit(ast::not_expr* e)
{
e->child->accept(this);
}
void filter_details_resolver::visitor::visit(ast::list_expr* e)
{
if(m_expect_list)
{
for(const auto& item : e->values)
{
if(m_details.known_lists.find(item) != m_details.known_lists.end())
{
m_details.lists.insert(item);
}
}
}
}
void filter_details_resolver::visitor::visit(ast::binary_check_expr* e)
{
m_expect_macro = false;
m_details.fields.insert(e->field);
m_details.operators.insert(e->op);
m_expect_list = true;
e->value->accept(this);
m_expect_list = false;
}
void filter_details_resolver::visitor::visit(ast::unary_check_expr* e)
{
m_expect_macro = false;
m_details.fields.insert(e->field);
m_details.operators.insert(e->op);
}
void filter_details_resolver::visitor::visit(ast::value_expr* e)
{
if(m_expect_macro)
{
auto it = m_details.known_macros.find(e->value);
if(it == m_details.known_macros.end())
{
return;
}
m_details.macros.insert(e->value);
}
}

View File

@@ -1,79 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include <filter/parser.h>
#include <string>
#include <unordered_set>
#include <unordered_map>
struct filter_details
{
// input macros and lists
std::unordered_set<std::string> known_macros;
std::unordered_set<std::string> known_lists;
// output details
std::unordered_set<std::string> fields;
std::unordered_set<std::string> macros;
std::unordered_set<std::string> operators;
std::unordered_set<std::string> lists;
void reset();
};
/*!
\brief Helper class for getting details about rules' filters.
*/
class filter_details_resolver
{
public:
/*!
\brief Visits a filter AST and stores details about macros, lists,
fields and operators used.
\param filter The filter AST to be processed.
\param details Helper structure used to state known macros and
lists on input, and to store all the retrieved details as output.
*/
void run(libsinsp::filter::ast::expr* filter,
filter_details& details);
private:
struct visitor : public libsinsp::filter::ast::expr_visitor
{
visitor(filter_details& details) :
m_details(details),
m_expect_list(false),
m_expect_macro(false) {}
visitor(visitor&&) = default;
visitor& operator = (visitor&&) = default;
visitor(const visitor&) = delete;
visitor& operator = (const visitor&) = delete;
void visit(libsinsp::filter::ast::and_expr* e) override;
void visit(libsinsp::filter::ast::or_expr* e) override;
void visit(libsinsp::filter::ast::not_expr* e) override;
void visit(libsinsp::filter::ast::value_expr* e) override;
void visit(libsinsp::filter::ast::list_expr* e) override;
void visit(libsinsp::filter::ast::unary_check_expr* e) override;
void visit(libsinsp::filter::ast::binary_check_expr* e) override;
filter_details& m_details;
bool m_expect_list;
bool m_expect_macro;
};
};

View File

@@ -26,7 +26,6 @@ limitations under the License.
#include <nlohmann/json.hpp>
#include "falco_common.h"
#include "prefix_search.h"
#include <sinsp.h>
@@ -436,10 +435,6 @@ public:
bool tostring(gen_event *evt, std::string &output) override;
bool tostring_withformat(gen_event *evt, std::string &output, gen_event_formatter::output_format of) override;
bool get_field_values(gen_event *evt, std::map<std::string, std::string> &fields) override;
void get_field_names(std::vector<std::string> &fields) override
{
throw falco_exception("json_event_formatter::get_field_names operation not supported");
}
output_format get_output_format() override;
std::string tojson(json_event *ev);

View File

@@ -297,7 +297,6 @@ namespace rule_loader
*/
struct engine_version_info
{
engine_version_info() : ctx("no-filename-given"), version(0) { };
engine_version_info(context &ctx);
~engine_version_info() = default;
engine_version_info(engine_version_info&&) = default;

View File

@@ -116,11 +116,6 @@ const std::vector<rule_loader::plugin_version_info::requirement_alternatives>& r
return m_required_plugin_versions;
}
const rule_loader::engine_version_info& rule_loader::collector::required_engine_version() const
{
return m_required_engine_version;
}
const indexed_vector<rule_loader::list_info>& rule_loader::collector::lists() const
{
return m_list_infos;
@@ -142,10 +137,6 @@ void rule_loader::collector::define(configuration& cfg, engine_version_info& inf
THROW(v < info.version, "Rules require engine version "
+ std::to_string(info.version) + ", but engine version is " + std::to_string(v),
info.ctx);
if(m_required_engine_version.version < info.version)
{
m_required_engine_version = info;
}
}
void rule_loader::collector::define(configuration& cfg, plugin_version_info& info)

View File

@@ -46,11 +46,6 @@ public:
*/
virtual const std::vector<plugin_version_info::requirement_alternatives>& required_plugin_versions() const;
/*!
\brief Returns the required engine versions
*/
virtual const engine_version_info& required_engine_version() const;
/*!
\brief Returns the list of defined lists
*/
@@ -97,7 +92,6 @@ private:
indexed_vector<macro_info> m_macro_infos;
indexed_vector<list_info> m_list_infos;
std::vector<plugin_version_info::requirement_alternatives> m_required_plugin_versions;
engine_version_info m_required_engine_version;
};
}; // namespace rule_loader

View File

@@ -495,10 +495,12 @@ void rule_loader::compiler::compile_rule_infos(
}
// populate set of event types and emit an special warning
libsinsp::events::set<ppm_event_code> evttypes = { ppm_event_code::PPME_PLUGINEVENT_E };
if(rule.source == falco_common::syscall_source)
{
auto evttypes = libsinsp::filter::ast::ppm_event_codes(ast.get());
if ((evttypes.empty() || evttypes.size() > 100) && r.warn_evttypes)
evttypes = libsinsp::filter::ast::ppm_event_codes(ast.get());
if ((evttypes.empty() || evttypes.size() > 100)
&& r.warn_evttypes)
{
cfg.res->add_warning(
falco::load_result::load_result::LOAD_NO_EVTTYPE,

View File

@@ -17,7 +17,6 @@ set(
FALCO_SOURCES
app/app.cpp
app/options.cpp
app/restart_handler.cpp
app/actions/helpers_generic.cpp
app/actions/helpers_inspector.cpp
app/actions/configure_interesting_sets.cpp
@@ -41,8 +40,7 @@ set(
app/actions/print_syscall_events.cpp
app/actions/print_version.cpp
app/actions/print_page_size.cpp
app/actions/configure_syscall_buffer_size.cpp
app/actions/configure_syscall_buffer_num.cpp
app/actions/compute_syscall_buffer_size.cpp
app/actions/select_event_sources.cpp
app/actions/start_grpc_server.cpp
app/actions/start_webserver.cpp

View File

@@ -23,9 +23,9 @@ namespace falco {
namespace app {
namespace actions {
falco::app::run_result attach_inotify_signals(falco::app::state& s);
falco::app::run_result configure_interesting_sets(falco::app::state& s);
falco::app::run_result configure_syscall_buffer_size(falco::app::state& s);
falco::app::run_result configure_syscall_buffer_num(falco::app::state& s);
falco::app::run_result create_requested_paths(falco::app::state& s);
falco::app::run_result create_signal_handlers(falco::app::state& s);
falco::app::run_result daemonize(falco::app::state& s);

View File

@@ -29,10 +29,7 @@ falco::app::run_result falco::app::actions::configure_syscall_buffer_size(falco:
/* We don't need to compute the syscall buffer dimension if we are in capture mode or if the
* the syscall source is not enabled.
*/
if(s.is_capture_mode()
|| !s.is_source_enabled(falco_common::syscall_source)
|| s.is_gvisor_enabled()
|| s.options.nodriver)
if(s.is_capture_mode() || s.enabled_sources.find(falco_common::syscall_source) == s.enabled_sources.end() || s.is_gvisor_enabled())
{
return run_result::ok();
}

View File

@@ -15,8 +15,6 @@ limitations under the License.
*/
#include "actions.h"
#include "helpers.h"
#include "../app.h"
using namespace falco::app;
using namespace falco::app::actions;
@@ -46,7 +44,7 @@ static void check_for_rules_unsupported_events(falco::app::state& s, const libsi
{
/* Unsupported events are those events that are used in the rules
* but that are not part of the selected event set. For now, this
* is expected to happen only for high volume syscalls for
* is expected to happen only for high volume I/O syscalls for
* performance reasons. */
auto unsupported_sc_set = rules_sc_set.diff(s.selected_sc_set);
if (unsupported_sc_set.empty())
@@ -55,9 +53,9 @@ static void check_for_rules_unsupported_events(falco::app::state& s, const libsi
}
/* Get the names of the events (syscall and non syscall events) that were not activated and print them. */
auto names = libsinsp::events::sc_set_to_event_names(unsupported_sc_set);
auto names = libsinsp::events::sc_set_to_names(unsupported_sc_set);
std::cerr << "Loaded rules match syscalls that are not activated (e.g. were removed via config settings such as no -A flag or negative base_syscalls elements) or unsupported with current configuration: warning (unsupported-evttype): " + concat_set_in_order(names) << std::endl;
std::cerr << "If syscalls in rules include high volume syscalls (-> activate via `-A` flag), else syscalls may have been removed via base_syscalls option or might be associated with syscalls undefined on your architecture (https://marcin.juszkiewicz.com.pl/download/tables/syscalls.html)" << std::endl;
std::cerr << "If syscalls in rules include high volume I/O syscalls (-> activate via `-A` flag), else syscalls may have been removed via base_syscalls option or might be associated with syscalls undefined on your architecture (https://marcin.juszkiewicz.com.pl/download/tables/syscalls.html)" << std::endl;
}
static void select_event_set(falco::app::state& s, const libsinsp::events::set<ppm_sc_code>& rules_sc_set)
@@ -65,7 +63,7 @@ static void select_event_set(falco::app::state& s, const libsinsp::events::set<p
/* PPM syscall codes (sc) can be viewed as condensed libsinsp lookup table
* to map a system call name to it's actual system syscall id (as defined
* by the Linux kernel). Hence here we don't need syscall enter and exit distinction. */
auto rules_names = libsinsp::events::sc_set_to_event_names(rules_sc_set);
auto rules_names = libsinsp::events::sc_set_to_names(rules_sc_set);
if (!rules_sc_set.empty())
{
falco_logger::log(LOG_DEBUG, "(" + std::to_string(rules_names.size())
@@ -73,25 +71,24 @@ static void select_event_set(falco::app::state& s, const libsinsp::events::set<p
}
/* DEFAULT OPTION:
* Current `sinsp_state_sc_set()` approach includes multiple steps:
* Current sinsp_state_sc_set() approach includes multiple steps:
* (1) Enforce all positive syscalls from each Falco rule
* (2) Enforce static Falco state set (non-adaptive, not conditioned by rules,
* (2) Enforce static `libsinsp` state set (non-adaptive, not conditioned by rules,
* but based on PPME event table flags indicating generic sinsp state modifications)
* -> Final set is union of (1) and (2)
*
* Fall-back if no valid positive syscalls in `base_syscalls.custom_set`,
* e.g. when using `base_syscalls.custom_set` only for negative syscalls.
* Fall-back if no valid positive syscalls in "base_syscalls",
* e.g. when using "base_syscalls" only for negative syscalls.
*/
auto base_sc_set = libsinsp::events::sinsp_state_sc_set();
/* USER OVERRIDE INPUT OPTION `base_syscalls.custom_set` etc. */
/* USER OVERRIDE INPUT OPTION "base_syscalls". */
std::unordered_set<std::string> user_positive_names = {};
std::unordered_set<std::string> user_negative_names = {};
extract_base_syscalls_names(s.config->m_base_syscalls_custom_set, user_positive_names, user_negative_names);
auto user_positive_sc_set = libsinsp::events::event_names_to_sc_set(user_positive_names);
auto user_negative_sc_set = libsinsp::events::event_names_to_sc_set(user_negative_names);
extract_base_syscalls_names(s.config->m_base_syscalls, user_positive_names, user_negative_names);
auto user_positive_sc_set = libsinsp::events::names_to_sc_set(user_positive_names);
auto user_negative_sc_set = libsinsp::events::names_to_sc_set(user_negative_names);
auto user_positive_sc_set_names = libsinsp::events::sc_set_to_event_names(user_positive_sc_set);
if (!user_positive_sc_set.empty())
{
// user overrides base event set
@@ -99,35 +96,16 @@ static void select_event_set(falco::app::state& s, const libsinsp::events::set<p
// we re-transform from sc_set to names to make
// sure that bad user inputs are ignored
auto user_positive_sc_set_names = libsinsp::events::sc_set_to_names(user_positive_sc_set);
falco_logger::log(LOG_DEBUG, "+(" + std::to_string(user_positive_sc_set_names.size())
+ ") syscalls added (base_syscalls override): "
+ concat_set_in_order(user_positive_sc_set_names) + "\n");
}
auto invalid_positive_sc_set_names = unordered_set_difference(user_positive_names, user_positive_sc_set_names);
if (!invalid_positive_sc_set_names.empty())
{
falco_logger::log(LOG_WARNING, "Invalid (positive) syscall names: warning (base_syscalls override): "
+ concat_set_in_order(invalid_positive_sc_set_names));
}
// selected events are the union of the rules events set and the
// base events set (either the default or the user-defined one)
s.selected_sc_set = rules_sc_set.merge(base_sc_set);
/* REPLACE DEFAULT STATE, nothing else. Need to override s.selected_sc_set and have a separate logic block. */
if (s.config->m_base_syscalls_repair && user_positive_sc_set.empty())
{
/* If `base_syscalls.repair` is specified, but `base_syscalls.custom_set` is empty we are replacing
* the default `sinsp_state_sc_set()` enforcement with the alternative `sinsp_repair_state_sc_set`.
* This approach only activates additional syscalls Falco needs beyond the
* syscalls defined in each Falco rule that are absolutely necessary based
* on the current rules configuration. */
// returned set already has rules_sc_set merged
s.selected_sc_set = libsinsp::events::sinsp_repair_state_sc_set(rules_sc_set);
}
auto user_negative_sc_set_names = libsinsp::events::sc_set_to_event_names(user_negative_sc_set);
if (!user_negative_sc_set.empty())
{
/* Remove negative base_syscalls events. */
@@ -135,16 +113,11 @@ static void select_event_set(falco::app::state& s, const libsinsp::events::set<p
// we re-transform from sc_set to names to make
// sure that bad user inputs are ignored
auto user_negative_sc_set_names = libsinsp::events::sc_set_to_names(user_negative_sc_set);
falco_logger::log(LOG_DEBUG, "-(" + std::to_string(user_negative_sc_set_names.size())
+ ") syscalls removed (base_syscalls override): "
+ concat_set_in_order(user_negative_sc_set_names) + "\n");
}
auto invalid_negative_sc_set_names = unordered_set_difference(user_negative_names, user_negative_sc_set_names);
if (!invalid_negative_sc_set_names.empty())
{
falco_logger::log(LOG_WARNING, "Invalid (negative) syscall names: warning (base_syscalls override): "
+ concat_set_in_order(invalid_negative_sc_set_names));
}
/* Derive the diff between the additional syscalls added via libsinsp state
enforcement and the syscalls from each Falco rule. We avoid printing
@@ -152,7 +125,7 @@ static void select_event_set(falco::app::state& s, const libsinsp::events::set<p
auto non_rules_sc_set = s.selected_sc_set.diff(rules_sc_set);
if (!non_rules_sc_set.empty() && user_positive_sc_set.empty())
{
auto non_rules_sc_set_names = libsinsp::events::sc_set_to_event_names(non_rules_sc_set);
auto non_rules_sc_set_names = libsinsp::events::sc_set_to_names(non_rules_sc_set);
falco_logger::log(LOG_DEBUG, "+(" + std::to_string(non_rules_sc_set_names.size())
+ ") syscalls (Falco's state engine set of syscalls): "
+ concat_set_in_order(non_rules_sc_set_names) + "\n");
@@ -160,58 +133,42 @@ static void select_event_set(falco::app::state& s, const libsinsp::events::set<p
/* -A flag behavior:
* (1) default: all syscalls in rules included, sinsp state enforcement
without high volume syscalls
without high volume I/O syscalls
* (2) -A flag set: all syscalls in rules included, sinsp state enforcement
and allowing high volume syscalls */
and allowing high volume I/O syscalls */
if(!s.options.all_events)
{
auto ignored_sc_set = falco::app::ignored_sc_set();
auto ignored_sc_set = libsinsp::events::io_sc_set();
auto erased_sc_set = s.selected_sc_set.intersect(ignored_sc_set);
s.selected_sc_set = s.selected_sc_set.diff(ignored_sc_set);
if (!erased_sc_set.empty())
{
auto erased_sc_set_names = libsinsp::events::sc_set_to_event_names(erased_sc_set);
auto erased_sc_set_names = libsinsp::events::sc_set_to_names(erased_sc_set);
falco_logger::log(LOG_DEBUG, "-(" + std::to_string(erased_sc_set_names.size())
+ ") ignored syscalls (-> activate via `-A` flag): "
+ concat_set_in_order(erased_sc_set_names) + "\n");
}
}
/* If a custom set is specified (positive, negative, or both), we attempt
* to repair it if configured to do so. */
if (s.config->m_base_syscalls_repair && !s.config->m_base_syscalls_custom_set.empty())
{
/* If base_syscalls.repair is specified enforce state using `sinsp_repair_state_sc_set`.
* This approach is an alternative to the default `sinsp_state_sc_set()` state enforcement
* and only activates additional syscalls Falco needs beyond the syscalls defined in the
* Falco rules that are absolutely necessary based on the current rules configuration. */
auto selected_sc_set = s.selected_sc_set;
s.selected_sc_set = libsinsp::events::sinsp_repair_state_sc_set(s.selected_sc_set);
auto repaired_sc_set = s.selected_sc_set.diff(selected_sc_set);
if (!repaired_sc_set.empty())
{
auto repaired_sc_set_names = libsinsp::events::sc_set_to_event_names(repaired_sc_set);
falco_logger::log(LOG_INFO, "+(" + std::to_string(repaired_sc_set_names.size())
+ ") repaired syscalls: " + concat_set_in_order(repaired_sc_set_names) + "\n");
}
}
/* Hidden safety enforcement for `base_syscalls.custom_set` user
* input override option (but keep as general safety enforcement)
* -> sched_process_exit trace point activation (procexit event)
* is necessary for continuous state engine cleanup,
* else memory would grow rapidly and linearly over time. */
s.selected_sc_set.insert(ppm_sc_code::PPM_SC_SCHED_PROCESS_EXIT);
if (!s.selected_sc_set.empty())
{
auto selected_sc_set_names = libsinsp::events::sc_set_to_event_names(s.selected_sc_set);
auto selected_sc_set_names = libsinsp::events::sc_set_to_names(s.selected_sc_set);
falco_logger::log(LOG_DEBUG, "(" + std::to_string(selected_sc_set_names.size())
+ ") syscalls selected in total (final set): "
+ concat_set_in_order(selected_sc_set_names) + "\n");
}
}
/* Choose the kernel tracepoints to activate.
 *
 * Every tracepoint needed by the sinsp state engine is enabled, except
 * `sched_switch`: it fires at a very high rate while adding little to
 * state/event enrichment, so it is removed from the selection.
 */
static void select_kernel_tracepoint_set(falco::app::state& s)
{
	auto tp_set = libsinsp::events::sinsp_state_tp_set();
	tp_set.remove(ppm_tp_code::SCHED_SWITCH);
	s.selected_tp_set = tp_set;
}
falco::app::run_result falco::app::actions::configure_interesting_sets(falco::app::state& s)
{
if (s.engine == nullptr || s.config == nullptr)
@@ -220,7 +177,8 @@ falco::app::run_result falco::app::actions::configure_interesting_sets(falco::ap
}
s.selected_sc_set.clear();
s.selected_tp_set.clear();
/* note: the set of events is the richest source of truth about
* the events generable by an inspector, because they also carry information
* about events that are old, unused, internal, and so on. As such, the
@@ -232,5 +190,6 @@ falco::app::run_result falco::app::actions::configure_interesting_sets(falco::ap
auto rules_sc_set = s.engine->sc_codes_for_ruleset(falco_common::syscall_source);
select_event_set(s, rules_sc_set);
check_for_rules_unsupported_events(s, rules_sc_set);
select_kernel_tracepoint_set(s);
return run_result::ok();
}

View File

@@ -1,42 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "actions.h"
using namespace falco::app;
using namespace falco::app::actions;
/* Validate the "CPUs per syscall ring buffer" configuration.
 *
 * Only meaningful for the modern BPF probe; for every other engine this
 * is a no-op. If the requested grouping exceeds the number of online
 * CPUs, the value is clamped to the online CPU count and a warning is
 * logged. Fails fatally when the online CPU count cannot be queried.
 */
falco::app::run_result falco::app::actions::configure_syscall_buffer_num(falco::app::state& s)
{
	/* The per-CPU buffer grouping applies to the modern BPF probe only. */
	if(!s.options.modern_bpf)
	{
		return run_result::ok();
	}

	const ssize_t cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
	if(cpu_count <= 0)
	{
		return run_result::fatal("cannot get the number of online CPUs from the system\n");
	}

	/* Clamp the requested grouping to the online CPU count, telling the
	 * user that the configuration was adjusted. Note: the warning must be
	 * emitted before the assignment so it reports the original value. */
	auto& cpus_per_buffer = s.config->m_cpus_for_each_syscall_buffer;
	if(cpus_per_buffer > cpu_count)
	{
		falco_logger::log(LOG_WARNING, "you required a buffer every '" + std::to_string(cpus_per_buffer) + "' CPUs but there are only '" + std::to_string(cpu_count) + "' online CPUs. Falco changed the config to: one buffer every '" + std::to_string(cpu_count) + "' CPUs\n");
		cpus_per_buffer = cpu_count;
	}

	return run_result::ok();
}

View File

@@ -33,7 +33,7 @@ static int create_dir(const std::string &path);
falco::app::run_result falco::app::actions::create_requested_paths(falco::app::state& s)
{
if(s.is_gvisor_enabled())
if(!s.options.gvisor_config.empty())
{
// This is bad: parsing gvisor config to get endpoint
// to be able to auto-create the path to the file for the user.

View File

@@ -16,16 +16,23 @@ limitations under the License.
#include <functional>
#include "actions.h"
#include "../app.h"
#include "../signals.h"
#include <string.h>
#include <signal.h>
#include <sys/inotify.h>
#include <fcntl.h>
#include "actions.h"
#include "../signals.h"
using namespace falco::app;
using namespace falco::app::actions;
static std::shared_ptr<falco::app::restart_handler> s_restarter;
// This is initially set to a dummy application. When
// create_signal_handlers is called, it will be rebound to the
// provided application, and in unregister_signal_handlers it will be
// rebound back to the dummy application.
static int inot_fd;
static void terminate_signal_handler(int signal)
{
@@ -39,10 +46,7 @@ static void reopen_outputs_signal_handler(int signal)
static void restart_signal_handler(int signal)
{
if (s_restarter != nullptr)
{
s_restarter->trigger();
}
falco::app::g_restart_signal.trigger();
}
bool create_handler(int sig, void (*func)(int), run_result &ret)
@@ -67,12 +71,6 @@ bool create_handler(int sig, void (*func)(int), run_result &ret)
falco::app::run_result falco::app::actions::create_signal_handlers(falco::app::state& s)
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping signal handlers creation in dry-run\n");
return run_result::ok();
}
falco::app::g_terminate_signal.reset();
falco::app::g_restart_signal.reset();
falco::app::g_reopen_outputs_signal.reset();
@@ -90,80 +88,87 @@ falco::app::run_result falco::app::actions::create_signal_handlers(falco::app::s
! create_handler(SIGUSR1, ::reopen_outputs_signal_handler, ret) ||
! create_handler(SIGHUP, ::restart_signal_handler, ret))
{
return ret;
// we use the if just to make sure we return at the first failed statement
}
falco::app::restart_handler::watch_list_t files_to_watch;
falco::app::restart_handler::watch_list_t dirs_to_watch;
if (s.config->m_watch_config_files)
{
files_to_watch.push_back(s.options.conf_filename);
files_to_watch.insert(
files_to_watch.end(),
s.config->m_loaded_rules_filenames.begin(),
s.config->m_loaded_rules_filenames.end());
dirs_to_watch.insert(
dirs_to_watch.end(),
s.config->m_loaded_rules_folders.begin(),
s.config->m_loaded_rules_folders.end());
}
s.restarter = std::make_shared<falco::app::restart_handler>([&s]{
bool tmp = false;
bool success = false;
std::string err;
falco::app::state tmp_state(s.cmdline, s.options);
tmp_state.options.dry_run = true;
try
{
success = falco::app::run(tmp_state, tmp, err);
}
catch (std::exception& e)
{
err = e.what();
}
catch (...)
{
err = "unknown error";
}
if (!success && s.outputs != nullptr)
{
std::string rule = "Falco internal: hot restart failure";
std::string msg = rule + ": " + err;
auto fields = nlohmann::json::object();
auto now = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
s.outputs->handle_msg(now, falco_common::PRIORITY_CRITICAL, msg, rule, fields);
}
return success;
}, files_to_watch, dirs_to_watch);
ret = run_result::ok();
ret.success = s.restarter->start(ret.errstr);
ret.proceed = ret.success;
if (ret.success)
{
s_restarter = s.restarter;
}
return ret;
}
/* Set up inotify-based watching of the config and rules files so that a
 * change to any of them triggers a Falco restart via SIGIO.
 *
 * Only active when `watch_config_files` is enabled in the config. The
 * setup order matters: the SIGIO handler is installed first, then the
 * inotify fd is bound to this process (F_SETOWN) and switched to
 * signal-driven, non-blocking I/O (O_ASYNC | O_NONBLOCK), and only then
 * are the watches added — this avoids a window where inotify events
 * could be delivered with no handler in place.
 *
 * Returns run_result::ok() on success (or when watching is disabled),
 * run_result::fatal() on any setup failure. */
falco::app::run_result falco::app::actions::attach_inotify_signals(falco::app::state& s)
{
	if (s.config->m_watch_config_files)
	{
		inot_fd = inotify_init();
		if (inot_fd == -1)
		{
			return run_result::fatal("Could not create inotify handler");
		}
		/* Route SIGIO to the restart handler; SA_RESTART keeps
		 * interrupted syscalls from failing with EINTR. */
		struct sigaction sa;
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_RESTART;
		sa.sa_handler = restart_signal_handler;
		if (sigaction(SIGIO, &sa, NULL) == -1)
		{
			return run_result::fatal("Failed to link SIGIO to inotify handler");
		}
		/* Set owner process that is to receive "I/O possible" signal */
		if (fcntl(inot_fd, F_SETOWN, getpid()) == -1)
		{
			return run_result::fatal("Failed to setting owner on inotify handler");
		}
		/*
		 * Enable "I/O possible" signaling and make I/O nonblocking
		 * for file descriptor
		 */
		int flags = fcntl(inot_fd, F_GETFL);
		if (fcntl(inot_fd, F_SETFL, flags | O_ASYNC | O_NONBLOCK) == -1)
		{
			return run_result::fatal("Failed to setting flags on inotify handler");
		}
		// Watch conf file
		int wd = inotify_add_watch(inot_fd, s.options.conf_filename.c_str(), IN_CLOSE_WRITE);
		if (wd == -1)
		{
			return run_result::fatal("Failed to watch conf file");
		}
		falco_logger::log(LOG_DEBUG, "Watching " + s.options.conf_filename +"\n");
		// Watch rules files
		// note: IN_ONESHOT — each watch fires once, which is enough
		// since the triggered restart re-attaches all watches.
		for (const auto &rule : s.config->m_loaded_rules_filenames)
		{
			wd = inotify_add_watch(inot_fd, rule.c_str(), IN_CLOSE_WRITE | IN_ONESHOT);
			if (wd == -1)
			{
				return run_result::fatal("Failed to watch rule file: " + rule);
			}
			falco_logger::log(LOG_DEBUG, "Watching " + rule +"\n");
		}
		// Watch specified rules folders, if any:
		// any newly created/removed file within the folder
		// will trigger a Falco restart.
		for (const auto &fld : s.config->m_loaded_rules_folders)
		{
			// For folders, we watch if any file is created or destroyed within
			wd = inotify_add_watch(inot_fd, fld.c_str(), IN_CREATE | IN_DELETE | IN_ONESHOT);
			if (wd == -1)
			{
				return run_result::fatal("Failed to watch rule folder: " + fld);
			}
			falco_logger::log(LOG_DEBUG, "Watching " + fld +" folder\n");
		}
	}
	return run_result::ok();
}
falco::app::run_result falco::app::actions::unregister_signal_handlers(falco::app::state& s)
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping unregistering signal handlers in dry-run\n");
return run_result::ok();
}
s_restarter = nullptr;
if (s.restarter != nullptr)
{
s.restarter->stop();
}
run_result ret;
close(inot_fd);
if(! create_handler(SIGINT, SIG_DFL, ret) ||
! create_handler(SIGTERM, SIG_DFL, ret) ||
! create_handler(SIGUSR1, SIG_DFL, ret) ||

View File

@@ -27,12 +27,6 @@ static bool s_daemonized = false;
falco::app::run_result falco::app::actions::daemonize(falco::app::state& s)
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping daemonizing in dry-run\n");
return run_result::ok();
}
// If daemonizing, do it here so any init errors will
// be returned in the foreground process.
if (s.options.daemon && !s_daemonized) {

View File

@@ -41,45 +41,12 @@ void falco::app::actions::print_enabled_event_sources(falco::app::state& s)
{
/* Print all enabled sources. */
std::string str;
for (const auto &src : s.enabled_sources)
for (const auto &s : s.enabled_sources)
{
str += str.empty() ? "" : ", ";
str += src;
}
falco_logger::log(LOG_INFO, "Enabled event sources: " + str);
// print some warnings to the user
for (const auto& src : s.enabled_sources)
{
std::shared_ptr<sinsp_plugin> first_plugin = nullptr;
const auto& plugins = s.offline_inspector->get_plugin_manager()->plugins();
for (const auto& p : plugins)
{
if ((p->caps() & CAP_SOURCING)
&& ((p->id() != 0 && src == p->event_source())
|| (p->id() == 0 && src == falco_common::syscall_source)))
{
if (first_plugin == nullptr)
{
first_plugin = p;
}
else
{
if (src != falco_common::syscall_source || s.options.nodriver)
{
falco_logger::log(LOG_WARNING, "Enabled event source '"
+ src + "' can be opened with multiple loaded plugins, will use only '"
+ first_plugin->name() + "'");
}
}
}
}
if (!first_plugin && s.options.nodriver)
{
falco_logger::log(LOG_WARNING, "Enabled event source '"
+ src + "' will be opened with no driver, no event will be produced");
}
str += s;
}
falco_logger::log(LOG_INFO, "Enabled event sources: " + str + "\n");
}
void falco::app::actions::format_plugin_info(std::shared_ptr<sinsp_plugin> p, std::ostream& os)
@@ -91,29 +58,12 @@ void falco::app::actions::format_plugin_info(std::shared_ptr<sinsp_plugin> p, st
os << "Capabilities: " << std::endl;
if(p->caps() & CAP_SOURCING)
{
os << " - Event Sourcing";
if (p->id() != 0)
{
os << " (ID=" << p->id();
os << ", source='" << p->event_source() << "')";
}
else
{
os << " (system events)";
}
os << std::endl;
os << " - Event Sourcing (ID=" << p->id();
os << ", source='" << p->event_source() << "')" << std::endl;
}
if(p->caps() & CAP_EXTRACTION)
{
os << " - Field Extraction" << std::endl;
}
if(p->caps() & CAP_PARSING)
{
os << " - Event Parsing" << std::endl;
}
if(p->caps() & CAP_ASYNC)
{
os << " - Async Events" << std::endl;
}
}

View File

@@ -53,37 +53,15 @@ falco::app::run_result falco::app::actions::open_live_inspector(
{
for (const auto& p: inspector->get_plugin_manager()->plugins())
{
// note: if more than one loaded plugin supports the given
// event source, only the first one will be opened, following
// the loading order specified in the Falco config.
if (p->caps() & CAP_SOURCING && p->id() != 0 && p->event_source() == source)
if (p->caps() & CAP_SOURCING && p->event_source() == source)
{
auto cfg = s.plugin_configs.at(p->name());
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with plugin '" + cfg->m_name + "'");
falco_logger::log(LOG_INFO, "Opening capture with plugin '" + cfg->m_name + "'\n");
inspector->open_plugin(cfg->m_name, cfg->m_open_params);
return run_result::ok();
}
}
return run_result::fatal("Can't find plugin for event source: " + source);
}
else if (s.options.nodriver) /* nodriver engine. */
{
// when opening a capture with no driver, Falco will first check
// if a plugin is capable of generating raw events from the libscap
// event table (including system events), and if none is found it
// will use the nodriver engine.
for (const auto& p: inspector->get_plugin_manager()->plugins())
{
if (p->caps() & CAP_SOURCING && p->id() == 0)
{
auto cfg = s.plugin_configs.at(p->name());
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with plugin '" + cfg->m_name + "'");
inspector->open_plugin(cfg->m_name, cfg->m_open_params);
return run_result::ok();
}
}
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with no driver\n");
inspector->open_nodriver();
return run_result::fatal("Can't open inspector for plugin event source: " + source);
}
else if (s.options.userspace) /* udig engine. */
{
@@ -91,19 +69,19 @@ falco::app::run_result falco::app::actions::open_live_inspector(
//
// Falco uses a ptrace(2) based userspace implementation.
// Regardless of the implementation, the underlying method remains the same.
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with udig\n");
falco_logger::log(LOG_INFO, "Opening capture with udig\n");
inspector->open_udig();
}
else if(s.is_gvisor_enabled()) /* gvisor engine. */
else if(!s.options.gvisor_config.empty()) /* gvisor engine. */
{
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with gVisor. Configuration path: " + s.options.gvisor_config);
falco_logger::log(LOG_INFO, "Opening capture with gVisor. Configuration path: " + s.options.gvisor_config);
inspector->open_gvisor(s.options.gvisor_config, s.options.gvisor_root);
}
else if(s.options.modern_bpf) /* modern BPF engine. */
{
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with modern BPF probe.");
falco_logger::log(LOG_INFO, "Opening capture with modern BPF probe.");
falco_logger::log(LOG_INFO, "One ring buffer every '" + std::to_string(s.config->m_cpus_for_each_syscall_buffer) + "' CPUs.");
inspector->open_modern_bpf(s.syscall_buffer_bytes_size, s.config->m_cpus_for_each_syscall_buffer, true, s.selected_sc_set);
inspector->open_modern_bpf(s.syscall_buffer_bytes_size, s.config->m_cpus_for_each_syscall_buffer, true, s.selected_sc_set, s.selected_tp_set);
}
else if(getenv(FALCO_BPF_ENV_VARIABLE) != NULL) /* BPF engine. */
{
@@ -120,15 +98,15 @@ falco::app::run_result falco::app::actions::open_live_inspector(
snprintf(full_path, PATH_MAX, "%s/%s", home, FALCO_PROBE_BPF_FILEPATH);
bpf_probe_path = full_path;
}
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with BPF probe. BPF probe path: " + std::string(bpf_probe_path));
inspector->open_bpf(bpf_probe_path, s.syscall_buffer_bytes_size, s.selected_sc_set);
falco_logger::log(LOG_INFO, "Opening capture with BPF probe. BPF probe path: " + std::string(bpf_probe_path));
inspector->open_bpf(bpf_probe_path, s.syscall_buffer_bytes_size, s.selected_sc_set, s.selected_tp_set);
}
else /* Kernel module (default). */
{
try
{
falco_logger::log(LOG_INFO, "Opening '" + source + "' source with Kernel module");
inspector->open_kmod(s.syscall_buffer_bytes_size, s.selected_sc_set);
falco_logger::log(LOG_INFO, "Opening capture with Kernel module");
inspector->open_kmod(s.syscall_buffer_bytes_size, s.selected_sc_set, s.selected_tp_set);
}
catch(sinsp_exception &e)
{
@@ -138,7 +116,7 @@ falco::app::run_result falco::app::actions::open_live_inspector(
{
falco_logger::log(LOG_ERR, "Unable to load the driver\n");
}
inspector->open_kmod(s.syscall_buffer_bytes_size, s.selected_sc_set);
inspector->open_kmod(s.syscall_buffer_bytes_size, s.selected_sc_set, s.selected_tp_set);
}
}
}

View File

@@ -23,7 +23,7 @@ falco::app::run_result falco::app::actions::init_clients(falco::app::state& s)
{
#ifndef MINIMAL_BUILD
// k8s is useful only if the syscall source is enabled
if (s.is_capture_mode() || !s.is_source_enabled(falco_common::syscall_source))
if (s.enabled_sources.find(falco_common::syscall_source) == s.enabled_sources.end())
{
return run_result::ok();
}
@@ -35,12 +35,6 @@ falco::app::run_result falco::app::actions::init_clients(falco::app::state& s)
falco_logger::log(LOG_DEBUG, "Setting metadata download watch frequency to " + std::to_string(s.config->m_metadata_download_watch_freq_sec) + " seconds\n");
inspector->set_metadata_download_params(s.config->m_metadata_download_max_mb * 1024 * 1024, s.config->m_metadata_download_chunk_wait_us, s.config->m_metadata_download_watch_freq_sec);
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping clients initialization in dry-run\n");
return run_result::ok();
}
//
// Run k8s, if required
//
@@ -64,6 +58,27 @@ falco::app::run_result falco::app::actions::init_clients(falco::app::state& s)
}
inspector->init_k8s_client(k8s_api_ptr, k8s_api_cert_ptr, k8s_node_name_ptr, s.options.verbose);
}
//
// DEPRECATED!
// Run mesos, if required
// todo(leogr): remove in Falco 0.35
//
if(!s.options.mesos_api.empty())
{
// Differs from init_k8s_client in that it
// passes a pointer but the inspector does
// *not* own it and does not use it after
// init_mesos_client() returns.
falco_logger::log(LOG_WARNING, "Mesos support has been DEPRECATED and will be removed in the next version!\n");
inspector->init_mesos_client(&(s.options.mesos_api), s.options.verbose);
}
else if(char* mesos_api_env = getenv("FALCO_MESOS_API"))
{
falco_logger::log(LOG_WARNING, "Mesos support has been DEPRECATED and will be removed in the next version!\n");
std::string mesos_api_copy = mesos_api_env;
inspector->init_mesos_client(&mesos_api_copy, s.options.verbose);
}
#endif
return run_result::ok();

View File

@@ -45,6 +45,11 @@ void configure_output_format(falco::app::state& s)
output_format = "k8s.ns=%k8s.ns.name k8s.pod=%k8s.pod.name container=%container.id vpid=%proc.vpid vtid=%thread.vtid";
replace_container_info = true;
}
else if(s.options.print_additional == "m" || s.options.print_additional == "mesos")
{
output_format = "task=%mesos.task.name container=%container.id";
replace_container_info = true;
}
else if(!s.options.print_additional.empty())
{
output_format = s.options.print_additional;
@@ -60,38 +65,50 @@ void configure_output_format(falco::app::state& s)
void add_source_to_engine(falco::app::state& s, const std::string& src)
{
auto src_info = s.source_infos.at(src);
auto& filterchecks = *src_info->filterchecks.get();
auto* inspector = src_info->inspector.get();
std::shared_ptr<gen_event_filter_factory> filter_factory = nullptr;
std::shared_ptr<gen_event_formatter_factory> formatter_factory = nullptr;
auto filter_factory = std::shared_ptr<gen_event_filter_factory>(
new sinsp_filter_factory(inspector, filterchecks));
auto formatter_factory = std::shared_ptr<gen_event_formatter_factory>(
new sinsp_evt_formatter_factory(inspector, filterchecks));
if (src == falco_common::syscall_source)
{
filter_factory = std::shared_ptr<gen_event_filter_factory>(
new sinsp_filter_factory(src_info->inspector.get()));
formatter_factory = std::shared_ptr<gen_event_formatter_factory>(
new sinsp_evt_formatter_factory(src_info->inspector.get()));
}
else
{
auto &filterchecks = s.source_infos.at(src)->filterchecks;
filter_factory = std::shared_ptr<gen_event_filter_factory>(
new sinsp_filter_factory(src_info->inspector.get(), filterchecks));
formatter_factory = std::shared_ptr<gen_event_formatter_factory>(
new sinsp_evt_formatter_factory(src_info->inspector.get(), filterchecks));
}
if(s.config->m_json_output)
{
formatter_factory->set_output_format(gen_event_formatter::OF_JSON);
}
src_info->engine_idx = s.engine->add_source(src, filter_factory, formatter_factory);
src_info->engine_idx = s.engine->add_source(
src, filter_factory, formatter_factory);
}
falco::app::run_result falco::app::actions::init_falco_engine(falco::app::state& s)
{
// add syscall as first source, this is also what each inspector do
// in their own list of registered event sources
add_source_to_engine(s, falco_common::syscall_source);
// add all non-syscall event sources in engine
for (const auto& src : s.loaded_sources)
{
// we skip the syscall source because we already added it
if (src != falco_common::syscall_source)
{
// we skip the syscall as we want it to be the one added for last
// in the engine. This makes the source index assignment easier.
add_source_to_engine(s, src);
}
}
// add syscall as last source
add_source_to_engine(s, falco_common::syscall_source);
// note: in capture mode, we can assume that the plugin source index will
// be the same in both the falco engine and the sinsp plugin manager.
// This assumption stands because the plugin manager stores sources in a
@@ -105,7 +122,7 @@ falco::app::run_result falco::app::actions::init_falco_engine(falco::app::state&
auto manager = s.offline_inspector->get_plugin_manager();
for (const auto &p : manager->plugins())
{
if (p->caps() & CAP_SOURCING && p->id() != 0)
if (p->caps() & CAP_SOURCING)
{
bool added = false;
auto source_idx = manager->source_idx_by_plugin_id(p->id(), added);

View File

@@ -48,17 +48,11 @@ static void init_syscall_inspector(falco::app::state& s, std::shared_ptr<sinsp>
inspector->set_snaplen(s.options.snaplen);
}
if (s.config->m_syscall_drop_failed_exit)
{
falco_logger::log(LOG_INFO, "Failed syscall exit events are dropped in the kernel driver\n");
inspector->set_dropfailed(true);
}
inspector->set_hostname_and_port_resolution_mode(false);
}
static bool populate_filterchecks(
const std::shared_ptr<sinsp>& inspector,
std::shared_ptr<sinsp> inspector,
const std::string& source,
filter_check_list& filterchecks,
std::unordered_set<std::string>& used_plugins,
@@ -118,10 +112,12 @@ falco::app::run_result falco::app::actions::init_inspectors(falco::app::state& s
? s.offline_inspector
: std::make_shared<sinsp>();
// do extra preparation for the syscall source
// handle syscall and plugin sources differently
// todo(jasondellaluce): change this once we support extracting plugin fields from syscalls too
if (src == falco_common::syscall_source)
{
init_syscall_inspector(s, src_info->inspector);
continue;
}
// load and init all plugins compatible with this event source
@@ -130,9 +126,7 @@ falco::app::run_result falco::app::actions::init_inspectors(falco::app::state& s
{
std::shared_ptr<sinsp_plugin> plugin = nullptr;
auto config = s.plugin_configs.at(p->name());
auto is_input = (p->caps() & CAP_SOURCING)
&& ((p->id() != 0 && src == p->event_source())
|| (p->id() == 0 && src == falco_common::syscall_source));
auto is_input = p->caps() & CAP_SOURCING && p->event_source() == src;
if (s.is_capture_mode())
{
@@ -146,10 +140,7 @@ falco::app::run_result falco::app::actions::init_inspectors(falco::app::state& s
// event source, we must register the plugin supporting
// that event source and also plugins with field extraction
// capability that are compatible with that event source
if (is_input
|| (p->caps() & CAP_EXTRACTION && sinsp_plugin::is_source_compatible(p->extract_event_sources(), src))
|| (p->caps() & CAP_PARSING && sinsp_plugin::is_source_compatible(p->parse_event_sources(), src))
|| (p->caps() & CAP_ASYNC && sinsp_plugin::is_source_compatible(p->async_event_sources(), src)))
if (is_input || (p->caps() & CAP_EXTRACTION && p->is_source_compatible(src)))
{
plugin = src_info->inspector->register_plugin(config->m_library_path);
}
@@ -159,19 +150,14 @@ falco::app::run_result falco::app::actions::init_inspectors(falco::app::state& s
// (in capture mode, this is true for every plugin)
if (plugin)
{
// avoid initializing the same plugin twice in the same
// inspector if we're in capture mode
if (!s.is_capture_mode() || used_plugins.find(p->name()) == used_plugins.end())
if (!plugin->init(config->m_init_config, err))
{
if (!plugin->init(config->m_init_config, err))
{
return run_result::fatal(err);
}
return run_result::fatal(err);
}
if (is_input)
{
auto gen_check = src_info->inspector->new_generic_filtercheck();
src_info->filterchecks->add_filter_check(gen_check);
src_info->filterchecks.add_filter_check(gen_check);
}
used_plugins.insert(plugin->name());
}
@@ -181,20 +167,24 @@ falco::app::run_result falco::app::actions::init_inspectors(falco::app::state& s
if (!populate_filterchecks(
src_info->inspector,
src,
*src_info->filterchecks.get(),
src_info->filterchecks,
used_plugins,
err))
{
return run_result::fatal(err);
}
}
}
// check if some plugin remains unused
// check if some plugin with field extraction capability remains unused
for (const auto& p : all_plugins)
{
if (used_plugins.find(p->name()) == used_plugins.end())
if(used_plugins.find(p->name()) == used_plugins.end()
&& p->caps() & CAP_EXTRACTION
&& !(p->caps() & CAP_SOURCING && p->is_source_compatible(p->event_source())))
{
return run_result::fatal("Plugin '" + p->name() + "' is loaded but unused as not compatible with any known event source");
return run_result::fatal("Plugin '" + p->name()
+ "' has field extraction capability but is not compatible with any known event source");
}
}

View File

@@ -49,12 +49,6 @@ falco::app::run_result falco::app::actions::init_outputs(falco::app::state& s)
hostname = c_hostname;
}
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping daemonizing in dry-run\n");
return run_result::ok();
}
s.outputs.reset(new falco_outputs(
s.engine,
s.config->m_outputs,

View File

@@ -15,32 +15,10 @@ limitations under the License.
*/
#include "actions.h"
#include "falco_utils.h"
using namespace falco::app;
using namespace falco::app::actions;
// applies legacy/in-deprecation options to the current config
static void apply_deprecated_options(
const falco::app::options& opts,
const std::shared_ptr<falco_configuration>& cfg)
{
if (!opts.stats_output_file.empty() || !opts.stats_interval.empty())
{
falco_logger::log(LOG_WARNING, "Options '-s' and '--stats-interval' will be deprecated in the future, metrics must be configured through config file");
if (!opts.stats_output_file.empty())
{
cfg->m_metrics_enabled = true;
cfg->m_metrics_output_file = opts.stats_output_file;
if (!opts.stats_interval.empty())
{
cfg->m_metrics_interval_str = opts.stats_interval;
cfg->m_metrics_interval = falco::utils::parse_prometheus_interval(cfg->m_metrics_interval_str);
}
}
}
}
falco::app::run_result falco::app::actions::load_config(falco::app::state& s)
{
try
@@ -73,8 +51,6 @@ falco::app::run_result falco::app::actions::load_config(falco::app::state& s)
s.config->m_buffered_outputs = !s.options.unbuffered_outputs;
apply_deprecated_options(s.options, s.config);
return run_result::ok();
}

View File

@@ -28,12 +28,12 @@ falco::app::run_result falco::app::actions::load_plugins(falco::app::state& s)
return run_result::fatal("Can not load/use plugins with musl optimized build");
}
#endif
auto empty_src_info = state::source_info{};
// Initialize the set of loaded event sources.
// By default, the set includes the 'syscall' event source
state::source_info syscall_src_info;
syscall_src_info.filterchecks.reset(new sinsp_filter_check_list());
s.source_infos.clear();
s.source_infos.insert(syscall_src_info, falco_common::syscall_source);
s.source_infos.insert(empty_src_info, falco_common::syscall_source);
s.loaded_sources = { falco_common::syscall_source };
// Initialize map of plugin configs
@@ -51,17 +51,11 @@ falco::app::run_result falco::app::actions::load_plugins(falco::app::state& s)
falco_logger::log(LOG_INFO, "Loading plugin '" + p.m_name + "' from file " + p.m_library_path + "\n");
auto plugin = s.offline_inspector->register_plugin(p.m_library_path);
s.plugin_configs.insert(p, plugin->name());
if(plugin->caps() & CAP_SOURCING && plugin->id() != 0)
if(plugin->caps() & CAP_SOURCING)
{
state::source_info src_info;
src_info.filterchecks.reset(new filter_check_list());
auto sname = plugin->event_source();
s.source_infos.insert(src_info, sname);
// note: this avoids duplicate values
if (std::find(s.loaded_sources.begin(), s.loaded_sources.end(), sname) == s.loaded_sources.end())
{
s.loaded_sources.push_back(sname);
}
s.source_infos.insert(empty_src_info, sname);
s.loaded_sources.insert(sname);
}
}

View File

@@ -63,7 +63,6 @@ falco::app::run_result falco::app::actions::load_rules_files(falco::app::state&
return run_result::fatal(e.what());
}
std::string err = "";
for(auto &filename : s.config->m_loaded_rules_filenames)
{
falco_logger::log(LOG_INFO, "Loading rules from file " + filename + "\n");
@@ -74,8 +73,7 @@ falco::app::run_result falco::app::actions::load_rules_files(falco::app::state&
if(!res->successful())
{
// Return the summary version as the error
err = res->as_string(true, rc);
break;
return run_result::fatal(res->as_string(true, rc));
}
// If verbose is true, also print any warnings
@@ -85,44 +83,8 @@ falco::app::run_result falco::app::actions::load_rules_files(falco::app::state&
}
}
// note: we have an egg-and-chicken problem here. We would like to check
// plugin requirements before loading any rule, so that we avoid having
// all the "unknown field XXX" errors caused when a plugin is required but
// not loaded. On the other hand, we can't check the requirements before
// loading the rules file, because that's where the plugin dependencies
// are specified. This issue is visible only for dependencies over extractor
// plugins, due to the fact that if a source plugin is not loaded, its
// source will be unknown for the engine and so it will skip loading all of
// the rules to that source, to finally end up here and return a fatal error
// due to plugin dependency not satisfied being the actual problem.
//
// The long-term solution would be to pass information about all the loaded
// plugins to the falco engine before or when loading a rules file, so that
// plugin version checks can be performed properly by the engine, just
// like it does for the engine version requirement. On the other hand,
// This also requires refactoring a big chunk of the API and code of the
// engine responsible of loading rules.
//
// Since we're close to releasing Falco v0.35, the chosen workaround is
// to first collect any error from the engine, then checking if there is
// also a version dependency not being satisfied, and give that failure
// cause priority in case we encounter it. This is indeed not perfect, but
// suits us for the time being. The non-covered corner case is when
// the `required_plugin_versions` YAML block is defined after the first
// rule definition (which is wrong anyways but currently allowed by the
// engine), in which case Falco would stop at the first error (which
// behavior we'll still want to change in the near future), not collect the
// plugin deps info, and the checks below will pass with success wrongly.
//
// todo(jasondellaluce): perform plugin deps checks inside the
// falco engine in the middle of the loading procedure of a rules file
std::string req_err = "";
if (!check_rules_plugin_requirements(s, req_err))
{
err = req_err;
}
if (!err.empty())
std::string err = "";
if (!check_rules_plugin_requirements(s, err))
{
return run_result::fatal(err);
}
@@ -154,15 +116,23 @@ falco::app::run_result falco::app::actions::load_rules_files(falco::app::state&
s.engine->enable_rule_by_tag(s.options.enabled_rule_tags, true);
}
if(s.options.all_events && s.options.modern_bpf)
{
/* Right now the modern BPF probe doesn't support the -A flag, we implemented just
* the "simple set" syscalls.
*/
falco_logger::log(LOG_INFO, "The '-A' flag has no effect with the modern BPF probe, no further syscalls will be added\n");
}
if (s.options.describe_all_rules)
{
s.engine->describe_rule(NULL, s.config->m_json_output);
s.engine->describe_rule(NULL);
return run_result::exit();
}
if (!s.options.describe_rule.empty())
{
s.engine->describe_rule(&(s.options.describe_rule), s.config->m_json_output);
s.engine->describe_rule(&(s.options.describe_rule));
return run_result::exit();
}

View File

@@ -16,7 +16,6 @@ limitations under the License.
#include "actions.h"
#include "helpers.h"
#include "../app.h"
using namespace falco::app;
using namespace falco::app::actions;
@@ -28,8 +27,8 @@ falco::app::run_result falco::app::actions::print_ignored_events(falco::app::sta
return run_result::ok();
}
std::cout << "Ignored syscall(s):" << std::endl;
for(const auto& it : libsinsp::events::sc_set_to_event_names(falco::app::ignored_sc_set()))
std::cout << "Ignored I/O syscall(s):" << std::endl;
for(const auto& it : libsinsp::events::sc_set_to_names(libsinsp::events::io_sc_set()))
{
std::cout << "- " << it.c_str() << std::endl;
}

View File

@@ -140,14 +140,15 @@ static falco::app::run_result do_inspect(
uint64_t duration_start = 0;
uint32_t timeouts_since_last_success_or_msg = 0;
token_bucket rate_limiter;
const bool rate_limiter_enabled = s.config->m_notifications_rate > 0;
const bool is_capture_mode = source.empty();
size_t source_engine_idx = 0;
bool rate_limiter_enabled = s.config->m_notifications_rate > 0;
bool source_engine_idx_found = false;
bool is_capture_mode = source.empty();
bool syscall_source_engine_idx = s.source_infos.at(falco_common::syscall_source)->engine_idx;
std::size_t source_engine_idx = 0;
std::vector<std::string> source_names = inspector->get_plugin_manager()->sources();
source_names.push_back(falco_common::syscall_source);
if (!is_capture_mode)
{
// note: in live mode, each inspector gets assigned a distinct event
// source that does not change for the whole capture.
source_engine_idx = s.source_infos.at(source)->engine_idx;
}
@@ -207,9 +208,8 @@ static falco::app::run_result do_inspect(
}
else if(falco::app::g_restart_signal.triggered())
{
falco::app::g_restart_signal.handle([&s](){
falco::app::g_restart_signal.handle([&](){
falco_logger::log(LOG_INFO, "SIGHUP received, restarting...\n");
s.restart.store(true);
});
break;
}
@@ -228,10 +228,11 @@ static falco::app::run_result do_inspect(
{
sinsp_utils::ts_to_string(duration_start, &last_event_time_str, false, true);
}
nlohmann::json fields;
fields["last_event_time"] = last_event_time_str;
std::map<std::string, std::string> o = {
{"last_event_time", last_event_time_str},
};
auto now = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
s.outputs->handle_msg(now, falco_common::PRIORITY_DEBUG, msg, rule, fields);
s.outputs->handle_msg(now, falco_common::PRIORITY_DEBUG, msg, rule, o);
// Reset the timeouts counter, Falco alerted
timeouts_since_last_success_or_msg = 0;
}
@@ -258,39 +259,26 @@ static falco::app::run_result do_inspect(
// if we are in live mode, we already have the right source engine idx
if (is_capture_mode)
{
// note: here we can assume that the source index will be the same
// in both the falco engine and the inspector. See the
// comment in init_falco_engine.cpp for more details.
source_engine_idx = ev->get_source_idx();
if (source_engine_idx == sinsp_no_event_source_idx)
source_engine_idx = syscall_source_engine_idx;
if (ev->get_type() == PPME_PLUGINEVENT_E)
{
std::string msg = "Unknown event source for inspector's event";
if (ev->get_type() == PPME_PLUGINEVENT_E)
// note: here we can assume that the source index will be the same
// in both the falco engine and the sinsp plugin manager. See the
// comment in init_falco_engine.cpp for more details.
source_engine_idx = inspector->get_plugin_manager()->source_idx_by_plugin_id(*(int32_t *)ev->get_param(0)->m_val, source_engine_idx_found);
if (!source_engine_idx_found)
{
auto pluginID = *(int32_t *)ev->get_param(0)->m_val;
msg += " (plugin ID: " + std::to_string(pluginID) + ")";
return run_result::fatal("Unknown plugin ID in inspector: " + std::to_string(*(int32_t *)ev->get_param(0)->m_val));
}
return run_result::fatal(msg);
}
// for capture mode, the source name can change at every event
stats_collector.collect(inspector, inspector->event_sources()[source_engine_idx], num_evts);
stats_collector.collect(inspector, source_names[source_engine_idx]);
}
else
{
// in live mode, each inspector gets assigned a distinct event source,
// so we report an error if we fetch an event of a different source.
if (source_engine_idx != ev->get_source_idx())
{
auto msg = "Unexpected event source for inspector's event: expected='" + source + "', actual=";
msg += (ev->get_source_name() != NULL)
? ("'" + std::string(ev->get_source_name()) + "'")
: ("<NA>");
return run_result::fatal(msg);
}
// for live mode, the source name is constant
stats_collector.collect(inspector, source, num_evts);
stats_collector.collect(inspector, source);
}
// Reset the timeouts counter, Falco successfully got an event to process
@@ -396,67 +384,33 @@ static void process_inspector_events(
}
}
static falco::app::run_result init_stats_writer(
const std::shared_ptr<const stats_writer>& sw,
const std::shared_ptr<const falco_configuration>& config,
bool is_dry_run)
static std::shared_ptr<stats_writer> init_stats_writer(const options& opts)
{
if (!config->m_metrics_enabled)
auto statsw = std::make_shared<stats_writer>();
if (!opts.stats_filename.empty())
{
return falco::app::run_result::ok();
std::string err;
if (!stats_writer::init_ticker(opts.stats_interval, err))
{
throw falco_exception(err);
}
statsw.reset(new stats_writer(opts.stats_filename));
}
/* Enforce minimum bound of 100ms. */
if(config->m_metrics_interval < 100)
{
return falco::app::run_result::fatal("Metrics interval must have a minimum value of 100ms and reflect a Prometheus compliant time duration format: https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations. ");
}
if(std::all_of(config->m_metrics_interval_str.begin(), config->m_metrics_interval_str.end(), ::isdigit))
{
falco_logger::log(LOG_WARNING, "Metrics interval was passed as numeric value without Prometheus time unit, this option will be deprecated in the future");
}
if (config->m_metrics_enabled && !sw->has_output())
{
falco_logger::log(LOG_WARNING, "Metrics are enabled with no output configured, no snapshot will be collected");
}
falco_logger::log(LOG_INFO, "Setting metrics interval to " + config->m_metrics_interval_str + ", equivalent to " + std::to_string(config->m_metrics_interval) + " (ms)\n");
auto res = falco::app::run_result::ok();
if (is_dry_run)
{
return res;
}
res.success = stats_writer::init_ticker(config->m_metrics_interval, res.errstr);
res.proceed = res.success;
return res;
return statsw;
}
falco::app::run_result falco::app::actions::process_events(falco::app::state& s)
{
run_result res = run_result::ok();
bool termination_forced = false;
// Notify engine that we finished loading and enabling all rules
s.engine->complete_rule_loading();
// Initialize stats writer
auto statsw = std::make_shared<stats_writer>(s.outputs, s.config);
auto res = init_stats_writer(statsw, s.config, s.options.dry_run);
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping event processing in dry-run\n");
return res;
}
if (!res.success)
{
return res;
}
auto statsw = init_stats_writer(s.options);
// Start processing events
bool termination_forced = false;
if(s.is_capture_mode())
{
res = open_offline_inspector(s);
@@ -533,19 +487,11 @@ falco::app::run_result falco::app::actions::process_events(falco::app::state& s)
// wait for event processing to terminate for all sources
// if a thread terminates with an error, we trigger the app termination
// to force all other event streams to terminate too.
// to force all other event streams to terminate too.
// We accumulate the errors in a single run_result.
size_t closed_count = 0;
while (closed_count < ctxs.size())
{
if (!res.success && !termination_forced)
{
falco_logger::log(LOG_INFO, "An error occurred in an event source, forcing termination...\n");
falco::app::g_terminate_signal.trigger();
falco::app::g_terminate_signal.handle([&](){});
termination_forced = true;
}
// This is shared across all running event source threads and
// keeps the main thread sleepy until one of the parallel
// threads terminates and invokes release(). At that point,
@@ -554,6 +500,14 @@ falco::app::run_result falco::app::actions::process_events(falco::app::state& s)
// event source is enabled, in which we have no additional threads.
termination_sem.acquire();
if (!res.success && !termination_forced)
{
falco_logger::log(LOG_INFO, "An error occurred in an event source, forcing termination...\n");
falco::app::g_terminate_signal.trigger();
falco::app::g_terminate_signal.handle([&](){});
termination_forced = true;
}
for (auto &ctx : ctxs)
{
if (ctx.sync->finished() && !ctx.sync->joined())

View File

@@ -22,7 +22,7 @@ using namespace falco::app::actions;
falco::app::run_result falco::app::actions::select_event_sources(falco::app::state& s)
{
s.enabled_sources = { s.loaded_sources.begin(), s.loaded_sources.end() };
s.enabled_sources = s.loaded_sources;
// event sources selection is meaningless when reading trace files
if (s.is_capture_mode())
@@ -40,7 +40,7 @@ falco::app::run_result falco::app::actions::select_event_sources(falco::app::sta
s.enabled_sources.clear();
for(const auto &src : s.options.enable_sources)
{
if (std::find(s.loaded_sources.begin(), s.loaded_sources.end(), src) == s.loaded_sources.end())
if (s.loaded_sources.find(src) == s.loaded_sources.end())
{
return run_result::fatal("Attempted enabling an unknown event source: " + src);
}
@@ -51,7 +51,7 @@ falco::app::run_result falco::app::actions::select_event_sources(falco::app::sta
{
for(const auto &src : s.options.disable_sources)
{
if (std::find(s.loaded_sources.begin(), s.loaded_sources.end(), src) == s.loaded_sources.end())
if (s.loaded_sources.find(src) == s.loaded_sources.end())
{
return run_result::fatal("Attempted disabling an unknown event source: " + src);
}

View File

@@ -29,12 +29,6 @@ falco::app::run_result falco::app::actions::start_grpc_server(falco::app::state&
// gRPC server
if(s.config->m_grpc_enabled)
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping starting gRPC server in dry-run\n");
return run_result::ok();
}
falco_logger::log(LOG_INFO, "gRPC server threadiness equals to " + std::to_string(s.config->m_grpc_threadiness) + "\n");
// TODO(fntlnz,leodido): when we want to spawn multiple threads we need to have a queue per thread, or implement
// different queuing mechanisms, round robin, fanout? What we want to achieve?
@@ -57,19 +51,10 @@ falco::app::run_result falco::app::actions::start_grpc_server(falco::app::state&
falco::app::run_result falco::app::actions::stop_grpc_server(falco::app::state& s)
{
#ifndef MINIMAL_BUILD
if(s.config->m_grpc_enabled)
if(s.grpc_server_thread.joinable())
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping stopping gRPC server in dry-run\n");
return run_result::ok();
}
if(s.grpc_server_thread.joinable())
{
s.grpc_server.shutdown();
s.grpc_server_thread.join();
}
s.grpc_server.shutdown();
s.grpc_server_thread.join();
}
#endif
return run_result::ok();

View File

@@ -28,12 +28,6 @@ falco::app::run_result falco::app::actions::start_webserver(falco::app::state& s
#ifndef MINIMAL_BUILD
if(!s.is_capture_mode() && s.config->m_webserver_enabled)
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping starting webserver in dry-run\n");
return run_result::ok();
}
std::string ssl_option = (s.config->m_webserver_ssl_enabled ? " (SSL)" : "");
falco_logger::log(LOG_INFO, "Starting health webserver with threadiness "
+ std::to_string(s.config->m_webserver_threadiness)
@@ -56,14 +50,8 @@ falco::app::run_result falco::app::actions::start_webserver(falco::app::state& s
falco::app::run_result falco::app::actions::stop_webserver(falco::app::state& s)
{
#ifndef MINIMAL_BUILD
if(!s.is_capture_mode() && s.config->m_webserver_enabled)
if(!s.is_capture_mode())
{
if (s.options.dry_run)
{
falco_logger::log(LOG_DEBUG, "Skipping stopping webserver in dry-run\n");
return run_result::ok();
}
s.webserver.stop();
}
#endif

View File

@@ -25,15 +25,6 @@ falco::atomic_signal_handler falco::app::g_reopen_outputs_signal;
using app_action = std::function<falco::app::run_result(falco::app::state&)>;
libsinsp::events::set<ppm_sc_code> falco::app::ignored_sc_set()
{
// we ignore all the I/O syscalls that can have very high throughput and
// that can badly impact performance. Of those, we avoid ignoring the
// ones that are part of the base set used by libsinsp for maintaining
// its internal state.
return libsinsp::events::io_sc_set().diff(libsinsp::events::sinsp_state_sc_set());
}
bool falco::app::run(int argc, char** argv, bool& restart, std::string& errstr)
{
falco::app::state s;
@@ -49,11 +40,7 @@ bool falco::app::run(int argc, char** argv, bool& restart, std::string& errstr)
}
s.cmdline += *arg;
}
return falco::app::run(s, restart, errstr);
}
bool falco::app::run(falco::app::state& s, bool& restart, std::string& errstr)
{
// The order here is the order in which the methods will be
// called. Before changing the order, ensure that all
// dependencies are honored (e.g. don't process events before
@@ -77,14 +64,14 @@ bool falco::app::run(falco::app::state& s, bool& restart, std::string& errstr)
falco::app::actions::validate_rules_files,
falco::app::actions::load_rules_files,
falco::app::actions::print_support,
falco::app::actions::init_outputs,
falco::app::actions::create_signal_handlers,
falco::app::actions::attach_inotify_signals,
falco::app::actions::create_requested_paths,
falco::app::actions::daemonize,
falco::app::actions::init_outputs,
falco::app::actions::init_clients,
falco::app::actions::configure_interesting_sets,
falco::app::actions::configure_syscall_buffer_size,
falco::app::actions::configure_syscall_buffer_num,
falco::app::actions::start_grpc_server,
falco::app::actions::start_webserver,
falco::app::actions::process_events,
@@ -117,7 +104,7 @@ bool falco::app::run(falco::app::state& s, bool& restart, std::string& errstr)
errstr = res.errstr;
}
restart = s.restart;
restart = falco::app::g_restart_signal.triggered();
return res.success;
}

View File

@@ -16,18 +16,12 @@ limitations under the License.
#pragma once
#include "state.h"
#include <string>
namespace falco {
namespace app {
libsinsp::events::set<ppm_sc_code> ignored_sc_set();
bool run(int argc, char** argv, bool& restart, std::string& errstr);
bool run(falco::app::state& s, bool& restart, std::string& errstr);
}; // namespace app
}; // namespace falco

View File

@@ -18,8 +18,6 @@ limitations under the License.
#include "../configuration.h"
#include "config_falco.h"
#include <cxxopts.hpp>
#include <fstream>
namespace falco {
@@ -37,9 +35,9 @@ options::options()
list_syscall_events(false),
markdown(false),
modern_bpf(false),
dry_run(false),
nodriver(false)
m_cmdline_opts("falco", "Falco - Cloud Native Runtime Security")
{
define();
}
options::~options()
@@ -48,13 +46,8 @@ options::~options()
bool options::parse(int argc, char **argv, std::string &errstr)
{
cxxopts::Options opts("falco", "Falco - Cloud Native Runtime Security");
define(opts);
m_usage_str = opts.help();
cxxopts::ParseResult m_cmdline_parsed;
try {
m_cmdline_parsed = opts.parse(argc, argv);
m_cmdline_parsed = m_cmdline_opts.parse(argc, argv);
}
catch (std::exception &e)
{
@@ -149,43 +142,29 @@ bool options::parse(int argc, char **argv, std::string &errstr)
list_fields = m_cmdline_parsed.count("list") > 0 ? true : false;
int open_modes = 0;
open_modes += !trace_filename.empty();
open_modes += userspace;
open_modes += !gvisor_config.empty();
open_modes += modern_bpf;
open_modes += getenv("FALCO_BPF_PROBE") != NULL;
open_modes += nodriver;
if (open_modes > 1)
{
errstr = std::string("You can not specify more than one of -e, -u (--userspace), -g (--gvisor-config), --modern-bpf, --nodriver, and the FALCO_BPF_PROBE env var");
return false;
}
return true;
}
const std::string& options::usage()
std::string options::usage()
{
return m_usage_str;
return m_cmdline_opts.help();
}
void options::define(cxxopts::Options& opts)
void options::define()
{
opts.add_options()
m_cmdline_opts.add_options()
("h,help", "Print this page", cxxopts::value(help)->default_value("false"))
#ifdef BUILD_TYPE_RELEASE
("c", "Configuration file. If not specified uses " FALCO_INSTALL_CONF_FILE ".", cxxopts::value(conf_filename), "<path>")
#else
("c", "Configuration file. If not specified tries " FALCO_SOURCE_CONF_FILE ", " FALCO_INSTALL_CONF_FILE ".", cxxopts::value(conf_filename), "<path>")
#endif
("A", "Monitor all events supported by Falco defined in rules and configs. Please use the -i option to list the events ignored by default without -A. This option affects live captures only. Setting -A can impact performance.", cxxopts::value(all_events)->default_value("false"))
("A", "Monitor each event defined in rules and configs + high volume I/O syscalls. Please use the -i option to list the I/O syscalls Falco supports. This option affects live captures only. Setting -A can impact performance.", cxxopts::value(all_events)->default_value("false"))
("b,print-base64", "Print data buffers in base64. This is useful for encoding binary data that needs to be used over media designed to consume this format.")
("cri", "Path to CRI socket for container metadata. Use the specified socket to fetch data from a CRI-compatible runtime. If not specified, uses the libs default. This option can be passed multiple times to specify socket to be tried until a successful one is found.", cxxopts::value(cri_socket_paths), "<path>")
("d,daemon", "Run as a daemon.", cxxopts::value(daemon)->default_value("false"))
("disable-cri-async", "Disable asynchronous CRI metadata fetching. This is useful to let the input event wait for the container metadata fetch to finish before moving forward. Async fetching, in some environments leads to empty fields for container metadata when the fetch is not fast enough to be completed asynchronously. This can have a performance penalty on your environment depending on the number of containers and the frequency at which they are created/started/stopped.", cxxopts::value(disable_cri_async)->default_value("false"))
("disable-source", "Disable a specific event source. By default, all loaded sources get enabled. Available sources are 'syscall' and all sources defined by loaded plugins supporting the event sourcing capability. This option can be passed multiple times. This has no offect when reading events from a trace file. Can not disable all event sources. Can not be mixed with --enable-source.", cxxopts::value(disable_sources), "<event_source>")
("dry-run", "Run Falco without proceesing events. Can be useful for checking that the configuration and rules do not have any errors.", cxxopts::value(dry_run)->default_value("false"))
("D", "Disable any rules with names having the substring <substring>. This option can be passed multiple times. Can not be mixed with -t.", cxxopts::value(disabled_rule_substrings), "<substring>")
("e", "Read the events from a trace file <events_file> in .scap format instead of tapping into live.", cxxopts::value(trace_filename), "<events_file>")
("enable-source", "Enable a specific event source. If used, all loaded sources get disabled by default and only the ones passed with this option get enabled. Available sources are 'syscall' and all sources defined by loaded plugins supporting the event sourcing capability. This option can be passed multiple times. This has no offect when reading events from a trace file. Can not be mixed with --disable-source.", cxxopts::value(enable_sources), "<event_source>")
@@ -195,32 +174,34 @@ void options::define(cxxopts::Options& opts)
("gvisor-root", "gVisor root directory for storage of container state. Equivalent to runsc --root flag.", cxxopts::value(gvisor_root), "<gvisor_root>")
#endif
#ifdef HAS_MODERN_BPF
("modern-bpf", "Use BPF modern probe to capture system events.", cxxopts::value(modern_bpf)->default_value("false"))
("modern-bpf", "[EXPERIMENTAL] Use BPF modern probe to capture system events.", cxxopts::value(modern_bpf)->default_value("false"))
#endif
("i", "Print all high volume syscalls that are ignored by default for performance reasons (i.e. without the -A flag) and exit.", cxxopts::value(print_ignored_events)->default_value("false"))
("i", "Print all high volume I/O syscalls that are ignored by default (i.e. without the -A flag) and exit.", cxxopts::value(print_ignored_events)->default_value("false"))
#ifndef MINIMAL_BUILD
("k,k8s-api", "Enable Kubernetes support by connecting to the API server specified as argument. E.g. \"http://admin:password@127.0.0.1:8080\". The API server can also be specified via the environment variable FALCO_K8S_API.", cxxopts::value(k8s_api), "<url>")
("K,k8s-api-cert", "Use the provided files names to authenticate user and (optionally) verify the K8S API server identity. Each entry must specify full (absolute, or relative to the current directory) path to the respective file. Private key password is optional (needed only if key is password protected). CA certificate is optional. For all files, only PEM file format is supported. Specifying CA certificate only is obsoleted - when single entry is provided for this option, it will be interpreted as the name of a file containing bearer token. Note that the format of this command-line option prohibits use of files whose names contain ':' or '#' characters in the file name.", cxxopts::value(k8s_api_cert), "(<bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>])")
("k8s-node", "The node name will be used as a filter when requesting metadata of pods to the API server. Usually, this should be set to the current node on which Falco is running. If empty, no filter is set, which may have a performance penalty on large clusters.", cxxopts::value(k8s_node_name), "<node_name>")
#endif
("L", "Show the name and description of all rules and exit. If json_output is set to true, it prints details about all rules, macros and lists in JSON format", cxxopts::value(describe_all_rules)->default_value("false"))
("l", "Show the name and description of the rule with name <rule> and exit. If json_output is set to true, it prints details about the rule in JSON format", cxxopts::value(describe_rule), "<rule>")
("L", "Show the name and description of all rules and exit.", cxxopts::value(describe_all_rules)->default_value("false"))
("l", "Show the name and description of the rule with name <rule> and exit.", cxxopts::value(describe_rule), "<rule>")
("list", "List all defined fields. If <source> is provided, only list those fields for the source <source>. Current values for <source> are \"syscall\" or any source from a configured plugin with event sourcing capability.", cxxopts::value(list_source_fields)->implicit_value(""), "<source>")
("list-syscall-events", "List all defined system call events.", cxxopts::value<bool>(list_syscall_events))
#ifndef MUSL_OPTIMIZED
("list-plugins", "Print info on all loaded plugins and exit.", cxxopts::value(list_plugins)->default_value("false"))
#endif
#ifndef MINIMAL_BUILD
("m,mesos-api", "This feature has been DEPRECATED and will be removed in the next version.", cxxopts::value(mesos_api), "<url[,marathon_url]>")
#endif
("M", "Stop collecting after <num_seconds> reached.", cxxopts::value(duration_to_tot)->default_value("0"), "<num_seconds>")
("markdown", "When used with --list/--list-syscall-events, print the content in Markdown format", cxxopts::value<bool>(markdown))
("N", "When used with --list, only print field names.", cxxopts::value(names_only)->default_value("false"))
("nodriver", "Capture for system events without drivers. If a loaded plugin has event sourcing capability and can produce system events, it will be used to for event collection.", cxxopts::value(nodriver)->default_value("false"))
("o,option", "Set the value of option <opt> to <val>. Overrides values in configuration file. <opt> can be identified using its location in configuration file using dot notation. Elements which are entries of lists can be accessed via square brackets [].\n E.g. base.id = val\n base.subvalue.subvalue2 = val\n base.list[1]=val", cxxopts::value(cmdline_config_options), "<opt>=<val>")
("plugin-info", "Print info for a single plugin and exit.\nThis includes all descriptivo info like name and author, along with the\nschema format for the init configuration and a list of suggested open parameters.\n<plugin_name> can be the name of the plugin or its configured library_path.", cxxopts::value(print_plugin_info), "<plugin_name>")
("p,print", "Add additional information to each falco notification's output.\nWith -pc or -pcontainer will use a container-friendly format.\nWith -pk or -pkubernetes will use a kubernetes-friendly format.\nAdditionally, specifying -pc/-pk will change the interpretation of %container.info in rule output fields.", cxxopts::value(print_additional), "<output_format>")
("P,pidfile", "When run as a daemon, write pid to specified file", cxxopts::value(pidfilename)->default_value("/var/run/falco.pid"), "<pid_file>")
("r", "Rules file/directory (defaults to value set in configuration file, or /etc/falco_rules.yaml). This option can be passed multiple times to read from multiple files/directories.", cxxopts::value<std::vector<std::string>>(), "<rules_file>")
("s", "If specified, append statistics related to Falco's reading/processing of events to this file (only useful in live mode).", cxxopts::value(stats_output_file), "<stats_file>")
("stats-interval", "When using -s <stats_file>, write statistics every <msec> ms. This uses signals, and has a minimum threshold of 100 ms. Defaults to 5000 (5 seconds).", cxxopts::value(stats_interval), "<msec>")
("s", "If specified, append statistics related to Falco's reading/processing of events to this file (only useful in live mode).", cxxopts::value(stats_filename), "<stats_file>")
("stats-interval", "When using -s <stats_file>, write statistics every <msec> ms. This uses signals, so don't recommend intervals below 200 ms. Defaults to 5000 (5 seconds).", cxxopts::value(stats_interval)->default_value("5000"), "<msec>")
("S,snaplen", "Capture the first <len> bytes of each I/O buffer. By default, the first 80 bytes are captured. Use this option with caution, it can generate huge trace files.", cxxopts::value(snaplen)->default_value("0"), "<len>")
("support", "Print support information including version, rules files used, etc. and exit.", cxxopts::value(print_support)->default_value("false"))
("T", "Disable any rules with a tag=<tag>. This option can be passed multiple times. Can not be mized with -t", cxxopts::value<std::vector<std::string>>(), "<tag>")
@@ -233,7 +214,7 @@ void options::define(cxxopts::Options& opts)
("page-size", "Print the system page size (may help you to choose the right syscall ring-buffer size).", cxxopts::value(print_page_size)->default_value("false"));
opts.set_width(140);
m_cmdline_opts.set_width(140);
}
}; // namespace app

View File

@@ -18,12 +18,12 @@ limitations under the License.
#include <event.h>
#include <cxxopts.hpp>
#include <string>
#include <vector>
#include <set>
namespace cxxopts { class Options; };
namespace falco {
namespace app {
@@ -63,6 +63,7 @@ public:
std::string print_plugin_info;
bool list_syscall_events;
bool markdown;
std::string mesos_api;
int duration_to_tot;
bool names_only;
std::vector<std::string> cmdline_config_options;
@@ -70,8 +71,8 @@ public:
std::string pidfilename;
// Rules list as passed by the user, via cmdline option '-r'
std::list<std::string> rules_filenames;
std::string stats_output_file;
std::string stats_interval;
std::string stats_filename;
uint64_t stats_interval;
uint64_t snaplen;
bool print_support;
std::set<std::string> disabled_rule_tags;
@@ -83,16 +84,15 @@ public:
bool print_version_info;
bool print_page_size;
bool modern_bpf;
bool dry_run;
bool nodriver;
bool parse(int argc, char **argv, std::string &errstr);
const std::string& usage();
std::string usage();
private:
void define(cxxopts::Options& opts);
std::string m_usage_str;
void define();
cxxopts::Options m_cmdline_opts;
cxxopts::ParseResult m_cmdline_parsed;
};
}; // namespace application

View File

@@ -1,204 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "restart_handler.h"
#include "signals.h"
#include "../logger.h"
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/inotify.h>
#include <sys/select.h>
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 30
#include <sys/syscall.h>
#define gettid() syscall(SYS_gettid)
#endif
// Tear down the restart handler: first stop and join the watcher
// thread (which may still be blocked in select()/read() on the inotify
// fd inside watcher_loop()), and only then close the inotify file
// descriptor. The original order (close before stop) raced the watcher
// thread's use of the fd: closing a descriptor that another thread is
// selecting/reading on is a use-after-close hazard, and the fd number
// could even be reused by a concurrent open.
falco::app::restart_handler::~restart_handler()
{
	stop();
	// Guard against start() never having been called or inotify_init()
	// having failed; assumes m_inotify_fd starts out negative — TODO
	// confirm against the member initializer in restart_handler.h.
	if (m_inotify_fd >= 0)
	{
		close(m_inotify_fd);
	}
}
// Request a forced restart check. Safe to call from any thread: the
// request is published through an atomic flag with release semantics,
// matched by the acquire load in watcher_loop(), which treats it like
// a filesystem change notification and performs a dry-run check.
void falco::app::restart_handler::trigger()
{
	m_forced.store(true, std::memory_order_release);
}
// Initialize the inotify instance, register watches for every
// configured file and directory, and launch the background watcher
// thread. On failure, fills 'err' with a description and returns
// false (the inotify fd, if opened, is released by the destructor).
bool falco::app::restart_handler::start(std::string& err)
{
	m_inotify_fd = inotify_init();
	if (m_inotify_fd < 0)
	{
		err = "could not initialize inotify handler";
		return false;
	}

	// Register a watch on a single path. 'kind' ("file" or "directory")
	// is used only to compose the error and debug-log messages.
	auto add_watch = [&](const std::string& path, uint32_t mask, const char* kind) -> bool
	{
		if (inotify_add_watch(m_inotify_fd, path.c_str(), mask) < 0)
		{
			err = "could not watch " + std::string(kind) + ": " + path;
			return false;
		}
		falco_logger::log(LOG_DEBUG, "Watching " + std::string(kind) + " '" + path + "'\n");
		return true;
	};

	// files: we care about writes being completed
	for (const auto& f : m_watched_files)
	{
		if (!add_watch(f, IN_CLOSE_WRITE, "file"))
		{
			return false;
		}
	}

	// directories: we care about entries appearing/disappearing
	for (const auto& d : m_watched_dirs)
	{
		if (!add_watch(d, IN_CREATE | IN_DELETE, "directory"))
		{
			return false;
		}
	}

	// launch the watcher thread
	m_watcher = std::thread(&falco::app::restart_handler::watcher_loop, this);
	return true;
}
// Stop the watcher thread and wait for it to terminate.
// The stop request is published with release semantics, matched by the
// acquire load at the top of watcher_loop()'s main loop; join() is
// guarded by joinable(), so this is safe to call even when start() was
// never invoked, failed before spawning the thread, or the thread
// already exited on its own.
void falco::app::restart_handler::stop()
{
	m_stop.store(true, std::memory_order_release);
	if (m_watcher.joinable())
	{
		m_watcher.join();
	}
}
// Main loop of the watcher thread: waits for inotify events on the watched
// files/dirs (or for a forced request set through m_forced), debounces rapid
// event bursts, validates the pending change with a dry run (m_on_check),
// and finally triggers the global restart signal when the dry run succeeds
// and no further events arrive in the meantime.
void falco::app::restart_handler::watcher_loop() noexcept
{
	// make this thread the owner of signals related to the inotify fd
	if (fcntl(m_inotify_fd, F_SETOWN, gettid()) < 0)
	{
		// an error occurred, we can't recover
		// todo(jasondellaluce): should we terminate the process?
		falco_logger::log(LOG_ERR, "Failed owning inotify handler, shutting down watcher...");
		return;
	}

	fd_set set;
	bool forced = false;         // a forced restart request was consumed
	bool should_check = false;   // a dry run is due at the next quiet timeout
	bool should_restart = false; // last dry run succeeded; restart is pending
	struct timeval timeout;
	// room for a batch of events; each entry can carry a name up to NAME_MAX
	uint8_t buf[(10 * (sizeof(struct inotify_event) + NAME_MAX + 1))];
	while (!m_stop.load(std::memory_order_acquire))
	{
		// wait for inotify events with a certain timeout.
		// Note, we'll run through select even before performing a dry-run,
		// so that we can dismiss in case we have to debounce rapid
		// subsequent events.
		timeout.tv_sec = 0;
		timeout.tv_usec = 100000; // 100ms debounce window
		FD_ZERO(&set);
		FD_SET(m_inotify_fd, &set);
		auto rv = select(m_inotify_fd + 1, &set, NULL, NULL, &timeout);
		if (rv < 0)
		{
			// an error occurred, we can't recover
			// todo(jasondellaluce): should we terminate the process?
			falco_logger::log(LOG_ERR, "Failed select with inotify handler, shutting down watcher...");
			return;
		}

		// check if there's been a forced restart request
		// (consume the flag atomically so each request is handled once)
		forced = m_forced.load(std::memory_order_acquire);
		m_forced.store(false, std::memory_order_release);

		// no new watch event is received during the timeout
		if (rv == 0 && !forced)
		{
			// perform a dry run. In case no error occurs, we loop back
			// to the select in order to debounce new inotify events before
			// actually triggering a restart.
			if (should_check)
			{
				should_check = false;
				should_restart = m_on_check();
				continue;
			}

			// if the previous dry run was successful, and no new
			// inotify events have been received during the dry run,
			// then we trigger the restarting signal and quit.
			// note: quitting is a time optimization, the thread
			// will be forced to quit anyways later by the Falco app, but
			// at least we don't make users wait for the timeout.
			if (should_restart)
			{
				should_restart = false;
				// todo(jasondellaluce): make this a callback too maybe?
				g_restart_signal.trigger();
				return;
			}

			// let's go back to the select
			continue;
		}

		// at this point, we either received a new inotify event or a forced
		// restart. If this happened during a dry run (even if the dry run
		// was successful), or during a timeout wait since the last successful
		// dry run before a restart, we dismiss the restart attempt and
		// perform an additional dry-run for safety purposes (the new inotify
		// events may be related to bad config/rules files changes).
		should_restart = false;
		should_check = false;

		// if there's data on the inotify fd, consume it
		// (even if there is a forced request too)
		if (rv > 0)
		{
			// note: if available data is less than buffer size, this should
			// return n > 0 but not filling the buffer. If available data is
			// more than buffer size, we will loop back to select and behave
			// like we debounced an event.
			auto n = read(m_inotify_fd, buf, sizeof(buf));
			if (n < 0)
			{
				// an error occurred, we can't recover
				// todo(jasondellaluce): should we terminate the process?
				falco_logger::log(LOG_ERR, "Failed read with inotify handler, shutting down watcher...");
				return;
			}
			// this is an odd case, but if we got here with
			// no read data, and no forced request, we get back
			// looping in the select. This can likely happen if
			// there's data in the inotify fd but the first read
			// returned no bytes. Likely we'll get back here at the
			// next select call.
			else if (n == 0)
			{
				// we still proceed in case the request was forced
				if (!forced)
				{
					continue;
				}
			}
		}

		// we consumed the new inotify events or we received a forced
		// restart request, so we'll perform a dry run after the
		// next timeout.
		should_check = true;
	}
}

View File

@@ -1,81 +0,0 @@
/*
Copyright (C) 2023 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include <thread>
#include <atomic>
#include <vector>
#include <string>
#include <functional>
namespace falco
{
namespace app
{
/**
 * @brief A thread-safe helper for handling hot-reload application restarts.
 *
 * Watches a set of files and directories through an inotify fd from a
 * background thread and runs a user-provided safety check (a "dry run")
 * before confirming a triggered restart.
 */
class restart_handler
{
public:
	/**
	 * @brief A function that performs safety checks before confirming
	 * a triggered application restart. Returns true if the application
	 * can safely be restarted.
	 */
	using on_check_t = std::function<bool()>;

	/**
	 * @brief A list of files or directories paths to watch.
	 */
	using watch_list_t = std::vector<std::string>;

	restart_handler(
			on_check_t on_check,
			const watch_list_t& watch_files = {},
			const watch_list_t& watch_dirs = {})
		: m_inotify_fd(-1),
		  m_stop(false),
		  m_forced(false),
		  m_on_check(on_check),
		  m_watched_dirs(watch_dirs),
		  m_watched_files(watch_files) { }
	virtual ~restart_handler();
	// note: the previously-defaulted move operations were implicitly
	// deleted anyway, because std::atomic members are neither copyable
	// nor movable. Deleting them explicitly makes the contract clear.
	restart_handler(restart_handler&&) = delete;
	restart_handler& operator = (restart_handler&&) = delete;
	restart_handler(const restart_handler&) = delete;
	restart_handler& operator = (const restart_handler&) = delete;

	// Sets up the watches and launches the watcher thread.
	// Returns false and fills err in case of failure.
	bool start(std::string& err);
	// Requests the watcher thread to stop and joins it.
	void stop();
	// Requests a forced restart check, as if a watched path had changed.
	void trigger();

private:
	void watcher_loop() noexcept;

	int m_inotify_fd;             // inotify fd, -1 until initialized
	std::thread m_watcher;        // background watcher thread
	std::atomic<bool> m_stop;     // termination request for the watcher
	std::atomic<bool> m_forced;   // forced restart request flag
	on_check_t m_on_check;        // dry-run safety check callback
	watch_list_t m_watched_dirs;  // directories to watch
	watch_list_t m_watched_files; // files to watch
};
}; // namespace app
}; // namespace falco

View File

@@ -19,7 +19,6 @@ limitations under the License.
#include "indexed_vector.h"
#include "options.h"
#include "restart_handler.h"
#include "../configuration.h"
#include "../stats_writer.h"
#ifndef MINIMAL_BUILD
@@ -31,7 +30,6 @@ limitations under the License.
#include <string>
#include <memory>
#include <atomic>
#include <unordered_set>
namespace falco {
@@ -47,21 +45,12 @@ struct state
// Holds the info mapped for each loaded event source
struct source_info
{
source_info():
engine_idx(-1),
filterchecks(new filter_check_list()),
inspector(nullptr) { }
source_info(source_info&&) = default;
source_info& operator = (source_info&&) = default;
source_info(const source_info&) = default;
source_info& operator = (const source_info&) = default;
// The index of the given event source in the state's falco_engine,
// as returned by falco_engine::add_source
std::size_t engine_idx;
// The filtercheck list containing all fields compatible
// with the given event source
std::shared_ptr<filter_check_list> filterchecks;
filter_check_list filterchecks;
// The inspector assigned to this event source. If in capture mode,
// all event source will share the same inspector. If the event
// source is a plugin one, the assigned inspector must have that
@@ -70,27 +59,19 @@ struct state
};
state():
restart(false),
loaded_sources(),
enabled_sources(),
source_infos(),
plugin_configs(),
selected_sc_set(),
selected_tp_set(),
syscall_buffer_bytes_size(DEFAULT_DRIVER_BUFFER_BYTES_DIM)
{
config = std::make_shared<falco_configuration>();
engine = std::make_shared<falco_engine>();
offline_inspector = std::make_shared<sinsp>();
outputs = nullptr;
restarter = nullptr;
}
state(const std::string& cmd, const falco::app::options& opts): state()
{
cmdline = cmd;
options = opts;
}
~state() = default;
state(state&&) = default;
state& operator = (state&&) = default;
@@ -99,18 +80,14 @@ struct state
std::string cmdline;
falco::app::options options;
std::atomic<bool> restart;
std::shared_ptr<falco_configuration> config;
std::shared_ptr<falco_outputs> outputs;
std::shared_ptr<falco_engine> engine;
// The set of loaded event sources (by default, the syscall event
// source plus all event sources coming from the loaded plugins).
// note: this has to be a vector to preserve the loading order,
// however it's not supposed to contain duplicate values.
std::vector<std::string> loaded_sources;
// source plus all event sources coming from the loaded plugins)
std::unordered_set<std::string> loaded_sources;
// The set of enabled event sources (can be altered by using
// the --enable-source and --disable-source options)
@@ -131,12 +108,12 @@ struct state
// Set of syscalls we want the driver to capture
libsinsp::events::set<ppm_sc_code> selected_sc_set;
// Set of tracepoints we want the driver to capture
libsinsp::events::set<ppm_tp_code> selected_tp_set;
// Dimension of the syscall buffer in bytes.
uint64_t syscall_buffer_bytes_size;
// Helper responsible for watching and handling hot application restarts
std::shared_ptr<restart_handler> restarter;
#ifndef MINIMAL_BUILD
falco::grpc::server grpc_server;
std::thread grpc_server_thread;
@@ -153,11 +130,6 @@ struct state
{
return !options.gvisor_config.empty();
}
inline bool is_source_enabled(const std::string& src) const
{
return enabled_sources.find(falco_common::syscall_source) != enabled_sources.end();
}
};
}; // namespace app

View File

@@ -14,8 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
#pragma once
#include <mutex>
#include <atomic>
#include <functional>

View File

@@ -57,21 +57,8 @@ falco_configuration::falco_configuration():
m_metadata_download_chunk_wait_us(1000),
m_metadata_download_watch_freq_sec(1),
m_syscall_buf_size_preset(4),
m_cpus_for_each_syscall_buffer(2),
m_syscall_drop_failed_exit(false),
m_base_syscalls_repair(false),
m_metrics_enabled(false),
m_metrics_interval_str("5000"),
m_metrics_interval(5000),
m_metrics_stats_rule_enabled(false),
m_metrics_output_file(""),
m_metrics_resource_utilization_enabled(true),
m_metrics_kernel_event_counters_enabled(true),
m_metrics_libbpf_stats_enabled(true),
m_metrics_convert_memory_to_mb(true),
m_metrics_include_empty_values(false)
m_cpus_for_each_syscall_buffer(2)
{
init({});
}
void falco_configuration::init(const std::vector<std::string>& cmdline_options)
@@ -190,22 +177,6 @@ void falco_configuration::load_yaml(const std::string& config_name, const yaml_h
user_agent = config.get_scalar<std::string>("http_output.user_agent","falcosecurity/falco");
http_output.options["user_agent"] = user_agent;
bool insecure;
insecure = config.get_scalar<bool>("http_output.insecure", false);
http_output.options["insecure"] = insecure? std::string("true") : std::string("false");
std::string ca_cert;
ca_cert = config.get_scalar<std::string>("http_output.ca_cert", "");
http_output.options["ca_cert"] = ca_cert;
std::string ca_bundle;
ca_bundle = config.get_scalar<std::string>("http_output.ca_bundle", "");
http_output.options["ca_bundle"] = ca_bundle;
std::string ca_path;
ca_path = config.get_scalar<std::string>("http_output.ca_path", "/etc/ssl/certs");
http_output.options["ca_path"] = ca_path;
m_outputs.push_back(http_output);
}
@@ -342,28 +313,14 @@ void falco_configuration::load_yaml(const std::string& config_name, const yaml_h
m_cpus_for_each_syscall_buffer = config.get_scalar<uint16_t>("modern_bpf.cpus_for_each_syscall_buffer", 2);
m_syscall_drop_failed_exit = config.get_scalar<bool>("syscall_drop_failed_exit", false);
m_base_syscalls.clear();
config.get_sequence<std::unordered_set<std::string>>(m_base_syscalls, std::string("base_syscalls"));
m_base_syscalls_custom_set.clear();
config.get_sequence<std::unordered_set<std::string>>(m_base_syscalls_custom_set, std::string("base_syscalls.custom_set"));
m_base_syscalls_repair = config.get_scalar<bool>("base_syscalls.repair", false);
m_metrics_enabled = config.get_scalar<bool>("metrics.enabled", false);
m_metrics_interval_str = config.get_scalar<std::string>("metrics.interval", "5000");
m_metrics_interval = falco::utils::parse_prometheus_interval(m_metrics_interval_str);
m_metrics_stats_rule_enabled = config.get_scalar<bool>("metrics.output_rule", false);
m_metrics_output_file = config.get_scalar<std::string>("metrics.output_file", "");
m_metrics_resource_utilization_enabled = config.get_scalar<bool>("metrics.resource_utilization_enabled", true);
m_metrics_kernel_event_counters_enabled = config.get_scalar<bool>("metrics.kernel_event_counters_enabled", true);
m_metrics_libbpf_stats_enabled = config.get_scalar<bool>("metrics.libbpf_stats_enabled", true);
m_metrics_convert_memory_to_mb = config.get_scalar<bool>("metrics.convert_memory_to_mb", true);
m_metrics_include_empty_values = config.get_scalar<bool>("metrics.include_empty_values", false);
std::vector<std::string> load_plugins;
std::set<std::string> load_plugins;
bool load_plugins_node_defined = config.is_defined("load_plugins");
config.get_sequence<std::vector<std::string>>(load_plugins, "load_plugins");
config.get_sequence<std::set<std::string>>(load_plugins, "load_plugins");
std::list<falco_configuration::plugin_config> plugins;
try
@@ -381,32 +338,14 @@ void falco_configuration::load_yaml(const std::string& config_name, const yaml_h
// If load_plugins was specified, only save plugins matching those in values
m_plugins.clear();
if (!load_plugins_node_defined)
for (auto &p : plugins)
{
// If load_plugins was not specified at all, every plugin is added.
// The loading order is the same as the sequence in the YAML config.
m_plugins = { plugins.begin(), plugins.end() };
}
else
{
// If load_plugins is specified, only plugins contained in its list
// are added, with the same order as in the list.
for (const auto& pname : load_plugins)
// If load_plugins was not specified at all, every
// plugin is added. Otherwise, the plugin must be in
// the load_plugins list.
if(!load_plugins_node_defined || load_plugins.find(p.m_name) != load_plugins.end())
{
bool found = false;
for (const auto& p : plugins)
{
if (pname == p.m_name)
{
m_plugins.push_back(p);
found = true;
break;
}
}
if (!found)
{
throw std::logic_error("Cannot load plugin '" + pname + "': plugin config not found for given name");
}
m_plugins.push_back(p);
}
}

View File

@@ -106,23 +106,8 @@ public:
// Number of CPUs associated with a single ring buffer.
uint16_t m_cpus_for_each_syscall_buffer;
bool m_syscall_drop_failed_exit;
// User supplied base_syscalls, overrides any Falco state engine enforcement.
std::unordered_set<std::string> m_base_syscalls_custom_set;
bool m_base_syscalls_repair;
// metrics configs
bool m_metrics_enabled;
std::string m_metrics_interval_str;
uint64_t m_metrics_interval;
bool m_metrics_stats_rule_enabled;
std::string m_metrics_output_file;
bool m_metrics_resource_utilization_enabled;
bool m_metrics_kernel_event_counters_enabled;
bool m_metrics_libbpf_stats_enabled;
bool m_metrics_convert_memory_to_mb;
bool m_metrics_include_empty_values;
std::unordered_set<std::string> m_base_syscalls;
std::vector<plugin_config> m_plugins;

View File

@@ -156,7 +156,7 @@ bool syscall_evt_drop_mgr::perform_actions(uint64_t now, scap_stats &delta, bool
case syscall_evt_drop_action::ALERT:
{
nlohmann::json output_fields;
std::map<std::string, std::string> output_fields;
output_fields["n_evts"] = std::to_string(delta.n_evts); /* Total number of kernel side events actively traced (not including events discarded due to simple consumer mode in eBPF case). */
output_fields["n_drops"] = std::to_string(delta.n_drops); /* Number of all kernel side event drops out of n_evts. */
output_fields["n_drops_buffer_total"] = std::to_string(delta.n_drops_buffer); /* Total number of kernel side drops due to full buffer, includes all categories below, likely higher than sum of syscall categories. */

View File

@@ -161,13 +161,8 @@ void falco_outputs::handle_msg(uint64_t ts,
falco_common::priority_type priority,
std::string &msg,
std::string &rule,
nlohmann::json &output_fields)
std::map<std::string, std::string> &output_fields)
{
if (!output_fields.is_object())
{
throw falco_exception("falco_outputs: output fields must be key-value maps");
}
falco_outputs::ctrl_msg cmsg = {};
cmsg.ts = ts;
cmsg.priority = priority;
@@ -196,7 +191,6 @@ void falco_outputs::handle_msg(uint64_t ts,
jmsg["time"] = iso8601evttime;
jmsg["output_fields"] = output_fields;
jmsg["hostname"] = m_hostname;
jmsg["source"] = s_internal_source;
cmsg.msg = jmsg.dump();
}
@@ -207,7 +201,7 @@ void falco_outputs::handle_msg(uint64_t ts,
sinsp_utils::ts_to_string(ts, &timestr, false, true);
cmsg.msg = timestr + ": " + falco_common::format_priority(priority) + " " + msg + " (";
for(auto &pair : output_fields.items())
for(auto &pair : output_fields)
{
if(first)
{
@@ -217,11 +211,7 @@ void falco_outputs::handle_msg(uint64_t ts,
{
cmsg.msg += " ";
}
if (!pair.value().is_primitive())
{
throw falco_exception("falco_outputs: output fields must be key-value maps");
}
cmsg.msg += pair.key() + "=" + pair.value().dump();
cmsg.msg += pair.first + "=" + pair.second;
}
cmsg.msg += ")";
}

View File

@@ -66,7 +66,7 @@ public:
falco_common::priority_type priority,
std::string &msg,
std::string &rule,
nlohmann::json &output_fields);
std::map<std::string, std::string> &output_fields);
/*!
\brief Sends a cleanup message to all outputs.

View File

@@ -21,7 +21,6 @@ limitations under the License.
#include "falco_common.h"
#include "gen_filter.h"
#include <nlohmann/json.hpp>
namespace falco
{
@@ -50,7 +49,7 @@ struct message
std::string msg;
std::string rule;
std::string source;
nlohmann::json fields;
std::map<std::string, std::string> fields;
std::set<std::string> tags;
};

View File

@@ -79,15 +79,9 @@ void falco::outputs::output_grpc::output(const message *msg)
// output fields
auto &fields = *grpc_res.mutable_output_fields();
for(const auto &kv : msg->fields.items())
for(const auto &kv : msg->fields)
{
if (!kv.value().is_primitive())
{
throw falco_exception("output_grpc: output fields must be key-value maps");
}
fields[kv.key()] = (kv.value().is_string())
? kv.value().get<std::string>()
: kv.value().dump();
fields[kv.first] = kv.second;
}
// hostname

View File

@@ -34,58 +34,15 @@ void falco::outputs::output_http::output(const message *msg)
} else {
slist1 = curl_slist_append(slist1, "Content-Type: text/plain");
}
res = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist1);
if(res == CURLE_OK)
{
res = curl_easy_setopt(curl, CURLOPT_URL, m_oc.options["url"].c_str());
}
if(res == CURLE_OK)
{
res = curl_easy_setopt(curl, CURLOPT_POSTFIELDS, msg->msg.c_str());
}
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist1);
curl_easy_setopt(curl, CURLOPT_URL, m_oc.options["url"].c_str());
curl_easy_setopt(curl, CURLOPT_POSTFIELDS, msg->msg.c_str());
curl_easy_setopt(curl, CURLOPT_USERAGENT, m_oc.options["user_agent"].c_str());
curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, -1L);
if(res == CURLE_OK)
{
res = curl_easy_setopt(curl, CURLOPT_USERAGENT, m_oc.options["user_agent"].c_str());
}
if(res == CURLE_OK)
{
res = curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, -1L);
}
if(res == CURLE_OK)
{
if(m_oc.options["insecure"] == std::string("true"))
{
res = curl_easy_setopt(curl,CURLOPT_SSL_VERIFYPEER, 0L);
if(res == CURLE_OK)
{
res = curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);
}
}
}
if(res == CURLE_OK)
{
if (!m_oc.options["ca_cert"].empty())
{
res = curl_easy_setopt(curl, CURLOPT_CAINFO, m_oc.options["ca_cert"].c_str());
}else if(!m_oc.options["ca_bundle"].empty())
{
res = curl_easy_setopt(curl, CURLOPT_CAINFO, m_oc.options["ca_bundle"].c_str());
}else{
res = curl_easy_setopt(curl, CURLOPT_CAPATH, m_oc.options["ca_path"].c_str());
}
}
if(res == CURLE_OK)
{
res = curl_easy_perform(curl);
}
res = curl_easy_perform(curl);
if(res != CURLE_OK)
{

View File

@@ -1,5 +1,5 @@
/*
Copyright (C) 2023 The Falco Authors.
Copyright (C) 2022 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ limitations under the License.
#include "stats_writer.h"
#include "logger.h"
#include "banned.h" // This raises a compilation error when certain functions are used
#include "config_falco.h"
#include "logger.h"
// note: ticker_t is an uint16_t, which is enough because we don't care about
// overflows here. Threads calling stats_writer::handle() will just
@@ -67,32 +67,18 @@ stats_writer::ticker_t stats_writer::get_ticker()
return s_timer.load(std::memory_order_relaxed);
}
stats_writer::stats_writer(
const std::shared_ptr<falco_outputs>& outputs,
const std::shared_ptr<const falco_configuration>& config)
stats_writer::stats_writer()
: m_initialized(false), m_total_samples(0)
{
m_config = config;
if (config->m_metrics_enabled)
{
if (!config->m_metrics_output_file.empty())
{
m_file_output.exceptions(std::ofstream::failbit | std::ofstream::badbit);
m_file_output.open(config->m_metrics_output_file, std::ios_base::app);
m_initialized = true;
}
if (config->m_metrics_stats_rule_enabled)
{
m_outputs = outputs;
m_initialized = true;
}
}
}
if (m_initialized)
{
m_worker = std::thread(&stats_writer::worker, this);
}
stats_writer::stats_writer(const std::string &filename)
: m_initialized(true), m_total_samples(0)
{
m_output.exceptions(std::ofstream::failbit | std::ofstream::badbit);
m_output.open(filename, std::ios_base::app);
m_worker = std::thread(&stats_writer::worker, this);
}
stats_writer::~stats_writer()
@@ -100,13 +86,15 @@ stats_writer::~stats_writer()
if (m_initialized)
{
stop_worker();
if (!m_config->m_metrics_output_file.empty())
{
m_file_output.close();
}
m_output.close();
}
}
bool stats_writer::has_output() const
{
return m_initialized;
}
void stats_writer::stop_worker()
{
stats_writer::msg msg;
@@ -131,11 +119,8 @@ void stats_writer::worker() noexcept
{
stats_writer::msg m;
nlohmann::json jmsg;
bool use_outputs = m_config->m_metrics_stats_rule_enabled;
bool use_file = !m_config->m_metrics_output_file.empty();
auto tick = stats_writer::get_ticker();
auto last_tick = tick;
auto first_tick = tick;
while(true)
{
@@ -145,32 +130,28 @@ void stats_writer::worker() noexcept
{
return;
}
// this helps waiting for the first tick
tick = stats_writer::get_ticker();
if (first_tick != tick)
{
if (last_tick != tick)
{
m_total_samples++;
}
last_tick = tick;
// update records for this event source
jmsg[m.source]["cur"]["events"] = m.stats.n_evts;
jmsg[m.source]["delta"]["events"] = m.delta.n_evts;
if (m.source == falco_common::syscall_source)
{
jmsg[m.source]["cur"]["drops"] = m.stats.n_drops;
jmsg[m.source]["cur"]["preemptions"] = m.stats.n_preemptions;
jmsg[m.source]["cur"]["drop_pct"] = (m.stats.n_evts == 0 ? 0.0 : (100.0*m.stats.n_drops/m.stats.n_evts));
jmsg[m.source]["delta"]["drops"] = m.delta.n_drops;
jmsg[m.source]["delta"]["preemptions"] = m.delta.n_preemptions;
jmsg[m.source]["delta"]["drop_pct"] = (m.delta.n_evts == 0 ? 0.0 : (100.0*m.delta.n_drops/m.delta.n_evts));
}
tick = stats_writer::get_ticker();
if (last_tick != tick)
{
m_total_samples++;
try
{
if (use_outputs)
{
std::string rule = "Falco internal: metrics snapshot";
std::string msg = "Falco metrics snapshot";
m_outputs->handle_msg(m.ts, falco_common::PRIORITY_INFORMATIONAL, msg, rule, m.output_fields);
}
if (use_file)
{
jmsg["sample"] = m_total_samples;
jmsg["output_fields"] = m.output_fields;
m_file_output << jmsg.dump() << std::endl;
}
jmsg["sample"] = m_total_samples;
m_output << jmsg.dump() << std::endl;
}
catch(const std::exception &e)
{
@@ -180,245 +161,39 @@ void stats_writer::worker() noexcept
}
}
stats_writer::collector::collector(const std::shared_ptr<stats_writer>& writer)
: m_writer(writer), m_last_tick(0), m_samples(0),
m_last_now(0), m_last_n_evts(0), m_last_n_drops(0), m_last_num_evts(0)
stats_writer::collector::collector(std::shared_ptr<stats_writer> writer)
: m_writer(writer), m_last_tick(0), m_samples(0)
{
}
/* Fills output_fields with the always-enabled "wrapper" snapshot metrics:
 * Falco version/uptime, host info, detected scap engine, and userspace
 * event counters (with an event rate derived from the previous sample).
 * Updates m_last_num_evts as a side effect. */
void stats_writer::collector::get_metrics_output_fields_wrapper(
	nlohmann::json& output_fields,
	const std::shared_ptr<sinsp>& inspector, uint64_t now,
	const std::string& src, uint64_t num_evts, double stats_snapshot_time_delta_sec)
{
	// all engines the inspector may currently be running with
	static const char* all_driver_engines[] = {
		BPF_ENGINE, KMOD_ENGINE, MODERN_BPF_ENGINE,
		SOURCE_PLUGIN_ENGINE, NODRIVER_ENGINE, UDIG_ENGINE, GVISOR_ENGINE };
	const scap_agent_info* agent_info = inspector->get_agent_info();
	const scap_machine_info* machine_info = inspector->get_machine_info();

	/* Wrapper fields useful for statistical analyses and attributions. Always enabled. */
	output_fields["evt.time"] = now; /* Some ETLs may prefer a consistent timestamp within output_fields. */
	output_fields["falco.version"] = FALCO_VERSION;
	output_fields["falco.start_ts"] = agent_info->start_ts_epoch;
	output_fields["falco.duration_sec"] = (uint64_t)((now - agent_info->start_ts_epoch) / ONE_SECOND_IN_NS);
	output_fields["falco.kernel_release"] = agent_info->uname_r;
	output_fields["falco.host_boot_ts"] = machine_info->boot_ts_epoch;
	output_fields["falco.hostname"] = machine_info->hostname; /* Explicitly add hostname to log msg in case hostname rule output field is disabled. */
	output_fields["falco.host_num_cpus"] = machine_info->num_cpus;
	output_fields["evt.source"] = src;
	// report the first engine that matches the current capture setup
	for (size_t i = 0; i < sizeof(all_driver_engines) / sizeof(const char*); i++)
	{
		if (inspector->check_current_engine(all_driver_engines[i]))
		{
			output_fields["scap.engine_name"] = all_driver_engines[i];
			break;
		}
	}

	/* Falco userspace event counters. Always enabled. */
	// rate is only meaningful from the second sample on (m_last_num_evts != 0)
	if (m_last_num_evts != 0 && stats_snapshot_time_delta_sec > 0)
	{
		/* Successfully processed userspace event rate. */
		output_fields["falco.evts_rate_sec"] = (double)((num_evts - m_last_num_evts) / (double)stats_snapshot_time_delta_sec);
	}
	output_fields["falco.num_evts"] = num_evts;
	output_fields["falco.num_evts_prev"] = m_last_num_evts;
	m_last_num_evts = num_evts;
}
/* Fills output_fields with the configurable snapshot metrics: resource
 * utilization counters plus, for the syscall source only, kernel-side
 * stats counters and (when available) libbpf stats. Updates the cached
 * m_last_n_evts / m_last_n_drops used to compute deltas and rates. */
void stats_writer::collector::get_metrics_output_fields_additional(
	nlohmann::json& output_fields,
	const std::shared_ptr<sinsp>& inspector,
	double stats_snapshot_time_delta_sec, const std::string& src)
{
	const scap_agent_info* agent_info = inspector->get_agent_info();
	const scap_machine_info* machine_info = inspector->get_machine_info();

#ifndef MINIMAL_BUILD
	/* Resource utilization, CPU and memory usage etc. */
	uint32_t nstats = 0;
	int32_t rc = 0;
	if (m_writer->m_config->m_metrics_resource_utilization_enabled)
	{
		const scap_stats_v2* utilization;
		auto buffer = inspector->get_sinsp_stats_v2_buffer();
		utilization = libsinsp::resource_utilization::get_resource_utilization(agent_info, buffer, &nstats, &rc);
		if (utilization && rc == 0 && nstats > 0)
		{
			for(uint32_t stat = 0; stat < nstats; stat++)
			{
				// every utilization metric is exposed with a "falco." prefix
				char metric_name[STATS_NAME_MAX] = "falco.";
				strncat(metric_name, utilization[stat].name, sizeof(metric_name) - strlen(metric_name) - 1);
				switch(utilization[stat].type)
				{
				case STATS_VALUE_TYPE_U64:
					// zero values are skipped unless explicitly requested
					if (utilization[stat].value.u64 == 0 && !m_writer->m_config->m_metrics_include_empty_values)
					{
						break;
					}
					if (m_writer->m_config->m_metrics_convert_memory_to_mb && strncmp(utilization[stat].name, "container_memory_used", 22) == 0) // exact str match
					{
						// container_memory_used is reported in bytes; convert to MB
						output_fields[metric_name] = (uint64_t)(utilization[stat].value.u64 / (double)1024 / (double)1024);
					}
					else
					{
						output_fields[metric_name] = utilization[stat].value.u64;
					}
					break;
				case STATS_VALUE_TYPE_U32:
					if (utilization[stat].value.u32 == 0 && !m_writer->m_config->m_metrics_include_empty_values)
					{
						break;
					}
					if (m_writer->m_config->m_metrics_convert_memory_to_mb && strncmp(utilization[stat].name, "memory_", 7) == 0) // prefix match
					{
						// memory_* counters appear to be KB-based here; converted by /1024 — TODO confirm units
						output_fields[metric_name] = (uint32_t)(utilization[stat].value.u32 / (double)1024);
					}
					else
					{
						output_fields[metric_name] = utilization[stat].value.u32;
					}
					break;
				case STATS_VALUE_TYPE_D:
					if (utilization[stat].value.d == 0 && !m_writer->m_config->m_metrics_include_empty_values)
					{
						break;
					}
					output_fields[metric_name] = utilization[stat].value.d;
					break;
				default:
					break;
				}
			}
		}
	}

	// the remaining metrics only apply to the syscall event source
	if (src != falco_common::syscall_source)
	{
		return;
	}

	/* Kernel side stats counters and libbpf stats if applicable. */
	nstats = 0;
	rc = 0;
	uint32_t flags = 0;
	if (m_writer->m_config->m_metrics_kernel_event_counters_enabled)
	{
		flags |= PPM_SCAP_STATS_KERNEL_COUNTERS;
	}
	// libbpf stats require a BPF-based engine and kernel BPF stats enabled
	if (m_writer->m_config->m_metrics_libbpf_stats_enabled && (inspector->check_current_engine(BPF_ENGINE) || inspector->check_current_engine(MODERN_BPF_ENGINE)) && (machine_info->flags & PPM_BPF_STATS_ENABLED))
	{
		flags |= PPM_SCAP_STATS_LIBBPF_STATS;
	}
	const scap_stats_v2* stats_v2 = inspector->get_capture_stats_v2(flags, &nstats, &rc);
	if (stats_v2 && nstats > 0 && rc == 0)
	{
		/* Cache n_evts and n_drops to derive n_drops_perc. */
		uint64_t n_evts = 0;
		uint64_t n_drops = 0;
		uint64_t n_evts_delta = 0;
		uint64_t n_drops_delta = 0;
		for(uint32_t stat = 0; stat < nstats; stat++)
		{
			// todo: as we expand scap_stats_v2 prefix may be pushed to scap or we may need to expand
			// functionality here for example if we add userspace syscall counters that should be prefixed w/ `falco.`
			char metric_name[STATS_NAME_MAX] = "scap.";
			strncat(metric_name, stats_v2[stat].name, sizeof(metric_name) - strlen(metric_name) - 1);
			switch(stats_v2[stat].type)
			{
			case STATS_VALUE_TYPE_U64:
				/* Always send high level n_evts related fields, even if zero. */
				if (strncmp(stats_v2[stat].name, "n_evts", 7) == 0) // exact not prefix match here
				{
					n_evts = stats_v2[stat].value.u64;
					output_fields[metric_name] = n_evts;
					output_fields["scap.n_evts_prev"] = m_last_n_evts;
					n_evts_delta = n_evts - m_last_n_evts;
					if (n_evts_delta != 0 && stats_snapshot_time_delta_sec > 0)
					{
						/* n_evts is total number of kernel side events. */
						output_fields["scap.evts_rate_sec"] = (double)(n_evts_delta / stats_snapshot_time_delta_sec);
					}
					else
					{
						output_fields["scap.evts_rate_sec"] = (double)(0);
					}
					m_last_n_evts = n_evts;
				}
				/* Always send high level n_drops related fields, even if zero. */
				else if (strncmp(stats_v2[stat].name, "n_drops", 8) == 0) // exact not prefix match here
				{
					n_drops = stats_v2[stat].value.u64;
					output_fields[metric_name] = n_drops;
					output_fields["scap.n_drops_prev"] = m_last_n_drops;
					n_drops_delta = n_drops - m_last_n_drops;
					if (n_drops_delta != 0 && stats_snapshot_time_delta_sec > 0)
					{
						/* n_drops is total number of kernel side event drops. */
						output_fields["scap.evts_drop_rate_sec"] = (double)(n_drops_delta / stats_snapshot_time_delta_sec);
					}
					else
					{
						output_fields["scap.evts_drop_rate_sec"] = (double)(0);
					}
					m_last_n_drops = n_drops;
				}
				// NOTE(review): for n_evts/n_drops the field was already set
				// above; zero values of other counters are skipped unless
				// include_empty_values is configured
				if (stats_v2[stat].value.u64 == 0 && !m_writer->m_config->m_metrics_include_empty_values)
				{
					break;
				}
				output_fields[metric_name] = stats_v2[stat].value.u64;
				break;
			default:
				break;
			}
		}
		/* n_drops_perc needs to be calculated outside the loop given no field ordering guarantees.
		 * Always send n_drops_perc, even if zero. */
		if(n_evts_delta > 0)
		{
			output_fields["scap.n_drops_perc"] = (double)((100.0 * n_drops_delta) / n_evts_delta);
		}
		else
		{
			output_fields["scap.n_drops_perc"] = (double)(0);
		}
	}
#endif
}
void stats_writer::collector::collect(const std::shared_ptr<sinsp>& inspector, const std::string &src, uint64_t num_evts)
void stats_writer::collector::collect(std::shared_ptr<sinsp> inspector, const std::string& src)
{
// just skip if no output is configured
if (m_writer->has_output())
{
/* Collect stats / metrics once per ticker period. */
// collect stats once per each ticker period
auto tick = stats_writer::get_ticker();
if (tick != m_last_tick)
{
m_last_tick = tick;
auto now = std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
uint64_t stats_snapshot_time_delta = 0;
if (m_last_now != 0)
{
stats_snapshot_time_delta = now - m_last_now;
}
m_last_now = now;
double stats_snapshot_time_delta_sec = (stats_snapshot_time_delta / (double)ONE_SECOND_IN_NS);
/* Get respective metrics output_fields. */
nlohmann::json output_fields;
get_metrics_output_fields_wrapper(output_fields, inspector, now, src, num_evts, stats_snapshot_time_delta_sec);
get_metrics_output_fields_additional(output_fields, inspector, stats_snapshot_time_delta_sec, src);
/* Send message in the queue */
stats_writer::msg msg;
msg.ts = now;
msg.stop = false;
msg.source = src;
msg.output_fields = std::move(output_fields);
inspector->get_capture_stats(&msg.stats);
m_samples++;
if(m_samples == 1)
{
msg.delta = msg.stats;
}
else
{
msg.delta.n_evts = msg.stats.n_evts - m_last_stats.n_evts;
msg.delta.n_drops = msg.stats.n_drops - m_last_stats.n_drops;
msg.delta.n_preemptions = msg.stats.n_preemptions - m_last_stats.n_preemptions;
}
m_last_tick = tick;
m_last_stats = msg.stats;
m_writer->push(msg);
}
}

View File

@@ -1,5 +1,5 @@
/*
Copyright (C) 2023 The Falco Authors.
Copyright (C) 2022 The Falco Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,13 +18,11 @@ limitations under the License.
#include <fstream>
#include <string>
#include <unordered_map>
#include <map>
#include <sinsp.h>
#include "tbb/concurrent_queue.h"
#include "falco_outputs.h"
#include "configuration.h"
/*!
\brief Writes stats samples collected from inspectors into a given output.
@@ -52,34 +50,19 @@ public:
/*!
\brief Initializes the collector with the given writer
*/
explicit collector(const std::shared_ptr<stats_writer>& writer);
explicit collector(std::shared_ptr<stats_writer> writer);
/*!
\brief Collects one stats sample from an inspector
and for the given event source name
*/
void collect(const std::shared_ptr<sinsp>& inspector, const std::string& src, uint64_t num_evts);
void collect(std::shared_ptr<sinsp> inspector, const std::string& src);
private:
/*!
\brief Collect snapshot metrics wrapper fields as internal rule formatted output fields.
*/
void get_metrics_output_fields_wrapper(nlohmann::json& output_fields, const std::shared_ptr<sinsp>& inspector, uint64_t now, const std::string& src, uint64_t num_evts, double stats_snapshot_time_delta_sec);
/*!
\brief Collect snapshot metrics syscalls related metrics as internal rule formatted output fields.
*/
void get_metrics_output_fields_additional(nlohmann::json& output_fields, const std::shared_ptr<sinsp>& inspector, double stats_snapshot_time_delta_sec, const std::string& src);
std::shared_ptr<stats_writer> m_writer;
stats_writer::ticker_t m_last_tick;
uint64_t m_samples;
scap_stats m_last_stats;
uint64_t m_last_now;
uint64_t m_last_n_evts;
uint64_t m_last_n_drops;
uint64_t m_last_num_evts;
};
stats_writer(const stats_writer&) = delete;
@@ -93,18 +76,21 @@ public:
~stats_writer();
/*!
\brief Initializes a writer.
\brief Initializes a writer without any output.
With this constructor, has_output() always returns false
*/
stats_writer(const std::shared_ptr<falco_outputs>& outputs,
const std::shared_ptr<const falco_configuration>& config);
stats_writer();
/*!
\brief Returns true if the writer is configured with a valid output.
\brief Initializes a writer that prints to a file at the given filename.
With this constructor, has_output() always returns true
*/
inline bool has_output() const
{
return m_initialized;
}
explicit stats_writer(const std::string &filename);
/*!
\brief Returns true if the writer is configured with a valid output
*/
inline bool has_output() const;
/*!
\brief Initializes the ticker with a given interval period defined
@@ -123,16 +109,16 @@ public:
private:
struct msg
{
msg(): stop(false), ts(0) {}
msg(): stop(false) {}
msg(msg&&) = default;
msg& operator = (msg&&) = default;
msg(const msg&) = default;
msg& operator = (const msg&) = default;
bool stop;
uint64_t ts;
scap_stats delta;
scap_stats stats;
std::string source;
nlohmann::json output_fields;
};
void worker() noexcept;
@@ -142,10 +128,8 @@ private:
bool m_initialized;
uint64_t m_total_samples;
std::thread m_worker;
std::ofstream m_file_output;
tbb::concurrent_bounded_queue<stats_writer::msg> m_queue;
std::shared_ptr<falco_outputs> m_outputs;
std::shared_ptr<const falco_configuration> m_config;
std::ofstream m_output;
tbb::concurrent_bounded_queue<stats_writer::msg> m_queue;
// note: in this way, only collectors can push into the queue
friend class stats_writer::collector;

View File

@@ -26,12 +26,7 @@ limitations under the License.
class falco_webserver
{
public:
falco_webserver() = default;
virtual ~falco_webserver();
falco_webserver(falco_webserver&&) = default;
falco_webserver& operator = (falco_webserver&&) = default;
falco_webserver(const falco_webserver&) = delete;
falco_webserver& operator = (const falco_webserver&) = delete;
virtual void start(
const std::shared_ptr<sinsp>& inspector,
uint32_t threadiness,