mirror of
https://github.com/falcosecurity/falco.git
synced 2026-03-20 19:52:08 +00:00
Compare commits
105 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bb81133201 | ||
|
|
46d5266ac8 | ||
|
|
3414ca5361 | ||
|
|
0eb170cf5f | ||
|
|
21fa6e9505 | ||
|
|
b82cbb1b59 | ||
|
|
d033868ab9 | ||
|
|
7c98d0047c | ||
|
|
c7d9b6ee7f | ||
|
|
8273e57598 | ||
|
|
b0562242e8 | ||
|
|
ca66b84e5a | ||
|
|
7c9ec9fc17 | ||
|
|
9ea43c2663 | ||
|
|
4d55847bd4 | ||
|
|
a684bec007 | ||
|
|
812aa9b566 | ||
|
|
e0f8b81692 | ||
|
|
874809351f | ||
|
|
4527228ef8 | ||
|
|
e684c95e23 | ||
|
|
2390ca447a | ||
|
|
af0e6da375 | ||
|
|
84e7d3f18f | ||
|
|
2a8c0e8bb7 | ||
|
|
f28688551c | ||
|
|
b12d37a3b8 | ||
|
|
5e027c7fe2 | ||
|
|
efbe887d6e | ||
|
|
7dcbeb1f44 | ||
|
|
93667f2d3e | ||
|
|
b5b1763d09 | ||
|
|
d6690313a0 | ||
|
|
98ce88f7ef | ||
|
|
9ff8099501 | ||
|
|
7db4778f55 | ||
|
|
7f761ade4b | ||
|
|
84257912e0 | ||
|
|
9bc942c654 | ||
|
|
8216b435cb | ||
|
|
78f710c706 | ||
|
|
1dd97c1b6f | ||
|
|
3ef5716fa2 | ||
|
|
64102078c7 | ||
|
|
9703853da8 | ||
|
|
96403fa275 | ||
|
|
acd5422b55 | ||
|
|
099c79ddde | ||
|
|
0f24448d18 | ||
|
|
1b63ad1aed | ||
|
|
b268d4d6c3 | ||
|
|
684a5d85ff | ||
|
|
58cea0c5e7 | ||
|
|
38ebc61808 | ||
|
|
535db19991 | ||
|
|
abe46a19a0 | ||
|
|
96fc8d1a27 | ||
|
|
ad82f66be3 | ||
|
|
c60fac9e34 | ||
|
|
35dc315390 | ||
|
|
62c995f309 | ||
|
|
3432551295 | ||
|
|
09e1604fe0 | ||
|
|
da7279da1d | ||
|
|
05f5aa2af3 | ||
|
|
53a1be66b0 | ||
|
|
f7b572bea5 | ||
|
|
ed59f33f3f | ||
|
|
b41acdff1c | ||
|
|
4acc089b1f | ||
|
|
591d4e500e | ||
|
|
79bdcb030b | ||
|
|
f4dba52ee2 | ||
|
|
bfc0021cdd | ||
|
|
e616f79bac | ||
|
|
4006452b1f | ||
|
|
59831b077e | ||
|
|
0d95beb1e3 | ||
|
|
2e27d5dded | ||
|
|
24f64cab33 | ||
|
|
0f36ff030e | ||
|
|
601ec5cf85 | ||
|
|
f237f277e7 | ||
|
|
2226a1508c | ||
|
|
6f64c21ad9 | ||
|
|
fd6a1d0d05 | ||
|
|
87438ec723 | ||
|
|
d0be6d96d0 | ||
|
|
aefd67eb8a | ||
|
|
6e94c37399 | ||
|
|
d3c22d3d0c | ||
|
|
366975bc3b | ||
|
|
f9692fcb82 | ||
|
|
e95ab26f33 | ||
|
|
23a611b343 | ||
|
|
2658d65373 | ||
|
|
600501e141 | ||
|
|
0df18fd786 | ||
|
|
c1da6d21b9 | ||
|
|
c4a73bdd8e | ||
|
|
28a339e4bc | ||
|
|
65a168ab5a | ||
|
|
46425b392c | ||
|
|
8b0d22dee9 | ||
|
|
a7e04fe6e6 |
@@ -13,7 +13,7 @@ jobs:
|
||||
command: apk update
|
||||
- run:
|
||||
name: Install build dependencies
|
||||
command: apk add g++ gcc cmake cmake make ncurses-dev git bash perl linux-headers autoconf automake m4 libtool elfutils-dev libelf-static patch binutils
|
||||
command: apk add g++ gcc cmake cmake make git bash perl linux-headers autoconf automake m4 libtool elfutils-dev libelf-static patch binutils
|
||||
- run:
|
||||
name: Prepare project
|
||||
command: |
|
||||
@@ -60,7 +60,7 @@ jobs:
|
||||
command: apt update -y
|
||||
- run:
|
||||
name: Install dependencies
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install libjq-dev libncurses-dev libyaml-cpp-dev libelf-dev cmake build-essential git -y
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install libjq-dev libyaml-cpp-dev libelf-dev cmake build-essential git -y
|
||||
- run:
|
||||
name: Prepare project
|
||||
command: |
|
||||
@@ -92,7 +92,7 @@ jobs:
|
||||
command: apt update -y
|
||||
- run:
|
||||
name: Install dependencies
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install libssl-dev libyaml-dev libncurses-dev libc-ares-dev libprotobuf-dev protobuf-compiler libjq-dev libyaml-cpp-dev libgrpc++-dev protobuf-compiler-grpc rpm libelf-dev cmake build-essential libcurl4-openssl-dev linux-headers-generic clang llvm git -y
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install libssl-dev libyaml-dev libc-ares-dev libprotobuf-dev protobuf-compiler libjq-dev libyaml-cpp-dev libgrpc++-dev protobuf-compiler-grpc rpm libelf-dev cmake build-essential libcurl4-openssl-dev linux-headers-generic clang llvm git -y
|
||||
- run:
|
||||
name: Prepare project
|
||||
command: |
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
command: apt update -y
|
||||
- run:
|
||||
name: Install dependencies
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install libssl-dev libyaml-dev libncurses-dev libc-ares-dev libprotobuf-dev protobuf-compiler libjq-dev libyaml-cpp-dev libgrpc++-dev protobuf-compiler-grpc rpm libelf-dev cmake build-essential libcurl4-openssl-dev linux-headers-generic clang llvm git -y
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install libssl-dev libyaml-dev libc-ares-dev libprotobuf-dev protobuf-compiler libjq-dev libyaml-cpp-dev libgrpc++-dev protobuf-compiler-grpc rpm libelf-dev cmake build-essential libcurl4-openssl-dev linux-headers-generic clang llvm git -y
|
||||
- run:
|
||||
name: Prepare project
|
||||
command: |
|
||||
@@ -156,7 +156,7 @@ jobs:
|
||||
command: apt update -y
|
||||
- run:
|
||||
name: Install dependencies
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install cmake build-essential clang llvm git linux-headers-generic libncurses-dev pkg-config autoconf libtool libelf-dev -y
|
||||
command: DEBIAN_FRONTEND=noninteractive apt install cmake build-essential clang llvm git linux-headers-generic pkg-config autoconf libtool libelf-dev -y
|
||||
- run:
|
||||
name: Prepare project
|
||||
command: |
|
||||
@@ -188,7 +188,7 @@ jobs:
|
||||
command: dnf update -y
|
||||
- run:
|
||||
name: Install dependencies
|
||||
command: dnf install gcc gcc-c++ git make cmake autoconf automake pkg-config patch ncurses-devel libtool elfutils-libelf-devel diffutils kernel-devel kernel-headers kernel-core clang llvm which -y
|
||||
command: dnf install gcc gcc-c++ git make cmake autoconf automake pkg-config patch libtool elfutils-libelf-devel diffutils kernel-devel kernel-headers kernel-core clang llvm which -y
|
||||
- run:
|
||||
name: Prepare project
|
||||
command: |
|
||||
@@ -403,7 +403,7 @@ jobs:
|
||||
name: Setup
|
||||
command: |
|
||||
apt update -y
|
||||
apt-get install apt-utils bzip2 gpg python python-pip -y
|
||||
apt-get install apt-utils bzip2 gpg python python3-pip -y
|
||||
pip install awscli
|
||||
echo $GPG_KEY | base64 -d | gpg --import
|
||||
- run:
|
||||
@@ -517,7 +517,7 @@ jobs:
|
||||
name: Setup
|
||||
command: |
|
||||
apt update -y
|
||||
apt-get install apt-utils bzip2 gpg python python-pip -y
|
||||
apt-get install apt-utils bzip2 gpg python python3-pip -y
|
||||
pip install awscli
|
||||
echo $GPG_KEY | base64 -d | gpg --import
|
||||
- run:
|
||||
|
||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,6 +1,6 @@
|
||||
<!-- Thanks for sending a pull request! Here are some tips for you:
|
||||
|
||||
1. If this is your first time, please read our contributor guidelines in the [CONTRIBUTING.md](CONTRIBUTING.md) file and learn how to compile Falco from source [here](https://falco.org/docs/source).
|
||||
1. If this is your first time, please read our contributor guidelines in the [CONTRIBUTING.md](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md) file and learn how to compile Falco from source [here](https://falco.org/docs/source).
|
||||
2. Please label this pull request according to what type of issue you are addressing.
|
||||
3. Please add a release note!
|
||||
4. If the PR is unfinished while opening it specify a wip in the title before the actual title, for example, "wip: my awesome feature"
|
||||
|
||||
30
ADOPTERS.md
30
ADOPTERS.md
@@ -1,5 +1,21 @@
|
||||
# Adopters
|
||||
|
||||
Known end users with notable contributions to the project include:
|
||||
* AWS
|
||||
* IBM
|
||||
* Red Hat
|
||||
|
||||
Falco is being used by numerous other companies, both large and small, to build higher layer products and services. The list includes but is not limited to:
|
||||
* Equinix Metal
|
||||
* IEEE
|
||||
* Lowes
|
||||
* Reckrut
|
||||
* Yellow Pepper
|
||||
* CTx
|
||||
* Utikal
|
||||
* Discrete Events
|
||||
* Agritech Infra
|
||||
|
||||
This is a list of production adopters of Falco (in alphabetical order):
|
||||
|
||||
* [ASAPP](https://www.asapp.com/) - ASAPP is pushing the boundaries of fundamental artificial intelligence research. We apply our research into AI-Native® products that make organizations, in the customer experience industry, highly productive, efficient, and effective—by augmenting human activity and automating workflows. We constantly monitor our workloads against different hazards and Falco helps us extend our threat monitoring boundaries.
|
||||
@@ -17,11 +33,19 @@ This is a list of production adopters of Falco (in alphabetical order):
|
||||
* [Logz.io](https://logz.io/) - Logz.io is a cloud observability platform for modern engineering teams. The Logz.io platform consists of three products — Log Management, Infrastructure Monitoring, and Cloud SIEM — that work together to unify the jobs of monitoring, troubleshooting, and security. We empower engineers to deliver better software by offering the world's most popular open source observability tools — the ELK Stack, Grafana, and Jaeger — in a single, easy to use, and powerful platform purpose-built for monitoring distributed cloud environments. Cloud SIEM supports data from multiple sources, including Falco's alerts, and offers useful rules and dashboards content to visualize and manage incidents across your systems in a unified UI.
|
||||
* https://logz.io/blog/k8s-security-with-falco-and-cloud-siem/
|
||||
|
||||
* [MathWorks](https://mathworks.com) - MathWorks develops mathematical computing software for engineers and scientists. MathWorks uses Falco for Kubernetes threat detection, unexpected application behavior, and maps Falco rules to their cloud infrastructure's security kill chain model. MathWorks presented their Falco use case at [KubeCon + CloudNativeCon North America 2020](https://www.youtube.com/watch?v=L-5RYBTV010).
|
||||
|
||||
* [Pocteo](https://pocteo.co) - Pocteo helps with Kubernetes adoption in enterprises by providing a variety of services such as training, consulting, auditing and mentoring. We build CI/CD pipelines the GitOps way, as well as design and run k8s clusters. Pocteo uses Falco as a runtime monitoring system to secure clients' workloads against suspicious behavior and ensure k8s pods immutability. We also use Falco to collect, process and act on security events through a response engine and serverless functions.
|
||||
|
||||
* [Preferral](https://www.preferral.com) - Preferral is a HIPAA-compliant platform for Referral Management and Online Referral Forms. Preferral streamlines the referral process for patients, specialists and their referral partners. By automating the referral process, referring practices spend less time on the phone, manual efforts are eliminated, and patients get the right care from the right specialist. Preferral leverages Falco to provide a Host Intrusion Detection System to meet their HIPPA compliance requirements.
|
||||
* https://hipaa.preferral.com/01-preferral_hipaa_compliance/
|
||||
|
||||
* [Qonto](https://qonto.com) - Qonto is a modern banking service for SMEs and freelancers. Qonto provides a fully featured business account with a simplified accounting flow. Falco is used by our SecOps team to detect suspicious behaviors in our clusters.
|
||||
|
||||
* [Replicated](https://www.replicated.com/) - Replicated is the modern way to ship on-prem software. Replicated gives software vendors a container-based platform for easily deploying cloud native applications inside customers' environments to provide greater security and control. Replicated uses Falco as runtime security to detect threats in the Kubernetes clusters which host our critical SaaS services.
|
||||
|
||||
* [Secureworks](https://www.secureworks.com/) - Secureworks is a leading worldwide cybersecurity company with a cloud-native security product that combines the power of human intellect with security analytics to unify detection and response across cloud, network, and endpoint environments for improved security operations and outcomes. Our Taegis XDR platform and detection system processes petabytes of security relevant data to expose active threats amongst the billions of daily events from our customers. We are proud to protect our platform’s Kubernetes deployments, as well as help our customers protect their own Linux and container environments, using Falco.
|
||||
|
||||
* [Shopify](https://www.shopify.com) - Shopify is the leading multi-channel commerce platform. Merchants use Shopify to design, set up, and manage their stores across multiple sales channels, including mobile, web, social media, marketplaces, brick-and-mortar locations, and pop-up shops. The platform also provides merchants with a powerful back-office and a single view of their business, from payments to shipping. The Shopify platform was engineered for reliability and scale, making enterprise-level technology available to businesses of all sizes. Shopify uses Falco to complement its Host and Network Intrusion Detection Systems.
|
||||
|
||||
* [Sight Machine](https://www.sightmachine.com) - Sight Machine is the category leader for manufacturing analytics and used by Global 500 companies to make better, faster decisions about their operations. Sight Machine uses Falco to help enforce SOC2 compliance as well as a tool for real time security monitoring and alerting in Kubernetes.
|
||||
@@ -34,4 +58,10 @@ This is a list of production adopters of Falco (in alphabetical order):
|
||||
|
||||
* [Shapesecurity/F5](https://www.shapesecurity.com/) Shapesecurity defends against application fraud attacks like Account Take Over, Credential Stuffing, Fake Accounts, etc. Required by FedRamp certification, we needed to find a FIM solution to help monitor and protect our Kubernetes clusters. Traditional FIM solutions were not scalable and not working for our environment, but with Falco we found the solution we needed. Falco's detection capabilities have helped us identify anomalous behaviour within our clusters. We leverage [Sidekick](https://github.com/falcosecurity/charts/tree/master/falcosidekick) to send Falco alerts to a PubSub which in turn publishes those alerts to our SIEM (SumoLogic)
|
||||
|
||||
* [Yahoo! JAPAN](https://www.yahoo.co.jp/) Yahoo! JAPAN is a leading company of internet in Japan. We build an AI Platform in our private cloud and provide it to scientists in our company. AI Platform is a multi-tenant Kubernetes environment and more flexible, faster, more efficient Machine Learning environment. Falco is used to detect unauthorized commands and malicious access and our AI Platform is monitored and alerted by Falco.
|
||||
|
||||
* [Sysdig](https://www.sysdig.com/) Sysdig originally created Falco in 2016 to detect unexpected or suspicious activity using a rules engine on top of the data that comes from the sysdig kernel system call probe. Sysdig provides tooling to help with vulnerability management, compliance, detection, incident response and forensics in Cloud-native environments. Sysdig Secure has extended Falco to include: a rule library, the ability to update macros, lists & rules via the user interface and API, automated tuning of rules, and rule creation based on profiling known system behavior. On top of the basic Falco rules, Sysdig Secure implements the concept of a "Security policy" that can comprise several rules which are evaluated for a user-defined infrastructure scope like Kubernetes namespaces, OpenShift clusters, deployment workload, cloud regions etc.
|
||||
|
||||
## Adding a name
|
||||
|
||||
If you would like to add your name to this file, submit a pull request with your change.
|
||||
|
||||
130
CHANGELOG.md
130
CHANGELOG.md
@@ -1,5 +1,135 @@
|
||||
# Change Log
|
||||
|
||||
## v0.30.0
|
||||
|
||||
Released on 2021-10-01
|
||||
|
||||
### Major Changes
|
||||
|
||||
* new: add `--k8s-node` command-line options, which allows filtering by a node when requesting metadata of pods to the K8s API server [[#1671](https://github.com/falcosecurity/falco/pull/1671)] - [@leogr](https://github.com/leogr)
|
||||
* new(outputs): expose rule tags and event source in gRPC and json outputs [[#1714](https://github.com/falcosecurity/falco/pull/1714)] - [@jasondellaluce](https://github.com/jasondellaluce)
|
||||
* new(userspace/falco): add customizable metadata fetching params [[#1667](https://github.com/falcosecurity/falco/pull/1667)] - [@zuc](https://github.com/zuc)
|
||||
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* update: bump driver version to 3aa7a83bf7b9e6229a3824e3fd1f4452d1e95cb4 [[#1744](https://github.com/falcosecurity/falco/pull/1744)] - [@zuc](https://github.com/zuc)
|
||||
* docs: clarify that previous Falco drivers will remain available at https://download.falco.org and no automated cleanup is run anymore [[#1738](https://github.com/falcosecurity/falco/pull/1738)] - [@leodido](https://github.com/leodido)
|
||||
* update(outputs): add configuration option for tags in json outputs [[#1733](https://github.com/falcosecurity/falco/pull/1733)] - [@jasondellaluce](https://github.com/jasondellaluce)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* fix(scripts): correct standard output redirection in systemd config (DEB and RPM packages) [[#1697](https://github.com/falcosecurity/falco/pull/1697)] - [@chirabino](https://github.com/chirabino)
|
||||
* fix(scripts): correct lookup order when trying multiple `gcc` versions in the `falco-driver-loader` script [[#1716](https://github.com/falcosecurity/falco/pull/1716)] - [@Spartan-65](https://github.com/Spartan-65)
|
||||
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* rule(list miner_domains): add new miner domains [[#1729](https://github.com/falcosecurity/falco/pull/1729)] - [@AlbertoPellitteri](https://github.com/AlbertoPellitteri)
|
||||
* rule(list https_miner_domains): add new miner domains [[#1729](https://github.com/falcosecurity/falco/pull/1729)] - [@AlbertoPellitteri](https://github.com/AlbertoPellitteri)
|
||||
|
||||
|
||||
### Non user-facing changes
|
||||
|
||||
* add Qonto as adopter [[#1717](https://github.com/falcosecurity/falco/pull/1717)] - [@Issif](https://github.com/Issif)
|
||||
* docs(proposals): proposal for a libs plugin system [[#1637](https://github.com/falcosecurity/falco/pull/1637)] - [@ldegio](https://github.com/ldegio)
|
||||
* build: remove unused `ncurses` dependency [[#1658](https://github.com/falcosecurity/falco/pull/1658)] - [@leogr](https://github.com/leogr)
|
||||
* build(.circleci): use new Debian 11 package names for python-pip [[#1712](https://github.com/falcosecurity/falco/pull/1712)] - [@zuc](https://github.com/zuc)
|
||||
* build(docker): adding libssl-dev, upstream image reference pinned to `debian:buster` [[#1719](https://github.com/falcosecurity/falco/pull/1719)] - [@michalschott](https://github.com/michalschott)
|
||||
* fix(test): avoid output_strictly_contains failures [[#1724](https://github.com/falcosecurity/falco/pull/1724)] - [@jasondellaluce](https://github.com/jasondellaluce)
|
||||
* Remove duplicate allowed ecr registry rule [[#1725](https://github.com/falcosecurity/falco/pull/1725)] - [@TomKeyte](https://github.com/TomKeyte)
|
||||
* docs(RELEASE.md): switch to 3 releases per year [[#1711](https://github.com/falcosecurity/falco/pull/1711)] - [@leogr](https://github.com/leogr)
|
||||
|
||||
|
||||
## v0.29.1
|
||||
|
||||
Released on 2021-06-29
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* update: bump the Falco engine version to version 9 [[#1675](https://github.com/falcosecurity/falco/pull/1675)] - [@leodido](https://github.com/leodido)
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* rule(list user_known_userfaultfd_processes): list to exclude processes known to use userfaultfd syscall [[#1675](https://github.com/falcosecurity/falco/pull/1675)] - [@leodido](https://github.com/leodido)
|
||||
* rule(macro consider_userfaultfd_activities): macro to gate the "Unprivileged Delegation of Page Faults Handling to a Userspace Process" rule [[#1675](https://github.com/falcosecurity/falco/pull/1675)] - [@leodido](https://github.com/leodido)
|
||||
* rule(Unprivileged Delegation of Page Faults Handling to a Userspace Process): new rule to detect successful unprivileged userfaultfd syscalls [[#1675](https://github.com/falcosecurity/falco/pull/1675)] - [@leodido](https://github.com/leodido)
|
||||
* rule(Linux Kernel Module Injection Detected): adding container info to the output of the rule [[#1675](https://github.com/falcosecurity/falco/pull/1675)] - [@leodido](https://github.com/leodido)
|
||||
|
||||
### Non user-facing changes
|
||||
|
||||
* docs(release.md): update steps [[#1684](https://github.com/falcosecurity/falco/pull/1684)] - [@maxgio92](https://github.com/maxgio92)
|
||||
|
||||
|
||||
## v0.29.0
|
||||
|
||||
Released on 2021-06-21
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* update: driver version is 17f5df52a7d9ed6bb12d3b1768460def8439936d now [[#1669](https://github.com/falcosecurity/falco/pull/1669)] - [@leogr](https://github.com/leogr)
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* rule(list miner_domains): add rx.unmineable.com for anti-miner detection [[#1676](https://github.com/falcosecurity/falco/pull/1676)] - [@fntlnz](https://github.com/fntlnz)
|
||||
* rule(Change thread namespace and Set Setuid or Setgid bit): disable by default [[#1632](https://github.com/falcosecurity/falco/pull/1632)] - [@Kaizhe](https://github.com/Kaizhe)
|
||||
* rule(list known_sa_list): add namespace-controller, statefulset-controller, disruption-controller, job-controller, horizontal-pod-autoscaler and persistent-volume-binder as allowed service accounts in the kube-system namespace [[#1659](https://github.com/falcosecurity/falco/pull/1659)] - [@sboschman](https://github.com/sboschman)
|
||||
* rule(Non sudo setuid): check user id as well in case user name info is not available [[#1665](https://github.com/falcosecurity/falco/pull/1665)] - [@Kaizhe](https://github.com/Kaizhe)
|
||||
* rule(Debugfs Launched in Privileged Container): fix typo in description [[#1657](https://github.com/falcosecurity/falco/pull/1657)] - [@Kaizhe](https://github.com/Kaizhe)
|
||||
|
||||
### Non user-facing changes
|
||||
|
||||
* Fix link to CONTRIBUTING.md in the Pull Request Template [[#1679](https://github.com/falcosecurity/falco/pull/1679)] - [@tspearconquest](https://github.com/tspearconquest)
|
||||
* fetch libs and drivers from the new repo [[#1552](https://github.com/falcosecurity/falco/pull/1552)] - [@leogr](https://github.com/leogr)
|
||||
* build(test): upgrade urllib3 to 1.26.5 [[#1666](https://github.com/falcosecurity/falco/pull/1666)] - [@leogr](https://github.com/leogr)
|
||||
* revert: add notes for 0.28.2 release [[#1663](https://github.com/falcosecurity/falco/pull/1663)] - [@maxgio92](https://github.com/maxgio92)
|
||||
* changelog: add notes for 0.28.2 release [[#1661](https://github.com/falcosecurity/falco/pull/1661)] - [@maxgio92](https://github.com/maxgio92)
|
||||
* docs(release.md): add blog announcement to post-release tasks [[#1652](https://github.com/falcosecurity/falco/pull/1652)] - [@maxgio92](https://github.com/maxgio92)
|
||||
* add Yahoo!Japan as an adopter [[#1651](https://github.com/falcosecurity/falco/pull/1651)] - [@ukitazume](https://github.com/ukitazume)
|
||||
* Add Replicated to adopters [[#1649](https://github.com/falcosecurity/falco/pull/1649)] - [@diamonwiggins](https://github.com/diamonwiggins)
|
||||
* docs(proposals): fix libs contribution name [[#1641](https://github.com/falcosecurity/falco/pull/1641)] - [@leodido](https://github.com/leodido)
|
||||
|
||||
|
||||
## v0.28.1
|
||||
|
||||
Released on 2021-05-07
|
||||
|
||||
### Major Changes
|
||||
|
||||
* new: `--support` output now includes info about the Falco engine version [[#1581](https://github.com/falcosecurity/falco/pull/1581)] - [@mstemm](https://github.com/mstemm)
|
||||
* new: Falco outputs an alert in the unlikely situation it's receiving too many consecutive timeouts without an event [[#1622](https://github.com/falcosecurity/falco/pull/1622)] - [@leodido](https://github.com/leodido)
|
||||
* new: configuration field `syscall_event_timeouts.max_consecutive` to configure after how many consecutive timeouts without an event Falco must alert [[#1622](https://github.com/falcosecurity/falco/pull/1622)] - [@leodido](https://github.com/leodido)
|
||||
|
||||
### Minor Changes
|
||||
|
||||
* build: enforcing hardening flags by default [[#1604](https://github.com/falcosecurity/falco/pull/1604)] - [@leogr](https://github.com/leogr)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* fix: do not stop the webserver for k8s audit logs when invalid data is coming in the event to be processed [[#1617](https://github.com/falcosecurity/falco/pull/1617)] - [@fntlnz](https://github.com/fntlnz)
|
||||
|
||||
### Rule Changes
|
||||
|
||||
* rule(macro: allowed_aws_ecr_registry_root_for_eks): new macro for AWS EKS images hosted on ECR to use in rule: Launch Privileged Container [[#1640](https://github.com/falcosecurity/falco/pull/1640)] - [@ismailyenigul](https://github.com/ismailyenigul)
|
||||
* rule(macro: aws_eks_core_images): new macro for AWS EKS images hosted on ECR to use in rule: Launch Privileged Container [[#1640](https://github.com/falcosecurity/falco/pull/1640)] - [@ismailyenigul](https://github.com/ismailyenigul)
|
||||
* rule(macro: aws_eks_image_sensitive_mount): new macro for AWS EKS images hosted on ECR to use in rule: Launch Privileged Container [[#1640](https://github.com/falcosecurity/falco/pull/1640)] - [@ismailyenigul](https://github.com/ismailyenigul)
|
||||
* rule(list `falco_privileged_images`): remove deprecated Falco's OCI image repositories [[#1634](https://github.com/falcosecurity/falco/pull/1634)] - [@maxgio92](https://github.com/maxgio92)
|
||||
* rule(list `falco_sensitive_mount_images`): remove deprecated Falco's OCI image repositories [[#1634](https://github.com/falcosecurity/falco/pull/1634)] - [@maxgio92](https://github.com/maxgio92)
|
||||
* rule(macro `k8s_containers`): remove deprecated Falco's OCI image repositories [[#1634](https://github.com/falcosecurity/falco/pull/1634)] - [@maxgio92](https://github.com/maxgio92)
|
||||
* rule(macro: python_running_sdchecks): macro removed [[#1620](https://github.com/falcosecurity/falco/pull/1620)] - [@leogr](https://github.com/leogr)
|
||||
* rule(Change thread namespace): remove python_running_sdchecks exception [[#1620](https://github.com/falcosecurity/falco/pull/1620)] - [@leogr](https://github.com/leogr)
|
||||
|
||||
### Non user-facing changes
|
||||
|
||||
* urelease/docs: fix link and small refactor in the text [[#1636](https://github.com/falcosecurity/falco/pull/1636)] - [@cpanato](https://github.com/cpanato)
|
||||
* Add Secureworks to adopters [[#1629](https://github.com/falcosecurity/falco/pull/1629)] - [@dwindsor-scwx](https://github.com/dwindsor-scwx)
|
||||
* regression test for malformed k8s audit input (FAL-01-003) [[#1624](https://github.com/falcosecurity/falco/pull/1624)] - [@leodido](https://github.com/leodido)
|
||||
* Add mathworks to adopterlist [[#1621](https://github.com/falcosecurity/falco/pull/1621)] - [@natchaphon-r](https://github.com/natchaphon-r)
|
||||
* adding known users [[#1623](https://github.com/falcosecurity/falco/pull/1623)] - [@danpopSD](https://github.com/danpopSD)
|
||||
* docs: update link for HackMD community call notes [[#1614](https://github.com/falcosecurity/falco/pull/1614)] - [@leodido](https://github.com/leodido)
|
||||
|
||||
|
||||
## v0.28.0
|
||||
|
||||
Released on 2021-04-12
|
||||
|
||||
@@ -66,10 +66,17 @@ if(MINIMAL_BUILD)
|
||||
endif()
|
||||
|
||||
if(MUSL_OPTIMIZED_BUILD)
|
||||
set(MUSL_FLAGS "-static -Os")
|
||||
set(MUSL_FLAGS "-static -Os -fPIE -pie")
|
||||
endif()
|
||||
|
||||
set(CMAKE_COMMON_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS} ${MINIMAL_BUILD_FLAGS} ${MUSL_FLAGS}")
|
||||
# explicitly set hardening flags
|
||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
set(FALCO_SECURITY_FLAGS "-Wl,-z,relro,-z,now -fstack-protector-strong")
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "release")
|
||||
set(FALCO_SECURITY_FLAGS "${FALCO_SECURITY_FLAGS} -D_FORTIFY_SOURCE=2")
|
||||
endif()
|
||||
|
||||
set(CMAKE_COMMON_FLAGS "${FALCO_SECURITY_FLAGS} -Wall -ggdb ${DRAIOS_FEATURE_FLAGS} ${MINIMAL_BUILD_FLAGS} ${MUSL_FLAGS}")
|
||||
|
||||
if(BUILD_WARNINGS_AS_ERRORS)
|
||||
set(CMAKE_SUPPRESSED_WARNINGS
|
||||
@@ -103,6 +110,12 @@ set(CMD_MAKE make)
|
||||
|
||||
include(ExternalProject)
|
||||
|
||||
# LuaJIT
|
||||
include(luajit)
|
||||
|
||||
# libs
|
||||
include(falcosecurity-libs)
|
||||
|
||||
# jq
|
||||
include(jq)
|
||||
|
||||
@@ -118,12 +131,6 @@ ExternalProject_Add(
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND "")
|
||||
|
||||
# curses
|
||||
# We pull this in because libsinsp won't build without it
|
||||
set(CURSES_NEED_NCURSES TRUE)
|
||||
find_package(Curses REQUIRED)
|
||||
message(STATUS "Found ncurses: include: ${CURSES_INCLUDE_DIR}, lib: ${CURSES_LIBRARIES}")
|
||||
|
||||
# b64
|
||||
include(b64)
|
||||
|
||||
@@ -132,15 +139,12 @@ include(yaml-cpp)
|
||||
|
||||
if(NOT MINIMAL_BUILD)
|
||||
# OpenSSL
|
||||
include(OpenSSL)
|
||||
include(openssl)
|
||||
|
||||
# libcurl
|
||||
include(cURL)
|
||||
include(curl)
|
||||
endif()
|
||||
|
||||
# LuaJIT
|
||||
include(luajit)
|
||||
|
||||
# Lpeg
|
||||
include(lpeg)
|
||||
|
||||
@@ -151,21 +155,7 @@ include(libyaml)
|
||||
include(lyaml)
|
||||
|
||||
# One TBB
|
||||
set(TBB_SRC "${PROJECT_BINARY_DIR}/tbb-prefix/src/tbb")
|
||||
|
||||
message(STATUS "Using bundled tbb in '${TBB_SRC}'")
|
||||
|
||||
set(TBB_INCLUDE_DIR "${TBB_SRC}/include/")
|
||||
set(TBB_LIB "${TBB_SRC}/build/lib_release/libtbb.a")
|
||||
ExternalProject_Add(
|
||||
tbb
|
||||
URL "https://github.com/oneapi-src/oneTBB/archive/2018_U5.tar.gz"
|
||||
URL_HASH "SHA256=b8dbab5aea2b70cf07844f86fa413e549e099aa3205b6a04059ca92ead93a372"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE} tbb_build_dir=${TBB_SRC}/build tbb_build_prefix=lib extra_inc=big_iron.inc
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${TBB_LIB}
|
||||
INSTALL_COMMAND "")
|
||||
include(tbb)
|
||||
|
||||
if(NOT MINIMAL_BUILD)
|
||||
# civetweb
|
||||
@@ -189,13 +179,13 @@ endif()
|
||||
include(DownloadStringViewLite)
|
||||
|
||||
if(NOT MINIMAL_BUILD)
|
||||
include(zlib)
|
||||
include(cares)
|
||||
include(protobuf)
|
||||
# gRPC
|
||||
include(gRPC)
|
||||
include(grpc)
|
||||
endif()
|
||||
|
||||
# sysdig
|
||||
include(sysdig)
|
||||
|
||||
# Installation
|
||||
install(FILES falco.yaml DESTINATION "${FALCO_ETC_DIR}")
|
||||
|
||||
|
||||
26
RELEASE.md
26
RELEASE.md
@@ -4,7 +4,9 @@ Our release process is mostly automated, but we still need some manual steps to
|
||||
|
||||
Changes and new features are grouped in [milestones](https://github.com/falcosecurity/falco/milestones), the milestone with the next version represents what is going to be released.
|
||||
|
||||
A release happens every two months ([as per community discussion](https://github.com/falcosecurity/community/blob/master/meeting-notes/2020-09-30.md#agenda)), and we need to assign owners for each (usually we pair a new person with an experienced one). Assignees and the due date are proposed during the [weekly community call](https://github.com/falcosecurity/community). Note that hotfix releases can happen as soon as it is needed.
|
||||
Falco releases are due to happen 3 times per year. Our current schedule sees a new release by the end of January, May, and September each year. Hotfix releases can happen whenever it's needed.
|
||||
|
||||
Moreover, we need to assign owners for each release (usually we pair a new person with an experienced one). Assignees and the due date are proposed during the [weekly community call](https://github.com/falcosecurity/community).
|
||||
|
||||
Finally, on the proposed due date the assignees for the upcoming release proceed with the processes described below.
|
||||
|
||||
@@ -15,10 +17,10 @@ Before cutting a release we need to do some homework in the Falco repository. Th
|
||||
### 1. Release notes
|
||||
- Find the previous release date (`YYYY-MM-DD`) by looking at the [Falco releases](https://github.com/falcosecurity/falco/releases)
|
||||
- Check the release note block of every PR matching the `is:pr is:merged closed:>YYYY-MM-DD` [filter](https://github.com/falcosecurity/falco/pulls?q=is%3Apr+is%3Amerged+closed%3A%3EYYYY-MM-DD)
|
||||
- Ensure the release note block follows the [commit convention](https://github.com/falcosecurity/falco/blob/master/CONTRIBUTING.md#commit-convention), otherwise fix its content
|
||||
- Ensure the release note block follows the [commit convention](https://github.com/falcosecurity/.github/blob/master/CONTRIBUTING.md#commit-convention), otherwise fix its content
|
||||
- If the PR has no milestone, assign it to the milestone currently undergoing release
|
||||
- Check issues without a milestone (using [is:pr is:merged no:milestone closed:>YYYY-MM-DD](https://github.com/falcosecurity/falco/pulls?q=is%3Apr+is%3Amerged+no%3Amilestone+closed%3A%3EYYYY-MM-DD) filter) and add them to the milestone currently undergoing release
|
||||
- Double-check that there are no more merged PRs without the target milestone assigned with the `is:pr is:merged no:milestone closed:>YYYY-MM-DD` [filters](https://github.com/falcosecurity/falco/pulls?q=is%3Apr+is%3Amerged+no%3Amilestone+closed%3A%3EYYYY-MM-DD), if any, fix them
|
||||
- Check issues without a milestone (using `is:pr is:merged no:milestone closed:>YYYY-MM-DD` [filter](https://github.com/falcosecurity/falco/pulls?q=is%3Apr+is%3Amerged+no%3Amilestone+closed%3A%3EYYYY-MM-DD) ) and add them to the milestone currently undergoing release
|
||||
- Double-check that there are no more merged PRs without the target milestone assigned with the `is:pr is:merged no:milestone closed:>YYYY-MM-DD` [filter](https://github.com/falcosecurity/falco/pulls?q=is%3Apr+is%3Amerged+no%3Amilestone+closed%3A%3EYYYY-MM-DD), if any, update those missing
|
||||
|
||||
### 2. Milestones
|
||||
|
||||
@@ -30,7 +32,7 @@ Before cutting a release we need to do some homework in the Falco repository. Th
|
||||
- If any, manually correct it then open an issue to automate version number bumping later
|
||||
- Versions table in the `README.md` updates itself automatically
|
||||
- Generate the change log using [rn2md](https://github.com/leodido/rn2md):
|
||||
- Execute `rn2md -o falcosecurity -m <version> -r falco`
|
||||
- Execute `rn2md -o falcosecurity -m <version> -r falco`
|
||||
- In case `rn2md` emits error try to generate an GitHub OAuth access token and provide it with the `-t` flag
|
||||
- Add the latest changes on top the previous `CHANGELOG.md`
|
||||
- Submit a PR with the above modifications
|
||||
@@ -52,7 +54,7 @@ Now assume `x.y.z` is the new version.
|
||||
git push origin x.y.z
|
||||
```
|
||||
|
||||
> **N.B.**: do NOT use an annotated tag
|
||||
> **N.B.**: do NOT use an annotated tag. For reference https://git-scm.com/book/en/v2/Git-Basics-Tagging
|
||||
|
||||
- Wait for the CI to complete
|
||||
|
||||
@@ -77,6 +79,10 @@ Now assume `x.y.z` is the new version.
|
||||
| `docker pull docker.io/falcosecurity/falco-driver-loader:x.y.z` |
|
||||
| `docker pull docker.io/falcosecurity/falco-no-driver:x.y.z` |
|
||||
|
||||
<changelog>
|
||||
|
||||
<!-- Substitute <changelog> with the one generated by [rn2md](https://github.com/leodido/rn2md) -->
|
||||
|
||||
### Statistics
|
||||
|
||||
| Merged PRs | Number |
|
||||
@@ -86,6 +92,10 @@ Now assume `x.y.z` is the new version.
|
||||
| Total | x |
|
||||
|
||||
<!-- Calculate stats and fill the above table -->
|
||||
|
||||
#### Release Manager <github handle>
|
||||
|
||||
<!-- Substitute Github handle with the release manager's one -->
|
||||
```
|
||||
|
||||
- Finally, publish the release!
|
||||
@@ -94,7 +104,7 @@ Now assume `x.y.z` is the new version.
|
||||
|
||||
For each release we archive the meeting notes in git for historical purposes.
|
||||
|
||||
- The notes from the Falco meetings can be [found here](https://hackmd.io/6sEAlInlSaGnLz2FnFz21A).
|
||||
- The notes from the Falco meetings can be [found here](https://hackmd.io/3qYPnZPUQLGKCzR14va_qg).
|
||||
- Note: There may be other notes from working groups that can optionally be added as well as needed.
|
||||
- Add the entire content of the document to a new file in [github.com/falcosecurity/community/tree/master/meeting-notes](https://github.com/falcosecurity/community/tree/master/meeting-notes) as a new file labeled `release-x.y.z.md`
|
||||
- Open up a pull request with the new change.
|
||||
@@ -104,5 +114,7 @@ For each release we archive the meeting notes in git for historical purposes.
|
||||
|
||||
Announce the new release to the world!
|
||||
|
||||
- Publish a blog on [Falco website](https://github.com/falcosecurity/falco-website) ([example](https://github.com/falcosecurity/falco-website/blob/master/content/en/blog/falco-0-28-1.md))
|
||||
- Send an announcement to cncf-falco-dev@lists.cncf.io (plain text, please)
|
||||
- Let folks in the slack #falco channel know about a new release came out
|
||||
- IFF the on going release introduces a **new minor version**, [archive a snapshot of the Falco website](https://github.com/falcosecurity/falco-website/blob/master/release.md#documentation-versioning)
|
||||
|
||||
@@ -48,7 +48,7 @@ set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
|
||||
set(CPACK_RPM_PACKAGE_LICENSE "Apache v2.0")
|
||||
set(CPACK_RPM_PACKAGE_ARCHITECTURE, "amd64")
|
||||
set(CPACK_RPM_PACKAGE_URL "https://www.falco.org")
|
||||
set(CPACK_RPM_PACKAGE_REQUIRES "dkms, kernel-devel, ncurses, systemd")
|
||||
set(CPACK_RPM_PACKAGE_REQUIRES "dkms, kernel-devel, systemd")
|
||||
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${CMAKE_BINARY_DIR}/scripts/rpm/postinstall")
|
||||
set(CPACK_RPM_PRE_UNINSTALL_SCRIPT_FILE "${CMAKE_BINARY_DIR}/scripts/rpm/preuninstall")
|
||||
set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${CMAKE_BINARY_DIR}/scripts/rpm/postuninstall")
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
mark_as_advanced(OPENSSL_BINARY)
|
||||
if(NOT USE_BUNDLED_DEPS)
|
||||
find_package(OpenSSL REQUIRED)
|
||||
message(STATUS "Found openssl: include: ${OPENSSL_INCLUDE_DIR}, lib: ${OPENSSL_LIBRARIES}")
|
||||
find_program(OPENSSL_BINARY openssl)
|
||||
if(NOT OPENSSL_BINARY)
|
||||
message(FATAL_ERROR "Couldn't find the openssl command line in PATH")
|
||||
else()
|
||||
message(STATUS "Found openssl: binary: ${OPENSSL_BINARY}")
|
||||
endif()
|
||||
else()
|
||||
mark_as_advanced(OPENSSL_BUNDLE_DIR OPENSSL_INSTALL_DIR OPENSSL_INCLUDE_DIR
|
||||
OPENSSL_LIBRARY_SSL OPENSSL_LIBRARY_CRYPTO)
|
||||
set(OPENSSL_BUNDLE_DIR "${PROJECT_BINARY_DIR}/openssl-prefix/src/openssl")
|
||||
set(OPENSSL_INSTALL_DIR "${OPENSSL_BUNDLE_DIR}/target")
|
||||
set(OPENSSL_INCLUDE_DIR "${PROJECT_BINARY_DIR}/openssl-prefix/src/openssl/include")
|
||||
set(OPENSSL_LIBRARY_SSL "${OPENSSL_INSTALL_DIR}/lib/libssl.a")
|
||||
set(OPENSSL_LIBRARY_CRYPTO "${OPENSSL_INSTALL_DIR}/lib/libcrypto.a")
|
||||
set(OPENSSL_BINARY "${OPENSSL_INSTALL_DIR}/bin/openssl")
|
||||
|
||||
message(STATUS "Using bundled openssl in '${OPENSSL_BUNDLE_DIR}'")
|
||||
|
||||
ExternalProject_Add(
|
||||
openssl
|
||||
# START CHANGE for CVE-2017-3735, CVE-2017-3731, CVE-2017-3737, CVE-2017-3738, CVE-2017-3736
|
||||
URL "https://github.com/openssl/openssl/archive/OpenSSL_1_0_2n.tar.gz"
|
||||
URL_HASH "SHA256=4f4bc907caff1fee6ff8593729e5729891adcee412049153a3bb4db7625e8364"
|
||||
# END CHANGE for CVE-2017-3735, CVE-2017-3731, CVE-2017-3737, CVE-2017-3738, CVE-2017-3736
|
||||
CONFIGURE_COMMAND ./config no-shared --prefix=${OPENSSL_INSTALL_DIR}
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND ${CMD_MAKE} install)
|
||||
endif()
|
||||
@@ -1,27 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
set(B64_SRC "${PROJECT_BINARY_DIR}/b64-prefix/src/b64")
|
||||
message(STATUS "Using bundled b64 in '${B64_SRC}'")
|
||||
set(B64_INCLUDE "${B64_SRC}/include")
|
||||
set(B64_LIB "${B64_SRC}/src/libb64.a")
|
||||
externalproject_add(
|
||||
b64
|
||||
URL "https://github.com/libb64/libb64/archive/ce864b17ea0e24a91e77c7dd3eb2d1ac4175b3f0.tar.gz"
|
||||
URL_HASH "SHA256=d07173e66f435e5c77dbf81bd9313f8d0e4a3b4edd4105a62f4f8132ba932811"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${B64_LIB}
|
||||
INSTALL_COMMAND ""
|
||||
)
|
||||
@@ -1,76 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
if(NOT USE_BUNDLED_DEPS)
|
||||
find_package(CURL REQUIRED)
|
||||
message(STATUS "Found CURL: include: ${CURL_INCLUDE_DIR}, lib: ${CURL_LIBRARIES}")
|
||||
else()
|
||||
set(CURL_BUNDLE_DIR "${PROJECT_BINARY_DIR}/curl-prefix/src/curl")
|
||||
set(CURL_INCLUDE_DIR "${CURL_BUNDLE_DIR}/include/")
|
||||
set(CURL_LIBRARIES "${CURL_BUNDLE_DIR}/lib/.libs/libcurl.a")
|
||||
|
||||
set(CURL_SSL_OPTION "--with-ssl=${OPENSSL_INSTALL_DIR}")
|
||||
message(STATUS "Using bundled curl in '${CURL_BUNDLE_DIR}'")
|
||||
message(STATUS "Using SSL for curl in '${CURL_SSL_OPTION}'")
|
||||
|
||||
externalproject_add(
|
||||
curl
|
||||
DEPENDS openssl
|
||||
# START CHANGE for CVE-2017-8816, CVE-2017-8817, CVE-2017-8818, CVE-2018-1000007
|
||||
URL "https://github.com/curl/curl/releases/download/curl-7_61_0/curl-7.61.0.tar.bz2"
|
||||
URL_HASH "SHA256=5f6f336921cf5b84de56afbd08dfb70adeef2303751ffb3e570c936c6d656c9c"
|
||||
# END CHANGE for CVE-2017-8816, CVE-2017-8817, CVE-2017-8818, CVE-2018-1000007
|
||||
CONFIGURE_COMMAND
|
||||
./configure
|
||||
${CURL_SSL_OPTION}
|
||||
--disable-shared
|
||||
--enable-optimize
|
||||
--disable-curldebug
|
||||
--disable-rt
|
||||
--enable-http
|
||||
--disable-ftp
|
||||
--disable-file
|
||||
--disable-ldap
|
||||
--disable-ldaps
|
||||
--disable-rtsp
|
||||
--disable-telnet
|
||||
--disable-tftp
|
||||
--disable-pop3
|
||||
--disable-imap
|
||||
--disable-smb
|
||||
--disable-smtp
|
||||
--disable-gopher
|
||||
--disable-sspi
|
||||
--disable-ntlm-wb
|
||||
--disable-tls-srp
|
||||
--without-winssl
|
||||
--without-darwinssl
|
||||
--without-polarssl
|
||||
--without-cyassl
|
||||
--without-nss
|
||||
--without-axtls
|
||||
--without-ca-path
|
||||
--without-ca-bundle
|
||||
--without-libmetalink
|
||||
--without-librtmp
|
||||
--without-winidn
|
||||
--without-libidn2
|
||||
--without-libpsl
|
||||
--without-nghttp2
|
||||
--without-libssh2
|
||||
--disable-threaded-resolver
|
||||
--without-brotli
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
@@ -12,15 +12,15 @@
|
||||
#
|
||||
cmake_minimum_required(VERSION 3.5.1)
|
||||
|
||||
project(sysdig-repo NONE)
|
||||
project(falcosecurity-libs-repo NONE)
|
||||
|
||||
include(ExternalProject)
|
||||
message(STATUS "Driver version: ${SYSDIG_VERSION}")
|
||||
message(STATUS "Driver version: ${FALCOSECURITY_LIBS_VERSION}")
|
||||
|
||||
ExternalProject_Add(
|
||||
sysdig
|
||||
URL "https://github.com/draios/sysdig/archive/${SYSDIG_VERSION}.tar.gz"
|
||||
URL_HASH "${SYSDIG_CHECKSUM}"
|
||||
falcosecurity-libs
|
||||
URL "https://github.com/falcosecurity/libs/archive/${FALCOSECURITY_LIBS_VERSION}.tar.gz"
|
||||
URL_HASH "${FALCOSECURITY_LIBS_CHECKSUM}"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
@@ -1,7 +1,7 @@
|
||||
diff --git a/userspace/libsinsp/chisel.cpp b/userspace/libsinsp/chisel.cpp
|
||||
diff --git a/userspace/chisel/chisel.cpp b/userspace/chisel/chisel.cpp
|
||||
index 0a6e3cf8..0c2e255a 100644
|
||||
--- a/userspace/libsinsp/chisel.cpp
|
||||
+++ b/userspace/libsinsp/chisel.cpp
|
||||
--- a/userspace/chisel/chisel.cpp
|
||||
+++ b/userspace/chisel/chisel.cpp
|
||||
@@ -98,7 +98,7 @@ void lua_stackdump(lua_State *L)
|
||||
// Lua callbacks
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
@@ -29,10 +29,10 @@ index 0a6e3cf8..0c2e255a 100644
|
||||
{
|
||||
{"field", &lua_cbacks::field},
|
||||
{"get_num", &lua_cbacks::get_num},
|
||||
diff --git a/userspace/libsinsp/lua_parser.cpp b/userspace/libsinsp/lua_parser.cpp
|
||||
diff --git a/userspace/chisel/lua_parser.cpp b/userspace/chisel/lua_parser.cpp
|
||||
index 0e26617d..78810d96 100644
|
||||
--- a/userspace/libsinsp/lua_parser.cpp
|
||||
+++ b/userspace/libsinsp/lua_parser.cpp
|
||||
--- a/userspace/chisel/lua_parser.cpp
|
||||
+++ b/userspace/chisel/lua_parser.cpp
|
||||
@@ -32,7 +32,7 @@ extern "C" {
|
||||
#include "lauxlib.h"
|
||||
}
|
||||
@@ -42,10 +42,10 @@ index 0e26617d..78810d96 100644
|
||||
{
|
||||
{"rel_expr", &lua_parser_cbacks::rel_expr},
|
||||
{"bool_op", &lua_parser_cbacks::bool_op},
|
||||
diff --git a/userspace/libsinsp/lua_parser_api.cpp b/userspace/libsinsp/lua_parser_api.cpp
|
||||
diff --git a/userspace/chisel/lua_parser_api.cpp b/userspace/chisel/lua_parser_api.cpp
|
||||
index c89e9126..c3d8008a 100644
|
||||
--- a/userspace/libsinsp/lua_parser_api.cpp
|
||||
+++ b/userspace/libsinsp/lua_parser_api.cpp
|
||||
--- a/userspace/chisel/lua_parser_api.cpp
|
||||
+++ b/userspace/chisel/lua_parser_api.cpp
|
||||
@@ -266,7 +266,7 @@ int lua_parser_cbacks::rel_expr(lua_State *ls)
|
||||
string err = "Got non-table as in-expression operand\n";
|
||||
throw sinsp_exception("parser API error");
|
||||
67
cmake/modules/falcosecurity-libs.cmake
Normal file
67
cmake/modules/falcosecurity-libs.cmake
Normal file
@@ -0,0 +1,67 @@
|
||||
#
|
||||
# Copyright (C) 2021 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
set(FALCOSECURITY_LIBS_CMAKE_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules/falcosecurity-libs-repo")
|
||||
set(FALCOSECURITY_LIBS_CMAKE_WORKING_DIR "${CMAKE_BINARY_DIR}/falcosecurity-libs-repo")
|
||||
|
||||
file(MAKE_DIRECTORY ${FALCOSECURITY_LIBS_CMAKE_WORKING_DIR})
|
||||
|
||||
# The falcosecurity/libs git reference (branch name, commit hash, or tag) To update falcosecurity/libs version for the next release, change the
|
||||
# default below In case you want to test against another falcosecurity/libs version just pass the variable - ie., `cmake
|
||||
# -DFALCOSECURITY_LIBS_VERSION=dev ..`
|
||||
if(NOT FALCOSECURITY_LIBS_VERSION)
|
||||
set(FALCOSECURITY_LIBS_VERSION "3aa7a83bf7b9e6229a3824e3fd1f4452d1e95cb4")
|
||||
set(FALCOSECURITY_LIBS_CHECKSUM "SHA256=1edb535b3778fcfb46bbeeda891f176a1bd591bebd7b89c27f04837e55a52beb")
|
||||
endif()
|
||||
|
||||
# cd /path/to/build && cmake /path/to/source
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -DFALCOSECURITY_LIBS_VERSION=${FALCOSECURITY_LIBS_VERSION} -DFALCOSECURITY_LIBS_CHECKSUM=${FALCOSECURITY_LIBS_CHECKSUM}
|
||||
${FALCOSECURITY_LIBS_CMAKE_SOURCE_DIR} WORKING_DIRECTORY ${FALCOSECURITY_LIBS_CMAKE_WORKING_DIR})
|
||||
|
||||
# todo(leodido, fntlnz) > use the following one when CMake version will be >= 3.13
|
||||
|
||||
# execute_process(COMMAND "${CMAKE_COMMAND}" -B ${FALCOSECURITY_LIBS_CMAKE_WORKING_DIR} WORKING_DIRECTORY
|
||||
# "${FALCOSECURITY_LIBS_CMAKE_SOURCE_DIR}")
|
||||
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${FALCOSECURITY_LIBS_CMAKE_WORKING_DIR}")
|
||||
set(FALCOSECURITY_LIBS_SOURCE_DIR "${FALCOSECURITY_LIBS_CMAKE_WORKING_DIR}/falcosecurity-libs-prefix/src/falcosecurity-libs")
|
||||
|
||||
add_definitions(-D_GNU_SOURCE)
|
||||
add_definitions(-DHAS_CAPTURE)
|
||||
if(MUSL_OPTIMIZED_BUILD)
|
||||
add_definitions(-DMUSL_OPTIMIZED)
|
||||
endif()
|
||||
|
||||
set(PROBE_VERSION "${FALCOSECURITY_LIBS_VERSION}")
|
||||
|
||||
if(NOT LIBSCAP_DIR)
|
||||
set(LIBSCAP_DIR "${FALCOSECURITY_LIBS_SOURCE_DIR}")
|
||||
endif()
|
||||
set(LIBSINSP_DIR "${FALCOSECURITY_LIBS_SOURCE_DIR}")
|
||||
|
||||
# explicitly disable the tests/examples of this dependency
|
||||
set(CREATE_TEST_TARGETS OFF CACHE BOOL "")
|
||||
set(BUILD_LIBSCAP_EXAMPLES OFF CACHE BOOL "")
|
||||
|
||||
# todo(leogr): although Falco does not actually depend on chisels, we need this for the lua_parser.
|
||||
# Hopefully, we can switch off this in the future
|
||||
set(WITH_CHISEL ON CACHE BOOL "")
|
||||
|
||||
set(USE_BUNDLED_TBB ON CACHE BOOL "")
|
||||
set(USE_BUNDLED_B64 ON CACHE BOOL "")
|
||||
set(USE_BUNDLED_JSONCPP ON CACHE BOOL "")
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH "${FALCOSECURITY_LIBS_SOURCE_DIR}/cmake/modules")
|
||||
|
||||
include(libscap)
|
||||
include(libsinsp)
|
||||
@@ -1,145 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
if(NOT USE_BUNDLED_DEPS)
|
||||
# zlib
|
||||
include(FindZLIB)
|
||||
set(ZLIB_INCLUDE "${ZLIB_INCLUDE_DIRS}")
|
||||
set(ZLIB_LIB "${ZLIB_LIBRARIES}")
|
||||
|
||||
if(ZLIB_INCLUDE AND ZLIB_LIB)
|
||||
message(STATUS "Found zlib: include: ${ZLIB_INCLUDE}, lib: ${ZLIB_LIB}")
|
||||
endif()
|
||||
|
||||
# c-ares
|
||||
mark_as_advanced(CARES_INCLUDE CARES_LIB)
|
||||
find_path(CARES_INCLUDE NAMES ares.h)
|
||||
find_library(CARES_LIB NAMES libcares.so)
|
||||
if(CARES_INCLUDE AND CARES_LIB)
|
||||
message(STATUS "Found c-ares: include: ${CARES_INCLUDE}, lib: ${CARES_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system c-ares")
|
||||
endif()
|
||||
|
||||
# protobuf
|
||||
mark_as_advanced(PROTOC PROTOBUF_INCLUDE PROTOBUF_LIB)
|
||||
find_program(PROTOC NAMES protoc)
|
||||
find_path(PROTOBUF_INCLUDE NAMES google/protobuf/message.h)
|
||||
find_library(PROTOBUF_LIB NAMES libprotobuf.so)
|
||||
if(PROTOC
|
||||
AND PROTOBUF_INCLUDE
|
||||
AND PROTOBUF_LIB)
|
||||
message(STATUS "Found protobuf: compiler: ${PROTOC}, include: ${PROTOBUF_INCLUDE}, lib: ${PROTOBUF_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system protobuf")
|
||||
endif()
|
||||
|
||||
# gpr
|
||||
mark_as_advanced(GPR_LIB)
|
||||
find_library(GPR_LIB NAMES gpr)
|
||||
|
||||
if(GPR_LIB)
|
||||
message(STATUS "Found gpr lib: ${GPR_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system gpr")
|
||||
endif()
|
||||
|
||||
# gRPC todo(fntlnz, leodido): check that gRPC version is greater or equal than 1.8.0
|
||||
mark_as_advanced(GRPC_INCLUDE GRPC_SRC
|
||||
GRPC_LIB GRPC_LIBS_ABSOLUTE GRPCPP_LIB GRPC_CPP_PLUGIN)
|
||||
find_path(GRPCXX_INCLUDE NAMES grpc++/grpc++.h)
|
||||
if(GRPCXX_INCLUDE)
|
||||
set(GRPC_INCLUDE ${GRPCXX_INCLUDE})
|
||||
unset(GRPCXX_INCLUDE CACHE)
|
||||
else()
|
||||
find_path(GRPCPP_INCLUDE NAMES grpcpp/grpcpp.h)
|
||||
set(GRPC_INCLUDE ${GRPCPP_INCLUDE})
|
||||
unset(GRPCPP_INCLUDE CACHE)
|
||||
add_definitions(-DGRPC_INCLUDE_IS_GRPCPP=1)
|
||||
endif()
|
||||
find_library(GRPC_LIB NAMES grpc)
|
||||
find_library(GRPCPP_LIB NAMES grpc++)
|
||||
if(GRPC_INCLUDE
|
||||
AND GRPC_LIB
|
||||
AND GRPCPP_LIB)
|
||||
message(STATUS "Found grpc: include: ${GRPC_INCLUDE}, C lib: ${GRPC_LIB}, C++ lib: ${GRPCPP_LIB}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't find system grpc")
|
||||
endif()
|
||||
find_program(GRPC_CPP_PLUGIN grpc_cpp_plugin)
|
||||
if(NOT GRPC_CPP_PLUGIN)
|
||||
message(FATAL_ERROR "System grpc_cpp_plugin not found")
|
||||
endif()
|
||||
|
||||
else()
|
||||
find_package(PkgConfig)
|
||||
if(NOT PKG_CONFIG_FOUND)
|
||||
message(FATAL_ERROR "pkg-config binary not found")
|
||||
endif()
|
||||
message(STATUS "Found pkg-config executable: ${PKG_CONFIG_EXECUTABLE}")
|
||||
set(GRPC_SRC "${PROJECT_BINARY_DIR}/grpc-prefix/src/grpc")
|
||||
set(GRPC_INCLUDE "${GRPC_SRC}/include")
|
||||
set(GRPC_LIBS_ABSOLUTE "${GRPC_SRC}/libs/opt")
|
||||
set(GRPC_LIB "${GRPC_LIBS_ABSOLUTE}/libgrpc.a")
|
||||
set(GRPCPP_LIB "${GRPC_LIBS_ABSOLUTE}/libgrpc++.a")
|
||||
set(GRPC_CPP_PLUGIN "${GRPC_SRC}/bins/opt/grpc_cpp_plugin")
|
||||
|
||||
# we tell gRPC to compile protobuf for us because when a gRPC package is not available, like on CentOS, it's very
|
||||
# likely that protobuf will be very outdated
|
||||
set(PROTOBUF_INCLUDE "${GRPC_SRC}/third_party/protobuf/src")
|
||||
set(PROTOC "${PROTOBUF_INCLUDE}/protoc")
|
||||
set(PROTOBUF_LIB "${GRPC_LIBS_ABSOLUTE}/protobuf/libprotobuf.a")
|
||||
# we tell gRPC to compile zlib for us because when a gRPC package is not available, like on CentOS, it's very likely
|
||||
# that zlib will be very outdated
|
||||
set(ZLIB_INCLUDE "${GRPC_SRC}/third_party/zlib")
|
||||
set(ZLIB_LIB "${GRPC_LIBS_ABSOLUTE}/libz.a")
|
||||
# we tell gRPC to compile c-ares for us because when a gRPC package is not available, like on CentOS, it's very likely
|
||||
# that c-ares will be very outdated
|
||||
set(CARES_INCLUDE "${GRPC_SRC}/third_party/cares" "${GRPC_SRC}/third_party/cares/cares")
|
||||
set(CARES_LIB "${GRPC_LIBS_ABSOLUTE}/libares.a")
|
||||
|
||||
message(STATUS "Using bundled gRPC in '${GRPC_SRC}'")
|
||||
message(
|
||||
STATUS
|
||||
"Bundled gRPC comes with protobuf: compiler: ${PROTOC}, include: ${PROTOBUF_INCLUDE}, lib: ${PROTOBUF_LIB}")
|
||||
message(STATUS "Bundled gRPC comes with zlib: include: ${ZLIB_INCLUDE}, lib: ${ZLIB_LIB}}")
|
||||
message(STATUS "Bundled gRPC comes with cares: include: ${CARES_INCLUDE}, lib: ${CARES_LIB}}")
|
||||
message(STATUS "Bundled gRPC comes with gRPC C++ plugin: include: ${GRPC_CPP_PLUGIN}")
|
||||
|
||||
get_filename_component(PROTOC_DIR ${PROTOC} PATH)
|
||||
|
||||
ExternalProject_Add(
|
||||
grpc
|
||||
DEPENDS openssl
|
||||
GIT_REPOSITORY https://github.com/grpc/grpc.git
|
||||
GIT_TAG v1.32.0
|
||||
GIT_SUBMODULES "third_party/protobuf third_party/zlib third_party/cares/cares third_party/abseil-cpp third_party/re2"
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${GRPC_LIB} ${GRPCPP_LIB}
|
||||
INSTALL_COMMAND ""
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND
|
||||
CFLAGS=-Wno-implicit-fallthrough
|
||||
HAS_SYSTEM_ZLIB=false
|
||||
HAS_SYSTEM_PROTOBUF=false
|
||||
HAS_SYSTEM_CARES=false
|
||||
HAS_EMBEDDED_OPENSSL_ALPN=false
|
||||
HAS_SYSTEM_OPENSSL_ALPN=true
|
||||
PKG_CONFIG_PATH=${OPENSSL_BUNDLE_DIR}
|
||||
PKG_CONFIG=${PKG_CONFIG_EXECUTABLE}
|
||||
PATH=${PROTOC_DIR}:$ENV{PATH}
|
||||
make
|
||||
static_cxx
|
||||
static_c
|
||||
grpc_cpp_plugin)
|
||||
endif()
|
||||
@@ -1,54 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
mark_as_advanced(JQ_INCLUDE JQ_LIB)
|
||||
if (NOT USE_BUNDLED_DEPS)
|
||||
find_path(JQ_INCLUDE jq.h PATH_SUFFIXES jq)
|
||||
find_library(JQ_LIB NAMES jq)
|
||||
if (JQ_INCLUDE AND JQ_LIB)
|
||||
message(STATUS "Found jq: include: ${JQ_INCLUDE}, lib: ${JQ_LIB}")
|
||||
else ()
|
||||
message(FATAL_ERROR "Couldn't find system jq")
|
||||
endif ()
|
||||
else ()
|
||||
set(JQ_SRC "${PROJECT_BINARY_DIR}/jq-prefix/src/jq")
|
||||
message(STATUS "Using bundled jq in '${JQ_SRC}'")
|
||||
set(JQ_INCLUDE "${JQ_SRC}/target/include")
|
||||
set(JQ_INSTALL_DIR "${JQ_SRC}/target")
|
||||
set(JQ_LIB "${JQ_INSTALL_DIR}/lib/libjq.a")
|
||||
set(ONIGURUMA_LIB "${JQ_INSTALL_DIR}/lib/libonig.a")
|
||||
message(STATUS "Bundled jq: include: ${JQ_INCLUDE}, lib: ${JQ_LIB}")
|
||||
|
||||
# Why we mirror jq here?
|
||||
#
|
||||
# In their readme, jq claims that you don't have
|
||||
# to do autoreconf -fi when downloading a released tarball.
|
||||
#
|
||||
# However, they forgot to push the released makefiles
|
||||
# into their release tarbal.
|
||||
#
|
||||
# For this reason, we have to mirror their release after
|
||||
# doing the configuration ourselves.
|
||||
#
|
||||
# This is needed because many distros do not ship the right
|
||||
# version of autoreconf, making virtually impossible to build Falco on them.
|
||||
# Read more about it here:
|
||||
# https://github.com/stedolan/jq/issues/2061#issuecomment-593445920
|
||||
ExternalProject_Add(
|
||||
jq
|
||||
URL "https://download.falco.org/dependencies/jq-1.6.tar.gz"
|
||||
URL_HASH "SHA256=787518068c35e244334cc79b8e56b60dbab352dff175b7f04a94f662b540bfd9"
|
||||
CONFIGURE_COMMAND ./configure --disable-maintainer-mode --enable-all-static --disable-dependency-tracking --with-oniguruma=builtin --prefix=${JQ_INSTALL_DIR}
|
||||
BUILD_COMMAND ${CMD_MAKE} LDFLAGS=-all-static
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND ${CMD_MAKE} install)
|
||||
endif ()
|
||||
@@ -11,17 +11,20 @@
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
set(LUAJIT_SRC "${PROJECT_BINARY_DIR}/luajit-prefix/src/luajit/src")
|
||||
message(STATUS "Using bundled LuaJIT in '${LUAJIT_SRC}'")
|
||||
set(LUAJIT_INCLUDE "${LUAJIT_SRC}")
|
||||
set(LUAJIT_LIB "${LUAJIT_SRC}/libluajit.a")
|
||||
externalproject_add(
|
||||
luajit
|
||||
GIT_REPOSITORY "https://github.com/LuaJIT/LuaJIT"
|
||||
GIT_TAG "1d8b747c161db457e032a023ebbff511f5de5ec2"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${LUAJIT_LIB}
|
||||
INSTALL_COMMAND ""
|
||||
)
|
||||
if(NOT LUAJIT_INCLUDE)
|
||||
set(LUAJIT_SRC "${PROJECT_BINARY_DIR}/luajit-prefix/src/luajit/src")
|
||||
message(STATUS "Using bundled LuaJIT in '${LUAJIT_SRC}'")
|
||||
set(LUAJIT_INCLUDE "${LUAJIT_SRC}")
|
||||
set(LUAJIT_LIB "${LUAJIT_SRC}/libluajit.a")
|
||||
externalproject_add(
|
||||
luajit
|
||||
GIT_REPOSITORY "https://github.com/LuaJIT/LuaJIT"
|
||||
GIT_TAG "1d8b747c161db457e032a023ebbff511f5de5ec2"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMD_MAKE}
|
||||
BUILD_IN_SOURCE 1
|
||||
BUILD_BYPRODUCTS ${LUAJIT_LIB}
|
||||
INSTALL_COMMAND ""
|
||||
)
|
||||
endif()
|
||||
include_directories("${LUAJIT_INCLUDE}")
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2020 The Falco Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations under the License.
|
||||
#
|
||||
|
||||
set(SYSDIG_CMAKE_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules/sysdig-repo")
|
||||
set(SYSDIG_CMAKE_WORKING_DIR "${CMAKE_BINARY_DIR}/sysdig-repo")
|
||||
|
||||
# this needs to be here at the top
|
||||
if(USE_BUNDLED_DEPS)
|
||||
# explicitly force this dependency to use the bundled OpenSSL
|
||||
if(NOT MINIMAL_BUILD)
|
||||
set(USE_BUNDLED_OPENSSL ON)
|
||||
endif()
|
||||
set(USE_BUNDLED_JQ ON)
|
||||
endif()
|
||||
|
||||
file(MAKE_DIRECTORY ${SYSDIG_CMAKE_WORKING_DIR})
|
||||
|
||||
# The sysdig git reference (branch name, commit hash, or tag) To update sysdig version for the next release, change the
|
||||
# default below In case you want to test against another sysdig version just pass the variable - ie., `cmake
|
||||
# -DSYSDIG_VERSION=dev ..`
|
||||
if(NOT SYSDIG_VERSION)
|
||||
set(SYSDIG_VERSION "5c0b863ddade7a45568c0ac97d037422c9efb750")
|
||||
set(SYSDIG_CHECKSUM "SHA256=9de717b3a4b611ea6df56afee05171860167112f74bb7717b394bcc88ac843cd")
|
||||
endif()
|
||||
set(PROBE_VERSION "${SYSDIG_VERSION}")
|
||||
|
||||
# cd /path/to/build && cmake /path/to/source
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -DSYSDIG_VERSION=${SYSDIG_VERSION} -DSYSDIG_CHECKSUM=${SYSDIG_CHECKSUM}
|
||||
${SYSDIG_CMAKE_SOURCE_DIR} WORKING_DIRECTORY ${SYSDIG_CMAKE_WORKING_DIR})
|
||||
|
||||
# todo(leodido, fntlnz) > use the following one when CMake version will be >= 3.13
|
||||
|
||||
# execute_process(COMMAND "${CMAKE_COMMAND}" -B ${SYSDIG_CMAKE_WORKING_DIR} WORKING_DIRECTORY
|
||||
# "${SYSDIG_CMAKE_SOURCE_DIR}")
|
||||
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${SYSDIG_CMAKE_WORKING_DIR}")
|
||||
set(SYSDIG_SOURCE_DIR "${SYSDIG_CMAKE_WORKING_DIR}/sysdig-prefix/src/sysdig")
|
||||
|
||||
# jsoncpp
|
||||
set(JSONCPP_SRC "${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp")
|
||||
set(JSONCPP_INCLUDE "${JSONCPP_SRC}")
|
||||
set(JSONCPP_LIB_SRC "${JSONCPP_SRC}/jsoncpp.cpp")
|
||||
|
||||
# Add driver directory
|
||||
add_subdirectory("${SYSDIG_SOURCE_DIR}/driver" "${PROJECT_BINARY_DIR}/driver")
|
||||
|
||||
# Add libscap directory
|
||||
add_definitions(-D_GNU_SOURCE)
|
||||
add_definitions(-DHAS_CAPTURE)
|
||||
add_definitions(-DNOCURSESUI)
|
||||
if(MUSL_OPTIMIZED_BUILD)
|
||||
add_definitions(-DMUSL_OPTIMIZED)
|
||||
endif()
|
||||
add_subdirectory("${SYSDIG_SOURCE_DIR}/userspace/libscap" "${PROJECT_BINARY_DIR}/userspace/libscap")
|
||||
|
||||
# Add libsinsp directory
|
||||
add_subdirectory("${SYSDIG_SOURCE_DIR}/userspace/libsinsp" "${PROJECT_BINARY_DIR}/userspace/libsinsp")
|
||||
add_dependencies(sinsp tbb b64 luajit)
|
||||
|
||||
# explicitly disable the tests of this dependency
|
||||
set(CREATE_TEST_TARGETS OFF)
|
||||
|
||||
if(USE_BUNDLED_DEPS)
|
||||
add_dependencies(scap jq)
|
||||
if(NOT MINIMAL_BUILD)
|
||||
add_dependencies(scap curl grpc)
|
||||
endif()
|
||||
endif()
|
||||
@@ -28,6 +28,7 @@ else()
|
||||
yamlcpp
|
||||
URL "https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.6.2.tar.gz"
|
||||
URL_HASH "SHA256=e4d8560e163c3d875fd5d9e5542b5fd5bec810febdcba61481fe5fc4e6b1fd05"
|
||||
BUILD_BYPRODUCTS ${YAMLCPP_LIB}
|
||||
BUILD_IN_SOURCE 1
|
||||
INSTALL_COMMAND "")
|
||||
endif()
|
||||
|
||||
@@ -20,11 +20,11 @@ ENV FALCO_VERSION=${FALCO_VERSION}
|
||||
|
||||
# build toolchain
|
||||
RUN yum -y install centos-release-scl && \
|
||||
INSTALL_PKGS="devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-toolchain devtoolset-7-libstdc++-devel devtoolset-7-elfutils-libelf-devel llvm-toolset-7 glibc-static autoconf automake libtool createrepo expect git which libcurl-devel zlib-devel ncurses-devel rpm-build libyaml-devel" && \
|
||||
INSTALL_PKGS="devtoolset-7-gcc devtoolset-7-gcc-c++ devtoolset-7-toolchain devtoolset-7-libstdc++-devel devtoolset-7-elfutils-libelf-devel llvm-toolset-7 glibc-static autoconf automake libtool createrepo expect git which libcurl-devel zlib-devel rpm-build libyaml-devel" && \
|
||||
yum -y install --setopt=tsflags=nodocs $INSTALL_PKGS && \
|
||||
rpm -V $INSTALL_PKGS
|
||||
|
||||
ARG CMAKE_VERSION=3.5.1
|
||||
ARG CMAKE_VERSION=3.6.3
|
||||
RUN source scl_source enable devtoolset-7 llvm-toolset-7 && \
|
||||
cd /tmp && \
|
||||
curl -L https://github.com/kitware/cmake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz | tar xz; \
|
||||
|
||||
@@ -34,7 +34,6 @@ case "$CMD" in
|
||||
-DCMAKE_BUILD_TYPE="$BUILD_TYPE" \
|
||||
-DCMAKE_INSTALL_PREFIX=/usr \
|
||||
-DBUILD_DRIVER="$BUILD_DRIVER" \
|
||||
-DMINIMAL_BUILD="$MINIMAL_BUILD" \
|
||||
-DBUILD_BPF="$BUILD_BPF" \
|
||||
-DBUILD_WARNINGS_AS_ERRORS="$BUILD_WARNINGS_AS_ERRORS" \
|
||||
-DFALCO_VERSION="$FALCO_VERSION" \
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM debian:stable
|
||||
FROM debian:buster
|
||||
|
||||
LABEL maintainer="cncf-falco-dev@lists.cncf.io"
|
||||
|
||||
@@ -30,6 +30,7 @@ RUN apt-get update \
|
||||
libc6-dev \
|
||||
libelf-dev \
|
||||
libmpx2 \
|
||||
libssl-dev \
|
||||
llvm-7 \
|
||||
netcat \
|
||||
xz-utils \
|
||||
|
||||
30
falco.yaml
30
falco.yaml
@@ -46,6 +46,12 @@ json_output: false
|
||||
# (user=root ....") in the json output.
|
||||
json_include_output_property: true
|
||||
|
||||
# When using json output, whether or not to include the "tags" property
|
||||
# itself in the json output. If set to true, outputs caused by rules
|
||||
# with no tags will have a "tags" field set to an empty array. If set to
|
||||
# false, the "tags" field will not be included in the json output at all.
|
||||
json_include_tags_property: true
|
||||
|
||||
# Send information logs to stderr and/or syslog Note these are *not* security
|
||||
# notification logs! These are just Falco lifecycle (and possibly error) logs.
|
||||
log_stderr: true
|
||||
@@ -97,6 +103,24 @@ syscall_event_drops:
|
||||
rate: .03333
|
||||
max_burst: 1
|
||||
|
||||
# Falco uses a shared buffer between the kernel and userspace to receive
|
||||
# the events (eg., system call information) in userspace.
|
||||
#
|
||||
# Anyways, the underlying libraries can also timeout for various reasons.
|
||||
# For example, there could have been issues while reading an event.
|
||||
# Or the particular event needs to be skipped.
|
||||
# Normally, it's very unlikely that Falco does not receive events consecutively.
|
||||
#
|
||||
# Falco is able to detect such uncommon situation.
|
||||
#
|
||||
# Here you can configure the maximum number of consecutive timeouts without an event
|
||||
# after which you want Falco to alert.
|
||||
# By default this value is set to 1000 consecutive timeouts without an event at all.
|
||||
# How this value maps to a time interval depends on the CPU frequency.
|
||||
|
||||
syscall_event_timeouts:
|
||||
max_consecutives: 1000
|
||||
|
||||
# Falco continuously monitors outputs performance. When an output channel does not allow
|
||||
# to deliver an alert within a given deadline, an error is reported indicating
|
||||
# which output is blocking notifications.
|
||||
@@ -231,3 +255,9 @@ grpc:
|
||||
# Make sure to have a consumer for them or leave this disabled.
|
||||
grpc_output:
|
||||
enabled: false
|
||||
|
||||
# Container orchestrator metadata fetching params
|
||||
metadata_download:
|
||||
max_mb: 100
|
||||
chunk_wait_us: 1000
|
||||
watch_freq_sec: 1
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
This document reflects when and how we clean up the Falco artifacts from their storage location.
|
||||
|
||||
**Superseeded by**: [drivers-storage-s3 proposal](https://github.com/falcosecurity/falco/blob/master/proposals/20201025-drivers-storage-s3.md).
|
||||
|
||||
## Motivation
|
||||
|
||||
The [bintray](https://bintray.com/falcosecurity) open-source plan offers 10GB free space for storing artifacts.
|
||||
@@ -94,9 +96,19 @@ Since the process of building drivers is time and resource consuming, this docum
|
||||
|
||||
The candidate is an AWS S3 bucket responsible for holding the deleted driver version files.
|
||||
|
||||
#### Notice
|
||||
|
||||
The current mechanism the Falco community uses to store the Falco drivers is explained by the [drivers-storage-s3](https://github.com/falcosecurity/falco/blob/master/proposals/20201025-drivers-storage-s3.md) proposal.
|
||||
|
||||
### Implementation
|
||||
|
||||
The [test-infra](https://github.com/falcosecurity/test-infra) CI, specifically its part dedicated to run the **Drivers Build Grid** that runs every time it detects changes into the `driverkit` directory of the [test-infra](https://github.com/falcosecurity/test-infra) repository,
|
||||
will have a new job - called `drivers/cleanup` - responsible for removing all the Falco driver versions except the last two.
|
||||
|
||||
This job will be triggered after the `drivers/publish` completed successfully on the master branch.
|
||||
This job will be triggered after the `drivers/publish` completed successfully on the master branch.
|
||||
|
||||
#### Notice
|
||||
|
||||
At the moment of writing (2021 09 28) the `drivers/cleanup` job is no more in place.
|
||||
|
||||
Pragmatically, this means that the older Falco drivers will remain available in their [S3 bucket](https://download.falco.org/?prefix=driver/).
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# OSS Libraries Donation Plan
|
||||
# OSS Libraries Contribution Plan
|
||||
|
||||
## Summary
|
||||
|
||||
@@ -6,7 +6,7 @@ Sysdig Inc. intends to donate **libsinsp**, **libscap**, the **kernel module dri
|
||||
|
||||
This means that some parts of the [draios/sysdig](https://github.com/draios/sysdig) repository will be moved to a new GitHub repository called [falcosecurity/libs](https://github.com/falcosecurity/libs).
|
||||
|
||||
This plan aims to describe and clarify the terms and goals to get the donation done.
|
||||
This plan aims to describe and clarify the terms and goals to get the contribution done.
|
||||
|
||||
## Motivation
|
||||
|
||||
@@ -22,7 +22,7 @@ Sysdig (the command line tool) will continue to use the libraries now provided b
|
||||
This change is win-win for both parties because of the following reasons:
|
||||
|
||||
- The Falco community owns the source code of the three most important parts of the software it distributes.
|
||||
- Right now it is "only" an engine on top of the libraries. This **donation** helps in making the scope of the Falco project broader. Having the majority of the source code under an **open governance** in the same organization gives the Falco project more contribution opportunities, helps it in **evolving independently** and makes the whole Falco community a strong owner of the processes and decision making regarding those crucial parts.
|
||||
- Right now it is "only" an engine on top of the libraries. This **contribution** helps in making the scope of the Falco project broader. Having the majority of the source code under an **open governance** in the same organization gives the Falco project more contribution opportunities, helps it in **evolving independently** and makes the whole Falco community a strong owner of the processes and decision making regarding those crucial parts.
|
||||
|
||||
- Given the previous point, Sysdig (the command line tool) will benefit from the now **extended contributors base**
|
||||
|
||||
@@ -34,7 +34,7 @@ This change is win-win for both parties because of the following reasons:
|
||||
|
||||
## Goals
|
||||
|
||||
There are many sub-projects and each of them interacts in a different way in this donation.
|
||||
There are many sub-projects and each of them interacts in a different way in this contribution.
|
||||
|
||||
Let's see the goals per sub-project.
|
||||
|
||||
@@ -68,7 +68,7 @@ Let's see the goals per sub-project.
|
||||
|
||||
13. Falco follows a [multi-stage model for adopting new projects](https://github.com/falcosecurity/evolution#falco-project-evolution), in this case we will do an exception since the library is foundational for Falco and it has a very good track record already
|
||||
|
||||
14. This project will go already "Official support" once the donation is completed
|
||||
14. This project will go already "Official support" once the contribution is completed
|
||||
|
||||
15. Contributing, Code of Conduct, Governance, Security, and Support will be the same as the rest of the organization, find them [here](https://github.com/falcosecurity/.github)
|
||||
|
||||
@@ -110,7 +110,7 @@ Let's see the goals per sub-project.
|
||||
|
||||
13. Falco follows a [multi-stage model for adopting new projects](https://github.com/falcosecurity/evolution#falco-project-evolution), in this case we will do an exception since the library is foundational for Falco and it has a very good track record already
|
||||
|
||||
14. This project will go already "Official support" once the donation is completed
|
||||
14. This project will go already "Official support" once the contribution is completed
|
||||
|
||||
15. Contributing, Code of Conduct, Governance, Security, and Support will be the same as the rest of the organization, find them [here](https://github.com/falcosecurity/.github)
|
||||
|
||||
613
proposals/20210501-plugin-system.md
Normal file
613
proposals/20210501-plugin-system.md
Normal file
@@ -0,0 +1,613 @@
|
||||
# Plugin System
|
||||
|
||||
## Summary
|
||||
|
||||
This is a proposal to create an infrastructure to extend the functionality of the Falco libraries via plugins.
|
||||
|
||||
Plugins will allow users to easily extend the functionality of the libraries and, as a consequence, of Falco and any other tool based on the libraries.
|
||||
|
||||
This proposal, in particular, focuses on two types of plugins: source plugins and extractor plugins.
|
||||
|
||||
## Motivation
|
||||
|
||||
[libscap](https://github.com/falcosecurity/libs/tree/master/userspace/libscap) and [libsinsp](https://github.com/falcosecurity/libs/tree/master/userspace/libsinsp) provide a powerful data capture framework, with a rich set of features that includes:
|
||||
|
||||
- data capture
|
||||
- trace files management
|
||||
- enrichment
|
||||
- filtering
|
||||
- formatting and screen rendering
|
||||
- Lua scripting (chisels)
|
||||
|
||||
These features have been designed with one specific input in mind: system calls. However, they are generically adaptable to a broad set of inputs, such as cloud logs.
|
||||
|
||||
With this proposal, we want to dramatically extend the scope of what the libraries, Falco and other tools can be applied to. We want to do it in a way that is easy, efficient and empowers anyone in the community to write a plugin.
|
||||
|
||||
## Goals
|
||||
|
||||
- To design and implement a plugin framework that makes the libraries more modular and extensible
|
||||
- To have a framework that is easy to use
|
||||
- To support dynamic loading of plugins, so that the libraries can be extended without having to be recompiled and relinked
|
||||
- To enable users to write plugins in any language, with a particular focus on Go, C and C++
|
||||
- To have an efficient plugin framework so that, performance-wise, writing a plugin is as close as possible as extending the libraries internal source code
|
||||
- To make it possible to write plugins for Linux, MacOS and Windows
|
||||
|
||||
## Non-Goals
|
||||
|
||||
- To implement plugins other than source and extractor: to be approached as separate task
|
||||
- To document the plugin framework and interface: to be approached as separate task
|
||||
|
||||
## Proposal
|
||||
|
||||
### Plugin Common Information
|
||||
|
||||
Both source and extractor plugins have the following:
|
||||
|
||||
- A required api version, to ensure compatibility with the plugin framework.
|
||||
- A name
|
||||
- A description
|
||||
- A version
|
||||
- A contact field for the plugin authors (website, github repo, twitter, etc).
|
||||
- Functions to initialize and destroy the plugin internal state.
|
||||
|
||||
### Plugin types
|
||||
|
||||
Initially, we will implement support for two types of plugins: source plugins and extractor plugins.
|
||||
|
||||
#### Source Plugin
|
||||
|
||||
A source plugin implements a new sinsp/scap event source. It has the ability to "open" and "close" a session that provides events. It also has the ability to return an event to the plugin framework via a next() method. Events returned by source plugins have an "event source", which describes the information in the event. This is distinct from the plugin name to allow for multiple kinds of plugins to generate the same kind of events. For example, there might be plugins gke-audit-bridge, eks-audit-bridge, ibmcloud-audit-bridge, etc. that all fetch [K8s Audit](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) information. The plugins would have different names but would have the same event source "k8s_audit".
|
||||
|
||||
Source plugins also have the ability to extract information from events based on fields. For example, a field proc.name extracts a process name from a syscall event. The plugin returns a set of supported fields, and there are functions to extract a value given an event and field. The plugin framework can then build filtering expressions/Falco rule conditions based on these fields combined with relational and/or logical operators. For example, given an expression "ct.name=root and ct.region=us-east-1", the plugin framework handles parsing the expression, calling the plugin to extract values for a given event, and determining the result of the expression. In a Falco output string like "An EC2 Node was created (name=%ec2.name region=%ct.region)", the plugin framework handles parsing the output string, calling the plugin to extract values for a given event, and building the resolved string.
|
||||
|
||||
Source plugins also provide an "id", which is globally unique and is used in capture files (see below).
|
||||
|
||||
#### Extractor Plugin
|
||||
|
||||
An extractor plugin focuses only on field extraction from events generated by other plugins, or by the core libraries. It does *not* provide an event source, but can extract fields from other event sources. An example is json field extraction, where a plugin might be able to extract fields from arbitrary json payloads.
|
||||
|
||||
An extractor plugin provides an optional set of event sources. When the framework receives an event with an event source in the plugin's set of event sources, fields in expressions/Falco outputs will be extracted from events using the plugin. An extractor plugin can also *not* name a set of event sources. In this case, fields will be extracted from *all* events, regardless of source. In this case, the exctractor plugin must detect the format of arbitrary payloads and be able to return NULL/no value when the payload is not supported.
|
||||
|
||||
### Support for Plugin Events in Capture Files.
|
||||
|
||||
libscap will define a new event type called "pluginevent" that contains two fields:
|
||||
|
||||
* "plugin ID": This uniquely identifies the plugin that generated this event.
|
||||
* "event_data": This is a variable-length data buffer containing the event data, as returned by the plugin.
|
||||
|
||||
Defining an event for plugins allows creating capture files from plugins. These capture files can be saved, read, filtered, etc, like any other capture file, allowing for later analysis/display/etc.
|
||||
|
||||
### Plugins format
|
||||
|
||||
Plugins are dynamic libraries (.so files in Unix, .dll files in windows) that export a minimum set of functions that the libraries will recognize.
|
||||
|
||||
Plugins are versioned using semantic versioning to minimize regressions and compatibility issues.
|
||||
|
||||
Plugins can be written in any language, as long as they export the required functions. Go, however, is the preferred language to write plugins, followed by C/C++.
|
||||
|
||||
### Protecting from plugin issues
|
||||
|
||||
The libraries will do everything possible to validate the data coming from the plugins and protect Falco and the other consumers from corrupted data. However, for performance reasons, plugins will be "trusted": they will run in the same thread and address space as Falco and they could crash the program. We assume that the user will be in control of plugin loading and will make sure only trusted plugins are loaded/packaged with Falco.
|
||||
|
||||
### Plugin/Event Source registries
|
||||
|
||||
Every source plugin requires its own, unique plugin ID to interoperate with Falco and the other plugins. The plugin ID will be used by the libs to properly process incoming events (for example, when saving events to file and loading them back), and by plugins to unuambiguosly recognize their dependencies.
|
||||
|
||||
To facilitate the allocation and distribution of plugin IDs, we will require that plugin developers request IDs for their plugins to the Falco organization. The mechanism used for plugin allocation is not determined yet and will be discussed in the future.
|
||||
|
||||
Similarly, plugin developers must register event sources with the Falco organization. This allows coordination between plugins that wish to provide compatible payloads, and to allow extractor plugins to know what data format is associated with a given event source.
|
||||
|
||||
### golang plugin SDK
|
||||
|
||||
To facilitate the development of plugins written in go, an SDK has been developed. We intend this SDK (and future SDKs for other languages) to be part of the Falco organization. For this reason, we submitted the following incubation request: https://github.com/falcosecurity/evolution/issues/62
|
||||
|
||||
### Proposed API (subject to change)
|
||||
|
||||
```c
|
||||
// This struct represents an event returned by the plugin, and is used
|
||||
// below in next()/next_batch().
|
||||
// - data: pointer to a memory buffer pointer. The plugin will set it
|
||||
// to point to the memory containing the next event. Once returned,
|
||||
// the memory is owned by the plugin framework and will be freed via
|
||||
// a call to free().
|
||||
// - datalen: pointer to a 32bit integer. The plugin will set it the size of the
|
||||
// buffer pointed by data.
|
||||
// - ts: the event timestamp. Can be (uint64_t)-1, in which case the engine will
|
||||
// automatically fill the event time with the current time.
|
||||
typedef struct ss_plugin_event
|
||||
{
|
||||
uint8_t *data;
|
||||
uint32_t datalen;
|
||||
uint64_t ts;
|
||||
} ss_plugin_event;
|
||||
|
||||
//
|
||||
// This is the opaque pointer to the state of a plugin.
|
||||
// It points to any data that might be needed plugin-wise. It is
|
||||
// allocated by init() and must be destroyed by destroy().
|
||||
// It is defined as void because the engine doesn't care what it is
|
||||
// and it treats is as opaque.
|
||||
//
|
||||
typedef void ss_plugin_t;
|
||||
|
||||
//
|
||||
// This is the opaque pointer to the state of an open instance of the source
|
||||
// plugin.
|
||||
// It points to any data that is needed while a capture is running. It is
|
||||
// allocated by open() and must be destroyed by close().
|
||||
// It is defined as void because the engine doesn't care what it is
|
||||
// and it treats is as opaque.
|
||||
//
|
||||
typedef void ss_instance_t;
|
||||
|
||||
//
|
||||
// Interface for a sinsp/scap source plugin
|
||||
//
|
||||
//
|
||||
// NOTE: For all functions below that return a char *, the memory
|
||||
// pointed to by the char * must be allocated by the plugin using
|
||||
// malloc() and should be freed by the caller using free().
|
||||
//
|
||||
// For each function below, the exported symbol from the dynamic
|
||||
// library should have a prefix of "plugin_"
|
||||
// (e.g. plugin_get_required_api_version, plugin_init, etc.)
|
||||
//
|
||||
typedef struct
|
||||
{
|
||||
//
|
||||
// Return the version of the plugin API used by this plugin.
|
||||
// Required: yes
|
||||
// Return value: the API version string, in the following format:
|
||||
// "<major>.<minor>.<patch>", e.g. "1.2.3".
|
||||
// NOTE: to ensure correct interoperability between the engine and the plugins,
|
||||
// we use a semver approach. Plugins are required to specify the version
|
||||
// of the API they run against, and the engine will take care of checking
|
||||
// and enforcing compatibility.
|
||||
//
|
||||
char* (*get_required_api_version)();
|
||||
//
|
||||
// Return the plugin type.
|
||||
// Required: yes
|
||||
// Should return TYPE_SOURCE_PLUGIN. It still makes sense to
|
||||
// have a function get_type() as the plugin interface will
|
||||
// often dlsym() functions from shared libraries, and can't
|
||||
// inspect any C struct type.
|
||||
//
|
||||
uint32_t (*get_type)();
|
||||
//
|
||||
// Initialize the plugin and, if needed, allocate its state.
|
||||
// Required: yes
|
||||
// Arguments:
|
||||
// - config: a string with the plugin configuration. The format of the
|
||||
// string is chosen by the plugin itself.
|
||||
// - rc: pointer to an integer that will contain the initialization result,
|
||||
// as a SCAP_* value (e.g. SCAP_SUCCESS=0, SCAP_FAILURE=1)
|
||||
// Return value: pointer to the plugin state that will be treated as opaque
|
||||
// by the engine and passed to the other plugin functions.
|
||||
// If rc is SCAP_FAILURE, this function should return NULL.
|
||||
//
|
||||
ss_plugin_t* (*init)(char* config, int32_t* rc);
|
||||
//
|
||||
// Destroy the plugin and, if plugin state was allocated, free it.
|
||||
// Required: yes
|
||||
//
|
||||
void (*destroy)(ss_plugin_t* s);
|
||||
//
|
||||
// Return a string with the error that was last generated by
|
||||
// the plugin.
|
||||
// Required: yes
|
||||
//
|
||||
// In cases where any other api function returns an error, the
|
||||
// plugin should be prepared to return a human-readable error
|
||||
// string with more context for the error. The plugin manager
|
||||
// calls get_last_error() to access that string.
|
||||
//
|
||||
char* (*get_last_error)(ss_plugin_t* s);
|
||||
//
|
||||
// Return the unique ID of the plugin.
|
||||
// Required: yes
|
||||
// EVERY SOURCE PLUGIN (see get_type()) MUST OBTAIN AN OFFICIAL ID FROM THE
|
||||
// FALCOSECURITY ORGANIZATION, OTHERWISE IT WON'T PROPERLY COEXIST WITH OTHER PLUGINS.
|
||||
//
|
||||
uint32_t (*get_id)();
|
||||
//
|
||||
// Return the name of the plugin, which will be printed when displaying
|
||||
// information about the plugin.
|
||||
// Required: yes
|
||||
//
|
||||
char* (*get_name)();
|
||||
//
|
||||
// Return the descriptions of the plugin, which will be printed when displaying
|
||||
// information about the plugin or its events.
|
||||
// Required: yes
|
||||
//
|
||||
char* (*get_description)();
|
||||
//
|
||||
// Return a string containing contact info (url, email, twitter, etc) for
|
||||
// the plugin authors.
|
||||
// Required: yes
|
||||
//
|
||||
char* (*get_contact)();
|
||||
//
|
||||
// Return the version of this plugin itself
|
||||
// Required: yes
|
||||
// Return value: a string with a version identifier, in the following format:
|
||||
// "<major>.<minor>.<patch>", e.g. "1.2.3".
|
||||
// This differs from the api version in that this versions the
|
||||
// plugin itself, as compared to the plugin interface. When
|
||||
// reading capture files, the major version of the plugin that
|
||||
// generated events must match the major version of the plugin
|
||||
// used to read events.
|
||||
//
|
||||
char* (*get_version)();
|
||||
//
|
||||
// Return a string describing the events generated by this source plugin.
|
||||
// Required: yes
|
||||
// Example event sources would be strings like "syscall",
|
||||
// "k8s_audit", etc. The source can be used by extractor
|
||||
// plugins to filter the events they receive.
|
||||
//
|
||||
char* (*get_event_source)();
|
||||
//
|
||||
// Return the list of extractor fields exported by this plugin. Extractor
|
||||
// fields can be used in Falco rule conditions and sysdig filters.
|
||||
// Required: no
|
||||
// Return value: a string with the list of fields encoded as a json
|
||||
// array.
|
||||
// Each field entry is a json object with the following properties:
|
||||
// "type": one of "string", "uint64"
|
||||
// "name": a string with a name for the field
|
||||
// "desc": a string with a description of the field
|
||||
// Example return value:
|
||||
// [
|
||||
// {"type": "string", "name": "field1", "desc": "Describing field 1"},
|
||||
// {"type": "uint64", "name": "field2", "desc": "Describing field 2"}
|
||||
// ]
|
||||
char* (*get_fields)();
|
||||
//
|
||||
// Open the source and start a capture.
|
||||
// Required: yes
|
||||
// Arguments:
|
||||
// - s: the plugin state returned by init()
|
||||
// - params: the open parameters, as a string. The format is defined by the plugin
|
||||
// itsef
|
||||
// - rc: pointer to an integer that will contain the open result, as a SCAP_* value
|
||||
// (e.g. SCAP_SUCCESS=0, SCAP_FAILURE=1)
|
||||
// Return value: a pointer to the open context that will be passed to next(),
|
||||
// close(), event_to_string() and extract_*.
|
||||
//
|
||||
ss_instance_t* (*open)(ss_plugin_t* s, char* params, int32_t* rc);
|
||||
//
|
||||
// Close a capture.
|
||||
// Required: yes
|
||||
// Arguments:
|
||||
// - s: the plugin context, returned by init(). Can be NULL.
|
||||
// - h: the capture context, returned by open(). Can be NULL.
|
||||
//
|
||||
void (*close)(ss_plugin_t* s, ss_instance_t* h);
|
||||
//
|
||||
// Return the next event.
|
||||
// Required: yes
|
||||
// Arguments:
|
||||
// - s: the plugin context, returned by init(). Can be NULL.
|
||||
// - h: the capture context, returned by open(). Can be NULL.
|
||||
//
|
||||
// - evt: pointer to a ss_plugin_event pointer. The plugin should
|
||||
// allocate a ss_plugin_event struct using malloc(), as well as
|
||||
// allocate the data buffer within the ss_plugin_event struct.
|
||||
// Both the struct and data buffer are owned by the plugin framework
|
||||
// and will free them using free().
|
||||
//
|
||||
// Return value: the status of the operation (e.g. SCAP_SUCCESS=0, SCAP_FAILURE=1,
|
||||
// SCAP_TIMEOUT=-1)
|
||||
//
|
||||
int32_t (*next)(ss_plugin_t* s, ss_instance_t* h, ss_plugin_event **evt);
|
||||
//
|
||||
// Return the read progress.
|
||||
// Required: no
|
||||
// Arguments:
|
||||
// - progress_pct: the read progress, as a number between 0 (no data has been read)
|
||||
// and 10000 (100% of the data has been read). This encoding allows the engine to
|
||||
// print progress decimals without requiring to deal with floating point numbers
|
||||
// (which could cause incompatibility problems with some languages).
|
||||
// Return value: a string representation of the read
|
||||
// progress. This might include the progress percentage
|
||||
// combined with additional context added by the plugin. If
|
||||
// NULL, progress_pct should be used.
|
||||
// NOTE: reporting progress is optional and in some case could be impossible. However,
|
||||
// when possible, it's recommended as it provides valuable information to the
|
||||
// user.
|
||||
//
|
||||
char* (*get_progress)(ss_plugin_t* s, ss_instance_t* h, uint32_t* progress_pct);
|
||||
//
|
||||
// Return a text representation of an event generated by this source plugin.
|
||||
// Required: yes
|
||||
// Arguments:
|
||||
// - data: the buffer from an event produced by next().
|
||||
// - datalen: the length of the buffer from an event produced by next().
|
||||
// Return value: the text representation of the event. This is used, for example,
|
||||
// by sysdig to print a line for the given event.
|
||||
//
|
||||
char *(*event_to_string)(ss_plugin_t *s, const uint8_t *data, uint32_t datalen);
|
||||
//
|
||||
// Extract a filter field value from an event.
|
||||
// We offer multiple versions of extract(), differing from each other only in
|
||||
// the type of the value they return (string, integer...).
|
||||
// Required: no
|
||||
// Arguments:
|
||||
// - evtnum: the number of the event that is bein processed
|
||||
// - id: the numeric identifier of the field to extract. It corresponds to the
|
||||
// position of the field in the array returned by get_fields().
|
||||
// - arg: the field argument, if an argument has been specified for the field,
|
||||
// otherwise it's NULL. For example:
|
||||
// * if the field specified by the user is foo.bar[pippo], arg will be the
|
||||
// string "pippo"
|
||||
// * if the field specified by the user is foo.bar, arg will be NULL
|
||||
// - data: the buffer produced by next().
|
||||
// - datalen: the length of the buffer produced by next().
|
||||
// - field_present: nonzero if the field is present for the given event.
|
||||
// Return value: the produced value of the filter field. For extract_str(), a
|
||||
// NULL return value means that the field is missing for the given event.
|
||||
//
|
||||
char *(*extract_str)(ss_plugin_t *s, uint64_t evtnum, const char * field, const char *arg, uint8_t *data, uint32_t datalen);
|
||||
uint64_t (*extract_u64)(ss_plugin_t *s, uint64_t evtnum, const char *field, const char *arg, uint8_t *data, uint32_t datalen, uint32_t *field_present);
|
||||
//
|
||||
// This is an optional, internal, function used to speed up event capture by
|
||||
// batching the calls to next().
|
||||
// On success:
|
||||
// - nevts will be filled in with the number of events.
|
||||
// - evts: pointer to an ss_plugin_event pointer. The plugin should
|
||||
// allocate an array of contiguous ss_plugin_event structs using malloc(),
|
||||
// as well as allocate each data buffer within each ss_plugin_event
|
||||
// struct using malloc(). Both the array of structs and each data buffer are
|
||||
// owned by the plugin framework and will free them using free().
|
||||
// Required: no
|
||||
//
|
||||
int32_t (*next_batch)(ss_plugin_t* s, ss_instance_t* h, uint32_t *nevts, ss_plugin_event **evts);
|
||||
//
|
||||
// This is an optional, internal, function used to speed up value extraction
|
||||
// Required: no
|
||||
//
|
||||
int32_t (*register_async_extractor)(ss_plugin_t *s, async_extractor_info *info);
|
||||
|
||||
//
|
||||
// The following members are PRIVATE for the engine and should not be touched.
|
||||
//
|
||||
ss_plugin_t* state;
|
||||
ss_instance_t* handle;
|
||||
uint32_t id;
|
||||
char *name;
|
||||
} source_plugin_info;
|
||||
|
||||
//
|
||||
// Interface for a sinsp/scap extractor plugin
|
||||
//
|
||||
//
|
||||
// NOTE: For all functions below that return a char *, the memory
|
||||
// pointed to by the char * must be allocated by the plugin using
|
||||
// malloc() and should be freed by the caller using free().
|
||||
//
|
||||
// Interface exported by an extractor plugin: a table of function pointers
// that the plugin framework resolves (via dlsym()/GetProcAddress) from the
// plugin's shared library. Extractor plugins do not generate events; they
// only extract filter-field values from events produced by source plugins.
typedef struct
{
	//
	// Return the version of the plugin API used by this plugin.
	// Required: yes
	// Return value: the API version string, in the following format:
	//        "<major>.<minor>.<patch>", e.g. "1.2.3".
	// NOTE: to ensure correct interoperability between the engine and the plugins,
	//       we use a semver approach. Plugins are required to specify the version
	//       of the API they run against, and the engine will take care of checking
	//       and enforcing compatibility.
	//
	char* (*get_required_api_version)();
	//
	// Return the plugin type.
	// Required: yes
	// Should return TYPE_EXTRACTOR_PLUGIN. It still makes sense to
	// have a function get_type() as the plugin interface will
	// often dlsym() functions from shared libraries, and can't
	// inspect any C struct type.
	//
	uint32_t (*get_type)();
	//
	// Initialize the plugin and, if needed, allocate its state.
	// Required: yes
	// Arguments:
	// - config: a string with the plugin configuration. The format of the
	//   string is chosen by the plugin itself.
	// - rc: pointer to an integer that will contain the initialization result,
	//   as a SCAP_* value (e.g. SCAP_SUCCESS=0, SCAP_FAILURE=1)
	// Return value: pointer to the plugin state that will be treated as opaque
	//   by the engine and passed to the other plugin functions.
	//
	ss_plugin_t* (*init)(char* config, int32_t* rc);
	//
	// Destroy the plugin and, if plugin state was allocated, free it.
	// Required: yes
	//
	void (*destroy)(ss_plugin_t* s);
	//
	// Return a string with the error that was last generated by
	// the plugin.
	// Required: yes
	//
	// In cases where any other api function returns an error, the
	// plugin should be prepared to return a human-readable error
	// string with more context for the error. The plugin manager
	// calls get_last_error() to access that string.
	//
	char* (*get_last_error)(ss_plugin_t* s);
	//
	// Return the name of the plugin, which will be printed when displaying
	// information about the plugin.
	// Required: yes
	//
	char* (*get_name)();
	//
	// Return the descriptions of the plugin, which will be printed when displaying
	// information about the plugin or its events.
	// Required: yes
	//
	char* (*get_description)();
	//
	// Return a string containing contact info (url, email, twitter, etc) for
	// the plugin author.
	// Required: yes
	//
	char* (*get_contact)();
	//
	// Return the version of this plugin itself
	// Required: yes
	// Return value: a string with a version identifier, in the following format:
	//        "<major>.<minor>.<patch>", e.g. "1.2.3".
	// This differs from the api version in that this versions the
	// plugin itself, as compared to the plugin interface. When
	// reading capture files, the major version of the plugin that
	// generated events must match the major version of the plugin
	// used to read events.
	//
	char* (*get_version)();
	//
	// Return a string describing the event sources that this
	// extractor plugin can consume.
	// Required: no
	// Return value: a json array of strings containing event
	//   sources returned by a source plugin's get_event_source()
	//   function.
	// This function is optional--if NULL then the extractor
	// plugin will receive every event.
	//
	char* (*get_extract_event_sources)();
	//
	// Return the list of extractor fields exported by this plugin. Extractor
	// fields can be used in Falco rules and sysdig filters.
	// Required: yes
	// Return value: a string with the list of fields encoded as a json
	//   array.
	//
	char* (*get_fields)();
	//
	// Extract a filter field value from an event.
	// We offer multiple versions of extract(), differing from each other only in
	// the type of the value they return (string, integer...).
	// Required: for plugins of type TYPE_EXTRACTOR_PLUGIN only
	// Arguments:
	// - evtnum: the number of the event that is being processed
	// - id: the numeric identifier of the field to extract. It corresponds to the
	//   position of the field in the array returned by get_fields().
	// - arg: the field argument, if an argument has been specified for the field,
	//   otherwise it's NULL. For example:
	//   * if the field specified by the user is foo.bar[pippo], arg will be the
	//     string "pippo"
	//   * if the field specified by the user is foo.bar, arg will be NULL
	// - data: the buffer produced by next().
	// - datalen: the length of the buffer produced by next().
	// - field_present: nonzero if the field is present for the given event.
	// Return value: the produced value of the filter field. For extract_str(), a
	//   NULL return value means that the field is missing for the given event.
	//
	char *(*extract_str)(ss_plugin_t *s, uint64_t evtnum, const char *field, const char *arg, uint8_t *data, uint32_t datalen);
	uint64_t (*extract_u64)(ss_plugin_t *s, uint64_t evtnum, const char *field, const char *arg, uint8_t *data, uint32_t datalen, uint32_t *field_present);
} extractor_plugin_info;
|
||||
|
||||
```
|
||||
|
||||
### Event Sources and Falco Rules
|
||||
|
||||
Falco rules already have the notion of a "source", using the source property in rules objects, and there are currently two kinds of event sources: "syscall" and "k8s_audit". We will use the source property in Falco rules to map a given rule to the event source on which the rule runs.
|
||||
|
||||
For example, given a plugin with source "aws_cloudtrail", and a Falco rule with source "aws_cloudtrail", the rule will be evaluated for any events generated by the plugin.
|
||||
|
||||
Similarly, an extractor plugin that includes "aws_cloudtrail" in its set of event sources will have the opportunity to extract information from aws_cloudtrail events if a matching field is found in the rule's condition, exception, or output properties.
|
||||
|
||||
This, combined with the restrictions below, allows a set of loaded rules files to contain a mix of rules for plugins as well as "core" syscall/k8s_audit events.
|
||||
|
||||
We will also make a change to compile rules/macros/lists selectively based on the set of loaded plugins (specifically, their event sources), instead of unconditionally as Falco is started. This is especially important for macros, which do not contain a source property, but might contain fields that are only implemented by a given plugin.
|
||||
|
||||
### Handling Duplicate/Overlapping Fields in Plugins/Libraries Core
|
||||
|
||||
At an initial glance, adding plugins introduces the possibility of tens/hundreds of new filtercheck fields that could potentially overlap/conflict. For example, what happens if a plugin defines a "proc.name" field? However, the notion of "event source" makes these potential conflicts manageable.
|
||||
|
||||
Remember that field extraction is always done in the context of an event, and each event can be mapped back to an event source. So we only need to ensure that filtercheck fields are distinct for a given event source. For example, it's perfectly valid for an AWS Cloudtrail plugin to define a proc.name field, as the events generated by that plugin are wholly separate from syscall events. For syscall events, the AWS Cloudtrail plugin is not involved and the core libraries extract the process name for the tid performing a syscall. For AWS Cloudtrail events, the core libraries are not involved in field extraction, which is instead performed by the AWS Cloudtrail plugin.
|
||||
|
||||
We only need to ensure the following:
|
||||
|
||||
* That only one plugin is loaded at a time that exports a given event source. For example, the libraries can load either a gke-audit-bridge plugin with event source k8s_audit, or eks-audit-bridge with event source k8s_audit, but not both.
|
||||
* That for a mix of source and extractor plugins having the same event source, the fields are distinct. For example, a source plugin with source k8s_audit can export ka.* fields, and an extractor plugin with event source k8s_audit can export a jevt.value[/...] field, and the appropriate plugin will be used to extract fields from k8s_audit events as fields are parsed from condition expressions/output format strings.
|
||||
|
||||
### Plugin Versions and Falco Rules
|
||||
|
||||
To allow rules files to document the plugin versions they are compatible with, we will add a new top-level field `required_plugin_versions` to the Falco rules file format. The field is optional, and if not provided no plugin compatibility checks will be performed. The syntax of `required_plugin_versions` will be the following:
|
||||
|
||||
```yaml
|
||||
- required_plugin_versions:
|
||||
- name: <plugin_name>
|
||||
version: x.y.z
|
||||
...
|
||||
```
|
||||
|
||||
Below required_plugin_versions is a list of objects, where each object has `name` and `version` properties. If a plugin is loaded, and if an entry in `required_plugin_versions` has a matching name, then the loaded plugin version must be semver compatible with the version property.
|
||||
|
||||
Falco can load multiple rules files, and each file may contain its own `required_plugin_versions` property. In this case, name+version pairs across all files will be merged, and in the case of duplicate names all provided versions must be compatible.
|
||||
|
||||
### Loading the plugins
|
||||
|
||||
The mechanics of loading a plugin are implemented in the libraries and leverage the dynamic library functionality of the operating system (dlopen/dlsym in unix, LoadLibrary/GetProcAddress in Windows). The plugin loading code also ensures that:
|
||||
|
||||
- the plugin is valid, i.e. that it exports the set of expected symbols
|
||||
- the plugin has an api version number that is compatible with the libraries instance
|
||||
- that only one source plugin is loaded at a time for a given event source
|
||||
- if a mix of source and extractor plugins are loaded for a given event source, that the exported fields have unique names that don't overlap across plugins
|
||||
|
||||
#### Loading plugins in falcosecurity/libs
|
||||
|
||||
At the libraries level, loading plugins is handled via the static method:
|
||||
|
||||
```c++
|
||||
void sinsp_plugin::register_plugin(sinsp* inspector, string filepath, char* config, ...)
|
||||
```
|
||||
|
||||
filepath points to a dynamic library containing code that exports plugin API functions. config contains arbitrary config content which is passed to init().
|
||||
|
||||
Note that the code using the libraries is responsible for determining the location of plugin libraries and their configuration.
|
||||
|
||||
#### Loading plugins in falcosecurity/falco
|
||||
|
||||
Falco will control/configure loading plugins via the new "plugins" property in falco.yaml. Here's an example:
|
||||
|
||||
```yaml
|
||||
plugins:
|
||||
- name: aws_cloudtrail
|
||||
library_path: aws_cloudtrail/plugin.so
|
||||
init_config: "..."
|
||||
open_params: "..."
|
||||
- name: http_json
|
||||
library_path: http_json/plugin.so
|
||||
init_config_file: http_json/config.txt
|
||||
open_params_file: http_json/params.txt
|
||||
|
||||
# Optional
|
||||
load_plugins: [aws_cloudtrail]
|
||||
```
|
||||
|
||||
A new "plugins" property in falco.yaml will define the set of plugins that can be loaded by Falco. The property contains a list of objects, with the following properties:
|
||||
|
||||
* name: Only used for load_plugins, but by convention should be the same as the value returned by the name() api function.
|
||||
* library_path: a path to the shared library. The path can be relative, in which case it is relative to Falco's "share" directory under a "plugins" subdirectory e.g. /usr/share/falco/plugins.
|
||||
* init_config: If present, the exact configuration text that will be provided as an argument to the init() function.
|
||||
* init_config_file: If present, the provided file will be read and the contents will be provided as an argument to the init() function.
|
||||
* open_params: If present, the exact params text that will be provided as an argument to the open() function.
|
||||
* open_params_file: If present, the provided file will be read and the contents will be provided as an argument to the open() function.
|
||||
|
||||
For a given yaml object in the plugins list, only one of init_config/init_config_file and one of open_params/open_params_file can be provided at a time.
|
||||
|
||||
A new "load_plugins" property in falco.yaml will allow for loading a subset of the plugins defined in plugins. If present, only the plugins with the provided names will be loaded.
|
||||
|
||||
### Examples
|
||||
|
||||
We have an initial version working, consisting of:
|
||||
|
||||
* A version of falcosecurity/libs that supports the [plugin framework](https://github.com/falcosecurity/libs/tree/new/plugin-system-api-additions)
|
||||
* Support code and examples for [writing plugins in go](https://github.com/mstemm/libsinsp-plugin-sdk-go/tree/new/plugin-system-api-additions)
|
||||
* A [cloudtrail](https://github.com/mstemm/plugins/tree/new/plugin-system-api-additions) plugin that can generate events from cloudtrail logs and extract fields from those events.
|
||||
* A version of Falco that uses all of the above to [load and evaluate rules with plugins](https://github.com/leogr/falco/tree/new/plugin-system-api-additions)
|
||||
@@ -15,10 +15,10 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# The latest Falco Engine version is 8 if you want to
|
||||
# use exceptions. However the default rules file does not
|
||||
# use them so we stick with 7 for compatibility.
|
||||
- required_engine_version: 7
|
||||
# The latest Falco Engine version is 9.
|
||||
# Starting with version 8, the Falco engine supports exceptions.
|
||||
# However the Falco rules file does not use them by default.
|
||||
- required_engine_version: 9
|
||||
|
||||
# Currently disabled as read/write are ignored syscalls. The nearly
|
||||
# similar open_write/open_read check for files being opened for
|
||||
@@ -538,11 +538,6 @@
|
||||
- macro: system_users
|
||||
condition: user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data)
|
||||
|
||||
- macro: python_running_sdchecks
|
||||
condition: >
|
||||
(proc.name in (python, python2.7) and
|
||||
(proc.cmdline contains /opt/draios/bin/sdchecks))
|
||||
|
||||
- macro: httpd_writing_ssl_conf
|
||||
condition: >
|
||||
(proc.pname=run-httpd and
|
||||
@@ -1397,6 +1392,15 @@
|
||||
- macro: user_read_sensitive_file_containers
|
||||
condition: (container and container.image.repository in (read_sensitive_file_images))
|
||||
|
||||
# This macro detects man-db postinst, see https://salsa.debian.org/debian/man-db/-/blob/master/debian/postinst
|
||||
# The rule "Read sensitive file untrusted" use this macro to avoid FPs.
|
||||
- macro: mandb_postinst
|
||||
condition: >
|
||||
(proc.name=perl and proc.args startswith "-e" and
|
||||
proc.args contains "@pwd = getpwnam(" and
|
||||
proc.args contains "exec " and
|
||||
proc.args contains "/usr/bin/mandb")
|
||||
|
||||
- rule: Read sensitive file untrusted
|
||||
desc: >
|
||||
an attempt to read any sensitive file (e.g. files containing user/password/authentication
|
||||
@@ -1412,11 +1416,11 @@
|
||||
)
|
||||
and not cmp_cp_by_passwd
|
||||
and not ansible_running_python
|
||||
and not proc.cmdline contains /usr/bin/mandb
|
||||
and not run_by_qualys
|
||||
and not run_by_chef
|
||||
and not run_by_google_accounts_daemon
|
||||
and not user_read_sensitive_file_conditions
|
||||
and not mandb_postinst
|
||||
and not perl_running_plesk
|
||||
and not perl_running_updmap
|
||||
and not veritas_driver_script
|
||||
@@ -1554,7 +1558,6 @@
|
||||
and not proc.name startswith "runc"
|
||||
and not proc.cmdline startswith "containerd"
|
||||
and not proc.pname in (sysdigcloud_binaries, hyperkube, kubelet, protokube, dockerd, tini, aws)
|
||||
and not python_running_sdchecks
|
||||
and not java_running_sdjagent
|
||||
and not kubelet_running_loopback
|
||||
and not rancher_agent
|
||||
@@ -1562,6 +1565,7 @@
|
||||
and not calico_node
|
||||
and not weaveworks_scope
|
||||
and not user_known_change_thread_namespace_activities
|
||||
enabled: false
|
||||
output: >
|
||||
Namespace change (setns) by unexpected program (user=%user.name user_loginuid=%user.loginuid command=%proc.cmdline
|
||||
parent=%proc.pname %container.info container_id=%container.id image=%container.image.repository:%container.image.tag)
|
||||
@@ -1744,6 +1748,32 @@
|
||||
container.image.repository endswith /prometheus-node-exporter or
|
||||
container.image.repository endswith /image-inspector))
|
||||
|
||||
# https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
|
||||
# official AWS EKS registry list. AWS has different ECR repo per region
|
||||
- macro: allowed_aws_ecr_registry_root_for_eks
|
||||
condition: >
|
||||
(container.image.repository startswith "602401143452.dkr.ecr" or
|
||||
container.image.repository startswith "877085696533.dkr.ecr" or
|
||||
container.image.repository startswith "800184023465.dkr.ecr" or
|
||||
container.image.repository startswith "918309763551.dkr.ecr" or
|
||||
container.image.repository startswith "961992271922.dkr.ecr" or
|
||||
container.image.repository startswith "590381155156.dkr.ecr" or
|
||||
container.image.repository startswith "558608220178.dkr.ecr" or
|
||||
container.image.repository startswith "151742754352.dkr.ecr" or
|
||||
container.image.repository startswith "013241004608.dkr.ecr")
|
||||
|
||||
|
||||
- macro: aws_eks_core_images
|
||||
condition: >
|
||||
(allowed_aws_ecr_registry_root_for_eks and
|
||||
(container.image.repository endswith ".amazonaws.com/amazon-k8s-cni" or
|
||||
container.image.repository endswith ".amazonaws.com/eks/kube-proxy"))
|
||||
|
||||
|
||||
- macro: aws_eks_image_sensitive_mount
|
||||
condition: >
|
||||
(allowed_aws_ecr_registry_root_for_eks and container.image.repository endswith ".amazonaws.com/amazon-k8s-cni")
|
||||
|
||||
# These images are allowed both to run with --privileged and to mount
|
||||
# sensitive paths from the host filesystem.
|
||||
#
|
||||
@@ -1784,7 +1814,6 @@
|
||||
docker.io/falcosecurity/falco,
|
||||
docker.io/mesosphere/mesos-slave,
|
||||
docker.io/rook/toolbox,
|
||||
docker.io/sysdig/falco,
|
||||
docker.io/sysdig/sysdig,
|
||||
falcosecurity/falco,
|
||||
gcr.io/google_containers/kube-proxy,
|
||||
@@ -1798,7 +1827,6 @@
|
||||
k8s.gcr.io/kube-proxy,
|
||||
k8s.gcr.io/prometheus-to-sd,
|
||||
quay.io/calico/node,
|
||||
sysdig/falco,
|
||||
sysdig/sysdig,
|
||||
sematext_images
|
||||
]
|
||||
@@ -1806,6 +1834,7 @@
|
||||
- macro: falco_privileged_containers
|
||||
condition: (openshift_image or
|
||||
user_trusted_containers or
|
||||
aws_eks_core_images or
|
||||
container.image.repository in (trusted_images) or
|
||||
container.image.repository in (falco_privileged_images) or
|
||||
container.image.repository startswith istio/proxy_ or
|
||||
@@ -1824,7 +1853,7 @@
|
||||
# host filesystem.
|
||||
- list: falco_sensitive_mount_images
|
||||
items: [
|
||||
docker.io/sysdig/falco, docker.io/sysdig/sysdig, sysdig/falco, sysdig/sysdig,
|
||||
docker.io/sysdig/sysdig, sysdig/sysdig,
|
||||
docker.io/falcosecurity/falco, falcosecurity/falco,
|
||||
gcr.io/google_containers/hyperkube,
|
||||
gcr.io/google_containers/kube-proxy, docker.io/calico/node,
|
||||
@@ -1836,6 +1865,7 @@
|
||||
|
||||
- macro: falco_sensitive_mount_containers
|
||||
condition: (user_trusted_containers or
|
||||
aws_eks_image_sensitive_mount or
|
||||
container.image.repository in (trusted_images) or
|
||||
container.image.repository in (falco_sensitive_mount_images) or
|
||||
container.image.repository startswith quay.io/sysdig/)
|
||||
@@ -2204,7 +2234,7 @@
|
||||
condition: >
|
||||
evt.type=setuid and evt.dir=>
|
||||
and (known_user_in_container or not container)
|
||||
and not user.name=root
|
||||
and not (user.name=root or user.uid=0)
|
||||
and not somebody_becoming_themself
|
||||
and not proc.name in (known_setuid_binaries, userexec_binaries, mail_binaries, docker_binaries,
|
||||
nomachine_binaries)
|
||||
@@ -2329,9 +2359,9 @@
|
||||
- macro: k8s_containers
|
||||
condition: >
|
||||
(container.image.repository in (gcr.io/google_containers/hyperkube-amd64,
|
||||
gcr.io/google_containers/kube2sky, docker.io/sysdig/falco,
|
||||
gcr.io/google_containers/kube2sky,
|
||||
docker.io/sysdig/sysdig, docker.io/falcosecurity/falco,
|
||||
sysdig/falco, sysdig/sysdig, falcosecurity/falco,
|
||||
sysdig/sysdig, falcosecurity/falco,
|
||||
fluent/fluentd-kubernetes-daemonset, prom/prometheus,
|
||||
ibm_cloud_containers)
|
||||
or (k8s.ns.name = "kube-system"))
|
||||
@@ -2611,6 +2641,7 @@
|
||||
and not proc.name in (user_known_chmod_applications)
|
||||
and not exe_running_docker_save
|
||||
and not user_known_set_setuid_or_setgid_bit_conditions
|
||||
enabled: false
|
||||
output: >
|
||||
Setuid or setgid bit is set via chmod (fd=%evt.arg.fd filename=%evt.arg.filename mode=%evt.arg.mode user=%user.name user_loginuid=%user.loginuid process=%proc.name
|
||||
command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image.repository:%container.image.tag)
|
||||
@@ -2715,7 +2746,12 @@
|
||||
"xmr-eu1.nanopool.org","xmr-eu2.nanopool.org",
|
||||
"xmr-jp1.nanopool.org","xmr-us-east1.nanopool.org",
|
||||
"xmr-us-west1.nanopool.org","xmr.crypto-pool.fr",
|
||||
"xmr.pool.minergate.com"
|
||||
"xmr.pool.minergate.com", "rx.unmineable.com",
|
||||
"ss.antpool.com","dash.antpool.com",
|
||||
"eth.antpool.com","zec.antpool.com",
|
||||
"xmc.antpool.com","btm.antpool.com",
|
||||
"stratum-dash.antpool.com","stratum-xmc.antpool.com",
|
||||
"stratum-btm.antpool.com"
|
||||
]
|
||||
|
||||
- list: https_miner_domains
|
||||
@@ -2732,7 +2768,12 @@
|
||||
"stratum-ltc.antpool.com",
|
||||
"stratum-zec.antpool.com",
|
||||
"stratum.antpool.com",
|
||||
"xmr.crypto-pool.fr"
|
||||
"xmr.crypto-pool.fr",
|
||||
"ss.antpool.com",
|
||||
"stratum-dash.antpool.com",
|
||||
"stratum-xmc.antpool.com",
|
||||
"stratum-btm.antpool.com",
|
||||
"btm.antpool.com"
|
||||
]
|
||||
|
||||
- list: http_miner_domains
|
||||
@@ -2971,7 +3012,7 @@
|
||||
- rule: Linux Kernel Module Injection Detected
|
||||
desc: Detect kernel module was injected (from container).
|
||||
condition: spawned_process and container and proc.name=insmod and not proc.args in (white_listed_modules)
|
||||
output: Linux Kernel Module injection using insmod detected (user=%user.name user_loginuid=%user.loginuid parent_process=%proc.pname module=%proc.args)
|
||||
output: Linux Kernel Module injection using insmod detected (user=%user.name user_loginuid=%user.loginuid parent_process=%proc.pname module=%proc.args %container.info image=%container.image.repository:%container.image.tag)
|
||||
priority: WARNING
|
||||
tags: [process]
|
||||
|
||||
@@ -2995,13 +3036,13 @@
|
||||
# A privilege escalation to root through heap-based buffer overflow
|
||||
- rule: Sudo Potential Privilege Escalation
|
||||
desc: Privilege escalation vulnerability affecting sudo (<= 1.9.5p2). Executing sudo using sudoedit -s or sudoedit -i command with command-line argument that ends with a single backslash character from an unprivileged user it's possible to elevate the user privileges to root.
|
||||
condition: spawned_process and user.uid!= 0 and proc.name=sudoedit and (proc.args contains -s or proc.args contains -i) and (proc.args contains "\ " or proc.args endswith \)
|
||||
condition: spawned_process and user.uid != 0 and proc.name=sudoedit and (proc.args contains -s or proc.args contains -i) and (proc.args contains "\ " or proc.args endswith \)
|
||||
output: "Detect Sudo Privilege Escalation Exploit (CVE-2021-3156) (user=%user.name parent=%proc.pname cmdline=%proc.cmdline %container.info)"
|
||||
priority: CRITICAL
|
||||
tags: [filesystem, mitre_privilege_escalation]
|
||||
|
||||
- rule: Debugfs Launched in Privileged Container
|
||||
desc: Detect file system debugger debugfs launched inside a privilegd container which might lead to container escape.
|
||||
desc: Detect file system debugger debugfs launched inside a privileged container which might lead to container escape.
|
||||
condition: >
|
||||
spawned_process and container
|
||||
and container.privileged=true
|
||||
@@ -3024,6 +3065,24 @@
|
||||
priority: WARNING
|
||||
tags: [container, cis, mitre_lateral_movement]
|
||||
|
||||
- macro: consider_userfaultfd_activities
|
||||
condition: (always_true)
|
||||
|
||||
- list: user_known_userfaultfd_processes
|
||||
items: []
|
||||
|
||||
- rule: Unprivileged Delegation of Page Faults Handling to a Userspace Process
|
||||
desc: Detect a successful unprivileged userfaultfd syscall which might act as an attack primitive to exploit other bugs
|
||||
condition: >
|
||||
consider_userfaultfd_activities and
|
||||
evt.type = userfaultfd and
|
||||
user.uid != 0 and
|
||||
(evt.rawres >= 0 or evt.res != -1) and
|
||||
not proc.name in (user_known_userfaultfd_processes)
|
||||
output: An userfaultfd syscall was successfully executed by an unprivileged user (user=%user.name user_loginuid=%user.loginuid process=%proc.name command=%proc.cmdline %container.info image=%container.image.repository:%container.image.tag)
|
||||
priority: CRITICAL
|
||||
tags: [syscall, mitre_defense_evasion]
|
||||
|
||||
# Application rules have moved to application_rules.yaml. Please look
|
||||
# there if you want to enable them by adding to
|
||||
# falco_rules.local.yaml.
|
||||
|
||||
@@ -304,7 +304,7 @@
|
||||
- list: known_sa_list
|
||||
items: ["pod-garbage-collector","resourcequota-controller","cronjob-controller","generic-garbage-collector",
|
||||
"daemon-set-controller","endpointslice-controller","deployment-controller", "replicaset-controller",
|
||||
"endpoint-controller"]
|
||||
"endpoint-controller", "namespace-controller", "statefulset-controller", "disruption-controller"]
|
||||
|
||||
- macro: trusted_sa
|
||||
condition: (ka.target.name in (known_sa_list, user_known_sa_list))
|
||||
|
||||
@@ -154,7 +154,7 @@ load_kernel_module_compile() {
|
||||
fi
|
||||
|
||||
# Try to compile using all the available gcc versions
|
||||
for CURRENT_GCC in $(which gcc) $(ls "$(dirname "$(which gcc)")"/gcc-* | grep 'gcc-[0-9]\+' | sort -r); do
|
||||
for CURRENT_GCC in $(which gcc) $(ls "$(dirname "$(which gcc)")"/gcc-* | grep 'gcc-[0-9]\+' | sort -n -r -k 2 -t -); do
|
||||
echo "* Trying to dkms install ${DRIVER_NAME} module with GCC ${CURRENT_GCC}"
|
||||
echo "#!/usr/bin/env bash" > /tmp/falco-dkms-make
|
||||
echo "make CC=${CURRENT_GCC} \$@" >> /tmp/falco-dkms-make
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
#
|
||||
scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
parentdir="$(dirname "$scriptdir")"
|
||||
sysdigdir="${parentdir}/build/sysdig-repo/sysdig-prefix/src/sysdig"
|
||||
cat "${sysdigdir}/userspace/libscap/syscall_info_table.c" | grep EF_DROP_SIMPLE_CONS | sed -e 's/.*\"\(.*\)\".*/\1/' | sort > /tmp/ignored_syscall_info_table.txt
|
||||
cat "${sysdigdir}/driver/event_table.c" | grep EF_DROP_SIMPLE_CONS | sed -e 's/[^\"]*\"\([^\"]*\)\".*/\1/' | sort | uniq > /tmp/ignored_driver_event_table.txt
|
||||
libsdir="${parentdir}/build/falcosecurity-libs-repo/falcosecurity-libs-prefix/src/falcosecurity-libs"
|
||||
cat "${libsdir}/userspace/libscap/syscall_info_table.c" | grep EF_DROP_SIMPLE_CONS | sed -e 's/.*\"\(.*\)\".*/\1/' | sort > /tmp/ignored_syscall_info_table.txt
|
||||
cat "${libsdir}/driver/event_table.c" | grep EF_DROP_SIMPLE_CONS | sed -e 's/[^\"]*\"\([^\"]*\)\".*/\1/' | sort | uniq > /tmp/ignored_driver_event_table.txt
|
||||
|
||||
cat /tmp/ignored_driver_event_table.txt /tmp/ignored_syscall_info_table.txt | sort | uniq | tr '\n' ', '
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ ProtectSystem=full
|
||||
ProtectKernelTunables=true
|
||||
RestrictRealtime=true
|
||||
RestrictAddressFamilies=~AF_PACKET
|
||||
StandardOutput=null
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@@ -43,6 +43,12 @@ json_output: false
|
||||
# (user=root ....") in the json output.
|
||||
json_include_output_property: true
|
||||
|
||||
# When using json output, whether or not to include the "tags" property
|
||||
# itself in the json output. If set to true, outputs caused by rules
|
||||
# with no tags will have a "tags" field set to an empty array. If set to
|
||||
# false, the "tags" field will not be included in the json output at all.
|
||||
json_include_tags_property: true
|
||||
|
||||
# Send information logs to stderr and/or syslog Note these are *not* security
|
||||
# notification logs! These are just Falco lifecycle (and possibly error) logs.
|
||||
log_stderr: true
|
||||
|
||||
@@ -622,4 +622,13 @@ trace_files: !mux
|
||||
- ../rules/k8s_audit_rules.yaml
|
||||
detect_counts:
|
||||
- K8s Secret Deleted: 1
|
||||
trace_file: trace_files/k8s_audit/delete_secret.json
|
||||
trace_file: trace_files/k8s_audit/delete_secret.json
|
||||
|
||||
fal_01_003:
|
||||
detect: False
|
||||
detect_level: INFO
|
||||
rules_file:
|
||||
- ../rules/falco_rules.yaml
|
||||
- ../rules/k8s_audit_rules.yaml
|
||||
trace_file: trace_files/k8s_audit/fal_01_003.json
|
||||
stderr_contains: 'Could not read k8s audit event line #1, "{"kind": 0}": Data not recognized as a k8s audit event, stopping'
|
||||
|
||||
@@ -90,6 +90,8 @@ class FalcoTest(Test):
|
||||
self.json_output = self.params.get('json_output', '*', default=False)
|
||||
self.json_include_output_property = self.params.get(
|
||||
'json_include_output_property', '*', default=True)
|
||||
self.json_include_tags_property = self.params.get(
|
||||
'json_include_tags_property', '*', default=True)
|
||||
self.all_events = self.params.get('all_events', '*', default=False)
|
||||
self.priority = self.params.get('priority', '*', default='debug')
|
||||
self.rules_file = self.params.get(
|
||||
@@ -388,10 +390,11 @@ class FalcoTest(Test):
|
||||
for line in res.stdout.decode("utf-8").splitlines():
|
||||
if line.startswith('{'):
|
||||
obj = json.loads(line)
|
||||
attrs = ['time', 'rule', 'priority']
|
||||
if self.json_include_output_property:
|
||||
attrs = ['time', 'rule', 'priority', 'output']
|
||||
else:
|
||||
attrs = ['time', 'rule', 'priority']
|
||||
attrs.append('output')
|
||||
if self.json_include_tags_property:
|
||||
attrs.append('tags')
|
||||
for attr in attrs:
|
||||
if not attr in obj:
|
||||
self.fail(
|
||||
@@ -409,10 +412,15 @@ class FalcoTest(Test):
|
||||
else:
|
||||
actual = open(output['actual']).read()
|
||||
|
||||
if expected not in actual:
|
||||
self.fail("Output '{}' does not strictly contains the expected content '{}'".format(
|
||||
output['actual'], output['expected']))
|
||||
return False
|
||||
actual_cursor = actual
|
||||
expected_lines = expected.splitlines()
|
||||
for line in expected_lines:
|
||||
pos = actual_cursor.find(line)
|
||||
if pos < 0:
|
||||
self.fail("Output '{}' does not strictly contains the expected content '{}'".format(
|
||||
output['actual'], output['expected']))
|
||||
return False
|
||||
actual_cursor = actual_cursor[pos + len(line):]
|
||||
|
||||
return True
|
||||
|
||||
@@ -609,8 +617,9 @@ class FalcoTest(Test):
|
||||
self.log.debug("Converted Rules: {}".format(psp_rules))
|
||||
|
||||
# Run falco
|
||||
cmd = '{} {} {} -c {} {} -o json_output={} -o json_include_output_property={} -o priority={} -v'.format(
|
||||
self.falco_binary_path, self.rules_args, self.disabled_args, self.conf_file, trace_arg, self.json_output, self.json_include_output_property, self.priority)
|
||||
cmd = '{} {} {} -c {} {} -o json_output={} -o json_include_output_property={} -o json_include_tags_property={} -o priority={} -v'.format(
|
||||
self.falco_binary_path, self.rules_args, self.disabled_args, self.conf_file, trace_arg, self.json_output,
|
||||
self.json_include_output_property, self.json_include_tags_property, self.priority)
|
||||
|
||||
for tag in self.disable_tags:
|
||||
cmd += ' -T {}'.format(tag)
|
||||
|
||||
@@ -688,7 +688,7 @@ trace_files: !mux
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/single_rule_with_tags.yaml
|
||||
conf_file: confs/stdout_output.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
time_iso_8601: true
|
||||
@@ -721,7 +721,7 @@ trace_files: !mux
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/single_rule.yaml
|
||||
- rules/single_rule_with_tags.yaml
|
||||
conf_file: confs/grpc_unix_socket.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
run_duration: 5
|
||||
@@ -745,6 +745,10 @@ trace_files: !mux
|
||||
# For the hostname, since we don't know that beforehand,
|
||||
# only check the field presence
|
||||
- "hostname: "
|
||||
#tags
|
||||
- "tags: \"filesystem\""
|
||||
- "tags: \"process\""
|
||||
- "tags: \"testing\""
|
||||
|
||||
detect_counts:
|
||||
detect: True
|
||||
@@ -763,7 +767,7 @@ trace_files: !mux
|
||||
- "Non sudo setuid": 1
|
||||
- "Create files below dev": 1
|
||||
- "Modify binary dirs": 2
|
||||
- "Change thread namespace": 1
|
||||
- "Change thread namespace": 0
|
||||
|
||||
disabled_tags_a:
|
||||
detect: True
|
||||
@@ -1107,6 +1111,25 @@ trace_files: !mux
|
||||
trace_file: trace_files/cat_write.scap
|
||||
stdout_contains: "^(?!.*Warning An open of /dev/null was seen.*)"
|
||||
|
||||
json_output_no_tags_property:
|
||||
json_output: True
|
||||
json_include_tags_property: False
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_append.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
stdout_contains: "^(?!.*\"tags\":[ ]*\\[.*\\],.*)"
|
||||
|
||||
json_output_empty_tags_property:
|
||||
json_output: True
|
||||
detect: True
|
||||
detect_level: WARNING
|
||||
rules_file:
|
||||
- rules/rule_append.yaml
|
||||
trace_file: trace_files/cat_write.scap
|
||||
stdout_contains: "^(.*\"tags\":[ ]*\\[\\],.*)"
|
||||
|
||||
in_operator_netmasks:
|
||||
detect: True
|
||||
detect_level: INFO
|
||||
|
||||
@@ -23,10 +23,10 @@ has_json_output: !mux
|
||||
traces: !mux
|
||||
change-thread-namespace:
|
||||
trace_file: traces-positive/change-thread-namespace.scap
|
||||
detect: True
|
||||
detect: False
|
||||
detect_level: NOTICE
|
||||
detect_counts:
|
||||
- "Change thread namespace": 1
|
||||
- "Change thread namespace": 0
|
||||
|
||||
container-privileged:
|
||||
trace_file: traces-positive/container-privileged.scap
|
||||
@@ -73,7 +73,7 @@ traces: !mux
|
||||
- "Non sudo setuid": 1
|
||||
- "Create files below dev": 1
|
||||
- "Modify binary dirs": 2
|
||||
- "Change thread namespace": 1
|
||||
- "Change thread namespace": 0
|
||||
|
||||
mkdir-binary-dirs:
|
||||
trace_file: traces-positive/mkdir-binary-dirs.scap
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
{"output":"2016-08-04T16:17:57.881781397+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.881781397Z", "output_fields": {"evt.time.iso8601":1470327477881781397,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881785348+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.881785348Z", "output_fields": {"evt.time.iso8601":1470327477881785348,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881796705+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.881796705Z", "output_fields": {"evt.time.iso8601":1470327477881796705,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881799840+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.881799840Z", "output_fields": {"evt.time.iso8601":1470327477881799840,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882003104+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.882003104Z", "output_fields": {"evt.time.iso8601":1470327477882003104,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882008208+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.882008208Z", "output_fields": {"evt.time.iso8601":1470327477882008208,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882045694+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.882045694Z", "output_fields": {"evt.time.iso8601":1470327477882045694,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882054739+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","time":"2016-08-04T16:17:57.882054739Z", "output_fields": {"evt.time.iso8601":1470327477882054739,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881781397+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.881781397Z", "output_fields": {"evt.time.iso8601":1470327477881781397,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881785348+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.881785348Z", "output_fields": {"evt.time.iso8601":1470327477881785348,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881796705+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.881796705Z", "output_fields": {"evt.time.iso8601":1470327477881796705,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.881799840+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.881799840Z", "output_fields": {"evt.time.iso8601":1470327477881799840,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882003104+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.882003104Z", "output_fields": {"evt.time.iso8601":1470327477882003104,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882008208+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.882008208Z", "output_fields": {"evt.time.iso8601":1470327477882008208,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882045694+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.882045694Z", "output_fields": {"evt.time.iso8601":1470327477882045694,"proc.cmdline":"cat /dev/null"}}
|
||||
{"output":"2016-08-04T16:17:57.882054739+0000: Warning An open was seen (command=cat /dev/null)","priority":"Warning","rule":"open_from_cat","source":"syscall","tags":["filesystem","process","testing"],"time":"2016-08-04T16:17:57.882054739Z", "output_fields": {"evt.time.iso8601":1470327477882054739,"proc.cmdline":"cat /dev/null"}}
|
||||
|
||||
@@ -9,5 +9,5 @@ PyYAML==5.4
|
||||
requests==2.23.0
|
||||
six==1.14.0
|
||||
stevedore==1.32.0
|
||||
urllib3==1.25.9
|
||||
urllib3==1.26.5
|
||||
watchdog==0.10.2
|
||||
|
||||
34
test/rules/single_rule_with_tags.yaml
Normal file
34
test/rules/single_rule_with_tags.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
#
|
||||
# Copyright (C) 2021 The Falco Authors.
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
- required_engine_version: 2
|
||||
|
||||
- list: cat_binaries
|
||||
items: [cat]
|
||||
|
||||
- list: cat_capable_binaries
|
||||
items: [cat_binaries]
|
||||
|
||||
- macro: is_cat
|
||||
condition: proc.name in (cat_capable_binaries)
|
||||
|
||||
- rule: open_from_cat
|
||||
desc: A process named cat does an open
|
||||
condition: evt.type=open and is_cat
|
||||
output: "An open was seen (command=%proc.cmdline)"
|
||||
priority: WARNING
|
||||
tags: [filesystem, process, testing]
|
||||
1
test/trace_files/k8s_audit/fal_01_003.json
Normal file
1
test/trace_files/k8s_audit/fal_01_003.json
Normal file
@@ -0,0 +1 @@
|
||||
{"kind": 0}
|
||||
@@ -35,9 +35,8 @@ if(MINIMAL_BUILD)
|
||||
"${NJSON_INCLUDE}"
|
||||
"${TBB_INCLUDE_DIR}"
|
||||
"${STRING_VIEW_LITE_INCLUDE}"
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp"
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libscap"
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp"
|
||||
"${LIBSCAP_INCLUDE_DIRS}"
|
||||
"${LIBSINSP_INCLUDE_DIRS}"
|
||||
"${PROJECT_BINARY_DIR}/userspace/engine")
|
||||
else()
|
||||
target_include_directories(
|
||||
@@ -48,9 +47,8 @@ else()
|
||||
"${CURL_INCLUDE_DIR}"
|
||||
"${TBB_INCLUDE_DIR}"
|
||||
"${STRING_VIEW_LITE_INCLUDE}"
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp/third-party/jsoncpp"
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libscap"
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp"
|
||||
"${LIBSCAP_INCLUDE_DIRS}"
|
||||
"${LIBSINSP_INCLUDE_DIRS}"
|
||||
"${PROJECT_BINARY_DIR}/userspace/engine")
|
||||
endif()
|
||||
|
||||
|
||||
@@ -177,7 +177,8 @@ void falco_engine::load_rules(const string &rules_content, bool verbose, bool al
|
||||
// json_output to false.
|
||||
bool json_output = false;
|
||||
bool json_include_output_property = false;
|
||||
falco_formats::init(m_inspector, this, m_ls, json_output, json_include_output_property);
|
||||
bool json_include_tags_property = false;
|
||||
falco_formats::init(m_inspector, this, m_ls, json_output, json_include_output_property, json_include_tags_property);
|
||||
|
||||
m_rules->load_rules(rules_content, verbose, all_events, m_extra, m_replace_container_info, m_min_priority, required_engine_version);
|
||||
}
|
||||
@@ -342,18 +343,29 @@ void falco_engine::populate_rule_result(unique_ptr<struct rule_result> &res, gen
|
||||
if(lua_isfunction(m_ls, -1))
|
||||
{
|
||||
lua_pushnumber(m_ls, ev->get_check_id());
|
||||
|
||||
if(lua_pcall(m_ls, 1, 4, 0) != 0)
|
||||
if(lua_pcall(m_ls, 1, 5, 0) != 0)
|
||||
{
|
||||
const char* lerr = lua_tostring(m_ls, -1);
|
||||
string err = "Error invoking function output: " + string(lerr);
|
||||
throw falco_exception(err);
|
||||
}
|
||||
const char *p = lua_tostring(m_ls, -4);
|
||||
const char *p = lua_tostring(m_ls, -5);
|
||||
res->rule = p;
|
||||
res->evt = ev;
|
||||
res->priority_num = (falco_common::priority_type) lua_tonumber(m_ls, -3);
|
||||
res->format = lua_tostring(m_ls, -2);
|
||||
res->priority_num = (falco_common::priority_type) lua_tonumber(m_ls, -4);
|
||||
res->format = lua_tostring(m_ls, -3);
|
||||
|
||||
// Tags are passed back as a table, and is on the top of the stack
|
||||
lua_pushnil(m_ls); /* first key */
|
||||
while (lua_next(m_ls, -2) != 0) {
|
||||
// key is at index -2, value is at index
|
||||
// -1. We want the value.
|
||||
res->tags.insert(luaL_checkstring(m_ls, -1));
|
||||
|
||||
// Remove value, keep key for next iteration
|
||||
lua_pop(m_ls, 1);
|
||||
}
|
||||
lua_pop(m_ls, 1); // Clean table leftover
|
||||
|
||||
// Exception fields are passed back as a table
|
||||
lua_pushnil(m_ls); /* first key */
|
||||
|
||||
@@ -161,6 +161,7 @@ public:
|
||||
falco_common::priority_type priority_num;
|
||||
std::string format;
|
||||
std::set<std::string> exception_fields;
|
||||
std::set<std::string> tags;
|
||||
};
|
||||
|
||||
//
|
||||
|
||||
@@ -16,7 +16,7 @@ limitations under the License.
|
||||
|
||||
// The version of rules/filter fields/etc supported by this falco
|
||||
// engine.
|
||||
#define FALCO_ENGINE_VERSION (8)
|
||||
#define FALCO_ENGINE_VERSION (9)
|
||||
|
||||
// This is the result of running "falco --list -N | sha256sum" and
|
||||
// represents the fields supported by this version of falco. It's used
|
||||
|
||||
@@ -17,6 +17,8 @@ limitations under the License.
|
||||
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
@@ -24,7 +26,13 @@ limitations under the License.
|
||||
#include <thread>
|
||||
#include <nonstd/string_view.hpp>
|
||||
|
||||
#pragma once
|
||||
#ifdef __GNUC__
|
||||
#define likely(x) __builtin_expect(!!(x), 1)
|
||||
#define unlikely(x) __builtin_expect(!!(x), 0)
|
||||
#else
|
||||
#define likely(x) (x)
|
||||
#define unlikely(x) (x)
|
||||
#endif
|
||||
|
||||
namespace falco
|
||||
{
|
||||
|
||||
@@ -24,6 +24,7 @@ sinsp *falco_formats::s_inspector = NULL;
|
||||
falco_engine *falco_formats::s_engine = NULL;
|
||||
bool falco_formats::s_json_output = false;
|
||||
bool falco_formats::s_json_include_output_property = true;
|
||||
bool falco_formats::s_json_include_tags_property = true;
|
||||
std::unique_ptr<sinsp_evt_formatter_cache> falco_formats::s_formatters = NULL;
|
||||
|
||||
const static struct luaL_Reg ll_falco[] =
|
||||
@@ -36,12 +37,14 @@ void falco_formats::init(sinsp *inspector,
|
||||
falco_engine *engine,
|
||||
lua_State *ls,
|
||||
bool json_output,
|
||||
bool json_include_output_property)
|
||||
bool json_include_output_property,
|
||||
bool json_include_tags_property)
|
||||
{
|
||||
s_inspector = inspector;
|
||||
s_engine = engine;
|
||||
s_json_output = json_output;
|
||||
s_json_include_output_property = json_include_output_property;
|
||||
s_json_include_tags_property = json_include_tags_property;
|
||||
|
||||
// todo(leogr): we should have used std::make_unique, but we cannot since it's not C++14
|
||||
s_formatters = std::unique_ptr<sinsp_evt_formatter_cache>(new sinsp_evt_formatter_cache(s_inspector));
|
||||
@@ -114,7 +117,7 @@ int falco_formats::lua_free_formatter(lua_State *ls)
|
||||
}
|
||||
|
||||
string falco_formats::format_event(const gen_event *evt, const std::string &rule, const std::string &source,
|
||||
const std::string &level, const std::string &format)
|
||||
const std::string &level, const std::string &format, std::set<std::string> &tags)
|
||||
{
|
||||
|
||||
string line;
|
||||
@@ -181,8 +184,10 @@ string falco_formats::format_event(const gen_event *evt, const std::string &rule
|
||||
if(s_json_output)
|
||||
{
|
||||
Json::Value event;
|
||||
Json::Value rule_tags;
|
||||
Json::FastWriter writer;
|
||||
string full_line;
|
||||
unsigned int rule_tags_idx = 0;
|
||||
|
||||
// Convert the time-as-nanoseconds to a more json-friendly ISO8601.
|
||||
time_t evttime = evt->get_ts() / 1000000000;
|
||||
@@ -197,12 +202,30 @@ string falco_formats::format_event(const gen_event *evt, const std::string &rule
|
||||
event["time"] = iso8601evttime;
|
||||
event["rule"] = rule;
|
||||
event["priority"] = level;
|
||||
event["source"] = source;
|
||||
|
||||
if(s_json_include_output_property)
|
||||
{
|
||||
// This is the filled-in output line.
|
||||
event["output"] = line;
|
||||
}
|
||||
|
||||
if(s_json_include_tags_property)
|
||||
{
|
||||
if (tags.size() == 0)
|
||||
{
|
||||
// This sets an empty array
|
||||
rule_tags = Json::arrayValue;
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto &tag : tags)
|
||||
{
|
||||
rule_tags[rule_tags_idx++] = tag;
|
||||
}
|
||||
}
|
||||
event["tags"] = rule_tags;
|
||||
}
|
||||
|
||||
full_line = writer.write(event);
|
||||
|
||||
|
||||
@@ -37,7 +37,8 @@ public:
|
||||
falco_engine *engine,
|
||||
lua_State *ls,
|
||||
bool json_output,
|
||||
bool json_include_output_property);
|
||||
bool json_include_output_property,
|
||||
bool json_include_tags_property);
|
||||
|
||||
// formatter = falco.formatter(format_string)
|
||||
static int lua_formatter(lua_State *ls);
|
||||
@@ -46,7 +47,7 @@ public:
|
||||
static int lua_free_formatter(lua_State *ls);
|
||||
|
||||
static string format_event(const gen_event *evt, const std::string &rule, const std::string &source,
|
||||
const std::string &level, const std::string &format);
|
||||
const std::string &level, const std::string &format, std::set<std::string> &tags);
|
||||
|
||||
static map<string, string> resolve_tokens(const gen_event *evt, const std::string &source,
|
||||
const std::string &format);
|
||||
@@ -56,4 +57,5 @@ public:
|
||||
static std::unique_ptr<sinsp_evt_formatter_cache> s_formatters;
|
||||
static bool s_json_output;
|
||||
static bool s_json_include_output_property;
|
||||
static bool s_json_include_tags_property;
|
||||
};
|
||||
|
||||
@@ -281,7 +281,11 @@ bool json_event_value::parse_as_int64(int64_t &intval, const std::string &val)
|
||||
return false;
|
||||
}
|
||||
}
|
||||
catch (std::invalid_argument &e)
|
||||
catch(std::out_of_range &)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
catch (std::invalid_argument &)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1156,7 +1156,7 @@ function on_event(rule_id)
|
||||
error ("rule_loader.on_event(): could not find rule by name: ", rule.rule)
|
||||
end
|
||||
|
||||
return rule.rule, rule.priority_num, output, combined_rule.exception_fields
|
||||
return rule.rule, rule.priority_num, output, combined_rule.exception_fields, rule.tags
|
||||
end
|
||||
|
||||
function print_stats()
|
||||
|
||||
@@ -25,7 +25,6 @@ set(
|
||||
event_drops.cpp
|
||||
statsfilewriter.cpp
|
||||
falco.cpp
|
||||
"${SYSDIG_SOURCE_DIR}/userspace/libsinsp/fields_info.cpp"
|
||||
)
|
||||
|
||||
set(
|
||||
@@ -87,16 +86,23 @@ if(NOT MINIMAL_BUILD)
|
||||
"${GRPC_INCLUDE}"
|
||||
"${GRPCPP_INCLUDE}"
|
||||
"${PROTOBUF_INCLUDE}"
|
||||
"${CARES_INCLUDE}"
|
||||
)
|
||||
|
||||
if(USE_BUNDLED_GRPC)
|
||||
list(APPEND FALCO_DEPENDENCIES grpc)
|
||||
list(APPEND FALCO_LIBRARIES "${GRPC_LIBRARIES}")
|
||||
endif()
|
||||
|
||||
list(APPEND FALCO_DEPENDENCIES civetweb)
|
||||
|
||||
list(
|
||||
APPEND FALCO_LIBRARIES
|
||||
"${GPR_LIB}"
|
||||
"${GRPC_LIB}"
|
||||
"${GRPCPP_LIB}"
|
||||
"${GRPC_LIB}"
|
||||
"${GPR_LIB}"
|
||||
"${PROTOBUF_LIB}"
|
||||
"${CARES_LIB}"
|
||||
"${OPENSSL_LIBRARY_SSL}"
|
||||
"${OPENSSL_LIBRARY_CRYPTO}"
|
||||
"${LIBYAML_LIB}"
|
||||
|
||||
@@ -71,6 +71,7 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
|
||||
|
||||
m_json_output = m_config->get_scalar<bool>("json_output", false);
|
||||
m_json_include_output_property = m_config->get_scalar<bool>("json_include_output_property", true);
|
||||
m_json_include_tags_property = m_config->get_scalar<bool>("json_include_tags_property", true);
|
||||
|
||||
falco::outputs::config file_output;
|
||||
file_output.name = "file";
|
||||
@@ -246,6 +247,25 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
|
||||
m_syscall_evt_drop_rate = m_config->get_scalar<double>("syscall_event_drops", "rate", .03333);
|
||||
m_syscall_evt_drop_max_burst = m_config->get_scalar<double>("syscall_event_drops", "max_burst", 1);
|
||||
m_syscall_evt_simulate_drops = m_config->get_scalar<bool>("syscall_event_drops", "simulate_drops", false);
|
||||
|
||||
m_syscall_evt_timeout_max_consecutives = m_config->get_scalar<uint32_t>("syscall_event_timeouts", "max_consecutives", 1000);
|
||||
if(m_syscall_evt_timeout_max_consecutives == 0)
|
||||
{
|
||||
throw logic_error("Error reading config file(" + m_config_file + "): the maximum consecutive timeouts without an event must be an unsigned integer > 0");
|
||||
}
|
||||
|
||||
m_metadata_download_max_mb = m_config->get_scalar<uint32_t>("metadata_download", "max_mb", 100);
|
||||
if(m_metadata_download_max_mb > 1024)
|
||||
{
|
||||
throw logic_error("Error reading config file(" + m_config_file + "): metadata download maximum size should be < 1024 Mb");
|
||||
}
|
||||
m_metadata_download_chunk_wait_us = m_config->get_scalar<uint32_t>("metadata_download", "chunk_wait_us", 1000);
|
||||
m_metadata_download_watch_freq_sec = m_config->get_scalar<uint32_t>("metadata_download", "watch_freq_sec", 1);
|
||||
if(m_metadata_download_watch_freq_sec == 0)
|
||||
{
|
||||
throw logic_error("Error reading config file(" + m_config_file + "): metadata download watch frequency seconds must be an unsigned integer > 0");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void falco_configuration::read_rules_file_directory(const string &path, list<string> &rules_filenames)
|
||||
|
||||
@@ -195,6 +195,7 @@ public:
|
||||
std::list<std::string> m_rules_filenames;
|
||||
bool m_json_output;
|
||||
bool m_json_include_output_property;
|
||||
bool m_json_include_tags_property;
|
||||
std::string m_log_level;
|
||||
std::vector<falco::outputs::config> m_outputs;
|
||||
uint32_t m_notifications_rate;
|
||||
@@ -219,14 +220,20 @@ public:
|
||||
std::string m_webserver_k8s_healthz_endpoint;
|
||||
bool m_webserver_ssl_enabled;
|
||||
std::string m_webserver_ssl_certificate;
|
||||
|
||||
syscall_evt_drop_actions m_syscall_evt_drop_actions;
|
||||
double m_syscall_evt_drop_threshold;
|
||||
double m_syscall_evt_drop_rate;
|
||||
double m_syscall_evt_drop_max_burst;
|
||||
|
||||
// Only used for testing
|
||||
bool m_syscall_evt_simulate_drops;
|
||||
|
||||
uint32_t m_syscall_evt_timeout_max_consecutives;
|
||||
|
||||
uint32_t m_metadata_download_max_mb;
|
||||
uint32_t m_metadata_download_chunk_wait_us;
|
||||
uint32_t m_metadata_download_watch_freq_sec;
|
||||
|
||||
private:
|
||||
void init_cmdline_options(std::list<std::string>& cmdline_options);
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ limitations under the License.
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
#include <signal.h>
|
||||
#include <fcntl.h>
|
||||
@@ -35,12 +36,13 @@ limitations under the License.
|
||||
|
||||
#include "logger.h"
|
||||
#include "utils.h"
|
||||
#include "chisel.h"
|
||||
#include "fields_info.h"
|
||||
#include "falco_utils.h"
|
||||
|
||||
#include "event_drops.h"
|
||||
#include "configuration.h"
|
||||
#include "falco_engine.h"
|
||||
#include "falco_engine_version.h"
|
||||
#include "config_falco.h"
|
||||
#include "statsfilewriter.h"
|
||||
#ifndef MINIMAL_BUILD
|
||||
@@ -121,6 +123,9 @@ static void usage()
|
||||
" for this option, it will be interpreted as the name of a file containing bearer token.\n"
|
||||
" Note that the format of this command-line option prohibits use of files whose names contain\n"
|
||||
" ':' or '#' characters in the file name.\n"
|
||||
" --k8s-node <node_name> The node name will be used as a filter when requesting metadata of pods to the API server.\n"
|
||||
" Usually, it should be set to the current node on which Falco is running.\n"
|
||||
" If empty, no filter is set, which may have a performance penalty on large clusters.\n"
|
||||
#endif
|
||||
" -L Show the name and description of all rules and exit.\n"
|
||||
" -l <rule> Show the name and description of the rule with name <rule> and exit.\n"
|
||||
@@ -251,6 +256,7 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
sinsp_evt* ev;
|
||||
StatsFileWriter writer;
|
||||
uint64_t duration_start = 0;
|
||||
uint32_t timeouts_since_last_success_or_msg = 0;
|
||||
|
||||
sdropmgr.init(inspector,
|
||||
outputs,
|
||||
@@ -298,6 +304,28 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
}
|
||||
else if(rc == SCAP_TIMEOUT)
|
||||
{
|
||||
if(unlikely(ev == nullptr))
|
||||
{
|
||||
timeouts_since_last_success_or_msg++;
|
||||
if(timeouts_since_last_success_or_msg > config.m_syscall_evt_timeout_max_consecutives)
|
||||
{
|
||||
std::string rule = "Falco internal: timeouts notification";
|
||||
std::string msg = rule + ". " + std::to_string(config.m_syscall_evt_timeout_max_consecutives) + " consecutive timeouts without event.";
|
||||
std::string last_event_time_str = "none";
|
||||
if(duration_start > 0)
|
||||
{
|
||||
sinsp_utils::ts_to_string(duration_start, &last_event_time_str, false, true);
|
||||
}
|
||||
std::map<std::string, std::string> o = {
|
||||
{"last_event_time", last_event_time_str},
|
||||
};
|
||||
auto now = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
|
||||
outputs->handle_msg(now, falco_common::PRIORITY_DEBUG, msg, rule, o);
|
||||
// Reset the timeouts counter, Falco alerted
|
||||
timeouts_since_last_success_or_msg = 0;
|
||||
}
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
else if(rc == SCAP_EOF)
|
||||
@@ -308,16 +336,18 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
{
|
||||
//
|
||||
// Event read error.
|
||||
// Notify the chisels that we're exiting, and then die with an error.
|
||||
//
|
||||
cerr << "rc = " << rc << endl;
|
||||
throw sinsp_exception(inspector->getlasterr().c_str());
|
||||
}
|
||||
|
||||
if (duration_start == 0)
|
||||
// Reset the timeouts counter, Falco succesfully got an event to process
|
||||
timeouts_since_last_success_or_msg = 0;
|
||||
if(duration_start == 0)
|
||||
{
|
||||
duration_start = ev->get_ts();
|
||||
} else if(duration_to_tot_ns > 0)
|
||||
}
|
||||
else if(duration_to_tot_ns > 0)
|
||||
{
|
||||
if(ev->get_ts() - duration_start >= duration_to_tot_ns)
|
||||
{
|
||||
@@ -344,7 +374,7 @@ uint64_t do_inspect(falco_engine *engine,
|
||||
unique_ptr<falco_engine::rule_result> res = engine->process_sinsp_event(ev);
|
||||
if(res)
|
||||
{
|
||||
outputs->handle_event(res->evt, res->rule, res->source, res->priority_num, res->format);
|
||||
outputs->handle_event(res->evt, res->rule, res->source, res->priority_num, res->format, res->tags);
|
||||
}
|
||||
|
||||
num_evts++;
|
||||
@@ -442,6 +472,7 @@ int falco_init(int argc, char **argv)
|
||||
#ifndef MINIMAL_BUILD
|
||||
string* k8s_api = 0;
|
||||
string* k8s_api_cert = 0;
|
||||
string *k8s_node_name = 0;
|
||||
string* mesos_api = 0;
|
||||
#endif
|
||||
string output_format = "";
|
||||
@@ -490,6 +521,7 @@ int falco_init(int argc, char **argv)
|
||||
{"ignored-events", no_argument, 0, 'i'},
|
||||
{"k8s-api-cert", required_argument, 0, 'K'},
|
||||
{"k8s-api", required_argument, 0, 'k'},
|
||||
{"k8s-node", required_argument, 0},
|
||||
{"list", optional_argument, 0},
|
||||
{"mesos-api", required_argument, 0, 'm'},
|
||||
{"option", required_argument, 0, 'o'},
|
||||
@@ -666,6 +698,15 @@ int falco_init(int argc, char **argv)
|
||||
{
|
||||
cri_async = false;
|
||||
}
|
||||
#ifndef MINIMAL_BUILD
|
||||
else if(string(long_options[long_index].name) == "k8s-node")
|
||||
{
|
||||
k8s_node_name = new string(optarg);
|
||||
if (k8s_node_name->size() == 0) {
|
||||
throw std::invalid_argument("If --k8s-node is provided, it cannot be an empty string");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
else if (string(long_options[long_index].name) == "list")
|
||||
{
|
||||
list_flds = true;
|
||||
@@ -939,6 +980,7 @@ int falco_init(int argc, char **argv)
|
||||
support["system_info"]["version"] = sysinfo.version;
|
||||
support["system_info"]["machine"] = sysinfo.machine;
|
||||
support["cmdline"] = cmdline;
|
||||
support["engine_info"]["engine_version"] = FALCO_ENGINE_VERSION;
|
||||
support["config"] = read_file(conf_filename);
|
||||
support["rules_files"] = nlohmann::json::array();
|
||||
for(auto filename : config.m_rules_filenames)
|
||||
@@ -1080,6 +1122,7 @@ int falco_init(int argc, char **argv)
|
||||
|
||||
outputs->init(config.m_json_output,
|
||||
config.m_json_include_output_property,
|
||||
config.m_json_include_tags_property,
|
||||
config.m_output_timeout,
|
||||
config.m_notifications_rate, config.m_notifications_max_burst,
|
||||
config.m_buffered_outputs,
|
||||
@@ -1221,7 +1264,7 @@ int falco_init(int argc, char **argv)
|
||||
k8s_api_cert = new string(k8s_cert_env);
|
||||
}
|
||||
}
|
||||
inspector->init_k8s_client(k8s_api, k8s_api_cert, verbose);
|
||||
inspector->init_k8s_client(k8s_api, k8s_api_cert, k8s_node_name, verbose);
|
||||
k8s_api = 0;
|
||||
k8s_api_cert = 0;
|
||||
}
|
||||
@@ -1237,7 +1280,7 @@ int falco_init(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
k8s_api = new string(k8s_api_env);
|
||||
inspector->init_k8s_client(k8s_api, k8s_api_cert, verbose);
|
||||
inspector->init_k8s_client(k8s_api, k8s_api_cert, k8s_node_name, verbose);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -1266,6 +1309,11 @@ int falco_init(int argc, char **argv)
|
||||
delete mesos_api;
|
||||
mesos_api = 0;
|
||||
|
||||
falco_logger::log(LOG_DEBUG, "Setting metadata download max size to " + to_string(config.m_metadata_download_max_mb) + " MB\n");
|
||||
falco_logger::log(LOG_DEBUG, "Setting metadata download chunk wait time to " + to_string(config.m_metadata_download_chunk_wait_us) + " μs\n");
|
||||
falco_logger::log(LOG_DEBUG, "Setting metadata download watch frequency to " + to_string(config.m_metadata_download_watch_freq_sec) + " seconds\n");
|
||||
inspector->set_metadata_download_params(config.m_metadata_download_max_mb * 1024 * 1024, config.m_metadata_download_chunk_wait_us, config.m_metadata_download_watch_freq_sec);
|
||||
|
||||
if(trace_filename.empty() && config.m_webserver_enabled && !disable_k8s_audit)
|
||||
{
|
||||
std::string ssl_option = (config.m_webserver_ssl_enabled ? " (SSL)" : "");
|
||||
|
||||
@@ -62,6 +62,7 @@ falco_outputs::~falco_outputs()
|
||||
|
||||
void falco_outputs::init(bool json_output,
|
||||
bool json_include_output_property,
|
||||
bool json_include_tags_property,
|
||||
uint32_t timeout,
|
||||
uint32_t rate, uint32_t max_burst, bool buffered,
|
||||
bool time_format_iso_8601, std::string hostname)
|
||||
@@ -79,6 +80,7 @@ void falco_outputs::init(bool json_output,
|
||||
// So we can safely update them.
|
||||
falco_formats::s_json_output = json_output;
|
||||
falco_formats::s_json_include_output_property = json_include_output_property;
|
||||
falco_formats::s_json_include_tags_property = json_include_tags_property;
|
||||
|
||||
m_timeout = std::chrono::milliseconds(timeout);
|
||||
|
||||
@@ -142,7 +144,7 @@ void falco_outputs::add_output(falco::outputs::config oc)
|
||||
}
|
||||
|
||||
void falco_outputs::handle_event(gen_event *evt, string &rule, string &source,
|
||||
falco_common::priority_type priority, string &format)
|
||||
falco_common::priority_type priority, string &format, std::set<std::string> &tags)
|
||||
{
|
||||
if(!m_notifications_tb.claim())
|
||||
{
|
||||
@@ -190,8 +192,9 @@ void falco_outputs::handle_event(gen_event *evt, string &rule, string &source,
|
||||
sformat += " " + format;
|
||||
}
|
||||
|
||||
cmsg.msg = falco_formats::format_event(evt, rule, source, falco_common::priority_names[priority], sformat);
|
||||
cmsg.msg = falco_formats::format_event(evt, rule, source, falco_common::priority_names[priority], sformat, tags);
|
||||
cmsg.fields = falco_formats::resolve_tokens(evt, source, sformat);
|
||||
cmsg.tags.insert(tags.begin(), tags.end());
|
||||
|
||||
cmsg.type = ctrl_msg_type::CTRL_MSG_OUTPUT;
|
||||
m_queue.push(cmsg);
|
||||
|
||||
@@ -40,6 +40,7 @@ public:
|
||||
|
||||
void init(bool json_output,
|
||||
bool json_include_output_property,
|
||||
bool json_include_tags_property,
|
||||
uint32_t timeout,
|
||||
uint32_t rate, uint32_t max_burst, bool buffered,
|
||||
bool time_format_iso_8601, std::string hostname);
|
||||
@@ -48,7 +49,7 @@ public:
|
||||
|
||||
// Format then send the event to all configured outputs (`evt` is an event that has matched some rule).
|
||||
void handle_event(gen_event *evt, std::string &rule, std::string &source,
|
||||
falco_common::priority_type priority, std::string &format);
|
||||
falco_common::priority_type priority, std::string &format, std::set<std::string> &tags);
|
||||
|
||||
// Format then send a generic message to all outputs. Not necessarily associated with any event.
|
||||
void handle_msg(uint64_t now,
|
||||
|
||||
@@ -50,6 +50,7 @@ struct message
|
||||
std::string rule;
|
||||
std::string source;
|
||||
map<std::string, std::string> fields;
|
||||
std::set<std::string> tags;
|
||||
};
|
||||
|
||||
//
|
||||
|
||||
@@ -50,6 +50,5 @@ message response {
|
||||
string output = 5;
|
||||
map<string, string> output_fields = 6;
|
||||
string hostname = 7;
|
||||
// TODO(leodido,fntlnz): tags not supported yet, keeping it for reference.
|
||||
// repeated string tags = 8;
|
||||
repeated string tags = 8;
|
||||
}
|
||||
@@ -64,5 +64,9 @@ void falco::outputs::output_grpc::output(const message *msg)
|
||||
auto host = grpc_res.mutable_hostname();
|
||||
*host = m_hostname;
|
||||
|
||||
// tags
|
||||
auto tags = grpc_res.mutable_tags();
|
||||
*tags = {msg->tags.begin(), msg->tags.end()};
|
||||
|
||||
falco::grpc::queue::get().push(grpc_res);
|
||||
}
|
||||
@@ -84,7 +84,17 @@ bool k8s_audit_handler::accept_data(falco_engine *engine,
|
||||
for(auto &jev : jevts)
|
||||
{
|
||||
std::unique_ptr<falco_engine::rule_result> res;
|
||||
res = engine->process_k8s_audit_event(&jev);
|
||||
|
||||
try
|
||||
{
|
||||
res = engine->process_k8s_audit_event(&jev);
|
||||
}
|
||||
catch(...)
|
||||
{
|
||||
errstr = string("unkown error processing audit event");
|
||||
fprintf(stderr, "%s\n", errstr.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
if(res)
|
||||
{
|
||||
@@ -92,7 +102,7 @@ bool k8s_audit_handler::accept_data(falco_engine *engine,
|
||||
{
|
||||
outputs->handle_event(res->evt, res->rule,
|
||||
res->source, res->priority_num,
|
||||
res->format);
|
||||
res->format, res->tags);
|
||||
}
|
||||
catch(falco_exception &e)
|
||||
{
|
||||
|
||||
Reference in New Issue
Block a user