Mirror of https://github.com/falcosecurity/falco.git (synced 2025-07-16 15:51:55 +00:00)

delete all duplicate empty blanks (#542)

falco-CLA-1.0-signed-off-by: Xiang Dai <764524258@qq.com>

This commit is contained in:
parent a6c5926336
commit 04b1b4da67
@@ -22,11 +22,11 @@
 * Triage GitHub issues and perform pull request reviews for other maintainers and the community.
 * During GitHub issue triage, apply all applicable [labels](https://github.com/falcosecurity/falco/labels)
 to each new issue. Labels are extremely useful for future issue follow up. Which labels to apply
-is somewhat subjective so just use your best judgment.
+is somewhat subjective so just use your best judgment.
 * Make sure that ongoing PRs are moving forward at the right pace or closing them.
-* Participate when called upon in the security releases. Note that although this should be a rare
+* Participate when called upon in the security releases. Note that although this should be a rare
 occurrence, if a serious vulnerability is found, the process may take up to several full days of
-work to implement. This reality should be taken into account when discussing time commitment
+work to implement. This reality should be taken into account when discussing time commitment
 obligations with employers.
 * In general continue to be willing to spend at least 25% of ones time working on Falco (~1.25
 business days per week).
@@ -16,7 +16,7 @@ if [ ! -e $CMAKE_DIR ]; then
 cd $BUILD_DIR
 mkdir -p $BUILD_DIR/cmake
 wget -nv https://s3.amazonaws.com/download.draios.com/dependencies/cmake-3.3.2.tar.gz
-tar -C $CMAKE_DIR --strip-components 1 -xzf cmake-3.3.2.tar.gz
+tar -C $CMAKE_DIR --strip-components 1 -xzf cmake-3.3.2.tar.gz
 cd $CMAKE_DIR
 ./bootstrap --system-curl
 make -j$MAKE_JOBS
@@ -5,4 +5,3 @@ while true; do
 sleep 60
 done
 
-
@@ -26,8 +26,8 @@ services:
 - ${PWD}/attacker_files:/usr/share/nginx/html
 - ${PWD}/attacker-nginx.conf:/etc/nginx/conf.d/default.conf
 depends_on:
-- "falco"
-
+- "falco"
+
 falco:
 image: sysdig/falco:latest
 privileged: true
@@ -1,6 +1,6 @@
 # Example Kubernetes Daemon Sets for Sysdig Falco
 
-This directory gives you the required YAML files to stand up Sysdig Falco on Kubernetes as a Daemon Set. This will result in a Falco Pod being deployed to each node, and thus the ability to monitor any running containers for abnormal behavior.
+This directory gives you the required YAML files to stand up Sysdig Falco on Kubernetes as a Daemon Set. This will result in a Falco Pod being deployed to each node, and thus the ability to monitor any running containers for abnormal behavior.
 
 The two options are provided to deploy a Daemon Set:
 - `k8s-with-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes with RBAC enabled.
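To make the Daemon Set option described above concrete, here is a heavily trimmed, hypothetical sketch of what such a Falco Daemon Set definition generally looks like. It is not the repository's actual `k8s-with-rbac/falco-daemonset-configmap.yaml`: the `apps/v1` API version, the `falco` service account name, and the reduced set of host mounts are assumptions, and the real files also mount additional host paths such as `/dev` and `/lib/modules` so the Falco driver can load.

```
# Hypothetical, trimmed DaemonSet sketch -- not the file shipped in k8s-with-rbac/
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: falco
spec:
  selector:
    matchLabels:
      app: falco
  template:
    metadata:
      labels:
        app: falco
    spec:
      serviceAccountName: falco        # assumes the RBAC objects from k8s-with-rbac are created first
      containers:
        - name: falco
          image: sysdig/falco:latest
          securityContext:
            privileged: true           # Falco needs privileged access to instrument the host
          volumeMounts:
            - name: docker-socket
              mountPath: /host/var/run/docker.sock
            - name: proc-fs
              mountPath: /host/proc
              readOnly: true
            - name: falco-config
              mountPath: /etc/falco
      volumes:
        - name: docker-socket
          hostPath:
            path: /var/run/docker.sock
        - name: proc-fs
          hostPath:
            path: /proc
        - name: falco-config
          configMap:
            name: falco-config         # created from k8s-with-rbac/falco-config in the steps below
```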
@@ -47,7 +47,7 @@ program_output:
 program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/see_your_slack_team/apps_settings_for/a_webhook_url"
 ```
 
-You will also need to enable JSON output. Find the `json_output: false` setting in the `falco.yaml` file and change it to read `json_output: true`. Any custom rules for your environment can be added to into the `falco_rules.local.yaml` file and they will be picked up by Falco at start time. You can now create the ConfigMap in Kubernetes.
+You will also need to enable JSON output. Find the `json_output: false` setting in the `falco.yaml` file and change it to read `json_output: true`. Any custom rules for your environment can be added to into the `falco_rules.local.yaml` file and they will be picked up by Falco at start time. You can now create the ConfigMap in Kubernetes.
 
 ```
 k8s-using-daemonset$ kubectl create configmap falco-config --from-file=k8s-with-rbac/falco-config
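Putting the two settings mentioned in that hunk together, the relevant fragment of the `falco.yaml` placed in the ConfigMap would read roughly as follows. This is a sketch rather than the full shipped file: the `enabled` and `keep_alive` keys shown under `program_output` reflect Falco's standard options for that block, and the Slack webhook URL is the same placeholder used in the example above.

```
# Sketch of the falco.yaml fragment referenced above (webhook URL is a placeholder)
json_output: true

program_output:
  enabled: true
  keep_alive: false
  program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/see_your_slack_team/apps_settings_for/a_webhook_url"
```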
@@ -58,7 +58,7 @@ k8s-using-daemonset$
 Now that we have the requirements for our Daemon Set in place, we can create our Daemon Set.
 
 ```
-k8s-using-daemonset$ kubectl create -f k8s-with-rbac/falco-daemonset-configmap.yaml
+k8s-using-daemonset$ kubectl create -f k8s-with-rbac/falco-daemonset-configmap.yaml
 daemonset "falco" created
 k8s-using-daemonset$
 ```
@@ -106,9 +106,9 @@ root@falco-74htl:/# exit
 k8s-using-daemonset$ kubectl logs falco-74htl
 {"output":"17:48:58.590038385: Notice A shell was spawned in a container with an attached terminal (user=root k8s.pod=falco-74htl container=a98c2aa8e670 shell=bash parent=<NA> cmdline=bash terminal=34816)","priority":"Notice","rule":"Terminal shell in container","time":"2017-12-20T17:48:58.590038385Z", "output_fields": {"container.id":"a98c2aa8e670","evt.time":1513792138590038385,"k8s.pod.name":"falco-74htl","proc.cmdline":"bash ","proc.name":"bash","proc.pname":null,"proc.tty":34816,"user.name":"root"}}
 k8s-using-daemonset$
-```
+```
 
-Alternatively, you can deploy the [Falco Event Generator](https://github.com/draios/falco/wiki/Generating-Sample-Events) deployement to have events automatically generated. Please note that this Deployment will generate a large number of events.
+Alternatively, you can deploy the [Falco Event Generator](https://github.com/draios/falco/wiki/Generating-Sample-Events) deployement to have events automatically generated. Please note that this Deployment will generate a large number of events.
 
 ```
 k8s-using-daemonset$ kubectl create -f falco-event-generator-deployment.yaml \
@@ -116,5 +116,5 @@ k8s-using-daemonset$ kubectl create -f falco-event-generator-deployment.yaml \
 && kubectl delete -f falco-event-generator-deployment.yaml
 deployment "falco-event-generator-deployment" created
 deployment "falco-event-generator-deployment" deleted
-k8s-using-daemonset$
+k8s-using-daemonset$
 ```
@@ -26,15 +26,15 @@
 #
 
 ### BEGIN INIT INFO
-# Provides:
-# Required-Start:
-# Required-Stop:
-# Should-Start:
-# Should-Stop:
-# Default-Start:
-# Default-Stop:
-# Short-Description:
-# Description:
+# Provides:
+# Required-Start:
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start:
+# Default-Stop:
+# Short-Description:
+# Description:
 ### END INIT INFO
 
 # Source function library.
@@ -23,7 +23,7 @@
 
 - macro: is_cat
 condition: proc.name in (cat_binaries)
-
+
 - rule: open_from_cat
 desc: A process named cat does an open
 condition: evt.type=open and is_cat
@@ -206,7 +206,7 @@ function run_juttle_examples() {
 docker-compose -f dc-juttle-engine.yml -f aws-cloudwatch/dc-aws-cloudwatch.yml -f elastic-newstracker/dc-elastic.yml -f github-tutorial/dc-elastic.yml -f nginx_logs/dc-nginx-logs.yml -f postgres-diskstats/dc-postgres.yml -f cadvisor-influx/dc-cadvisor-influx.yml up -d
 sleep 120
 docker-compose -f dc-juttle-engine.yml -f aws-cloudwatch/dc-aws-cloudwatch.yml -f elastic-newstracker/dc-elastic.yml -f github-tutorial/dc-elastic.yml -f nginx_logs/dc-nginx-logs.yml -f postgres-diskstats/dc-postgres.yml -f cadvisor-influx/dc-cadvisor-influx.yml stop
-docker-compose -f dc-juttle-engine.yml -f aws-cloudwatch/dc-aws-cloudwatch.yml -f elastic-newstracker/dc-elastic.yml -f github-tutorial/dc-elastic.yml -f nginx_logs/dc-nginx-logs.yml -f postgres-diskstats/dc-postgres.yml -f cadvisor-influx/dc-cadvisor-influx.yml rm -fv
+docker-compose -f dc-juttle-engine.yml -f aws-cloudwatch/dc-aws-cloudwatch.yml -f elastic-newstracker/dc-elastic.yml -f github-tutorial/dc-elastic.yml -f nginx_logs/dc-nginx-logs.yml -f postgres-diskstats/dc-postgres.yml -f cadvisor-influx/dc-cadvisor-influx.yml rm -fv
 popd
 }
 
@@ -186,7 +186,7 @@ int falco_outputs::handle_http(lua_State *ls)
 slist1 = NULL;
 
 if (!lua_isstring(ls, -1) ||
-!lua_isstring(ls, -2))
+!lua_isstring(ls, -2))
 {
 lua_pushstring(ls, "Invalid arguments passed to handle_http()");
 lua_error(ls);
@@ -196,7 +196,7 @@ int falco_outputs::handle_http(lua_State *ls)
 string msg = (char *) lua_tostring(ls, 2);
 
 curl = curl_easy_init();
-if(curl)
+if(curl)
 {
 slist1 = curl_slist_append(slist1, "Content-Type: application/json");
 curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist1);