Merge pull request #389 from nestorsalceda/kubernetes-response-engine

Add Kubernetes response engine
Jorge Salamero Sanz 2018-07-12 18:55:07 +02:00 committed by GitHub
commit ccf35552dd
33 changed files with 1947 additions and 0 deletions


@ -0,0 +1,18 @@
# Kubernetes Response Engine for Sysdig Falco
A response engine for Falco that processes security events by executing playbooks that respond to security threats.
## Architecture
* *[Falco](https://sysdig.com/opensource/falco/)* monitors containers and processes to alert on unexpected behavior. This is defined through a runtime policy built from multiple rules describing what the system should and shouldn't do.
* *falco-nats* forwards each alert to the message broker, publishing it on a topic named `falco.<severity>.<rule_name_slugified>` (see the sketch after this list).
* *[NATS](https://nats.io/)*, our message broker, delivers the alert to any subscribers to the different topics.
* *[Kubeless](https://kubeless.io/)*, a FaaS framework that runs in Kubernetes, receives the security events and executes the configured playbooks.
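For reference, this is how an alert maps to a topic; a minimal Python sketch mirroring the slugify logic of the *falco-nats* utility included in this change:
```
import re

SLUG_PATTERN = re.compile(r"[^a-z0-9]+")

def topic_for(alert):
    # e.g. priority "Notice" + rule "Terminal shell in container"
    # becomes "falco.notice.terminal_shell_in_container"
    rule_slug = SLUG_PATTERN.sub("_", alert["rule"].lower()).strip("_")
    return ("falco." + alert["priority"] + "." + rule_slug).lower()
```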
## Glossary
* *Security event*: An alert sent by Falco when a configured rule matches the behaviour on a host.
* *Playbook*: A piece of code executed when an alert is received, responding to the threat in an automated way (see the sketch after this list). Some examples include:
- sending an alert to Slack
- stopping the pod by killing its container
- tainting the node where the pod is running
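As a reference for the shape of a playbook, here is a minimal sketch mirroring the *DeletePod* playbook added in this change (the client object is whatever infrastructure the playbook needs):
```
class DeleteOffendingPod:
    def __init__(self, k8s_client):
        self._k8s_client = k8s_client

    def run(self, alert):
        # Falco places the pod name in the alert's output_fields
        self._k8s_client.delete_pod(alert['output_fields']['k8s.pod.name'])
```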


@ -0,0 +1,9 @@
deploy:
kubectl apply -f nats/
kubectl apply -f kubeless/
kubectl apply -f network-policy.yaml
clean:
kubectl delete -f kubeless/
kubectl delete -f nats/
kubectl delete -f network-policy.yaml


@ -0,0 +1,20 @@
# Kubernetes Manifests for Kubernetes Response Engine
This directory contains the manifests for creating the required infrastructure in the
Kubernetes cluster.
## Deploy
To deploy NATS, Kubeless and the network policy, just run the default Makefile target:
```
make
```
## Clean
You can clean your cluster with:
```
make clean
```


@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kubeless


@ -0,0 +1,369 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: controller-acct
namespace: kubeless
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kubeless-controller-deployer
rules:
- apiGroups:
- ""
resources:
- services
- configmaps
verbs:
- create
- get
- delete
- list
- update
- patch
- apiGroups:
- apps
- extensions
resources:
- deployments
verbs:
- create
- get
- delete
- list
- update
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- delete
- apiGroups:
- ""
resourceNames:
- kubeless-registry-credentials
resources:
- secrets
verbs:
- get
- apiGroups:
- kubeless.io
resources:
- functions
- httptriggers
- cronjobtriggers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- create
- get
- delete
- deletecollection
- list
- update
- patch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- get
- delete
- list
- update
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
- servicemonitors
verbs:
- '*'
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- create
- get
- list
- update
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubeless-controller-deployer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeless-controller-deployer
subjects:
- kind: ServiceAccount
name: controller-acct
namespace: kubeless
---
apiVersion: apiextensions.k8s.io/v1beta1
description: Kubernetes Native Serverless Framework
kind: CustomResourceDefinition
metadata:
name: functions.kubeless.io
spec:
group: kubeless.io
names:
kind: Function
plural: functions
singular: function
scope: Namespaced
version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
description: CRD object for HTTP trigger type
kind: CustomResourceDefinition
metadata:
name: httptriggers.kubeless.io
spec:
group: kubeless.io
names:
kind: HTTPTrigger
plural: httptriggers
singular: httptrigger
scope: Namespaced
version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
description: CRD object for HTTP trigger type
kind: CustomResourceDefinition
metadata:
name: cronjobtriggers.kubeless.io
spec:
group: kubeless.io
names:
kind: CronJobTrigger
plural: cronjobtriggers
singular: cronjobtrigger
scope: Namespaced
version: v1beta1
---
apiVersion: v1
data:
builder-image: kubeless/function-image-builder:v1.0.0-alpha.6
builder-image-secret: ""
deployment: '{}'
enable-build-step: "false"
function-registry-tls-verify: "true"
ingress-enabled: "false"
provision-image: kubeless/unzip@sha256:f162c062973cca05459834de6ed14c039d45df8cdb76097f50b028a1621b3697
provision-image-secret: ""
runtime-images: |-
[
{
"ID": "python",
"compiled": false,
"versions": [
{
"name": "python27",
"version": "2.7",
"runtimeImage": "kubeless/python@sha256:07cfb0f3d8b6db045dc317d35d15634d7be5e436944c276bf37b1c630b03add8",
"initImage": "python:2.7"
},
{
"name": "python34",
"version": "3.4",
"runtimeImage": "kubeless/python@sha256:f19640c547a3f91dbbfb18c15b5e624029b4065c1baf2892144e07c36f0a7c8f",
"initImage": "python:3.4"
},
{
"name": "python36",
"version": "3.6",
"runtimeImage": "kubeless/python@sha256:0c9f8f727d42625a4e25230cfe612df7488b65f283e7972f84108d87e7443d72",
"initImage": "python:3.6"
}
],
"depName": "requirements.txt",
"fileNameSuffix": ".py"
},
{
"ID": "nodejs",
"compiled": false,
"versions": [
{
"name": "node6",
"version": "6",
"runtimeImage": "kubeless/nodejs@sha256:013facddb0f66c150844192584d823d7dfb2b5b8d79fd2ae98439c86685da657",
"initImage": "node:6.10"
},
{
"name": "node8",
"version": "8",
"runtimeImage": "kubeless/nodejs@sha256:b155d7e20e333044b60009c12a25a97c84eed610f2a3d9d314b47449dbdae0e5",
"initImage": "node:8"
}
],
"depName": "package.json",
"fileNameSuffix": ".js"
},
{
"ID": "nodejs_distroless",
"compiled": false,
"versions": [
{
"name": "node8",
"version": "8",
"runtimeImage": "henrike42/kubeless/runtimes/nodejs/distroless:0.0.2",
"initImage": "node:8"
}
],
"depName": "package.json",
"fileNameSuffix": ".js"
},
{
"ID": "ruby",
"compiled": false,
"versions": [
{
"name": "ruby24",
"version": "2.4",
"runtimeImage": "kubeless/ruby@sha256:01665f1a32fe4fab4195af048627857aa7b100e392ae7f3e25a44bd296d6f105",
"initImage": "bitnami/ruby:2.4"
}
],
"depName": "Gemfile",
"fileNameSuffix": ".rb"
},
{
"ID": "php",
"compiled": false,
"versions": [
{
"name": "php72",
"version": "7.2",
"runtimeImage": "kubeless/php@sha256:9b86066b2640bedcd88acb27f43dfaa2b338f0d74d9d91131ea781402f7ec8ec",
"initImage": "composer:1.6"
}
],
"depName": "composer.json",
"fileNameSuffix": ".php"
},
{
"ID": "go",
"compiled": true,
"versions": [
{
"name": "go1.10",
"version": "1.10",
"runtimeImage": "kubeless/go@sha256:e2fd49f09b6ff8c9bac6f1592b3119ea74237c47e2955a003983e08524cb3ae5",
"initImage": "kubeless/go-init@sha256:983b3f06452321a2299588966817e724d1a9c24be76cf1b12c14843efcdff502"
}
],
"depName": "Gopkg.toml",
"fileNameSuffix": ".go"
},
{
"ID": "dotnetcore",
"compiled": true,
"versions": [
{
"name": "dotnetcore2.0",
"version": "2.0",
"runtimeImage": "allantargino/kubeless-dotnetcore@sha256:1699b07d9fc0276ddfecc2f823f272d96fd58bbab82d7e67f2fd4982a95aeadc",
"initImage": "allantargino/aspnetcore-build@sha256:0d60f845ff6c9c019362a68b87b3920f3eb2d32f847f2d75e4d190cc0ce1d81c"
}
],
"depName": "project.csproj",
"fileNameSuffix": ".cs"
},
{
"ID": "java",
"compiled": true,
"versions": [
{
"name": "java1.8",
"version": "1.8",
"runtimeImage": "kubeless/java@sha256:debf9502545f4c0e955eb60fabb45748c5d98ed9365c4a508c07f38fc7fefaac",
"initImage": "kubeless/java-init@sha256:7e5e4376d3ab76c336d4830c9ed1b7f9407415feca49b8c2bf013e279256878f"
}
],
"depName": "pom.xml",
"fileNameSuffix": ".java"
},
{
"ID": "ballerina",
"compiled": true,
"versions": [
{
"name": "ballerina0.975.0",
"version": "0.975.0",
"runtimeImage": "kubeless/ballerina@sha256:83e51423972f4b0d6b419bee0b4afb3bb87d2bf1b604ebc4366c430e7cc28a35",
"initImage": "kubeless/ballerina-init@sha256:05857ce439a7e290f9d86f8cb38ea3b574670c0c0e91af93af06686fa21ecf4f"
}
],
"depName": "",
"fileNameSuffix": ".bal"
}
]
service-type: ClusterIP
kind: ConfigMap
metadata:
name: kubeless-config
namespace: kubeless
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
kubeless: controller
name: kubeless-controller-manager
namespace: kubeless
spec:
selector:
matchLabels:
kubeless: controller
template:
metadata:
labels:
kubeless: controller
spec:
containers:
- env:
- name: KUBELESS_INGRESS_ENABLED
valueFrom:
configMapKeyRef:
key: ingress-enabled
name: kubeless-config
- name: KUBELESS_SERVICE_TYPE
valueFrom:
configMapKeyRef:
key: service-type
name: kubeless-config
- name: KUBELESS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBELESS_CONFIG
value: kubeless-config
image: bitnami/kubeless-controller-manager:v1.0.0-alpha.6
imagePullPolicy: IfNotPresent
name: kubeless-controller-manager
serviceAccountName: controller-acct


@ -0,0 +1,74 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nats-controller-deployer
rules:
- apiGroups:
- ""
resources:
- services
- configmaps
verbs:
- get
- list
- apiGroups:
- kubeless.io
resources:
- functions
- natstriggers
verbs:
- get
- list
- watch
- update
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nats-controller-deployer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nats-controller-deployer
subjects:
- kind: ServiceAccount
name: controller-acct
namespace: kubeless
---
apiVersion: apiextensions.k8s.io/v1beta1
description: CRD object for NATS trigger type
kind: CustomResourceDefinition
metadata:
name: natstriggers.kubeless.io
spec:
group: kubeless.io
names:
kind: NATSTrigger
plural: natstriggers
singular: natstrigger
scope: Namespaced
version: v1beta1
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
kubeless: nats-trigger-controller
name: nats-trigger-controller
namespace: kubeless
spec:
selector:
matchLabels:
kubeless: nats-trigger-controller
template:
metadata:
labels:
kubeless: nats-trigger-controller
spec:
containers:
- image: bitnami/nats-trigger-controller:v1.0.0-alpha.6
imagePullPolicy: IfNotPresent
name: nats-trigger-controller
serviceAccountName: controller-acct


@ -0,0 +1,82 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: nats-io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nats-operator
namespace: nats-io
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: nats-operator
namespace: nats-io
spec:
replicas: 1
selector:
matchLabels:
name: nats-operator
template:
metadata:
labels:
name: nats-operator
spec:
serviceAccountName: nats-operator
containers:
- name: nats-operator
image: connecteverything/nats-operator:0.2.2-v1alpha2
imagePullPolicy: Always
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nats-io:nats-operator-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nats-io:nats-operator
subjects:
- kind: ServiceAccount
name: nats-operator
namespace: nats-io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nats-io:nats-operator
rules:
# Allow creating CRDs
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs: ["*"]
# Allow all actions on NatsClusters
- apiGroups:
- nats.io
resources:
- natsclusters
verbs: ["*"]
# Allow actions on basic Kubernetes objects
- apiGroups: [""]
resources:
- configmaps
- secrets
- pods
- services
- endpoints
- events
verbs: ["*"]


@ -0,0 +1,8 @@
apiVersion: "nats.io/v1alpha2"
kind: "NatsCluster"
metadata:
name: "nats"
namespace: "nats-io"
spec:
size: 3
version: "1.1.0"


@ -0,0 +1,11 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: isolate
spec:
podSelector:
matchLabels:
isolated: 'true'
policyTypes:
- Ingress
- Egress


@ -0,0 +1 @@
falco-nats


@ -0,0 +1,5 @@
FROM alpine:latest
COPY ./falco-nats /bin/
CMD ["/bin/falco-nats"]


@ -0,0 +1,12 @@
build:
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s" -o falco-nats main.go
deps:
go get -u github.com/nats-io/go-nats
clean:
rm falco-nats
docker: build
docker build -t sysdig/falco-nats .
docker push sysdig/falco-nats


@ -0,0 +1,27 @@
# NATS output for Sysdig Falco
As Falco does not support a NATS output natively, we have created this small
Go utility which reads Falco alerts from a named pipe and sends them to a
NATS server.
This utility is designed to run as a sidecar container in the same
Pod as Falco.
## Configuration
A [complete Kubernetes manifest is available](https://github.com/draios/falco/tree/kubernetes-response-engine/deployment/falco/falco-daemonset.yaml) for further reference.
Take a look at the sidecar container and at the initContainers directive, which
creates the shared pipe between the containers.
### Container image
This adapter is available as a container image named *sysdig/falco-nats*.
### Parameters Reference
* -s: Specifies the NATS server URL where messages will be published. Default:
*nats://nats.nats-io.svc.cluster.local:4222*
* -f: Specifies the named pipe path where Falco publishes its alerts. Default:
*/var/run/falco/nats*
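For a quick manual check, assuming the named pipe already exists (the initContainers directive mentioned above creates it) and falco-nats is reading from it, you can push a Falco-style alert as a single JSON line; a minimal sketch with illustrative field values:
```
import json

alert = {
    "priority": "Notice",
    "rule": "Terminal shell in container",
    "output": "...",
    "output_fields": {"k8s.pod.name": "nginx", "container.id": "abc123"},
}

# falco-nats reads the pipe line by line and publishes each line to NATS
with open("/var/run/falco/nats", "w") as pipe:
    pipe.write(json.dumps(alert) + "\n")
```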


@ -0,0 +1,100 @@
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
package main
import (
"bufio"
"encoding/json"
"flag"
"github.com/nats-io/go-nats"
"log"
"os"
"regexp"
"strings"
)
var slugRegularExpression = regexp.MustCompile("[^a-z0-9]+")
func main() {
var urls = flag.String("s", "nats://nats.nats-io.svc.cluster.local:4222", "The nats server URLs (separated by comma)")
var pipePath = flag.String("f", "/var/run/falco/nats", "The named pipe path")
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
nc, err := nats.Connect(*urls)
if err != nil {
log.Fatal(err)
}
defer nc.Close()
pipe, err := os.OpenFile(*pipePath, os.O_RDONLY, 0600)
if err != nil {
log.Fatal(err)
}
log.Printf("Opened pipe %s", *pipePath)
reader := bufio.NewReader(pipe)
scanner := bufio.NewScanner(reader)
log.Printf("Scanning %s", *pipePath)
for scanner.Scan() {
msg := []byte(scanner.Text())
subj, err := subjectAndRuleSlug(msg)
if err != nil {
log.Fatal(err)
}
nc.Publish(subj, msg)
nc.Flush()
if err := nc.LastError(); err != nil {
log.Fatal(err)
} else {
log.Printf("Published [%s] : '%s'\n", subj, msg)
}
}
}
func usage() {
log.Fatalf("Usage: nats-pub [-s server (%s)] <subject> <msg> \n", nats.DefaultURL)
}
type parsedAlert struct {
Priority string `json:"priority"`
Rule string `json:"rule"`
}
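// subjectAndRuleSlug builds the NATS subject for an alert as
// "falco.<priority>.<rule_slug>" (lowercased), e.g. "falco.notice.terminal_shell_in_container".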
func subjectAndRuleSlug(alert []byte) (string, error) {
var result parsedAlert
err := json.Unmarshal(alert, &result)
if err != nil {
return "", err
}
subject := "falco." + result.Priority + "." + slugify(result.Rule)
subject = strings.ToLower(subject)
return subject, nil
}
func slugify(input string) string {
return strings.Trim(slugRegularExpression.ReplaceAllString(strings.ToLower(input), "_"), "_")
}


@ -0,0 +1,104 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/


@ -0,0 +1,19 @@
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = true
name = "pypi"
[dev-packages]
mamba = "*"
expects = "*"
doublex = "*"
doublex-expects = "==0.7.0rc2"
[packages]
kubernetes = "*"
requests = "*"
"e1839a8" = {path = ".", editable = true}
maya = "*"
[requires]
python_version = "3.6"


@ -0,0 +1,367 @@
{
"_meta": {
"hash": {
"sha256": "00ca5a9cb1f462d534a06bca990e987e75a05b7baf6ba5ddac529f03312135e6"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.6"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.python.org/simple",
"verify_ssl": true
}
]
},
"default": {
"cachetools": {
"hashes": [
"sha256:90f1d559512fc073483fe573ef5ceb39bf6ad3d39edc98dc55178a2b2b176fa3",
"sha256:d1c398969c478d336f767ba02040fa22617333293fb0b8968e79b16028dfee35"
],
"version": "==2.1.0"
},
"certifi": {
"hashes": [
"sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7",
"sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0"
],
"version": "==2018.4.16"
},
"chardet": {
"hashes": [
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
],
"version": "==3.0.4"
},
"dateparser": {
"hashes": [
"sha256:940828183c937bcec530753211b70f673c0a9aab831e43273489b310538dff86",
"sha256:b452ef8b36cd78ae86a50721794bc674aa3994e19b570f7ba92810f4e0a2ae03"
],
"version": "==0.7.0"
},
"e1839a8": {
"editable": true,
"path": "."
},
"google-auth": {
"hashes": [
"sha256:1745c9066f698eac3da99cef082914495fb71bc09597ba7626efbbb64c4acc57",
"sha256:82a34e1a59ad35f01484d283d2a36b7a24c8c404a03a71b3afddd0a4d31e169f"
],
"version": "==1.5.0"
},
"humanize": {
"hashes": [
"sha256:a43f57115831ac7c70de098e6ac46ac13be00d69abbf60bdcac251344785bb19"
],
"version": "==0.5.1"
},
"idna": {
"hashes": [
"sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e",
"sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16"
],
"version": "==2.7"
},
"ipaddress": {
"hashes": [
"sha256:64b28eec5e78e7510698f6d4da08800a5c575caa4a286c93d651c5d3ff7b6794",
"sha256:b146c751ea45cad6188dd6cf2d9b757f6f4f8d6ffb96a023e6f2e26eea02a72c"
],
"version": "==1.0.22"
},
"kubernetes": {
"hashes": [
"sha256:b370ab4abd925309db69a14a4723487948e9a83de60ca92782ec14992b741c89",
"sha256:c80dcf531deca2037105df09c933355c80830ffbf9e496b5e6a3967ac6809ef7"
],
"index": "pypi",
"version": "==6.0.0"
},
"maya": {
"hashes": [
"sha256:6f63bc69aa77309fc220bc02618da8701a21da87c2e7a747ee5ccd56a907c3a5",
"sha256:f526bc8596d993f4bd9755668f66aaf61d635bb4149e084d4a2bc0ebe42aa0b6"
],
"index": "pypi",
"version": "==0.5.0"
},
"oauthlib": {
"hashes": [
"sha256:ac35665a61c1685c56336bda97d5eefa246f1202618a1d6f34fccb1bdd404162",
"sha256:d883b36b21a6ad813953803edfa563b1b579d79ca758fe950d1bc9e8b326025b"
],
"version": "==2.1.0"
},
"pendulum": {
"hashes": [
"sha256:4173ce3e81ad0d9d61dbce86f4286c43a26a398270df6a0a89f501f0c28ad27d",
"sha256:56a347d0457859c84b8cdba161fc37c7df5db9b3becec7881cd770e9d2058b3c",
"sha256:738878168eb26e5446da5d1f7b3312ae993a542061be8882099c00ef4866b1a2",
"sha256:95536b33ae152e3c831eb236c1bf9ac9dcfb3b5b98fdbe8e9e601eab6c373897",
"sha256:c04fcf955e622e97e405e5f6d1b1f4a7adc69d79d82f3609643de69283170d6d",
"sha256:dd6500d27bb7ccc029d497da4f9bd09549bd3c0ea276dad894ea2fdf309e83f3",
"sha256:ddaf97a061eb5e2ae37857a8cb548e074125017855690d20e443ad8d9f31e164",
"sha256:e7df37447824f9af0b58c7915a4caf349926036afd86ad38e7529a6b2f8fc34b",
"sha256:e9732b8bb214fad2c72ddcbfec07542effa8a8b704e174347ede1ff8dc679cce",
"sha256:f4eee1e1735487d9d25cc435c519fd4380cb1f82cde3ebad1efbc2fc30deca5b"
],
"version": "==1.5.1"
},
"pyasn1": {
"hashes": [
"sha256:2f57960dc7a2820ea5a1782b872d974b639aa3b448ac6628d1ecc5d0fe3986f2",
"sha256:3651774ca1c9726307560792877db747ba5e8a844ea1a41feb7670b319800ab3",
"sha256:602fda674355b4701acd7741b2be5ac188056594bf1eecf690816d944e52905e",
"sha256:8fb265066eac1d3bb5015c6988981b009ccefd294008ff7973ed5f64335b0f2d",
"sha256:9334cb427609d2b1e195bb1e251f99636f817d7e3e1dffa150cb3365188fb992",
"sha256:9a15cc13ff6bf5ed29ac936ca941400be050dff19630d6cd1df3fb978ef4c5ad",
"sha256:a66dcda18dbf6e4663bde70eb30af3fc4fe1acb2d14c4867a861681887a5f9a2",
"sha256:ba77f1e8d7d58abc42bfeddd217b545fdab4c1eeb50fd37c2219810ad56303bf",
"sha256:cdc8eb2eaafb56de66786afa6809cd9db2df1b3b595dcb25aa5b9dc61189d40a",
"sha256:d01fbba900c80b42af5c3fe1a999acf61e27bf0e452e0f1ef4619065e57622da",
"sha256:f281bf11fe204f05859225ec2e9da7a7c140b65deccd8a4eb0bc75d0bd6949e0",
"sha256:fb81622d8f3509f0026b0683fe90fea27be7284d3826a5f2edf97f69151ab0fc"
],
"version": "==0.4.3"
},
"pyasn1-modules": {
"hashes": [
"sha256:041e9fbafac548d095f5b6c3b328b80792f006196e15a232b731a83c93d59493",
"sha256:0cdca76a68dcb701fff58c397de0ef9922b472b1cb3ea9695ca19d03f1869787",
"sha256:0cea139045c38f84abaa803bcb4b5e8775ea12a42af10019d942f227acc426c3",
"sha256:0f2e50d20bc670be170966638fa0ae603f0bc9ed6ebe8e97a6d1d4cef30cc889",
"sha256:47fb6757ab78fe966e7c58b2030b546854f78416d653163f0ce9290cf2278e8b",
"sha256:598a6004ec26a8ab40a39ea955068cf2a3949ad9c0030da970f2e1ca4c9f1cc9",
"sha256:72fd8b0c11191da088147c6e4678ec53e573923ecf60b57eeac9e97433e09fc2",
"sha256:854700bbdd01394e2ada9c1bfbd0ed9f5d0c551350dbbd023e88b11d2771ae06",
"sha256:af00ea8f2022b6287dc375b2c70f31ab5af83989fc6fe9eacd4976ce26cd7ccc",
"sha256:b1f395cae2d669e0830cb023aa86f9f283b7a9aa32317d7f80d8e78aa2745812",
"sha256:c6747146e95d2b14cc2a8399b2b0bde3f93778f8f9ec704690d2b589c376c137",
"sha256:f53fe5bcebdf318f51399b250fe8325ef3a26d927f012cc0c8e0f9e9af7f9deb"
],
"version": "==0.2.1"
},
"python-dateutil": {
"hashes": [
"sha256:1adb80e7a782c12e52ef9a8182bebeb73f1d7e24e374397af06fb4956c8dc5c0",
"sha256:e27001de32f627c22380a688bcc43ce83504a7bc5da472209b4c70f02829f0b8"
],
"version": "==2.7.3"
},
"pytz": {
"hashes": [
"sha256:65ae0c8101309c45772196b21b74c46b2e5d11b6275c45d251b150d5da334555",
"sha256:c06425302f2cf668f1bba7a0a03f3c1d34d4ebeef2c72003da308b3947c7f749"
],
"version": "==2018.4"
},
"pytzdata": {
"hashes": [
"sha256:1d936da41ee06216d89fdc7ead1ee9a5da2811a8787515a976b646e110c3f622",
"sha256:e4ef42e82b0b493c5849eed98b5ab49d6767caf982127e9a33167f1153b36cc5"
],
"version": "==2018.5"
},
"pyyaml": {
"hashes": [
"sha256:0c507b7f74b3d2dd4d1322ec8a94794927305ab4cebbe89cc47fe5e81541e6e8",
"sha256:16b20e970597e051997d90dc2cddc713a2876c47e3d92d59ee198700c5427736",
"sha256:3262c96a1ca437e7e4763e2843746588a965426550f3797a79fca9c6199c431f",
"sha256:326420cbb492172dec84b0f65c80942de6cedb5233c413dd824483989c000608",
"sha256:4474f8ea030b5127225b8894d626bb66c01cda098d47a2b0d3429b6700af9fd8",
"sha256:592766c6303207a20efc445587778322d7f73b161bd994f227adaa341ba212ab",
"sha256:5ac82e411044fb129bae5cfbeb3ba626acb2af31a8d17d175004b70862a741a7",
"sha256:5f84523c076ad14ff5e6c037fe1c89a7f73a3e04cf0377cb4d017014976433f3",
"sha256:827dc04b8fa7d07c44de11fabbc888e627fa8293b695e0f99cb544fdfa1bf0d1",
"sha256:b4c423ab23291d3945ac61346feeb9a0dc4184999ede5e7c43e1ffb975130ae6",
"sha256:bc6bced57f826ca7cb5125a10b23fd0f2fff3b7c4701d64c439a300ce665fff8",
"sha256:c01b880ec30b5a6e6aa67b09a2fe3fb30473008c85cd6a67359a1b15ed6d83a4",
"sha256:ca233c64c6e40eaa6c66ef97058cdc80e8d0157a443655baa1b2966e812807ca",
"sha256:e863072cdf4c72eebf179342c94e6989c67185842d9997960b3e69290b2fa269"
],
"version": "==3.12"
},
"regex": {
"hashes": [
"sha256:0201b4cb42f03842a75044a3d08b62a79114f753b33ee421182c631d9f5c81f5",
"sha256:204524604456e3e0e25c3f24da4efc43db78edfe7623f1049e03d3aa51ddda48",
"sha256:24c0e838bde42fe9d4d5650e75bff2d4bb5867968fb9409331dbe39154f6e8e2",
"sha256:4360143da844cd985effb7fb9af04beaa2d371ab13e4a1996424aa2f6fbfb877",
"sha256:4b8c6fd44dbd46cdbf755c20a7b9dedb32b8d15b707a0e470dfa66ba5df00a35",
"sha256:4fb5622987f3863cfa76c40ab3338a7dc8ed2bac236bb53e638b21ea397a3252",
"sha256:5eebefef6e3d97e4c1f9f77eac6555c32ed3afbd769955a9f7339256a4d50d6c",
"sha256:7222204c6acb9e52688678ec7306b2dfd84df68bc8eb251be74fec4e9dd85bf9",
"sha256:809cbbcbe291cf7bc9cf6aeac6a9a400a71318292d0a2a07effaf4b4782203a0",
"sha256:9c9075c727afec23eab196be51737eedb00cd67bb4a2e0170fa8dc65163838f3",
"sha256:a105b1d7287d412e8fe99959c1b80f7cbd76184b6466d63579b6d256a406a76e",
"sha256:c3d9cfd214a3e5a25f2da9817c389e32069e210b067ebb901e10f3270da9b259",
"sha256:c3ebfb5ec2dd750f7861734b25ea7d5ae89d6f33b427cccf3cafa36a1511d862",
"sha256:c670acd71d975b0c91579d40ae7f703d0daa1c871f12e46394a2c7be0ec8e217",
"sha256:e371482ee3e6e5ca19ea83cdfc84bf69cac230e3cb1073c8c3bebf3f143cd7a5"
],
"version": "==2018.6.9"
},
"requests": {
"hashes": [
"sha256:63b52e3c866428a224f97cab011de738c36aec0185aa91cfacd418b5d58911d1",
"sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a"
],
"index": "pypi",
"version": "==2.19.1"
},
"requests-oauthlib": {
"hashes": [
"sha256:8886bfec5ad7afb391ed5443b1f697c6f4ae98d0e5620839d8b4499c032ada3f",
"sha256:e21232e2465808c0e892e0e4dbb8c2faafec16ac6dc067dd546e9b466f3deac8",
"sha256:fe3282f48fb134ee0035712159f5429215459407f6d5484013343031ff1a400d"
],
"version": "==1.0.0"
},
"rsa": {
"hashes": [
"sha256:25df4e10c263fb88b5ace923dd84bf9aa7f5019687b5e55382ffcdb8bede9db5",
"sha256:43f682fea81c452c98d09fc316aae12de6d30c4b5c84226642cf8f8fd1c93abd"
],
"version": "==3.4.2"
},
"six": {
"hashes": [
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
],
"version": "==1.11.0"
},
"snaptime": {
"hashes": [
"sha256:e3f1eb89043d58d30721ab98cb65023f1a4c2740e3b197704298b163c92d508b"
],
"version": "==0.2.4"
},
"tzlocal": {
"hashes": [
"sha256:4ebeb848845ac898da6519b9b31879cf13b6626f7184c496037b818e238f2c4e"
],
"version": "==1.5.1"
},
"urllib3": {
"hashes": [
"sha256:a68ac5e15e76e7e5dd2b8f94007233e01effe3e50e8daddf69acfd81cb686baf",
"sha256:b5725a0bd4ba422ab0e66e89e030c806576753ea3ee08554382c14e685d117b5"
],
"version": "==1.23"
},
"websocket-client": {
"hashes": [
"sha256:18f1170e6a1b5463986739d9fd45c4308b0d025c1b2f9b88788d8f69e8a5eb4a",
"sha256:db70953ae4a064698b27ae56dcad84d0ee68b7b43cb40940f537738f38f510c1"
],
"version": "==0.48.0"
}
},
"develop": {
"args": {
"hashes": [
"sha256:a785b8d837625e9b61c39108532d95b85274acd679693b71ebb5156848fcf814"
],
"version": "==0.1.0"
},
"clint": {
"hashes": [
"sha256:05224c32b1075563d0b16d0015faaf9da43aa214e4a2140e51f08789e7a4c5aa"
],
"version": "==0.5.1"
},
"coverage": {
"hashes": [
"sha256:03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba",
"sha256:0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed",
"sha256:104ab3934abaf5be871a583541e8829d6c19ce7bde2923b2751e0d3ca44db60a",
"sha256:15b111b6a0f46ee1a485414a52a7ad1d703bdf984e9ed3c288a4414d3871dcbd",
"sha256:198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640",
"sha256:1c383d2ef13ade2acc636556fd544dba6e14fa30755f26812f54300e401f98f2",
"sha256:28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162",
"sha256:2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508",
"sha256:337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249",
"sha256:3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694",
"sha256:3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a",
"sha256:3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287",
"sha256:3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1",
"sha256:4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000",
"sha256:56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1",
"sha256:5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e",
"sha256:69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5",
"sha256:6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062",
"sha256:701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba",
"sha256:7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc",
"sha256:76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc",
"sha256:7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99",
"sha256:7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653",
"sha256:7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c",
"sha256:8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558",
"sha256:9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f",
"sha256:9e112fcbe0148a6fa4f0a02e8d58e94470fc6cb82a5481618fea901699bf34c4",
"sha256:ac4fef68da01116a5c117eba4dd46f2e06847a497de5ed1d64bb99a5fda1ef91",
"sha256:b8815995e050764c8610dbc82641807d196927c3dbed207f0a079833ffcf588d",
"sha256:be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9",
"sha256:c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd",
"sha256:de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d",
"sha256:e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6",
"sha256:e4d96c07229f58cb686120f168276e434660e4358cc9cf3b0464210b04913e77",
"sha256:f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80",
"sha256:f8a923a85cb099422ad5a2e345fe877bbc89a8a8b23235824a93488150e45f6e"
],
"version": "==4.5.1"
},
"doublex": {
"hashes": [
"sha256:062af49d9e4148bc47b7512d3fdc8e145dea4671d074ffd54b2464a19d3757ab"
],
"index": "pypi",
"version": "==1.8.4"
},
"doublex-expects": {
"hashes": [
"sha256:5421bd92319c77ccc5a81d595d06e9c9f7f670de342b33e8007a81e70f9fade8"
],
"index": "pypi",
"version": "==0.7.0rc2"
},
"expects": {
"hashes": [
"sha256:37538d7b0fa9c0d53e37d07b0e8c07d89754d3deec1f0f8ed1be27f4f10363dd"
],
"index": "pypi",
"version": "==0.8.0"
},
"mamba": {
"hashes": [
"sha256:63e70a8666039cf143a255000e23f29be4ea4b5b8169f2b053f94eb73a2ea9e2"
],
"index": "pypi",
"version": "==0.9.3"
},
"pyhamcrest": {
"hashes": [
"sha256:6b672c02fdf7470df9674ab82263841ce8333fb143f32f021f6cb26f0e512420",
"sha256:7a4bdade0ed98c699d728191a058a60a44d2f9c213c51e2dd1e6fb42f2c6128a",
"sha256:8ffaa0a53da57e89de14ced7185ac746227a8894dbd5a3c718bf05ddbd1d56cd",
"sha256:bac0bea7358666ce52e3c6c85139632ed89f115e9af52d44b3c36e0bf8cf16a9",
"sha256:f30e9a310bcc1808de817a92e95169ffd16b60cbc5a016a49c8d0e8ababfae79"
],
"version": "==1.9.0"
},
"six": {
"hashes": [
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9",
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"
],
"version": "==1.11.0"
}
}
}


@ -0,0 +1,164 @@
# Playbooks
Following the [OWASP ideas](https://owaspsummit.org/Working-Sessions/Security-Playbooks/index.html),
playbooks are workflows and prescriptive instructions on how to handle specific
security activities or incidents.
More specifically, playbooks are actions that are executed when Falco finds
unexpected behavior in our Kubernetes cluster. We have implemented them in
Python, and since several Serverless concepts fit well with playbooks, we use
[Kubeless](https://kubeless.io/) to deploy them.
## Requirements
* A working Kubernetes cluster
* [kubeless cli executable](https://kubeless.io/docs/quick-start/)
* Python 3.6
* pipenv
## Deploying a playbook
Deploying a playbook involves a couple of components: the function that is going
to be deployed with Kubeless and a trigger for that function.
We have automated those steps in a generic script, *deploy_playbook*, which packages
the playbook and its dependencies, uploads them to Kubernetes and creates the
Kubeless trigger.
```
./deploy_playbook -p slack -e SLACK_WEBHOOK_URL="https://..." -t "falco.error.*" -t "falco.info.*"
```
### Parameters
* -p: The playbook to deploy. It must match the name of the top-level script; in this
example, *slack.py*, which contains the wiring between the playbook and the Kubeless
function (see the sketch after this list).
* -e: Sets configuration settings for the playbook, in this case the URL where we
have to post messages. You can specify multiple *-e* flags.
* -t: Topic to subscribe to. You can specify multiple *-t* flags and a trigger
will be created for each topic, so when we receive a message on that topic,
our function will be run. In this case, the playbook will run when a
falco.error or falco.info alert is raised.
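The top-level script referenced by *-p* is just a thin Kubeless entry point; a sketch mirroring the *delete.py* included in this change (Kubeless passes the NATS message, i.e. the Falco alert, in event['data']):
```
import playbooks
from playbooks import infrastructure

playbook = playbooks.DeletePod(
    infrastructure.KubernetesClient()
)

def handler(event, context):
    playbook.run(event['data'])
```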
### Kubeless 101
Under the hood, kubeless provides several useful commands for checking function state.
We can retrieve all functions deployed in our cluster:
```
kubeless function list
```
And we can see several interesting stats about function usage:
```
kubeless function top
```
And we can see bindings between functions and NATS topics:
```
kubeless trigger nats list
```
### Undeploying a function
You have to delete every component using the kubeless CLI tool.
Generally, it takes two steps: remove the triggers and remove the function.
Remove the triggers:
```
kubeless trigger nats delete trigger-name
```
If you have deployed with the script, trigger names look like
*falco-<playbook>-trigger-<index>*, where index is the index of the subscribed topic.
In any case, you can list all triggers and pick the right name.
Remove the function:
```
kubeless function delete function-name
```
If you have deployed with the script, the function name will be *falco-<playbook>*,
but you can list all functions and pick the right name.
## Testing
One of the goals of the project was to keep the playbooks well tested.
You can execute the tests with:
```
pipenv --three install -d
export KUBERNETES_LOAD_KUBE_CONFIG=1
pipenv run mamba --format=documentation
```
The first line installs the development tools, which include the test runner and
assertion libraries. The second one tells the Kubernetes client to use the same
configuration as kubectl, and the third one runs the tests.
The tests under *specs/infrastructure* run against a real Kubernetes cluster,
but those under *spec/reactions* can be run without any kind of infrastructure.
## Available Playbooks
### Delete a Pod
This playbook deletes the offending pod using the Kubernetes API.
```
./deploy_playbook -p delete -t "falco.notice.terminal_shell_in_container"
```
In this example, every time we receive a *Terminal shell in container* alert from
Falco, the offending pod will be deleted.
### Send message to Slack
This playbook posts a message to Slack.
```
./deploy_playbook -p slack -t "falco.error.*" -e SLACK_WEBHOOK_URL="https://..."
```
#### Parameters
* SLACK_WEBHOOK_URL: The webhook URL used for posting messages to Slack.
In this example, when Falco raises an error we will be notified in Slack.
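For reference, the message built by this playbook has the following shape; the field values below are taken from the specs included in this change:
```
message = {
    'text': 'Unexpected setuid call by non-sudo, non-root program (...)',
    'attachments': [{
        'color': '#397ec3',  # derived from the alert priority
        'fields': [
            {'title': 'Rule', 'value': 'Non sudo setuid', 'short': False},
            {'title': 'Priority', 'value': 'Notice', 'short': True},
            {'title': 'Time', 'value': 'Thu, 24 May 2018 10:22:15 GMT', 'short': True},
            {'title': 'Kubernetes Pod Name', 'value': 'falco-event-generator-6fd89678f9-cdkvz', 'short': True},
            {'title': 'Container Id', 'value': '1c76f49f40b4', 'short': True},
        ]
    }]
}
```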
### Taint a Node
This playbook taints the node where the pod is running.
```
./deploy_playbook -p taint -t "falco.notice.contact_k8s_api_server_from_container"
```
#### Parameters
* TAINT_KEY: This is the taint key. Default value: falco/alert
* TAINT_VALUE: This is the taint value. Default value: true
* TAINT_EFFECT: This is the taint effect. Default value: NoSchedule
In this example, we avoid scheduling on the node that originated the *Contact
K8S API server from container* alert. We can take a more aggressive approach and
use -e TAINT_EFFECT=NoExecute.
### Network isolate a Pod
This reaction denies all ingress/egress traffic from a Pod. It's intended to
be used with Calico or other similar projects for managing networking in
Kubernetes.
```
./deploy_playbook -p isolate -t "falco.notice.write_below_binary_dir" -t "falco.error.write_below_etc"
```
So as soon as we notice that someone wrote below /bin (or other binary directories)
or below /etc, we disconnect that pod. It's like a trap for our attackers.
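Under the hood, the playbook only labels the pod with isolated=true; the *network-policy.yaml* manifest in this change selects pods with that label and blocks their Ingress and Egress traffic. A minimal sketch of the same patch using the Kubernetes Python client (pod name and namespace are illustrative):
```
from kubernetes import client, config

config.load_kube_config()  # use load_incluster_config() when running inside the cluster
v1 = client.CoreV1Api()

# Add the label that the deny-all NetworkPolicy matches on
body = client.V1Pod(metadata=client.V1ObjectMeta(labels={'isolated': 'true'}))
v1.patch_namespaced_pod('nginx', 'default', body)
```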


@ -0,0 +1,16 @@
import sys
import os.path
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
import os
import playbooks
from playbooks import infrastructure
playbook = playbooks.DeletePod(
infrastructure.KubernetesClient()
)
def handler(event, context):
playbook.run(event['data'])


@ -0,0 +1,68 @@
#!/bin/bash
#
# Deploys a playbook
set -e
function usage() {
cat<<EOF
Usage: $0 [options]
-p playbook Playbook to be deployed. It is the name of the top-level script for Kubeless: slack, taint, isolate.
-e environment Environment variables for the Kubeless function. You can pass multiple environment variables passing several -e parameters.
-t topic NATS topic to subscribe function. You can bind to multiple topics passing several -t parameters.
You must pass the playbook and at least one topic to subscribe.
Example:
deploy_playbook -p slack -t "falco.error.*" -e SLACK_WEBHOOK_URL=http://foobar.com/...
EOF
exit 1
}
function join { local IFS="$1"; shift; echo "$*"; }
playbook=""
environment=()
topics=()
while getopts "r:e:t:" arg; do
case $arg in
r)
playbook="${OPTARG}"
;;
e)
environment+=("${OPTARG}")
;;
t)
topics+=("${OPTARG}")
;;
*)
usage
;;
esac
done
if [[ "${playbook}" == "" || ${#topics[@]} -eq 0 ]]; then
usage
fi
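# Generate requirements.txt from the Pipfile so Kubeless can install the playbook
# dependencies, then package the playbooks module together with the top-level script.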
pipenv lock --requirements | sed '/^-/ d' > requirements.txt
zip "${playbook}".zip -r playbooks/*.py "${playbook}".py
kubeless function deploy --from-file "${playbook}".zip \
--dependencies requirements.txt \
--env "$(join , ${environment[*]})" \
--runtime python3.6 \
--handler "${playbook}".handler \
falco-"${playbook}"
rm requirements.txt ${playbook}.zip
for index in ${!topics[*]}; do
kubeless trigger nats create falco-"${playbook}"-trigger-"${index}" \
--function-selector created-by=kubeless,function=falco-${playbook} \
--trigger-topic "${topics[$index]}"
done


@ -0,0 +1,16 @@
import sys
import os.path
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
import os
import playbooks
from playbooks import infrastructure
playbook = playbooks.NetworkIsolatePod(
infrastructure.KubernetesClient()
)
def handler(event, context):
playbook.run(event['data'])


@ -0,0 +1,101 @@
import maya
class DeletePod:
def __init__(self, k8s_client):
self._k8s_client = k8s_client
def run(self, alert):
pod_name = alert['output_fields']['k8s.pod.name']
self._k8s_client.delete_pod(pod_name)
class AddMessageToSlack:
def __init__(self, slack_client):
self._slack_client = slack_client
def run(self, alert):
message = self._build_slack_message(alert)
self._slack_client.post_message(message)
return message
def _build_slack_message(self, alert):
return {
'text': self._output(alert),
'attachments': [{
'color': self._color_from(alert['priority']),
'fields': [
{
'title': 'Rule',
'value': alert['rule'],
'short': False
},
{
'title': 'Priority',
'value': alert['priority'],
'short': True
},
{
'title': 'Time',
'value': str(maya.parse(alert['time'])),
'short': True
},
{
'title': 'Kubernetes Pod Name',
'value': alert['output_fields']['k8s.pod.name'],
'short': True
},
{
'title': 'Container Id',
'value': alert['output_fields']['container.id'],
'short': True
}
]
}]
}
def _output(self, alert):
output = alert['output'].split(': ')[1]
priority_plus_whitespace_length = len(alert['priority']) + 1
return output[priority_plus_whitespace_length:]
_COLORS = {
'Emergency': '#b12737',
'Alert': '#f24141',
'Critical': '#fc7335',
'Error': '#f28143',
'Warning': '#f9c414',
'Notice': '#397ec3',
'Informational': '#8fc0e7',
'Debug': '#8fc0e7',
}
def _color_from(self, priority):
return self._COLORS.get(priority, '#eeeeee')
class TaintNode:
def __init__(self, k8s_client, key, value, effect):
self._k8s_client = k8s_client
self._key = key
self._value = value
self._effect = effect
def run(self, alert):
pod = alert['output_fields']['k8s.pod.name']
node = self._k8s_client.find_node_running_pod(pod)
self._k8s_client.taint_node(node, self._key, self._value, self._effect)
class NetworkIsolatePod:
def __init__(self, k8s_client):
self._k8s_client = k8s_client
def run(self, alert):
pod = alert['output_fields']['k8s.pod.name']
self._k8s_client.add_label_to_pod(pod, 'isolated', 'true')


@ -0,0 +1,74 @@
import os
import json
from kubernetes import client, config
import requests
class KubernetesClient:
def __init__(self):
if 'KUBERNETES_LOAD_KUBE_CONFIG' in os.environ:
config.load_kube_config()
else:
config.load_incluster_config()
self._v1 = client.CoreV1Api()
def delete_pod(self, name):
namespace = self._find_pod_namespace(name)
body = client.V1DeleteOptions()
self._v1.delete_namespaced_pod(name=name,
namespace=namespace,
body=body)
def exists_pod(self, name):
response = self._v1.list_pod_for_all_namespaces(watch=False)
for item in response.items:
if item.metadata.name == name:
if item.metadata.deletion_timestamp is None:
return True
return False
def _find_pod_namespace(self, name):
response = self._v1.list_pod_for_all_namespaces(watch=False)
for item in response.items:
if item.metadata.name == name:
return item.metadata.namespace
def find_node_running_pod(self, name):
response = self._v1.list_pod_for_all_namespaces(watch=False)
for item in response.items:
if item.metadata.name == name:
return item.spec.node_name
def taint_node(self, name, key, value, effect):
body = client.V1Node(
spec=client.V1NodeSpec(
taints=[
client.V1Taint(key=key, value=value, effect=effect)
]
)
)
return self._v1.patch_node(name, body)
def add_label_to_pod(self, name, label, value):
namespace = self._find_pod_namespace(name)
body = client.V1Pod(
metadata=client.V1ObjectMeta(
labels={label: value}
)
)
return self._v1.patch_namespaced_pod(name, namespace, body)
class SlackClient:
def __init__(self, slack_webhook_url):
self._slack_webhook_url = slack_webhook_url
def post_message(self, message):
requests.post(self._slack_webhook_url,
data=json.dumps(message))


@ -0,0 +1,11 @@
from setuptools import setup
setup(name='playbooks',
version='0.1',
description='A set of playbooks for Falco alerts',
url='http://github.com/draios/falco-playbooks',
author='Néstor Salceda',
author_email='nestor.salceda@sysdig.com',
license='',
packages=['playbooks'],
zip_safe=False)


@ -0,0 +1,16 @@
import sys
import os.path
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
import os
import playbooks
from playbooks import infrastructure
playbook = playbooks.AddMessageToSlack(
infrastructure.SlackClient(os.environ['SLACK_WEBHOOK_URL'])
)
def handler(event, context):
playbook.run(event['data'])


@ -0,0 +1,65 @@
from mamba import description, context, it, before
from expects import expect, be_false, be_true, start_with, equal, have_key
import subprocess
import os.path
from playbooks import infrastructure
with description(infrastructure.KubernetesClient) as self:
with before.each:
self.kubernetes_client = infrastructure.KubernetesClient()
with context('when checking if a pod exists'):
with before.each:
self._create_nginx_pod()
with context('and pod exists'):
with it('returns true'):
expect(self.kubernetes_client.exists_pod('nginx')).to(be_true)
with context('and pod does not exist'):
with it('returns false'):
self.kubernetes_client.delete_pod('nginx')
expect(self.kubernetes_client.exists_pod('nginx')).to(be_false)
with it('finds node running pod'):
self._create_nginx_pod()
node = self.kubernetes_client.find_node_running_pod('nginx')
expect(node).to(start_with('gke-sysdig-work-default-pool'))
with it('taints node'):
self._create_nginx_pod()
node_name = self.kubernetes_client.find_node_running_pod('nginx')
node = self.kubernetes_client.taint_node(node_name,
'playbooks',
'true',
'NoSchedule')
expect(node.spec.taints[0].effect).to(equal('NoSchedule'))
expect(node.spec.taints[0].key).to(equal('playbooks'))
expect(node.spec.taints[0].value).to(equal('true'))
with it('adds labels to a pod'):
self._create_nginx_pod()
pod = self.kubernetes_client.add_label_to_pod('nginx',
'testing',
'true')
expect(pod.metadata.labels).to(have_key('testing', 'true'))
def _create_nginx_pod(self):
current_directory = os.path.dirname(os.path.realpath(__file__))
pod_manifesto = os.path.join(current_directory,
'..',
'support',
'deployment.yaml')
subprocess.run(['kubectl', 'create', '-f', pod_manifesto])


@ -0,0 +1,16 @@
from mamba import description, it
import os
from playbooks import infrastructure
with description(infrastructure.SlackClient) as self:
with it('posts a message to #kubeless-demo channel'):
slack_client = infrastructure.SlackClient(os.environ['SLACK_WEBHOOK_URL'])
message = {
'text': 'Hello from Python! :metal:'
}
slack_client.post_message(message)


@ -0,0 +1,62 @@
from mamba import description, it, before, context
from expects import expect, have_key, have_keys, contain
from doublex import Spy
from doublex_expects import have_been_called_with
from playbooks import infrastructure
import playbooks
with description(playbooks.AddMessageToSlack) as self:
with before.each:
self.slack_client = Spy(infrastructure.SlackClient)
self.playbook = playbooks.AddMessageToSlack(self.slack_client)
with context('when publishing a message to slack'):
with before.each:
self.alert = {
"output": "10:22:15.576767292: Notice Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4",
"output_fields": {
"container.id": "1c76f49f40b4",
"evt.arg.uid": "root",
"evt.time": 1527157335576767292,
"k8s.pod.name": "falco-event-generator-6fd89678f9-cdkvz",
"proc.cmdline": "event_generator ",
"proc.pname": "event_generator",
"user.name": "bin",
"user.uid": 2
},
"priority": "Notice",
"rule": "Non sudo setuid",
"time": "2018-05-24T10:22:15.576767292Z"
}
self.message = self.playbook.run(self.alert)
with it('publishes message to slack'):
expect(self.slack_client.post_message).to(have_been_called_with(self.message))
with it('includes falco output'):
falco_output = 'Unexpected setuid call by non-sudo, non-root program (user=bin cur_uid=2 parent=event_generator command=event_generator uid=root) k8s.pod=falco-event-generator-6fd89678f9-cdkvz container=1c76f49f40b4'
expect(self.message).to(have_key('text', falco_output))
with it('includes color based on priority'):
expect(self.message['attachments'][0]).to(have_key('color'))
with it('includes priority'):
expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Priority', value='Notice')))
with it('includes rule name'):
expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Rule', value='Non sudo setuid')))
with it('includes time when alert happened'):
expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Time', value='Thu, 24 May 2018 10:22:15 GMT')))
with it('includes kubernetes pod name'):
expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Kubernetes Pod Name', value='falco-event-generator-6fd89678f9-cdkvz')))
with it('includes container id'):
expect(self.message['attachments'][0]['fields']).to(contain(have_keys(title='Container Id', value='1c76f49f40b4')))


@ -0,0 +1,22 @@
from mamba import description, it, before
from expects import expect
from doublex import Spy
from doublex_expects import have_been_called_with
from playbooks import infrastructure
import playbooks
with description(playbooks.DeletePod) as self:
with before.each:
self.k8s_client = Spy(infrastructure.KubernetesClient)
self.playbook = playbooks.DeletePod(self.k8s_client)
with it('deletes a pod'):
pod_name = 'a pod name'
alert = {'output_fields': {'k8s.pod.name': pod_name}}
self.playbook.run(alert)
expect(self.k8s_client.delete_pod).to(have_been_called_with(pod_name))


@ -0,0 +1,22 @@
from mamba import description, it, before
from expects import expect
from doublex import Spy
from doublex_expects import have_been_called
from playbooks import infrastructure
import playbooks
with description(playbooks.NetworkIsolatePod) as self:
with before.each:
self.k8s_client = Spy(infrastructure.KubernetesClient)
self.playbook = playbooks.NetworkIsolatePod(self.k8s_client)
with it('adds isolation label to pod'):
pod_name = 'any pod name'
alert = {'output_fields': {'k8s.pod.name': pod_name}}
self.playbook.run(alert)
expect(self.k8s_client.add_label_to_pod).to(have_been_called)


@ -0,0 +1,34 @@
from mamba import description, it, before
from expects import expect
from doublex import Spy, when
from doublex_expects import have_been_called_with
from playbooks import infrastructure
import playbooks
with description(playbooks.TaintNode) as self:
with before.each:
self.k8s_client = Spy(infrastructure.KubernetesClient)
self.key = 'falco/alert'
self.value = 'true'
self.effect = 'NoSchedule'
self.playbook = playbooks.TaintNode(self.k8s_client,
self.key,
self.value,
self.effect)
with it('taints the node'):
pod_name = 'any pod name'
alert = {'output_fields': {'k8s.pod.name': pod_name}}
node = 'any node'
when(self.k8s_client).find_node_running_pod(pod_name).returns(node)
self.playbook.run(alert)
expect(self.k8s_client.taint_node).to(have_been_called_with(node,
self.key,
self.value,
self.effect))


@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80


@ -0,0 +1,19 @@
import sys
import os.path
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
import os
import playbooks
from playbooks import infrastructure
playbook = playbooks.TaintNode(
infrastructure.KubernetesClient(),
os.environ.get('TAINT_KEY', 'falco/alert'),
os.environ.get('TAINT_VALUE', 'true'),
os.environ.get('TAINT_EFFECT', 'NoSchedule')
)
def handler(event, context):
playbook.run(event['data'])