mirror of
https://github.com/falcosecurity/falco.git
synced 2025-10-22 12:27:10 +00:00
* Fix spec name * Add a playbook for capturing stuff using sysdig in a container * Add event name to the job name to avoid collisions among captures * Implement a job for starting a container in a Pod via the Kubernetes client. We capture data for the whole Pod, not limited to one container * Use the sysdig/capturer image to capture and upload the capture to S3 * There is a bug with environment string splitting in kubeless https://github.com/kubeless/kubeless/issues/824 so here is a workaround which uses multiple --env flags, one for each environment variable. * Use a shorter job name; the Kubernetes limit is 64 characters. * Add a deployable playbook with Kubeless for capturing stuff with Sysdig * Document the integration with Sysdig capture * Add Dockerfile for creating sysdig-capturer
21 lines
484 B
Python
21 lines
484 B
Python
import sys
import os
import os.path

# Make the directory containing this file importable so the local
# `playbooks` package resolves regardless of the current working directory.
# NOTE: the original wrapped this in a single-argument os.path.join(),
# which is a no-op (join with one argument returns it unchanged).
sys.path.append(os.path.abspath(os.path.dirname(__file__)))

import playbooks
from playbooks import infrastructure
# Build the capture playbook once at import time so every handler
# invocation reuses the same Kubernetes client and configuration.
# Values are read in the same order as before: client first, then the
# optional capture duration (default 120 s), then the required AWS
# credentials (a missing variable raises KeyError at import time).
_kubernetes_client = infrastructure.KubernetesClient()
_capture_duration = int(os.environ.get('CAPTURE_DURATION', 120))
_s3_bucket = os.environ['AWS_S3_BUCKET']
_aws_access_key = os.environ['AWS_ACCESS_KEY_ID']
_aws_secret_key = os.environ['AWS_SECRET_ACCESS_KEY']

playbook = playbooks.StartSysdigCaptureForContainer(
    _kubernetes_client,
    _capture_duration,
    _s3_bucket,
    _aws_access_key,
    _aws_secret_key,
)
|
|
|
|
|
|
def handler(event, context):
    """Kubeless entry point: run the capture playbook on the event payload.

    `context` is part of the Kubeless calling convention and is unused here.
    Returns None; `playbook.run` performs the work as a side effect.
    """
    payload = event['data']
    playbook.run(payload)