Merge pull request #1559 from l0kod/landlock-v6-linux-v4.9.20

Backport Landlock v6 for Linux v4.9.20
Authored by Justin Cormack on 2017-04-09 19:31:21 +01:00; committed by GitHub.
Merge commit 9d1b120498
15 changed files with 7794 additions and 0 deletions


@@ -17,6 +17,7 @@ If you want to create a project, please submit a pull request to create a new di
- [eBPF](ebpf/) iovisor eBPF tools
- [AWS](aws/) AWS build support
- [Swarmd](swarmd) Standalone swarmkit based orchestrator
- [Landlock LSM](landlock/) programmatic access control
## Current projects not yet documented
- Clear Linux integration (Intel)

File diff suppressed because it is too large.


@@ -0,0 +1,26 @@
## MOBY DEBUG OPTIONS ##
CONFIG_LOCKDEP=y
CONFIG_FRAME_POINTER=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_DEBUG_LOCK_ALLOC=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_PROVE_RCU=y
CONFIG_RCU_TRACE=y
CONFIG_KGDB=y
CONFIG_KGDB_SERIAL_CONSOLE=y
CONFIG_KGDBOC=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_WX=y


@@ -0,0 +1,38 @@
From 9a94a681dffa7b2115806405ab573ba41052ad04 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Tue, 7 Feb 2017 21:56:05 +0100
Subject: [PATCH 01/12] tools lib bpf: Add missing header to the library
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Include stddef.h to define size_t.
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Acked-by: Wang Nan <wangnan0@huawei.com>
Cc: Alexei Starovoitov <ast@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Joe Stringer <joe@ovn.org>
Link: http://lkml.kernel.org/r/20170207205609.8035-2-mic@digikod.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
(cherry picked from commit 7a5980f9c0066d085319415ec15ee51f165111f5)
---
tools/lib/bpf/bpf.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index e8ba54087497..eb584e639500 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -22,6 +22,7 @@
#define __BPF_BPF_H
#include <linux/bpf.h>
+#include <stddef.h>
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
int max_entries);
--
2.11.0


@@ -0,0 +1,39 @@
From 7dbf58fc4e42a55cc2d37130362080f7ecf8ca1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 8 Feb 2017 21:27:44 +0100
Subject: [PATCH 02/12] samples/bpf: Add missing header
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Include unistd.h to define __NR_getuid and __NR_getsid.
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Acked-by: Joe Stringer <joe@ovn.org>
Acked-by: Wang Nan <wangnan0@huawei.com>
Cc: Alexei Starovoitov <ast@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/20170208202744.16274-4-mic@digikod.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
(cherry picked from commit af392a8f5399e831cb502ff210dacef8b38ca513)
---
samples/bpf/tracex5_kern.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index fd12d7154d42..7e4cf74553ff 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -8,6 +8,7 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/seccomp.h>
+#include <uapi/linux/unistd.h>
#include "bpf_helpers.h"
#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
--
2.11.0


@@ -0,0 +1,40 @@
From 7e0ee6e01fbafac371e50052eddd59af9b053825 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 8 Feb 2017 21:27:42 +0100
Subject: [PATCH 03/12] samples/bpf: Ignore already processed ELF sections
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add a missing check for the map fixup loop.
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Acked-by: Joe Stringer <joe@ovn.org>
Acked-by: Wang Nan <wangnan0@huawei.com>
Cc: Alexei Starovoitov <ast@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/20170208202744.16274-2-mic@digikod.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
(cherry picked from commit 16ad1329002f905c643a438ddcfb0a180787850a)
---
samples/bpf/bpf_load.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 97913e109b14..4b86bd3c7c6b 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -307,6 +307,8 @@ int load_bpf_file(char *path)
/* load programs that need map fixup (relocations) */
for (i = 1; i < ehdr.e_shnum; i++) {
+ if (processed_sec[i])
+ continue;
if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
continue;
--
2.11.0


@@ -0,0 +1,44 @@
From 3dd0d725f5e83a53ea2d4cbb3fe0856ce2b836cf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 8 Feb 2017 21:27:43 +0100
Subject: [PATCH 04/12] samples/bpf: Reset global variables
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Before loading a new ELF, clean previous kernel version, license and
processed sections.
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Acked-by: Joe Stringer <joe@ovn.org>
Acked-by: Wang Nan <wangnan0@huawei.com>
Cc: Alexei Starovoitov <ast@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/20170208202744.16274-3-mic@digikod.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
(cherry picked from commit a734fb5d60067a73dd7099a58756847c07f9cd68)
---
samples/bpf/bpf_load.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 4b86bd3c7c6b..765a6e45b92d 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -256,6 +256,11 @@ int load_bpf_file(char *path)
Elf_Data *data, *data_prog, *symbols = NULL;
char *shname, *shname_prog;
+ /* reset global variables */
+ kern_version = 0;
+ memset(license, 0, sizeof(license));
+ memset(processed_sec, 0, sizeof(processed_sec));
+
if (elf_version(EV_CURRENT) == EV_NONE)
return 1;
--
2.11.0
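
For context, this reset only matters when a single process loads more than one eBPF object file: without clearing kern_version, license and processed_sec, state from the first load_bpf_file() call would leak into the second. A minimal caller sketch (the object file names are purely illustrative):

/* Hypothetical sample loading two BPF object files in one process.
 * Without the reset added above, license, kern_version and
 * processed_sec left over from the first call could corrupt the
 * second load.
 */
#include <stdio.h>
#include "bpf_load.h"	/* samples/bpf helper: load_bpf_file() */

int main(void)
{
	char first[] = "first_prog_kern.o";	/* illustrative names */
	char second[] = "second_prog_kern.o";

	if (load_bpf_file(first) || load_bpf_file(second)) {
		fprintf(stderr, "load_bpf_file failed\n");
		return 1;
	}
	return 0;
}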


@@ -0,0 +1,640 @@
From 0ecb458868b0902aa40d583a24819b97998c5555 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 05/12] bpf: Add eBPF program subtype and is_valid_subtype()
verifier
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The goal of the program subtype is to be able to have different static
fine-grained verifications for a unique program type.
The struct bpf_verifier_ops gets a new optional function:
is_valid_subtype(). This new verifier is called at the beginning of the
eBPF program verification to check if the (optional) program subtype is
valid.
For now, only Landlock eBPF programs are using a program subtype (see
next commit) but this could be used by other program types in the future.
Changes since v5:
* use a prog_subtype pointer and make it future-proof
* add subtype test
* constify bpf_load_program()'s subtype argument
* cleanup subtype initialization
* rebase
Changes since v4:
* replace the "status" field with "version" (more generic)
* replace the "access" field with "ability" (less confusing)
Changes since v3:
* remove the "origin" field
* add an "option" field
* cleanup comments
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20160827205559.GA43880@ast-mbp.thefacebook.com
(cherry picked from commit 173f32497bc24bc1e3379a0050e0ca603fb2922d)
---
include/linux/bpf.h | 7 +++-
include/linux/filter.h | 2 +
include/uapi/linux/bpf.h | 11 +++++
kernel/bpf/syscall.c | 92 ++++++++++++++++++++++++++++--------------
kernel/bpf/verifier.c | 14 ++++++-
kernel/trace/bpf_trace.c | 15 ++++---
net/core/filter.c | 22 ++++++----
samples/bpf/bpf_load.c | 3 +-
samples/bpf/fds_example.c | 2 +-
samples/bpf/libbpf.c | 7 +++-
samples/bpf/libbpf.h | 3 +-
samples/bpf/sock_example.c | 2 +-
samples/bpf/test_verifier.c | 2 +-
tools/include/uapi/linux/bpf.h | 11 +++++
tools/lib/bpf/bpf.c | 5 ++-
tools/lib/bpf/bpf.h | 2 +-
tools/lib/bpf/libbpf.c | 4 +-
tools/perf/tests/bpf.c | 2 +-
18 files changed, 147 insertions(+), 59 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c201017b5730..d4b9ca479f79 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -152,18 +152,21 @@ struct bpf_prog;
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
- const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id,
+ union bpf_prog_subtype *prog_subtype);
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type);
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
+ bool (*is_valid_subtype)(union bpf_prog_subtype *prog_subtype);
};
struct bpf_prog_type_list {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1f09c521adfe..782a271bf54e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -406,6 +406,8 @@ struct bpf_prog {
kmemcheck_bitfield_end(meta);
u32 len; /* Number of filter blocks */
enum bpf_prog_type type; /* Type of BPF program */
+ u8 has_subtype;
+ union bpf_prog_subtype subtype; /* Fine-grained verifications */
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f09c70b97eca..a203fbcb0b2d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -107,6 +107,15 @@ enum bpf_prog_type {
#define BPF_F_NO_PREALLOC (1U << 0)
+union bpf_prog_subtype {
+ struct {
+ __u32 version; /* cf. documentation */
+ __u32 event; /* enum landlock_subtype_event */
+ __aligned_u64 ability; /* LANDLOCK_SUBTYPE_ABILITY_* */
+ __aligned_u64 option; /* LANDLOCK_SUBTYPE_OPTION_* */
+ } landlock_rule;
+} __attribute__((aligned(8)));
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -135,6 +144,8 @@ union bpf_attr {
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
+ __aligned_u64 prog_subtype; /* bpf_prog_subtype address */
+ __u32 prog_subtype_size;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 237f3d6a7ddc..17bbd1af517f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -580,7 +580,8 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
continue;
}
- fn = prog->aux->ops->get_func_proto(insn->imm);
+ fn = prog->aux->ops->get_func_proto(insn->imm,
+ &prog->subtype);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
@@ -717,8 +718,44 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
+static int check_user_buf(void __user *uptr, unsigned int size_req,
+ unsigned int size_max)
+{
+ if (!access_ok(VERIFY_READ, uptr, 1))
+ return -EFAULT;
+
+ if (size_req > PAGE_SIZE) /* silly large */
+ return -E2BIG;
+
+ /* If we're handed a bigger struct than we know of,
+ * ensure all the unknown bits are 0 - i.e. new
+ * user-space does not rely on any kernel feature
+ * extensions we dont know about yet.
+ */
+ if (size_req > size_max) {
+ unsigned char __user *addr;
+ unsigned char __user *end;
+ unsigned char val;
+ int err;
+
+ addr = uptr + size_max;
+ end = uptr + size_req;
+
+ for (; addr < end; addr++) {
+ err = get_user(val, addr);
+ if (err)
+ return err;
+ if (val)
+ return -E2BIG;
+ }
+ return size_max;
+ }
+
+ return size_req;
+}
+
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD kern_version
+#define BPF_PROG_LOAD_LAST_FIELD prog_subtype_size
static int bpf_prog_load(union bpf_attr *attr)
{
@@ -777,6 +814,26 @@ static int bpf_prog_load(union bpf_attr *attr)
if (err < 0)
goto free_prog;
+ /* copy eBPF program subtype from user space */
+ if (attr->prog_subtype) {
+ __u32 size;
+
+ size = check_user_buf((void __user *)attr->prog_subtype,
+ attr->prog_subtype_size,
+ sizeof(prog->subtype));
+ if (size < 0) {
+ err = size;
+ goto free_prog;
+ }
+ /* prog->subtype is __GFP_ZERO */
+ if (copy_from_user(&prog->subtype,
+ u64_to_user_ptr(attr->prog_subtype), size)
+ != 0)
+ return -EFAULT;
+ prog->has_subtype = 1;
+ } else if (attr->prog_subtype_size != 0)
+ return -EINVAL;
+
/* run eBPF verifier */
err = bpf_check(&prog, attr);
if (err < 0)
@@ -832,34 +889,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
return -EPERM;
- if (!access_ok(VERIFY_READ, uattr, 1))
- return -EFAULT;
-
- if (size > PAGE_SIZE) /* silly large */
- return -E2BIG;
-
- /* If we're handed a bigger struct than we know of,
- * ensure all the unknown bits are 0 - i.e. new
- * user-space does not rely on any kernel feature
- * extensions we dont know about yet.
- */
- if (size > sizeof(attr)) {
- unsigned char __user *addr;
- unsigned char __user *end;
- unsigned char val;
-
- addr = (void __user *)uattr + sizeof(attr);
- end = (void __user *)uattr + size;
-
- for (; addr < end; addr++) {
- err = get_user(val, addr);
- if (err)
- return err;
- if (val)
- return -E2BIG;
- }
- size = sizeof(attr);
- }
+ size = check_user_buf((void __user *)uattr, size, sizeof(attr));
+ if (size < 0)
+ return size;
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
if (copy_from_user(&attr, uattr, size) != 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 85d1c9423ccb..f5f082de2f7f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -660,7 +660,8 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
return 0;
if (env->prog->aux->ops->is_valid_access &&
- env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
+ env->prog->aux->ops->is_valid_access(off, size, t, reg_type,
+ &env->prog->subtype)) {
/* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size)
env->prog->aux->max_ctx_offset = off + size;
@@ -1182,7 +1183,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
}
if (env->prog->aux->ops->get_func_proto)
- fn = env->prog->aux->ops->get_func_proto(func_id);
+ fn = env->prog->aux->ops->get_func_proto(func_id,
+ &env->prog->subtype);
if (!fn) {
verbose("unknown func %d\n", func_id);
@@ -3116,6 +3118,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
return -E2BIG;
+ if ((*prog)->aux->ops->is_valid_subtype) {
+ if (!(*prog)->aux->ops->is_valid_subtype(&(*prog)->subtype))
+ return -EINVAL;
+ } else if ((*prog)->has_subtype) {
+ /* do not accept a subtype if the program does not handle it */
+ return -EINVAL;
+ }
+
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 5dcb99281259..653695fbc520 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -435,7 +435,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
}
}
-static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id,
+ union bpf_prog_subtype *prog_subtype)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -449,7 +450,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
{
if (off < 0 || off >= sizeof(struct pt_regs))
return false;
@@ -517,7 +519,8 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
.arg3_type = ARG_ANYTHING,
};
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id,
+ union bpf_prog_subtype *prog_subtype)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -530,7 +533,8 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
{
if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
return false;
@@ -552,7 +556,8 @@ static struct bpf_prog_type_list tracepoint_tl = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
{
if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
return false;
diff --git a/net/core/filter.c b/net/core/filter.c
index b391209838ef..36a773c846b9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2531,7 +2531,8 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
};
static const struct bpf_func_proto *
-sk_filter_func_proto(enum bpf_func_id func_id)
+sk_filter_func_proto(enum bpf_func_id func_id,
+ union bpf_prog_subtype *prog_subtype)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -2557,7 +2558,8 @@ sk_filter_func_proto(enum bpf_func_id func_id)
}
static const struct bpf_func_proto *
-tc_cls_act_func_proto(enum bpf_func_id func_id)
+tc_cls_act_func_proto(enum bpf_func_id func_id,
+ union bpf_prog_subtype *prog_subtype)
{
switch (func_id) {
case BPF_FUNC_skb_store_bytes:
@@ -2611,12 +2613,13 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return sk_filter_func_proto(func_id);
+ return sk_filter_func_proto(func_id, prog_subtype);
}
}
static const struct bpf_func_proto *
-xdp_func_proto(enum bpf_func_id func_id)
+xdp_func_proto(enum bpf_func_id func_id,
+ union bpf_prog_subtype *prog_subtype)
{
switch (func_id) {
case BPF_FUNC_perf_event_output:
@@ -2624,7 +2627,7 @@ xdp_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
default:
- return sk_filter_func_proto(func_id);
+ return sk_filter_func_proto(func_id, prog_subtype);
}
}
@@ -2643,7 +2646,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
static bool sk_filter_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
{
switch (off) {
case offsetof(struct __sk_buff, tc_classid):
@@ -2706,7 +2710,8 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
{
if (type == BPF_WRITE) {
switch (off) {
@@ -2749,7 +2754,8 @@ static bool __is_valid_xdp_access(int off, int size,
static bool xdp_is_valid_access(int off, int size,
enum bpf_access_type type,
- enum bpf_reg_type *reg_type)
+ enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
{
if (type == BPF_WRITE)
return false;
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 765a6e45b92d..40cf828a37c7 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -56,6 +56,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
char buf[256];
int fd, efd, err, id;
struct perf_event_attr attr = {};
+ union bpf_prog_subtype *st = NULL;
attr.type = PERF_TYPE_TRACEPOINT;
attr.sample_type = PERF_SAMPLE_RAW;
@@ -77,7 +78,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
return -1;
}
- fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
+ fd = bpf_prog_load(prog_type, prog, size, license, kern_version, st);
if (fd < 0) {
printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
return -1;
diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c
index 625e797be6ef..df38b68f3586 100644
--- a/samples/bpf/fds_example.c
+++ b/samples/bpf/fds_example.c
@@ -59,7 +59,7 @@ static int bpf_prog_create(const char *object)
return prog_fd[0];
} else {
return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER,
- insns, sizeof(insns), "GPL", 0);
+ insns, sizeof(insns), "GPL", 0, NULL);
}
}
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 9969e35550c3..b5a4275d13a2 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -13,7 +13,7 @@
#include <arpa/inet.h>
#include "libbpf.h"
-static __u64 ptr_to_u64(void *ptr)
+static __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
}
@@ -82,7 +82,8 @@ char bpf_log_buf[LOG_BUF_SIZE];
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int prog_len,
- const char *license, int kern_version)
+ const char *license, int kern_version,
+ const union bpf_prog_subtype *subtype)
{
union bpf_attr attr = {
.prog_type = prog_type,
@@ -92,6 +93,8 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
.log_buf = ptr_to_u64(bpf_log_buf),
.log_size = LOG_BUF_SIZE,
.log_level = 1,
+ .prog_subtype = ptr_to_u64(subtype),
+ .prog_subtype_size = subtype ? sizeof(*subtype) : 0,
};
/* assign one field outside of struct init to make sure any
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index ac6edb61b64a..56a86b847544 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -13,7 +13,8 @@ int bpf_get_next_key(int fd, void *key, void *next_key);
int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, int insn_len,
- const char *license, int kern_version);
+ const char *license, int kern_version,
+ const union bpf_prog_subtype *subtype);
int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);
diff --git a/samples/bpf/sock_example.c b/samples/bpf/sock_example.c
index 28b60baa9fa8..521f918ab34d 100644
--- a/samples/bpf/sock_example.c
+++ b/samples/bpf/sock_example.c
@@ -56,7 +56,7 @@ static int test_sock(void)
};
prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
- "GPL", 0);
+ "GPL", 0, NULL);
if (prog_fd < 0) {
printf("failed to load prog '%s'\n", strerror(errno));
goto cleanup;
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 369ffaad3799..7a965da8ed2d 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -2468,7 +2468,7 @@ static int test(void)
prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len * sizeof(struct bpf_insn),
- "GPL", 0);
+ "GPL", 0, NULL);
if (unpriv && tests[i].result_unpriv != UNDEF)
expected_result = tests[i].result_unpriv;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9e5fc168c8a3..aae3b82a673c 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -106,6 +106,15 @@ enum bpf_prog_type {
#define BPF_F_NO_PREALLOC (1U << 0)
+union bpf_prog_subtype {
+ struct {
+ __u32 version; /* cf. documentation */
+ __u32 event; /* enum landlock_subtype_event */
+ __aligned_u64 ability; /* LANDLOCK_SUBTYPE_ABILITY_* */
+ __aligned_u64 option; /* LANDLOCK_SUBTYPE_OPTION_* */
+ } landlock_rule;
+} __attribute__((aligned(8)));
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -134,6 +143,8 @@ union bpf_attr {
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
+ __aligned_u64 prog_subtype; /* bpf_prog_subtype address */
+ __u32 prog_subtype_size;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4212ed62235b..57258decb4bd 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -70,7 +70,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size,
int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
size_t insns_cnt, char *license,
- u32 kern_version, char *log_buf, size_t log_buf_sz)
+ u32 kern_version, char *log_buf, size_t log_buf_sz,
+ const union bpf_prog_subtype *subtype)
{
int fd;
union bpf_attr attr;
@@ -84,6 +85,8 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
attr.log_size = 0;
attr.log_level = 0;
attr.kern_version = kern_version;
+ attr.prog_subtype = ptr_to_u64(subtype);
+ attr.prog_subtype_size = subtype ? sizeof(*subtype) : 0;
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
if (fd >= 0 || !log_buf || !log_buf_sz)
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index eb584e639500..8a772c817b87 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -32,7 +32,7 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
size_t insns_cnt, char *license,
u32 kern_version, char *log_buf,
- size_t log_buf_sz);
+ size_t log_buf_sz, const union bpf_prog_subtype *subtype);
int bpf_map_update_elem(int fd, void *key, void *value,
u64 flags);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b699aea9a025..ea8c03a12c16 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -916,7 +916,7 @@ load_program(enum bpf_prog_type type, struct bpf_insn *insns,
pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
ret = bpf_load_program(type, insns, insns_cnt, license,
- kern_version, log_buf, BPF_LOG_BUF_SIZE);
+ kern_version, log_buf, BPF_LOG_BUF_SIZE, NULL);
if (ret >= 0) {
*pfd = ret;
@@ -943,7 +943,7 @@ load_program(enum bpf_prog_type type, struct bpf_insn *insns,
fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
insns_cnt, license, kern_version,
- NULL, 0);
+ NULL, 0, NULL);
if (fd >= 0) {
close(fd);
ret = -LIBBPF_ERRNO__PROGTYPE;
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 2673e86ed50f..285b872b46a2 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -266,7 +266,7 @@ static int check_env(void)
err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
sizeof(insns) / sizeof(insns[0]),
- license, kver_int, NULL, 0);
+ license, kver_int, NULL, 0, NULL);
if (err < 0) {
pr_err("Missing basic BPF support, skip this test: %s\n",
strerror(errno));
--
2.11.0
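
For reference, a minimal userland sketch of how the two new bpf_attr fields are meant to be filled when calling bpf(2) directly. Leaving prog_subtype and prog_subtype_size at zero keeps the historic behaviour; only program types that implement is_valid_subtype() accept a non-NULL subtype, anything else is now rejected with -EINVAL. The wrapper name below is made up for illustration.

/* Sketch: hand-rolled BPF_PROG_LOAD with the new subtype fields.
 * A NULL subtype (and size 0) behaves exactly as before this patch.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_load_with_subtype(enum bpf_prog_type type,
				  const struct bpf_insn *insns, __u32 insn_cnt,
				  const union bpf_prog_subtype *subtype)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_subtype = (__u64)(unsigned long)subtype;
	attr.prog_subtype_size = subtype ? sizeof(*subtype) : 0;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}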


@@ -0,0 +1,566 @@
From 7136b7f6c5f451e4c1fd0db5beb69994a2b9a9f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 06/12] bpf,landlock: Define an eBPF program type for Landlock
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add a new type of eBPF program used by Landlock rules.
This new BPF program type will be registered with the Landlock LSM
initialization.
Add an initial Landlock Kconfig.
Changes since v5:
* rename file hooks.c to init.c
* fix spelling
Changes since v4:
* merge a minimal (not enabled) LSM code and Kconfig in this commit
Changes since v3:
* split commit
* revamp the landlock_context:
* add arch, syscall_nr and syscall_cmd (ioctl, fcntl…) to be able to
cross-check action with the event type
* replace args array with dedicated fields to ease the addition of new
fields
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Serge E. Hallyn <serge@hallyn.com>
(cherry picked from commit f2265894fff03038ec0a81dbcf68ee8d1bf7c33d)
---
include/linux/landlock.h | 23 ++++++++
include/uapi/linux/bpf.h | 110 ++++++++++++++++++++++++++++++++++++
security/Kconfig | 1 +
security/Makefile | 2 +
security/landlock/Kconfig | 18 ++++++
security/landlock/Makefile | 3 +
security/landlock/common.h | 25 +++++++++
security/landlock/init.c | 123 +++++++++++++++++++++++++++++++++++++++++
tools/include/uapi/linux/bpf.h | 111 +++++++++++++++++++++++++++++++++++++
9 files changed, 416 insertions(+)
create mode 100644 include/linux/landlock.h
create mode 100644 security/landlock/Kconfig
create mode 100644 security/landlock/Makefile
create mode 100644 security/landlock/common.h
create mode 100644 security/landlock/init.c
diff --git a/include/linux/landlock.h b/include/linux/landlock.h
new file mode 100644
index 000000000000..53013dc374fe
--- /dev/null
+++ b/include/linux/landlock.h
@@ -0,0 +1,23 @@
+/*
+ * Landlock LSM - public kernel headers
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_LANDLOCK_H
+#define _LINUX_LANDLOCK_H
+#ifdef CONFIG_SECURITY_LANDLOCK
+
+/*
+ * This is not intended for the UAPI headers. Each userland software should use
+ * a static minimal version for the required features as explained in the
+ * documentation.
+ */
+#define LANDLOCK_VERSION 1
+
+#endif /* CONFIG_SECURITY_LANDLOCK */
+#endif /* _LINUX_LANDLOCK_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a203fbcb0b2d..f190be4f609f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -96,6 +96,12 @@ enum bpf_prog_type {
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
BPF_PROG_TYPE_PERF_EVENT,
+ BPF_PROG_TYPE_CGROUP_SKB,
+ BPF_PROG_TYPE_CGROUP_SOCK,
+ BPF_PROG_TYPE_LWT_IN,
+ BPF_PROG_TYPE_LWT_OUT,
+ BPF_PROG_TYPE_LWT_XMIT,
+ BPF_PROG_TYPE_LANDLOCK,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -532,4 +538,108 @@ struct xdp_md {
__u32 data_end;
};
+/**
+ * enum landlock_subtype_event - event occurring when an action is performed on
+ * a particular kernel object
+ *
+ * An event is a policy decision point which exposes the same context type
+ * (especially the same arg[0-9] field types) for each rule execution.
+ *
+ * @LANDLOCK_SUBTYPE_EVENT_UNSPEC: invalid value
+ * @LANDLOCK_SUBTYPE_EVENT_FS: generic filesystem event
+ */
+enum landlock_subtype_event {
+ LANDLOCK_SUBTYPE_EVENT_UNSPEC,
+ LANDLOCK_SUBTYPE_EVENT_FS,
+};
+#define _LANDLOCK_SUBTYPE_EVENT_LAST LANDLOCK_SUBTYPE_EVENT_FS
+
+/**
+ * DOC: landlock_subtype_access
+ *
+ * eBPF context and functions allowed for a rule
+ *
+ * - LANDLOCK_SUBTYPE_ABILITY_WRITE: allows to directly send notification to
+ * userland (e.g. through a map), which may leaks sensitive information
+ * - LANDLOCK_SUBTYPE_ABILITY_DEBUG: allows to do debug actions (e.g. writing
+ * logs), which may be dangerous and should only be used for rule testing
+ */
+#define LANDLOCK_SUBTYPE_ABILITY_WRITE (1ULL << 0)
+#define LANDLOCK_SUBTYPE_ABILITY_DEBUG (1ULL << 1)
+#define _LANDLOCK_SUBTYPE_ABILITY_NB 2
+#define _LANDLOCK_SUBTYPE_ABILITY_MASK ((1ULL << _LANDLOCK_SUBTYPE_ABILITY_NB) - 1)
+
+/*
+ * Future options for a Landlock rule (e.g. run even if a previous rule denied
+ * an action).
+ */
+#define _LANDLOCK_SUBTYPE_OPTION_NB 0
+#define _LANDLOCK_SUBTYPE_OPTION_MASK ((1ULL << _LANDLOCK_SUBTYPE_OPTION_NB) - 1)
+
+/*
+ * Status visible in the @status field of a context (e.g. already called in
+ * this syscall session, with same args...).
+ *
+ * The @status field exposed to a rule shall depend on the rule version.
+ */
+#define _LANDLOCK_SUBTYPE_STATUS_NB 0
+#define _LANDLOCK_SUBTYPE_STATUS_MASK ((1ULL << _LANDLOCK_SUBTYPE_STATUS_NB) - 1)
+
+/**
+ * DOC: landlock_action_fs
+ *
+ * - %LANDLOCK_ACTION_FS_EXEC: execute a file or walk through a directory
+ * - %LANDLOCK_ACTION_FS_WRITE: modify a file or a directory view (which
+ * include mount actions)
+ * - %LANDLOCK_ACTION_FS_READ: read a file or a directory
+ * - %LANDLOCK_ACTION_FS_NEW: create a file or a directory
+ * - %LANDLOCK_ACTION_FS_GET: open or receive a file
+ * - %LANDLOCK_ACTION_FS_REMOVE: unlink a file or remove a directory
+ *
+ * Each of the following actions are specific to syscall multiplexers. They
+ * fill the syscall_cmd field from &struct landlock_context with their custom
+ * command.
+ *
+ * - %LANDLOCK_ACTION_FS_IOCTL: ioctl command
+ * - %LANDLOCK_ACTION_FS_LOCK: flock or fcntl lock command
+ * - %LANDLOCK_ACTION_FS_FCNTL: fcntl command
+ */
+#define LANDLOCK_ACTION_FS_EXEC (1ULL << 0)
+#define LANDLOCK_ACTION_FS_WRITE (1ULL << 1)
+#define LANDLOCK_ACTION_FS_READ (1ULL << 2)
+#define LANDLOCK_ACTION_FS_NEW (1ULL << 3)
+#define LANDLOCK_ACTION_FS_GET (1ULL << 4)
+#define LANDLOCK_ACTION_FS_REMOVE (1ULL << 5)
+#define LANDLOCK_ACTION_FS_IOCTL (1ULL << 6)
+#define LANDLOCK_ACTION_FS_LOCK (1ULL << 7)
+#define LANDLOCK_ACTION_FS_FCNTL (1ULL << 8)
+#define _LANDLOCK_ACTION_FS_NB 9
+#define _LANDLOCK_ACTION_FS_MASK ((1ULL << _LANDLOCK_ACTION_FS_NB) - 1)
+
+
+/**
+ * struct landlock_context - context accessible to a Landlock rule
+ *
+ * @status: bitfield for future use (LANDLOCK_SUBTYPE_STATUS_*)
+ * @arch: indicates system call convention as an AUDIT_ARCH_* value
+ * as defined in <linux/audit.h>
+ * @syscall_nr: the system call number called by the current process (may be
+ * useful to debug: find out from which syscall this request came
+ * from)
+ * @syscall_cmd: contains the command used by a multiplexer syscall (e.g.
+ * ioctl, fcntl, flock)
+ * @event: event type (&enum landlock_subtype_event)
+ * @arg1: event's first optional argument
+ * @arg2: event's second optional argument
+ */
+struct landlock_context {
+ __u64 status;
+ __u32 arch;
+ __u32 syscall_nr;
+ __u32 syscall_cmd;
+ __u32 event;
+ __u64 arg1;
+ __u64 arg2;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/security/Kconfig b/security/Kconfig
index 118f4549404e..c63194c561c5 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -164,6 +164,7 @@ source security/tomoyo/Kconfig
source security/apparmor/Kconfig
source security/loadpin/Kconfig
source security/yama/Kconfig
+source security/landlock/Kconfig
source security/integrity/Kconfig
diff --git a/security/Makefile b/security/Makefile
index f2d71cdb8e19..3fdc2f19dc48 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -9,6 +9,7 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
+subdir-$(CONFIG_SECURITY_LANDLOCK) += landlock
# always enable default capabilities
obj-y += commoncap.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
+obj-$(CONFIG_SECURITY_LANDLOCK) += landlock/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/landlock/Kconfig b/security/landlock/Kconfig
new file mode 100644
index 000000000000..aa5808e116f1
--- /dev/null
+++ b/security/landlock/Kconfig
@@ -0,0 +1,18 @@
+config SECURITY_LANDLOCK
+ bool "Landlock sandbox support"
+ depends on SECURITY
+ depends on BPF_SYSCALL
+ depends on SECCOMP_FILTER
+ default y
+ help
+ Landlock is a stackable LSM which allows to load a security policy to
+ restrict processes (i.e. create a sandbox). The policy is a list of
+ stacked eBPF programs, called rules, dedicated to restrict access to
+ a type of kernel object (e.g. file).
+
+ You need to enable seccomp filter to apply a security policy to a
+ process hierarchy (e.g. application with built-in sandboxing).
+
+ See Documentation/security/landlock/ for further information.
+
+ If you are unsure how to answer this question, answer Y.
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
new file mode 100644
index 000000000000..7205f9a7a2ee
--- /dev/null
+++ b/security/landlock/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o
+
+landlock-y := init.o
diff --git a/security/landlock/common.h b/security/landlock/common.h
new file mode 100644
index 000000000000..a2483405349f
--- /dev/null
+++ b/security/landlock/common.h
@@ -0,0 +1,25 @@
+/*
+ * Landlock LSM - private headers
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _SECURITY_LANDLOCK_COMMON_H
+#define _SECURITY_LANDLOCK_COMMON_H
+
+/**
+ * get_index - get an index for the rules of struct landlock_events
+ *
+ * @event: a Landlock event type
+ */
+static inline int get_index(enum landlock_subtype_event event)
+{
+ /* event ID > 0 for loaded programs */
+ return event - 1;
+}
+
+#endif /* _SECURITY_LANDLOCK_COMMON_H */
diff --git a/security/landlock/init.c b/security/landlock/init.c
new file mode 100644
index 000000000000..0a97026f1c07
--- /dev/null
+++ b/security/landlock/init.c
@@ -0,0 +1,123 @@
+/*
+ * Landlock LSM - init
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bpf.h> /* enum bpf_access_type */
+#include <linux/capability.h> /* capable */
+#include <linux/landlock.h> /* LANDLOCK_VERSION */
+
+
+static inline bool bpf_landlock_is_valid_access(int off, int size,
+ enum bpf_access_type type, enum bpf_reg_type *reg_type,
+ union bpf_prog_subtype *prog_subtype)
+{
+ if (WARN_ON(!prog_subtype))
+ return false;
+
+ switch (prog_subtype->landlock_rule.event) {
+ case LANDLOCK_SUBTYPE_EVENT_FS:
+ case LANDLOCK_SUBTYPE_EVENT_UNSPEC:
+ default:
+ return false;
+ }
+}
+
+static inline bool bpf_landlock_is_valid_subtype(
+ union bpf_prog_subtype *prog_subtype)
+{
+ if (WARN_ON(!prog_subtype))
+ return false;
+
+ switch (prog_subtype->landlock_rule.event) {
+ case LANDLOCK_SUBTYPE_EVENT_FS:
+ break;
+ case LANDLOCK_SUBTYPE_EVENT_UNSPEC:
+ default:
+ return false;
+ }
+
+ if (!prog_subtype->landlock_rule.version ||
+ prog_subtype->landlock_rule.version > LANDLOCK_VERSION)
+ return false;
+ if (!prog_subtype->landlock_rule.event ||
+ prog_subtype->landlock_rule.event > _LANDLOCK_SUBTYPE_EVENT_LAST)
+ return false;
+ if (prog_subtype->landlock_rule.ability & ~_LANDLOCK_SUBTYPE_ABILITY_MASK)
+ return false;
+ if (prog_subtype->landlock_rule.option & ~_LANDLOCK_SUBTYPE_OPTION_MASK)
+ return false;
+
+ /* check ability flags */
+ if (prog_subtype->landlock_rule.ability & LANDLOCK_SUBTYPE_ABILITY_WRITE &&
+ !capable(CAP_SYS_ADMIN))
+ return false;
+ if (prog_subtype->landlock_rule.ability & LANDLOCK_SUBTYPE_ABILITY_DEBUG &&
+ !capable(CAP_SYS_ADMIN))
+ return false;
+
+ return true;
+}
+
+static inline const struct bpf_func_proto *bpf_landlock_func_proto(
+ enum bpf_func_id func_id, union bpf_prog_subtype *prog_subtype)
+{
+ bool event_fs = (prog_subtype->landlock_rule.event ==
+ LANDLOCK_SUBTYPE_EVENT_FS);
+ bool ability_write = !!(prog_subtype->landlock_rule.ability &
+ LANDLOCK_SUBTYPE_ABILITY_WRITE);
+ bool ability_debug = !!(prog_subtype->landlock_rule.ability &
+ LANDLOCK_SUBTYPE_ABILITY_DEBUG);
+
+ switch (func_id) {
+ case BPF_FUNC_map_lookup_elem:
+ return &bpf_map_lookup_elem_proto;
+
+ /* ability_write */
+ case BPF_FUNC_map_delete_elem:
+ if (ability_write)
+ return &bpf_map_delete_elem_proto;
+ return NULL;
+ case BPF_FUNC_map_update_elem:
+ if (ability_write)
+ return &bpf_map_update_elem_proto;
+ return NULL;
+
+ /* ability_debug */
+ case BPF_FUNC_get_current_comm:
+ if (ability_debug)
+ return &bpf_get_current_comm_proto;
+ return NULL;
+ case BPF_FUNC_get_current_pid_tgid:
+ if (ability_debug)
+ return &bpf_get_current_pid_tgid_proto;
+ return NULL;
+ case BPF_FUNC_get_current_uid_gid:
+ if (ability_debug)
+ return &bpf_get_current_uid_gid_proto;
+ return NULL;
+ case BPF_FUNC_trace_printk:
+ if (ability_debug)
+ return bpf_get_trace_printk_proto();
+ return NULL;
+
+ default:
+ return NULL;
+ }
+}
+
+static const struct bpf_verifier_ops bpf_landlock_ops = {
+ .get_func_proto = bpf_landlock_func_proto,
+ .is_valid_access = bpf_landlock_is_valid_access,
+ .is_valid_subtype = bpf_landlock_is_valid_subtype,
+};
+
+static struct bpf_prog_type_list bpf_landlock_type __ro_after_init = {
+ .ops = &bpf_landlock_ops,
+ .type = BPF_PROG_TYPE_LANDLOCK,
+};
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index aae3b82a673c..0167f61cb3ba 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -95,6 +95,13 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
+ BPF_PROG_TYPE_PERF_EVENT,
+ BPF_PROG_TYPE_CGROUP_SKB,
+ BPF_PROG_TYPE_CGROUP_SOCK,
+ BPF_PROG_TYPE_LWT_IN,
+ BPF_PROG_TYPE_LWT_OUT,
+ BPF_PROG_TYPE_LWT_XMIT,
+ BPF_PROG_TYPE_LANDLOCK,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -481,4 +488,108 @@ struct xdp_md {
__u32 data_end;
};
+/**
+ * enum landlock_subtype_event - event occurring when an action is performed on
+ * a particular kernel object
+ *
+ * An event is a policy decision point which exposes the same context type
+ * (especially the same arg[0-9] field types) for each rule execution.
+ *
+ * @LANDLOCK_SUBTYPE_EVENT_UNSPEC: invalid value
+ * @LANDLOCK_SUBTYPE_EVENT_FS: generic filesystem event
+ */
+enum landlock_subtype_event {
+ LANDLOCK_SUBTYPE_EVENT_UNSPEC,
+ LANDLOCK_SUBTYPE_EVENT_FS,
+};
+#define _LANDLOCK_SUBTYPE_EVENT_LAST LANDLOCK_SUBTYPE_EVENT_FS
+
+/**
+ * DOC: landlock_subtype_access
+ *
+ * eBPF context and functions allowed for a rule
+ *
+ * - LANDLOCK_SUBTYPE_ABILITY_WRITE: allows to directly send notification to
+ * userland (e.g. through a map), which may leaks sensitive information
+ * - LANDLOCK_SUBTYPE_ABILITY_DEBUG: allows to do debug actions (e.g. writing
+ * logs), which may be dangerous and should only be used for rule testing
+ */
+#define LANDLOCK_SUBTYPE_ABILITY_WRITE (1ULL << 0)
+#define LANDLOCK_SUBTYPE_ABILITY_DEBUG (1ULL << 1)
+#define _LANDLOCK_SUBTYPE_ABILITY_NB 2
+#define _LANDLOCK_SUBTYPE_ABILITY_MASK ((1ULL << _LANDLOCK_SUBTYPE_ABILITY_NB) - 1)
+
+/*
+ * Future options for a Landlock rule (e.g. run even if a previous rule denied
+ * an action).
+ */
+#define _LANDLOCK_SUBTYPE_OPTION_NB 0
+#define _LANDLOCK_SUBTYPE_OPTION_MASK ((1ULL << _LANDLOCK_SUBTYPE_OPTION_NB) - 1)
+
+/*
+ * Status visible in the @status field of a context (e.g. already called in
+ * this syscall session, with same args...).
+ *
+ * The @status field exposed to a rule shall depend on the rule version.
+ */
+#define _LANDLOCK_SUBTYPE_STATUS_NB 0
+#define _LANDLOCK_SUBTYPE_STATUS_MASK ((1ULL << _LANDLOCK_SUBTYPE_STATUS_NB) - 1)
+
+/**
+ * DOC: landlock_action_fs
+ *
+ * - %LANDLOCK_ACTION_FS_EXEC: execute a file or walk through a directory
+ * - %LANDLOCK_ACTION_FS_WRITE: modify a file or a directory view (which
+ * include mount actions)
+ * - %LANDLOCK_ACTION_FS_READ: read a file or a directory
+ * - %LANDLOCK_ACTION_FS_NEW: create a file or a directory
+ * - %LANDLOCK_ACTION_FS_GET: open or receive a file
+ * - %LANDLOCK_ACTION_FS_REMOVE: unlink a file or remove a directory
+ *
+ * Each of the following actions are specific to syscall multiplexers. They
+ * fill the syscall_cmd field from &struct landlock_context with their custom
+ * command.
+ *
+ * - %LANDLOCK_ACTION_FS_IOCTL: ioctl command
+ * - %LANDLOCK_ACTION_FS_LOCK: flock or fcntl lock command
+ * - %LANDLOCK_ACTION_FS_FCNTL: fcntl command
+ */
+#define LANDLOCK_ACTION_FS_EXEC (1ULL << 0)
+#define LANDLOCK_ACTION_FS_WRITE (1ULL << 1)
+#define LANDLOCK_ACTION_FS_READ (1ULL << 2)
+#define LANDLOCK_ACTION_FS_NEW (1ULL << 3)
+#define LANDLOCK_ACTION_FS_GET (1ULL << 4)
+#define LANDLOCK_ACTION_FS_REMOVE (1ULL << 5)
+#define LANDLOCK_ACTION_FS_IOCTL (1ULL << 6)
+#define LANDLOCK_ACTION_FS_LOCK (1ULL << 7)
+#define LANDLOCK_ACTION_FS_FCNTL (1ULL << 8)
+#define _LANDLOCK_ACTION_FS_NB 9
+#define _LANDLOCK_ACTION_FS_MASK ((1ULL << _LANDLOCK_ACTION_FS_NB) - 1)
+
+
+/**
+ * struct landlock_context - context accessible to a Landlock rule
+ *
+ * @status: bitfield for future use (LANDLOCK_SUBTYPE_STATUS_*)
+ * @arch: indicates system call convention as an AUDIT_ARCH_* value
+ * as defined in <linux/audit.h>
+ * @syscall_nr: the system call number called by the current process (may be
+ * useful to debug: find out from which syscall this request came
+ * from)
+ * @syscall_cmd: contains the command used by a multiplexer syscall (e.g.
+ * ioctl, fcntl, flock)
+ * @event: event type (&enum landlock_subtype_event)
+ * @arg1: event's first optional argument
+ * @arg2: event's second optional argument
+ */
+struct landlock_context {
+ __u64 status;
+ __u32 arch;
+ __u32 syscall_nr;
+ __u32 syscall_cmd;
+ __u32 event;
+ __u64 arg1;
+ __u64 arg2;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
--
2.11.0
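
As a rough illustration of how the UAPI pieces above fit together, here is a userland sketch that loads a do-nothing Landlock rule (return 0, i.e. allow in this series) through the samples/bpf helper extended in the previous patch. It assumes BPF_PROG_TYPE_LANDLOCK has been registered (the LSM initialization in a later patch of this series) and fills the subtype exactly as bpf_landlock_is_valid_subtype() expects; per the include/linux/landlock.h comment, userland hard-codes the minimal version it needs instead of using LANDLOCK_VERSION.

/* Sketch: load a trivial Landlock rule that always returns 0 (allow).
 * Assumes the program type is registered by the LSM init (later patch).
 */
#include <linux/bpf.h>
#include "libbpf.h"	/* samples/bpf: bpf_prog_load(), BPF insn macros */

int load_allow_all_rule(void)
{
	const union bpf_prog_subtype subtype = {
		.landlock_rule = {
			.version = 1,	/* static minimal version */
			.event = LANDLOCK_SUBTYPE_EVENT_FS,
		},
	};
	const struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* R0 = 0: allow */
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(BPF_PROG_TYPE_LANDLOCK, insns, sizeof(insns),
			     "GPL", 0, &subtype);
}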


@@ -0,0 +1,377 @@
From ca8ae2066f8852a118e0885ab22d22e603001481 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 07/12] bpf: Define handle_fs and add a new helper
bpf_handle_fs_get_mode()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add an eBPF function bpf_handle_fs_get_mode(handle_fs) to get the mode
of an abstract object wrapping either a file, a dentry, a path, or an
inode.
Changes since v5:
* cosmetic fixes and rebase
Changes since v4:
* use a file abstraction (handle) to wrap inode, dentry, path and file
structs
* remove bpf_landlock_cmp_fs_beneath()
* rename the BPF helper and move it to kernel/bpf/
* tighten helpers accessible by a Landlock rule
Changes since v3:
* remove bpf_landlock_cmp_fs_prop() (suggested by Alexei Starovoitov)
* add hooks dealing with struct inode and struct path pointers:
inode_permission and inode_getattr
* add abstraction over eBPF helper arguments thanks to wrapping structs
* add bpf_landlock_get_fs_mode() helper to check file type and mode
* merge WARN_ON() (suggested by Kees Cook)
* fix and update bpf_helpers.h
* use BPF_CALL_* for eBPF helpers (suggested by Alexei Starovoitov)
* make handle arraymap safe (RCU) and remove buggy synchronize_rcu()
* factor out the arraymap walk
* use size_t to index array (suggested by Jann Horn)
Changes since v2:
* add MNT_INTERNAL check to only add file handle from user-visible FS
(e.g. no anonymous inode)
* replace struct file* with struct path* in map_landlock_handle
* add BPF protos
* fix bpf_landlock_cmp_fs_prop_with_struct_file()
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Serge E. Hallyn <serge@hallyn.com>
Cc: Jann Horn <jann@thejh.net>
(cherry picked from commit 7cb1d72a1cca9442bc0b9c3eeff621b9d1709296)
---
include/linux/bpf.h | 33 +++++++++++++++++++++
include/uapi/linux/bpf.h | 17 +++++++++++
kernel/bpf/Makefile | 2 +-
kernel/bpf/helpers_fs.c | 52 ++++++++++++++++++++++++++++++++
kernel/bpf/verifier.c | 6 ++++
samples/bpf/bpf_helpers.h | 2 ++
security/landlock/init.c | 6 ++++
tools/include/uapi/linux/bpf.h | 67 ++++++++++++++++++++++++++++++++++++++++++
8 files changed, 184 insertions(+), 1 deletion(-)
create mode 100644 kernel/bpf/helpers_fs.c
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d4b9ca479f79..d66843a2aafb 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -13,6 +13,11 @@
#include <linux/percpu.h>
#include <linux/err.h>
+/* FS helpers */
+#include <linux/dcache.h> /* struct dentry */
+#include <linux/fs.h> /* struct file, struct inode */
+#include <linux/path.h> /* struct path */
+
struct perf_event;
struct bpf_map;
@@ -80,6 +85,8 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
+
+ ARG_CONST_PTR_TO_HANDLE_FS, /* pointer to an abstract FS struct */
};
/* type of values returned from helper functions */
@@ -146,6 +153,9 @@ enum bpf_reg_type {
* map element.
*/
PTR_TO_MAP_VALUE_ADJ,
+
+ /* FS helpers */
+ CONST_PTR_TO_HANDLE_FS,
};
struct bpf_prog;
@@ -215,6 +225,26 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
+/* FS helpers */
+enum bpf_handle_fs_type {
+ BPF_HANDLE_FS_TYPE_NONE,
+ BPF_HANDLE_FS_TYPE_FILE,
+ BPF_HANDLE_FS_TYPE_INODE,
+ BPF_HANDLE_FS_TYPE_PATH,
+ BPF_HANDLE_FS_TYPE_DENTRY,
+};
+
+struct bpf_handle_fs {
+ enum bpf_handle_fs_type type;
+ union {
+ struct file *file;
+ struct inode *inode;
+ const struct path *path;
+ struct dentry *dentry;
+ };
+};
+
+
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -331,6 +361,9 @@ extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
+/* FS helpers */
+extern const struct bpf_func_proto bpf_handle_fs_get_mode_proto;
+
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f190be4f609f..6aebf7144e93 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -443,6 +443,23 @@ enum bpf_func_id {
*/
BPF_FUNC_set_hash_invalid,
+ BPF_FUNC_get_numa_node_id,
+ BPF_FUNC_skb_change_head,
+ BPF_FUNC_xdp_adjust_head,
+ BPF_FUNC_probe_read_str,
+ BPF_FUNC_get_socket_cookie,
+ BPF_FUNC_get_socket_uid,
+
+ /**
+ * s64 bpf_handle_fs_get_mode(handle_fs)
+ * Get the mode of a struct bpf_handle_fs
+ * fs: struct bpf_handle_fs address
+ * Return:
+ * >= 0 file mode
+ * < 0 error
+ */
+ BPF_FUNC_handle_fs_get_mode,
+
__BPF_FUNC_MAX_ID,
};
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d091da..8fffb30ac7a1 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,6 +1,6 @@
obj-y := core.o
-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o helpers_fs.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
diff --git a/kernel/bpf/helpers_fs.c b/kernel/bpf/helpers_fs.c
new file mode 100644
index 000000000000..d524d382adeb
--- /dev/null
+++ b/kernel/bpf/helpers_fs.c
@@ -0,0 +1,52 @@
+/*
+ * BPF filesystem helpers
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bpf.h> /* struct bpf_handle_fs */
+#include <linux/errno.h>
+#include <linux/filter.h> /* BPF_CALL*() */
+
+BPF_CALL_1(bpf_handle_fs_get_mode, struct bpf_handle_fs *, handle_fs)
+{
+ if (WARN_ON(!handle_fs))
+ return -EFAULT;
+ if (!handle_fs->file) {
+ /* file can be null for anonymous mmap */
+ WARN_ON(handle_fs->type != BPF_HANDLE_FS_TYPE_FILE);
+ return -ENOENT;
+ }
+ switch (handle_fs->type) {
+ case BPF_HANDLE_FS_TYPE_FILE:
+ if (WARN_ON(!handle_fs->file->f_inode))
+ return -ENOENT;
+ return handle_fs->file->f_inode->i_mode;
+ case BPF_HANDLE_FS_TYPE_INODE:
+ return handle_fs->inode->i_mode;
+ case BPF_HANDLE_FS_TYPE_PATH:
+ if (WARN_ON(!handle_fs->path->dentry ||
+ !handle_fs->path->dentry->d_inode))
+ return -ENOENT;
+ return handle_fs->path->dentry->d_inode->i_mode;
+ case BPF_HANDLE_FS_TYPE_DENTRY:
+ if (WARN_ON(!handle_fs->dentry->d_inode))
+ return -ENOENT;
+ return handle_fs->dentry->d_inode->i_mode;
+ case BPF_HANDLE_FS_TYPE_NONE:
+ default:
+ WARN_ON(1);
+ return -EFAULT;
+ }
+}
+
+const struct bpf_func_proto bpf_handle_fs_get_mode_proto = {
+ .func = bpf_handle_fs_get_mode,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_PTR_TO_HANDLE_FS,
+};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f5f082de2f7f..7cecf5099207 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -188,6 +188,7 @@ static const char * const reg_type_str[] = {
[CONST_IMM] = "imm",
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_END] = "pkt_end",
+ [CONST_PTR_TO_HANDLE_FS] = "handle_fs",
};
static void print_verifier_state(struct bpf_verifier_state *state)
@@ -520,6 +521,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_PACKET_END:
case FRAME_PTR:
case CONST_PTR_TO_MAP:
+ case CONST_PTR_TO_HANDLE_FS:
return true;
default:
return false;
@@ -981,6 +983,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
+ } else if (arg_type == ARG_CONST_PTR_TO_HANDLE_FS) {
+ expected_type = CONST_PTR_TO_HANDLE_FS;
+ if (type != expected_type)
+ goto err_type;
} else if (arg_type == ARG_PTR_TO_STACK ||
arg_type == ARG_PTR_TO_RAW_STACK) {
expected_type = PTR_TO_STACK;
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index dadd5161bd91..d962a5d76725 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -57,6 +57,8 @@ static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
(void *) BPF_FUNC_skb_set_tunnel_opt;
static unsigned long long (*bpf_get_prandom_u32)(void) =
(void *) BPF_FUNC_get_prandom_u32;
+static long long (*bpf_handle_fs_get_mode)(void *handle_fs) =
+ (void *) BPF_FUNC_handle_fs_get_mode;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/security/landlock/init.c b/security/landlock/init.c
index 0a97026f1c07..914895d08320 100644
--- a/security/landlock/init.c
+++ b/security/landlock/init.c
@@ -78,6 +78,12 @@ static inline const struct bpf_func_proto *bpf_landlock_func_proto(
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
+ /* event_fs */
+ case BPF_FUNC_handle_fs_get_mode:
+ if (event_fs)
+ return &bpf_handle_fs_get_mode_proto;
+ return NULL;
+
/* ability_write */
case BPF_FUNC_map_delete_elem:
if (ability_write)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0167f61cb3ba..6aebf7144e93 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -393,6 +393,73 @@ enum bpf_func_id {
*/
BPF_FUNC_probe_write_user,
+ /**
+ * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 current failed the cgroup2 descendant test
+ * == 1 current succeeded the cgroup2 descendant test
+ * < 0 error
+ */
+ BPF_FUNC_current_task_under_cgroup,
+
+ /**
+ * bpf_skb_change_tail(skb, len, flags)
+ * The helper will resize the skb to the given new size,
+ * to be used f.e. with control messages.
+ * @skb: pointer to skb
+ * @len: new skb length
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_tail,
+
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len are part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
+ BPF_FUNC_get_numa_node_id,
+ BPF_FUNC_skb_change_head,
+ BPF_FUNC_xdp_adjust_head,
+ BPF_FUNC_probe_read_str,
+ BPF_FUNC_get_socket_cookie,
+ BPF_FUNC_get_socket_uid,
+
+ /**
+ * s64 bpf_handle_fs_get_mode(handle_fs)
+ * Get the mode of a struct bpf_handle_fs
+ * fs: struct bpf_handle_fs address
+ * Return:
+ * >= 0 file mode
+ * < 0 error
+ */
+ BPF_FUNC_handle_fs_get_mode,
+
__BPF_FUNC_MAX_ID,
};
--
2.11.0
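For orientation, a rule using the bpf_handle_fs_get_mode() helper added above looks roughly like the stripped-down sketch below. It is a hedged variant of samples/bpf/landlock1_kern.c from patch 12 of this series; the function name deny_write_except_chrdev is made up for illustration, and the "subtype" ELF section describing the rule's version, event and ability (required by the sample loader) is omitted here.

/* sketch: stripped-down variant of samples/bpf/landlock1_kern.c */
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/stat.h>	/* S_ISCHR() */
#include "bpf_helpers.h"	/* SEC(), bpf_handle_fs_get_mode() */

SEC("landlock1")
static int deny_write_except_chrdev(struct landlock_context *ctx)
{
	long long mode;

	/* only look at write actions for the filesystem event */
	if (!(ctx->arg2 & LANDLOCK_ACTION_FS_WRITE))
		return 0;
	/* arg1 carries the struct bpf_handle_fs for this access */
	mode = bpf_handle_fs_get_mode((void *)ctx->arg1);
	if (mode < 0)
		return 1;	/* helper error: deny */
	/* a non-zero return value denies the access */
	return S_ISCHR(mode) ? 0 : 1;
}

SEC("license")
static const char _license[] = "GPL";	/* the helper is gpl_only */

Returning 0 allows the access (the other security checks have already run); any non-zero value denies it.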

From 1512eca4f0a5b3d1a98ed9940cae462bf71cc956 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 09/12] seccomp: Split put_seccomp_filter() with put_seccomp()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The semantics are unchanged. This will be useful for the Landlock
integration with seccomp (next commit).
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Will Drewry <wad@chromium.org>
(cherry picked from commit 2f707c41fd744e5c2beb382dafe3b2dc658c26d4)
---
include/linux/seccomp.h | 4 ++--
kernel/fork.c | 2 +-
kernel/seccomp.c | 18 +++++++++++++-----
security/landlock/hooks.c | 4 +---
security/landlock/init.c | 2 +-
5 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index ecc296c137cd..e25aee2cdfc0 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -77,10 +77,10 @@ static inline int seccomp_mode(struct seccomp *s)
#endif /* CONFIG_SECCOMP */
#ifdef CONFIG_SECCOMP_FILTER
-extern void put_seccomp_filter(struct task_struct *tsk);
+extern void put_seccomp(struct task_struct *tsk);
extern void get_seccomp_filter(struct task_struct *tsk);
#else /* CONFIG_SECCOMP_FILTER */
-static inline void put_seccomp_filter(struct task_struct *tsk)
+static inline void put_seccomp(struct task_struct *tsk)
{
return;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index ba8a01564985..48996df6eb5e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -352,7 +352,7 @@ void free_task(struct task_struct *tsk)
#endif
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
- put_seccomp_filter(tsk);
+ put_seccomp(tsk);
arch_release_task_struct(tsk);
free_task_struct(tsk);
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 0db7c8a2afe2..e741a82eab4d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -63,6 +63,8 @@ struct seccomp_filter {
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
+static void put_seccomp_filter(struct seccomp_filter *filter);
+
/*
* Endianness is explicitly ignored and left for BPF program authors to manage
* as per the specific architecture.
@@ -313,7 +315,7 @@ static inline void seccomp_sync_threads(void)
* current's path will hold a reference. (This also
* allows a put before the assignment.)
*/
- put_seccomp_filter(thread);
+ put_seccomp_filter(thread->seccomp.filter);
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
@@ -475,10 +477,11 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
}
}
-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-void put_seccomp_filter(struct task_struct *tsk)
+/* put_seccomp_filter - decrements the ref count of a filter */
+static void put_seccomp_filter(struct seccomp_filter *filter)
{
- struct seccomp_filter *orig = tsk->seccomp.filter;
+ struct seccomp_filter *orig = filter;
+
/* Clean up single-reference branches iteratively. */
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
@@ -487,6 +490,11 @@ void put_seccomp_filter(struct task_struct *tsk)
}
}
+void put_seccomp(struct task_struct *tsk)
+{
+ put_seccomp_filter(tsk->seccomp.filter);
+}
+
/**
* seccomp_send_sigsys - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
@@ -898,7 +906,7 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
- put_seccomp_filter(task);
+ put_seccomp_filter(task->seccomp.filter);
return ret;
out:
diff --git a/security/landlock/hooks.c b/security/landlock/hooks.c
index eaee8162ff70..cbad4b66ca13 100644
--- a/security/landlock/hooks.c
+++ b/security/landlock/hooks.c
@@ -27,10 +27,8 @@ __init void landlock_register_hooks(struct security_hook_list *hooks, int count)
{
int i;
- for (i = 0; i < count; i++) {
- hooks[i].lsm = "landlock";
+ for (i = 0; i < count; i++)
list_add_tail_rcu(&hooks[i].list, hooks[i].head);
- }
}
bool landlock_is_valid_access(int off, int size, enum bpf_access_type type,
diff --git a/security/landlock/init.c b/security/landlock/init.c
index 1c2750e12dfa..909c51c3fa32 100644
--- a/security/landlock/init.c
+++ b/security/landlock/init.c
@@ -137,6 +137,6 @@ void __init landlock_add_hooks(void)
{
pr_info("landlock: Version %u", LANDLOCK_VERSION);
landlock_add_hooks_fs();
- security_add_hooks(NULL, 0, "landlock");
+ security_add_hooks(NULL, 0);
bpf_register_prog_type(&bpf_landlock_type);
}
--
2.11.0

From a53bef1072081d7ff33c58293a019cfab91111a7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 10/12] seccomp,landlock: Handle Landlock events per process
hierarchy
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The seccomp(2) syscall can be used by a task to apply a Landlock rule to
itself. As a seccomp filter, a Landlock rule is enforced for the current
task and all its future children. A rule is immutable and a task can
only add new restricting rules to itself, forming a chain of rules.
A Landlock rule is tied to a Landlock event. If the use of a kernel
object is allowed by the other Linux security mechanisms (e.g. DAC,
capabilities, other LSM), then a Landlock event related to this kind of
object is triggered. The chain of rules for this event is then
evaluated. Each rule returns a 32-bit value which can deny the use of a
kernel object with a non-zero value. If every rule in the chain returns
zero, then the use of the object is allowed.
Changes since v5:
* remove struct landlock_node and use an inheritance mechanism similar
to seccomp-bpf (requested by Andy Lutomirski)
* rename SECCOMP_ADD_LANDLOCK_RULE to SECCOMP_APPEND_LANDLOCK_RULE
* rename file manager.c to providers.c
* add comments
* typo and cosmetic fixes
Changes since v4:
* merge manager and seccomp patches
* return -EFAULT in seccomp(2) when user_bpf_fd is null to easily check
if Landlock is supported
* only allow a process with the global CAP_SYS_ADMIN to use Landlock
(will be lifted in the future)
* add an early check to exit as soon as possible if the current process
does not have Landlock rules
Changes since v3:
* remove the hard link with seccomp (suggested by Andy Lutomirski and
Kees Cook):
* remove the cookie which could imply multiple evaluation of Landlock
rules
* remove the origin field in struct landlock_data
* remove documentation fix (merged upstream)
* rename the new seccomp command to SECCOMP_ADD_LANDLOCK_RULE
* internal renaming
* split commit
* new design to be able to inherit on the fly the parent rules
Changes since v2:
* Landlock programs can now be run without seccomp filter but for any
syscall (from the process) or interruption
* move Landlock related functions and structs into security/landlock/*
(to manage cgroups as well)
* fix seccomp filter handling: run Landlock programs for each of their
legitimate seccomp filter
* properly clean up all seccomp results
* cosmetic changes to ease the understanding
* fix some ifdef
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Serge E. Hallyn <serge@hallyn.com>
Cc: Will Drewry <wad@chromium.org>
Link: https://lkml.kernel.org/r/c10a503d-5e35-7785-2f3d-25ed8dd63fab@digikod.net
(cherry picked from commit 47986b417d07970983d82579fd9def4844f8ed78)
---
include/linux/landlock.h | 36 +++++++
include/linux/seccomp.h | 8 ++
include/uapi/linux/seccomp.h | 1 +
kernel/fork.c | 14 ++-
kernel/seccomp.c | 8 ++
security/landlock/Makefile | 2 +-
security/landlock/hooks.c | 37 +++++++
security/landlock/hooks.h | 5 +
security/landlock/init.c | 3 +-
security/landlock/providers.c | 232 ++++++++++++++++++++++++++++++++++++++++++
10 files changed, 342 insertions(+), 4 deletions(-)
create mode 100644 security/landlock/providers.c
diff --git a/include/linux/landlock.h b/include/linux/landlock.h
index 53013dc374fe..c40ee78e86e0 100644
--- a/include/linux/landlock.h
+++ b/include/linux/landlock.h
@@ -12,6 +12,9 @@
#define _LINUX_LANDLOCK_H
#ifdef CONFIG_SECURITY_LANDLOCK
+#include <linux/bpf.h> /* _LANDLOCK_SUBTYPE_EVENT_LAST */
+#include <linux/types.h> /* atomic_t */
+
/*
* This is not intended for the UAPI headers. Each userland software should use
* a static minimal version for the required features as explained in the
@@ -19,5 +22,38 @@
*/
#define LANDLOCK_VERSION 1
+struct landlock_rule {
+ atomic_t usage;
+ struct landlock_rule *prev;
+ struct bpf_prog *prog;
+};
+
+/**
+ * struct landlock_events - Landlock event rules enforced on a thread
+ *
+ * This is used to keep the performance impact of forking a process low. Instead
+ * of copying the full array and incrementing the usage of each entry, only
+ * create a pointer to &struct landlock_events and increment its usage. When
+ * appending a new rule, if &struct landlock_events is shared with other tasks,
+ * then duplicate it and append the rule to this new &struct landlock_events.
+ *
+ * @usage: reference count to manage the object lifetime. When a thread needs to
+ * add Landlock rules and if @usage is greater than 1, then the thread
+ * must duplicate &struct landlock_events to not change the children's
+ * rules as well.
+ * @rules: array of non-NULL &struct landlock_rule pointers
+ */
+struct landlock_events {
+ atomic_t usage;
+ struct landlock_rule *rules[_LANDLOCK_SUBTYPE_EVENT_LAST];
+};
+
+void put_landlock_events(struct landlock_events *events);
+
+#ifdef CONFIG_SECCOMP_FILTER
+int landlock_seccomp_append_prog(unsigned int flags,
+ const char __user *user_bpf_fd);
+#endif /* CONFIG_SECCOMP_FILTER */
+
#endif /* CONFIG_SECURITY_LANDLOCK */
#endif /* _LINUX_LANDLOCK_H */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index e25aee2cdfc0..9a38de3c0e72 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -10,6 +10,10 @@
#include <linux/thread_info.h>
#include <asm/seccomp.h>
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_SECURITY_LANDLOCK)
+struct landlock_events;
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_SECURITY_LANDLOCK */
+
struct seccomp_filter;
/**
* struct seccomp - the state of a seccomp'ed process
@@ -18,6 +22,7 @@ struct seccomp_filter;
* system calls available to a process.
* @filter: must always point to a valid seccomp-filter or NULL as it is
* accessed without locking during system call entry.
+ * @landlock_events: contains an array of Landlock rules.
*
* @filter must only be accessed from the context of current as there
* is no read locking.
@@ -25,6 +30,9 @@ struct seccomp_filter;
struct seccomp {
int mode;
struct seccomp_filter *filter;
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_SECURITY_LANDLOCK)
+ struct landlock_events *landlock_events;
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_SECURITY_LANDLOCK */
};
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0f238a43ff1e..74891cf60ca6 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -13,6 +13,7 @@
/* Valid operations for seccomp syscall. */
#define SECCOMP_SET_MODE_STRICT 0
#define SECCOMP_SET_MODE_FILTER 1
+#define SECCOMP_APPEND_LANDLOCK_RULE 2
/* Valid flags for SECCOMP_SET_MODE_FILTER */
#define SECCOMP_FILTER_FLAG_TSYNC 1
diff --git a/kernel/fork.c b/kernel/fork.c
index 48996df6eb5e..8c7e289d0727 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,6 +37,7 @@
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
+#include <linux/landlock.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
@@ -513,7 +514,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
* the usage counts on the error path calling free_task.
*/
tsk->seccomp.filter = NULL;
-#endif
+#ifdef CONFIG_SECURITY_LANDLOCK
+ tsk->seccomp.landlock_events = NULL;
+#endif /* CONFIG_SECURITY_LANDLOCK */
+#endif /* CONFIG_SECCOMP */
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
@@ -1384,7 +1388,13 @@ static void copy_seccomp(struct task_struct *p)
/* Ref-count the new filter user, and assign it. */
get_seccomp_filter(current);
- p->seccomp = current->seccomp;
+ p->seccomp.mode = current->seccomp.mode;
+ p->seccomp.filter = current->seccomp.filter;
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_SECURITY_LANDLOCK)
+ p->seccomp.landlock_events = current->seccomp.landlock_events;
+ if (p->seccomp.landlock_events)
+ atomic_inc(&p->seccomp.landlock_events->usage);
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_SECURITY_LANDLOCK */
/*
* Explicitly enable no_new_privs here in case it got set
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index e741a82eab4d..72b1cc4ce63b 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
+#include <linux/landlock.h>
/**
* struct seccomp_filter - container for seccomp BPF programs
@@ -493,6 +494,9 @@ static void put_seccomp_filter(struct seccomp_filter *filter)
void put_seccomp(struct task_struct *tsk)
{
put_seccomp_filter(tsk->seccomp.filter);
+#ifdef CONFIG_SECURITY_LANDLOCK
+ put_landlock_events(tsk->seccomp.landlock_events);
+#endif /* CONFIG_SECURITY_LANDLOCK */
}
/**
@@ -797,6 +801,10 @@ static long do_seccomp(unsigned int op, unsigned int flags,
return seccomp_set_mode_strict();
case SECCOMP_SET_MODE_FILTER:
return seccomp_set_mode_filter(flags, uargs);
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_SECURITY_LANDLOCK)
+ case SECCOMP_APPEND_LANDLOCK_RULE:
+ return landlock_seccomp_append_prog(flags, uargs);
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_SECURITY_LANDLOCK */
default:
return -EINVAL;
}
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
index c0db504a6335..da8ba8b5183e 100644
--- a/security/landlock/Makefile
+++ b/security/landlock/Makefile
@@ -2,4 +2,4 @@ ccflags-$(CONFIG_SECURITY_LANDLOCK) += -Werror=unused-function
obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o
-landlock-y := init.o hooks.o hooks_fs.o
+landlock-y := init.o providers.o hooks.o hooks_fs.o
diff --git a/security/landlock/hooks.c b/security/landlock/hooks.c
index cbad4b66ca13..194cd4307b01 100644
--- a/security/landlock/hooks.c
+++ b/security/landlock/hooks.c
@@ -93,6 +93,38 @@ bool landlock_is_valid_access(int off, int size, enum bpf_access_type type,
return true;
}
+/**
+ * landlock_event_deny - run Landlock rules tied to an event
+ *
+ * @event_idx: event index in the rules array
+ * @ctx: non-NULL eBPF context
+ * @events: Landlock events pointer
+ *
+ * Return true if at least one rule denies the event.
+ */
+static bool landlock_event_deny(u32 event_idx, const struct landlock_context *ctx,
+ struct landlock_events *events)
+{
+ struct landlock_rule *rule;
+
+ if (!events)
+ return false;
+
+ for (rule = events->rules[event_idx]; rule; rule = rule->prev) {
+ u32 ret;
+
+ if (WARN_ON(!rule->prog))
+ continue;
+ rcu_read_lock();
+ ret = BPF_PROG_RUN(rule->prog, (void *)ctx);
+ rcu_read_unlock();
+ /* deny access if a program returns a value different than 0 */
+ if (ret)
+ return true;
+ }
+ return false;
+}
+
int landlock_decide(enum landlock_subtype_event event,
__u64 ctx_values[CTX_ARG_NB], u32 cmd, const char *hook)
{
@@ -109,5 +141,10 @@ int landlock_decide(enum landlock_subtype_event event,
.arg2 = ctx_values[1],
};
+#ifdef CONFIG_SECCOMP_FILTER
+ deny = landlock_event_deny(event_idx, &ctx,
+ current->seccomp.landlock_events);
+#endif /* CONFIG_SECCOMP_FILTER */
+
return deny ? -EPERM : 0;
}
diff --git a/security/landlock/hooks.h b/security/landlock/hooks.h
index 2e180f6ed86b..dd0486a4c284 100644
--- a/security/landlock/hooks.h
+++ b/security/landlock/hooks.h
@@ -12,6 +12,7 @@
#include <linux/bpf.h> /* enum bpf_access_type */
#include <linux/lsm_hooks.h>
#include <linux/sched.h> /* struct task_struct */
+#include <linux/seccomp.h>
/* separators */
#define SEP_COMMA() ,
@@ -163,7 +164,11 @@ WRAP_TYPE_RAW_C;
static inline bool landlocked(const struct task_struct *task)
{
+#ifdef CONFIG_SECCOMP_FILTER
+ return !!(task->seccomp.landlock_events);
+#else
return false;
+#endif /* CONFIG_SECCOMP_FILTER */
}
__init void landlock_register_hooks(struct security_hook_list *hooks, int count);
diff --git a/security/landlock/init.c b/security/landlock/init.c
index 909c51c3fa32..9ea7963dcf4c 100644
--- a/security/landlock/init.c
+++ b/security/landlock/init.c
@@ -135,7 +135,8 @@ static struct bpf_prog_type_list bpf_landlock_type __ro_after_init = {
void __init landlock_add_hooks(void)
{
- pr_info("landlock: Version %u", LANDLOCK_VERSION);
+ pr_info("landlock: Version %u, ready to sandbox with %s\n",
+ LANDLOCK_VERSION, "seccomp");
landlock_add_hooks_fs();
security_add_hooks(NULL, 0);
bpf_register_prog_type(&bpf_landlock_type);
diff --git a/security/landlock/providers.c b/security/landlock/providers.c
new file mode 100644
index 000000000000..6d867a39c947
--- /dev/null
+++ b/security/landlock/providers.c
@@ -0,0 +1,232 @@
+/*
+ * Landlock LSM - seccomp provider
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/page.h> /* PAGE_SIZE */
+#include <linux/atomic.h> /* atomic_*(), smp_store_release() */
+#include <linux/bpf.h> /* bpf_prog_put() */
+#include <linux/filter.h> /* struct bpf_prog */
+#include <linux/kernel.h> /* round_up() */
+#include <linux/landlock.h>
+#include <linux/sched.h> /* current_cred(), task_no_new_privs() */
+#include <linux/security.h> /* security_capable_noaudit() */
+#include <linux/slab.h> /* alloc(), kfree() */
+#include <linux/types.h> /* atomic_t */
+#include <linux/uaccess.h> /* copy_from_user() */
+
+#include "common.h"
+
+static void put_landlock_rule(struct landlock_rule *rule)
+{
+ struct landlock_rule *orig = rule;
+
+ /* clean up single-reference branches iteratively */
+ while (orig && atomic_dec_and_test(&orig->usage)) {
+ struct landlock_rule *freeme = orig;
+
+ bpf_prog_put(orig->prog);
+ orig = orig->prev;
+ kfree(freeme);
+ }
+}
+
+void put_landlock_events(struct landlock_events *events)
+{
+ if (events && atomic_dec_and_test(&events->usage)) {
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(events->rules); i++)
+ /* XXX: Do we need to use lockless_dereference() here? */
+ put_landlock_rule(events->rules[i]);
+ kfree(events);
+ }
+}
+
+static struct landlock_events *new_landlock_events(void)
+{
+ struct landlock_events *ret;
+
+ /* array filled with NULL values */
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+ atomic_set(&ret->usage, 1);
+ return ret;
+}
+
+static void add_landlock_rule(struct landlock_events *events,
+ struct landlock_rule *rule)
+{
+ /* subtype.landlock_rule.event > 0 for loaded programs */
+ u32 event_idx = get_index(rule->prog->subtype.landlock_rule.event);
+
+ rule->prev = events->rules[event_idx];
+ WARN_ON(atomic_read(&rule->usage));
+ atomic_set(&rule->usage, 1);
+ /* do not increment the previous rule usage */
+ smp_store_release(&events->rules[event_idx], rule);
+}
+
+/* limit Landlock events to 256KB */
+#define LANDLOCK_EVENTS_MAX_PAGES (1 << 6)
+
+/**
+ * landlock_append_prog - attach a Landlock rule to @current_events
+ *
+ * @current_events: landlock_events pointer, must be locked (if needed) to
+ * prevent a concurrent put/free. This pointer must not be
+ * freed after the call.
+ * @prog: non-NULL Landlock rule to append to @current_events. @prog will be
+ * owned by landlock_append_prog() and freed if an error happened.
+ *
+ * Return @current_events or a new pointer when OK. Return a pointer error
+ * otherwise.
+ */
+static struct landlock_events *landlock_append_prog(
+ struct landlock_events *current_events, struct bpf_prog *prog)
+{
+ struct landlock_events *new_events = current_events;
+ unsigned long pages;
+ struct landlock_rule *rule;
+ u32 event_idx;
+
+ if (prog->type != BPF_PROG_TYPE_LANDLOCK) {
+ new_events = ERR_PTR(-EINVAL);
+ goto put_prog;
+ }
+
+ /* validate memory size allocation */
+ pages = prog->pages;
+ if (current_events) {
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(current_events->rules); i++) {
+ struct landlock_rule *walker_r;
+
+ for (walker_r = current_events->rules[i]; walker_r;
+ walker_r = walker_r->prev)
+ pages += walker_r->prog->pages;
+ }
+ /* count a struct landlock_events if we need to allocate one */
+ if (atomic_read(&current_events->usage) != 1)
+ pages += round_up(sizeof(*current_events), PAGE_SIZE) /
+ PAGE_SIZE;
+ }
+ if (pages > LANDLOCK_EVENTS_MAX_PAGES) {
+ new_events = ERR_PTR(-E2BIG);
+ goto put_prog;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ new_events = ERR_PTR(-ENOMEM);
+ goto put_prog;
+ }
+ rule->prog = prog;
+
+ /* subtype.landlock_rule.event > 0 for loaded programs */
+ event_idx = get_index(rule->prog->subtype.landlock_rule.event);
+
+ if (!new_events) {
+ /*
+ * If there is no Landlock events used by the current task,
+ * then create a new one.
+ */
+ new_events = new_landlock_events();
+ if (IS_ERR(new_events))
+ goto put_rule;
+ } else if (atomic_read(&current_events->usage) > 1) {
+ /*
+ * If the current task is not the sole user of its Landlock
+ * events, then duplicate them.
+ */
+ size_t i;
+
+ new_events = new_landlock_events();
+ if (IS_ERR(new_events))
+ goto put_rule;
+ for (i = 0; i < ARRAY_SIZE(new_events->rules); i++) {
+ new_events->rules[i] =
+ lockless_dereference(current_events->rules[i]);
+ if (new_events->rules[i])
+ atomic_inc(&new_events->rules[i]->usage);
+ }
+
+ /*
+ * Landlock events from the current task will not be freed here
+ * because the usage is strictly greater than 1. It is only
+ * prevented to be freed by another subject thanks to the
+ * caller of landlock_append_prog() which should be locked if
+ * needed.
+ */
+ put_landlock_events(current_events);
+ }
+ add_landlock_rule(new_events, rule);
+ return new_events;
+
+put_prog:
+ bpf_prog_put(prog);
+ return new_events;
+
+put_rule:
+ put_landlock_rule(rule);
+ return new_events;
+}
+
+/**
+ * landlock_seccomp_append_prog - attach a Landlock rule to the current process
+ *
+ * current->seccomp.landlock_events is lazily allocated. When a process forks,
+ * only a pointer is copied. When a new event is added by a process, if there
+ * are other references to this process' landlock_events, then a new allocation
+ * is made to contain an array pointing to Landlock rule lists. This design
+ * keeps the performance impact low and is memory efficient while keeping the
+ * property of append-only rules.
+ *
+ * @flags: not used for now, but could be used for TSYNC
+ * @user_bpf_fd: file descriptor pointing to a loaded Landlock rule
+ */
+#ifdef CONFIG_SECCOMP_FILTER
+int landlock_seccomp_append_prog(unsigned int flags,
+ const char __user *user_bpf_fd)
+{
+ struct landlock_events *new_events;
+ struct bpf_prog *prog;
+ int bpf_fd;
+
+ /* force no_new_privs to limit privilege escalation */
+ if (!task_no_new_privs(current))
+ return -EPERM;
+ /* will be removed in the future to allow unprivileged tasks */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* allow checking whether Landlock is supported with an early EFAULT */
+ if (!user_bpf_fd)
+ return -EFAULT;
+ if (flags)
+ return -EINVAL;
+ if (copy_from_user(&bpf_fd, user_bpf_fd, sizeof(bpf_fd)))
+ return -EFAULT;
+ prog = bpf_prog_get(bpf_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ /*
+ * We don't need to lock anything for the current process hierarchy,
+ * everything is guarded by the atomic counters.
+ */
+ new_events = landlock_append_prog(current->seccomp.landlock_events,
+ prog);
+ /* @prog is managed/freed by landlock_append_prog() */
+ if (IS_ERR(new_events))
+ return PTR_ERR(new_events);
+ current->seccomp.landlock_events = new_events;
+ return 0;
+}
+#endif /* CONFIG_SECCOMP_FILTER */
--
2.11.0
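For reference, appending an already-loaded Landlock rule from userspace boils down to the sketch below. It mirrors apply_sandbox() in samples/bpf/landlock1_user.c from patch 12; the sys_seccomp() wrapper and the apply_landlock_rule() name are only illustrative, and SECCOMP_APPEND_LANDLOCK_RULE exists only with these patches applied.

#include <linux/seccomp.h>	/* SECCOMP_APPEND_LANDLOCK_RULE (patched headers) */
#include <sys/prctl.h>		/* prctl(), PR_SET_NO_NEW_PRIVS */
#include <sys/syscall.h>	/* __NR_seccomp */
#include <unistd.h>		/* syscall() */

/* raw wrapper: glibc does not provide a seccomp(2) stub */
static int sys_seccomp(unsigned int op, unsigned int flags, void *args)
{
	return syscall(__NR_seccomp, op, flags, args);
}

/* Append an already-loaded Landlock rule (bpf_fd from bpf(BPF_PROG_LOAD))
 * to the current task.  With this series, the kernel requires both
 * no_new_privs and the global CAP_SYS_ADMIN (see providers.c above). */
static int apply_landlock_rule(int bpf_fd)
{
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	/* the rule chain is inherited by all future children of this task */
	return sys_seccomp(SECCOMP_APPEND_LANDLOCK_RULE, 0, &bpf_fd);
}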

From 2a43bc167fa63c0d02bed4b5826bdfc1fd719714 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 11/12] landlock: Add ptrace restrictions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
A landlocked process has fewer privileges than a non-landlocked process
and must therefore be subject to additional restrictions when manipulating
processes. To be allowed to use ptrace(2) and related syscalls on a
target process, a landlocked process must have a subset of the target
process' rules.
New in v6
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Serge E. Hallyn <serge@hallyn.com>
(cherry picked from commit 9f9bb82c0f1a9694263a0a1d6833f3049f0abec2)
---
security/landlock/Makefile | 2 +-
security/landlock/hooks_ptrace.c | 126 +++++++++++++++++++++++++++++++++++++++
security/landlock/hooks_ptrace.h | 11 ++++
security/landlock/init.c | 2 +
4 files changed, 140 insertions(+), 1 deletion(-)
create mode 100644 security/landlock/hooks_ptrace.c
create mode 100644 security/landlock/hooks_ptrace.h
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
index da8ba8b5183e..099a56ca4842 100644
--- a/security/landlock/Makefile
+++ b/security/landlock/Makefile
@@ -2,4 +2,4 @@ ccflags-$(CONFIG_SECURITY_LANDLOCK) += -Werror=unused-function
obj-$(CONFIG_SECURITY_LANDLOCK) := landlock.o
-landlock-y := init.o providers.o hooks.o hooks_fs.o
+landlock-y := init.o providers.o hooks.o hooks_fs.o hooks_ptrace.o
diff --git a/security/landlock/hooks_ptrace.c b/security/landlock/hooks_ptrace.c
new file mode 100644
index 000000000000..8ab53baba9ad
--- /dev/null
+++ b/security/landlock/hooks_ptrace.c
@@ -0,0 +1,126 @@
+/*
+ * Landlock LSM - ptrace hooks
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/current.h>
+#include <linux/kernel.h> /* ARRAY_SIZE */
+#include <linux/landlock.h> /* struct landlock_events */
+#include <linux/lsm_hooks.h>
+#include <linux/sched.h> /* struct task_struct */
+#include <linux/seccomp.h>
+
+#include "hooks.h" /* landlocked() */
+
+#include "hooks_ptrace.h"
+
+
+static bool landlock_events_are_subset(const struct landlock_events *parent,
+ const struct landlock_events *child)
+{
+ size_t i;
+
+ if (!parent || !child)
+ return false;
+ if (parent == child)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(child->rules); i++) {
+ struct landlock_rule *walker;
+ bool found_parent = false;
+
+ if (!parent->rules[i])
+ continue;
+ for (walker = child->rules[i]; walker; walker = walker->prev) {
+ if (walker == parent->rules[i]) {
+ found_parent = true;
+ break;
+ }
+ }
+ if (!found_parent)
+ return false;
+ }
+ return true;
+}
+
+static bool landlock_task_has_subset_events(const struct task_struct *parent,
+ const struct task_struct *child)
+{
+#ifdef CONFIG_SECCOMP_FILTER
+ if (landlock_events_are_subset(parent->seccomp.landlock_events,
+ child->seccomp.landlock_events))
+ /* must be ANDed with other providers (i.e. cgroup) */
+ return true;
+#endif /* CONFIG_SECCOMP_FILTER */
+ return false;
+}
+
+/**
+ * landlock_ptrace_access_check - determine whether the current process may
+ * access another
+ *
+ * @child: the process to be accessed
+ * @mode: the mode of attachment
+ *
+ * If the current task has Landlock rules, then the child must have at least
+ * the same rules. Else denied.
+ *
+ * Determine whether a process may access another, returning 0 if permission
+ * granted, -errno if denied.
+ */
+static int landlock_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ if (!landlocked(current))
+ return 0;
+
+ if (!landlocked(child))
+ return -EPERM;
+
+ if (landlock_task_has_subset_events(current, child))
+ return 0;
+
+ return -EPERM;
+}
+
+/**
+ * landlock_ptrace_traceme - determine whether another process may trace the
+ * current one
+ *
+ * @parent: the task proposed to be the tracer
+ *
+ * If the parent has Landlock rules, then the current task must have the same
+ * or more rules.
+ * Else denied.
+ *
+ * Determine whether the nominated task is permitted to trace the current
+ * process, returning 0 if permission is granted, -errno if denied.
+ */
+static int landlock_ptrace_traceme(struct task_struct *parent)
+{
+ if (!landlocked(parent))
+ return 0;
+
+ if (!landlocked(current))
+ return -EPERM;
+
+ if (landlock_task_has_subset_events(parent, current))
+ return 0;
+
+ return -EPERM;
+}
+
+static struct security_hook_list landlock_hooks[] = {
+ LSM_HOOK_INIT(ptrace_access_check, landlock_ptrace_access_check),
+ LSM_HOOK_INIT(ptrace_traceme, landlock_ptrace_traceme),
+};
+
+__init void landlock_add_hooks_ptrace(void)
+{
+ landlock_register_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks));
+}
diff --git a/security/landlock/hooks_ptrace.h b/security/landlock/hooks_ptrace.h
new file mode 100644
index 000000000000..15b1f3479e0e
--- /dev/null
+++ b/security/landlock/hooks_ptrace.h
@@ -0,0 +1,11 @@
+/*
+ * Landlock LSM - ptrace hooks
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+__init void landlock_add_hooks_ptrace(void);
diff --git a/security/landlock/init.c b/security/landlock/init.c
index 9ea7963dcf4c..74f0e17a92f6 100644
--- a/security/landlock/init.c
+++ b/security/landlock/init.c
@@ -14,6 +14,7 @@
#include <linux/lsm_hooks.h>
#include "hooks_fs.h"
+#include "hooks_ptrace.h"
static inline bool bpf_landlock_is_valid_access(int off, int size,
@@ -137,6 +138,7 @@ void __init landlock_add_hooks(void)
{
pr_info("landlock: Version %u, ready to sandbox with %s\n",
LANDLOCK_VERSION, "seccomp");
+ landlock_add_hooks_ptrace();
landlock_add_hooks_fs();
security_add_hooks(NULL, 0);
bpf_register_prog_type(&bpf_landlock_type);
--
2.11.0
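As a rough illustration of how this restriction surfaces to userspace, the hypothetical helper below shows the expected failure mode; only the -EPERM behaviour is described by the patch above.

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* With this patch, if the calling task is landlocked and the target's rule
 * chains do not contain at least the caller's rules, the LSM hook returns
 * -EPERM before the usual ptrace permission checks run. */
static int try_attach(pid_t target)
{
	if (ptrace(PTRACE_ATTACH, target, NULL, NULL) == -1) {
		if (errno == EPERM)
			fprintf(stderr, "attach to %d denied (Landlock or another LSM)\n",
				(int)target);
		return -1;
	}
	return 0;
}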

From 9fca752f05909f12edd32f619ec40d38a6c2b305 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micka=C3=ABl=20Sala=C3=BCn?= <mic@digikod.net>
Date: Wed, 29 Mar 2017 01:30:33 +0200
Subject: [PATCH 12/12] bpf: Add a Landlock sandbox example
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add a basic sandbox tool to create a process isolated from some parts of
the system. This sandbox creates a read-only environment. It is only
allowed to write to a character device such as a TTY:
# :> X
# echo $?
0
# ./samples/bpf/landlock1 /bin/sh -i
Launching a new sandboxed process.
# :> Y
cannot create Y: Operation not permitted
Changes since v5:
* cosmetic fixes
* rebase
Changes since v4:
* write the Landlock rule in C and compile it with LLVM
* remove cgroup handling
* remove path handling: only handle a read-only environment
* remove errno return codes
Changes since v3:
* remove seccomp and origin field: completely free from seccomp programs
* handle more FS-related hooks
* handle inode hooks and directory traversal
* add faked but consistent view thanks to ENOENT
* add /lib64 in the example
* fix spelling
* rename some types and definitions (e.g. SECCOMP_ADD_LANDLOCK_RULE)
Changes since v2:
* use BPF_PROG_ATTACH for cgroup handling
Signed-off-by: Mickaël Salaün <mic@digikod.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Serge E. Hallyn <serge@hallyn.com>
(cherry picked from commit 9c5c745d4c0640a96f30f072cb835c21e7bd3ca6)
---
samples/bpf/Makefile | 4 ++
samples/bpf/bpf_load.c | 30 +++++++++++--
samples/bpf/landlock1_kern.c | 46 +++++++++++++++++++
samples/bpf/landlock1_user.c | 102 +++++++++++++++++++++++++++++++++++++++++++
4 files changed, 178 insertions(+), 4 deletions(-)
create mode 100644 samples/bpf/landlock1_kern.c
create mode 100644 samples/bpf/landlock1_user.c
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 72c58675973e..c9ce3b2e7a7e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -28,6 +28,7 @@ hostprogs-y += test_current_task_under_cgroup
hostprogs-y += trace_event
hostprogs-y += sampleip
hostprogs-y += tc_l2_redirect
+hostprogs-y += landlock1
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
@@ -58,6 +59,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \
trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
+landlock1-objs := bpf_load.o libbpf.o landlock1_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -88,6 +90,7 @@ always += xdp2_kern.o
always += test_current_task_under_cgroup_kern.o
always += trace_event_kern.o
always += sampleip_kern.o
+always += landlock1_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
@@ -115,6 +118,7 @@ HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
HOSTLOADLIBES_trace_event += -lelf
HOSTLOADLIBES_sampleip += -lelf
HOSTLOADLIBES_tc_l2_redirect += -l elf
+HOSTLOADLIBES_landlock1 += -lelf
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 40cf828a37c7..1a461afb1d82 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -25,6 +25,8 @@
static char license[128];
static int kern_version;
+static union bpf_prog_subtype subtype = {};
+static bool has_subtype;
static bool processed_sec[128];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
@@ -52,6 +54,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
bool is_xdp = strncmp(event, "xdp", 3) == 0;
bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
+ bool is_landlock = strncmp(event, "landlock", 8) == 0;
enum bpf_prog_type prog_type;
char buf[256];
int fd, efd, err, id;
@@ -73,6 +76,13 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_type = BPF_PROG_TYPE_XDP;
} else if (is_perf_event) {
prog_type = BPF_PROG_TYPE_PERF_EVENT;
+ } else if (is_landlock) {
+ prog_type = BPF_PROG_TYPE_LANDLOCK;
+ if (!has_subtype) {
+ printf("No subtype\n");
+ return -1;
+ }
+ st = &subtype;
} else {
printf("Unknown event '%s'\n", event);
return -1;
@@ -86,7 +96,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
prog_fd[prog_cnt++] = fd;
- if (is_xdp || is_perf_event)
+ if (is_xdp || is_perf_event || is_landlock)
return 0;
if (is_socket) {
@@ -261,6 +271,7 @@ int load_bpf_file(char *path)
kern_version = 0;
memset(license, 0, sizeof(license));
memset(processed_sec, 0, sizeof(processed_sec));
+ has_subtype = false;
if (elf_version(EV_CURRENT) == EV_NONE)
return 1;
@@ -306,6 +317,16 @@ int load_bpf_file(char *path)
processed_sec[i] = true;
if (load_maps(data->d_buf, data->d_size))
return 1;
+ } else if (strcmp(shname, "subtype") == 0) {
+ processed_sec[i] = true;
+ if (data->d_size != sizeof(union bpf_prog_subtype)) {
+ printf("invalid size of subtype section %zd\n",
+ data->d_size);
+ return 1;
+ }
+ memcpy(&subtype, data->d_buf,
+ sizeof(union bpf_prog_subtype));
+ has_subtype = true;
} else if (shdr.sh_type == SHT_SYMTAB) {
symbols = data;
}
@@ -338,14 +359,14 @@ int load_bpf_file(char *path)
memcmp(shname_prog, "tracepoint/", 11) == 0 ||
memcmp(shname_prog, "xdp", 3) == 0 ||
memcmp(shname_prog, "perf_event", 10) == 0 ||
- memcmp(shname_prog, "socket", 6) == 0)
+ memcmp(shname_prog, "socket", 6) == 0 ||
+ memcmp(shname_prog, "landlock", 8) == 0)
load_and_attach(shname_prog, insns, data_prog->d_size);
}
}
/* load programs that don't use maps */
for (i = 1; i < ehdr.e_shnum; i++) {
-
if (processed_sec[i])
continue;
@@ -357,7 +378,8 @@ int load_bpf_file(char *path)
memcmp(shname, "tracepoint/", 11) == 0 ||
memcmp(shname, "xdp", 3) == 0 ||
memcmp(shname, "perf_event", 10) == 0 ||
- memcmp(shname, "socket", 6) == 0)
+ memcmp(shname, "socket", 6) == 0 ||
+ memcmp(shname, "landlock", 8) == 0)
load_and_attach(shname, data->d_buf, data->d_size);
}
diff --git a/samples/bpf/landlock1_kern.c b/samples/bpf/landlock1_kern.c
new file mode 100644
index 000000000000..b8a9b0ca84c9
--- /dev/null
+++ b/samples/bpf/landlock1_kern.c
@@ -0,0 +1,46 @@
+/*
+ * Landlock rule - partial read-only filesystem
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define KBUILD_MODNAME "foo"
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/stat.h> /* S_ISCHR() */
+#include "bpf_helpers.h"
+
+SEC("landlock1")
+static int landlock_fs_prog1(struct landlock_context *ctx)
+{
+ char fmt_error[] = "landlock1: error: get_mode:%lld\n";
+ char fmt_name[] = "landlock1: syscall:%d\n";
+ long long ret;
+
+ if (!(ctx->arg2 & LANDLOCK_ACTION_FS_WRITE))
+ return 0;
+ ret = bpf_handle_fs_get_mode((void *)ctx->arg1);
+ if (ret < 0) {
+ bpf_trace_printk(fmt_error, sizeof(fmt_error), ret);
+ return 1;
+ }
+ if (S_ISCHR(ret))
+ return 0;
+ bpf_trace_printk(fmt_name, sizeof(fmt_name), ctx->syscall_nr);
+ return 1;
+}
+
+SEC("subtype")
+static union bpf_prog_subtype _subtype = {
+ .landlock_rule = {
+ .version = 1,
+ .event = LANDLOCK_SUBTYPE_EVENT_FS,
+ .ability = LANDLOCK_SUBTYPE_ABILITY_DEBUG,
+ }
+};
+
+SEC("license")
+static const char _license[] = "GPL";
diff --git a/samples/bpf/landlock1_user.c b/samples/bpf/landlock1_user.c
new file mode 100644
index 000000000000..6f79eb0ee6db
--- /dev/null
+++ b/samples/bpf/landlock1_user.c
@@ -0,0 +1,102 @@
+/*
+ * Landlock sandbox - partial read-only filesystem
+ *
+ * Copyright © 2017 Mickaël Salaün <mic@digikod.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include "bpf_load.h"
+#include "libbpf.h"
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h> /* open() */
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/prctl.h>
+#include <linux/seccomp.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifndef seccomp
+static int seccomp(unsigned int op, unsigned int flags, void *args)
+{
+ errno = 0;
+ return syscall(__NR_seccomp, op, flags, args);
+}
+#endif
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+#define MAX_ERRNO 4095
+
+
+struct landlock_rule {
+ enum landlock_subtype_event event;
+ struct bpf_insn *bpf;
+ size_t size;
+};
+
+static int apply_sandbox(int prog_fd)
+{
+ int ret = 0;
+
+ /* set up the test sandbox */
+ if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+ perror("prctl(no_new_priv)");
+ return 1;
+ }
+ if (seccomp(SECCOMP_APPEND_LANDLOCK_RULE, 0, &prog_fd)) {
+ perror("seccomp(set_hook)");
+ ret = 1;
+ }
+ close(prog_fd);
+
+ return ret;
+}
+
+int main(int argc, char * const argv[], char * const *envp)
+{
+ char filename[256];
+ char *cmd_path;
+ char * const *cmd_argv;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <cmd> [args]...\n\n", argv[0]);
+ fprintf(stderr, "Launch a command in a read-only environment "
+ "(except for character devices).\n");
+ fprintf(stderr, "Display debug with: "
+ "cat /sys/kernel/debug/tracing/trace_pipe &\n");
+ return 1;
+ }
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+ if (!prog_fd[0]) {
+ if (errno) {
+ printf("load_bpf_file: %s\n", strerror(errno));
+ } else {
+ printf("load_bpf_file: Error\n");
+ }
+ return 1;
+ }
+
+ if (apply_sandbox(prog_fd[0]))
+ return 1;
+ cmd_path = argv[1];
+ cmd_argv = argv + 1;
+ fprintf(stderr, "Launching a new sandboxed process.\n");
+ execve(cmd_path, cmd_argv, envp);
+ perror("execve");
+ return 1;
+}
--
2.11.0