ANDROID: bpf: validate bpf_func when BPF_JIT is enabled with CFI

With CONFIG_BPF_JIT, the kernel makes indirect calls to dynamically
generated code, which the compile-time Control-Flow Integrity (CFI)
checking cannot validate. This change adds basic sanity checking to
ensure we are jumping to a valid location, which narrows down the
attack surface on the stored pointer.

In addition, this change adds a weak arch_bpf_jit_check_func function,
which architectures that implement BPF JIT can override to perform
additional validation, such as verifying that the pointer points to
the correct memory region.

Bug: 140377409
Change-Id: I8ebac6637ab6bd9db44716b1c742add267298669
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
(cherry picked from commit 9a11e8da57)
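For illustration, a minimal sketch of how a call site exercises the new path; the run_filter() wrapper below is hypothetical, while BPF_PROG_RUN() and struct sk_buff come from the existing kernel headers:

#include <linux/filter.h>
#include <linux/skbuff.h>

/*
 * Hypothetical caller: with BPF_JIT and CFI_CLANG enabled, BPF_PROG_RUN()
 * now expands to bpf_call_func(), which verifies the bpf_binary_header
 * magic (and arch_bpf_jit_check_func()) before making the indirect call
 * to JITed code. Interpreted programs still go through the normal
 * CFI-checked indirect call.
 */
static unsigned int run_filter(const struct bpf_prog *prog,
                               struct sk_buff *skb)
{
        return BPF_PROG_RUN(prog, skb);
}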
include/linux/filter.h
@@ -466,7 +466,12 @@ struct sock_fprog_kern {
 	struct sock_filter *filter;
 };
 
+#define BPF_BINARY_HEADER_MAGIC	0x05de0e82
+
 struct bpf_binary_header {
+#ifdef CONFIG_CFI_CLANG
+	u32 magic;
+#endif
 	u32 pages;
 	/* Some arches need word alignment for their instructions */
 	u8 image[] __aligned(4);
@@ -506,7 +511,62 @@ struct sk_filter {
 	struct bpf_prog *prog;
 };
 
-#define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi)
+#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
+/*
+ * With JIT, the kernel makes an indirect call to dynamically generated
+ * code. Use bpf_call_func to perform additional validation of the call
+ * target to narrow down attack surface. Architectures implementing BPF
+ * JIT can override arch_bpf_jit_check_func for arch-specific checking.
+ */
+extern bool arch_bpf_jit_check_func(const struct bpf_prog *prog);
+
+static inline unsigned int __bpf_call_func(const struct bpf_prog *prog,
+					   const void *ctx)
+{
+	/* Call interpreter with CFI checking. */
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp);
+
+static inline unsigned int __nocfi bpf_call_func(const struct bpf_prog *prog,
+						 const void *ctx)
+{
+	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+
+	if (!IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) && !prog->jited)
+		return __bpf_call_func(prog, ctx);
+
+	/*
+	 * We are about to call dynamically generated code. Check that the
+	 * page has bpf_binary_header with a valid magic to limit possible
+	 * call targets.
+	 */
+	BUG_ON(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
+	       !arch_bpf_jit_check_func(prog));
+
+	/* Call jited function without CFI checking. */
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
+{
+	hdr->magic = BPF_BINARY_HEADER_MAGIC;
+}
+#else
+static inline unsigned int bpf_call_func(const struct bpf_prog *prog,
+					 const void *ctx)
+{
+	return prog->bpf_func(ctx, prog->insnsi);
+}
+
+static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
+{
+}
+#endif
+
+#define BPF_PROG_RUN(filter, ctx) bpf_call_func(filter, ctx)
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
kernel/bpf/core.c
@@ -621,6 +621,14 @@ static void bpf_jit_uncharge_modmem(u32 pages)
 	atomic_long_sub(pages, &bpf_jit_current);
 }
 
+#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
+bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
+{
+	return true;
+}
+EXPORT_SYMBOL(arch_bpf_jit_check_func);
+#endif
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
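A rough sketch of what an architecture-specific strong definition could look like; the arm64-style region and alignment checks (MODULES_VADDR, MODULES_END, IS_ALIGNED) are assumptions for illustration, not part of this change:

#include <linux/filter.h>
#include <linux/kernel.h>
#include <asm/memory.h>

/* Hypothetical arch override, e.g. provided by an arm64 JIT. */
bool arch_bpf_jit_check_func(const struct bpf_prog *prog)
{
	const uintptr_t func = (const uintptr_t)prog->bpf_func;

	/*
	 * JITed images are allocated from the module region, so reject
	 * call targets that are misaligned or fall outside of it.
	 */
	if (!IS_ALIGNED(func, sizeof(u32)))
		return false;

	return func >= MODULES_VADDR && func < MODULES_END;
}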
@@ -647,6 +655,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
+	bpf_jit_set_header_magic(hdr);
 	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
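For context, a sketch of how an arch JIT might consume bpf_jit_binary_alloc(), so that every image allocated this way carries the magic; the alloc_jit_image() helper and its parameters are illustrative only, not from the tree:

#include <linux/filter.h>

/* Illustrative-only allocation path; error handling trimmed. */
static u8 *alloc_jit_image(struct bpf_prog *prog, unsigned int image_size,
			   bpf_jit_fill_hole_t fill_hole)
{
	struct bpf_binary_header *header;
	u8 *image;

	/* The header returned here now has hdr->magic stamped under CFI. */
	header = bpf_jit_binary_alloc(image_size, &image, sizeof(u32),
				      fill_hole);
	if (!header)
		return NULL;

	/* ...emit instructions into image, then publish the program... */
	prog->bpf_func = (void *)image;
	prog->jited = 1;

	return image;
}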
net/Kconfig
@@ -287,7 +287,6 @@ config BPF_JIT
 	bool "enable BPF Just In Time compiler"
 	depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
 	depends on MODULES
-	depends on !CFI
 	---help---
 	  Berkeley Packet Filter filtering capabilities are normally handled
 	  by an interpreter. This option allows kernel to generate a native