Merge remote-tracking branch 'qcom_sm8250/lineage-20' into lineage-22.2
Change-Id: I41d3b2e791439f1ef1d34ced81a40912461bb3b0

kernel/bpf/core.c
@@ -627,6 +627,16 @@ static void bpf_jit_uncharge_modmem(u32 pages)
 	atomic_long_sub(pages, &bpf_jit_current);
 }
 
+void *__weak bpf_jit_alloc_exec(unsigned long size)
+{
+	return module_alloc(size);
+}
+
+void __weak bpf_jit_free_exec(void *addr)
+{
+	module_memfree(addr);
+}
+
 #if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
 bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
 {
@@ -652,7 +662,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 	if (bpf_jit_charge_modmem(pages))
 		return NULL;
-	hdr = module_alloc(size);
+	hdr = bpf_jit_alloc_exec(size);
 	if (!hdr) {
 		bpf_jit_uncharge_modmem(pages);
 		return NULL;
@@ -677,7 +687,7 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
 	u32 pages = hdr->pages;
 
-	module_memfree(hdr);
+	bpf_jit_free_exec(hdr);
 	bpf_jit_uncharge_modmem(pages);
 }
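The two __weak hooks introduced above exist so an architecture can place BPF JIT images somewhere other than the module area; the bpf_jit_binary_alloc()/bpf_jit_binary_free() changes route all JIT allocations through them. As a sketch of what an override can look like (modeled loosely on arm64's dedicated JIT region; the BPF_JIT_REGION_START/END bounds are assumptions, not values from this tree):

void *bpf_jit_alloc_exec(unsigned long size)
{
	/* Sketch: carve JIT images out of a dedicated vmalloc region
	 * instead of the module area. Region macros are illustrative. */
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

void bpf_jit_free_exec(void *addr)
{
	vfree(addr);
}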
kernel/cfi.c (79 lines changed)
@@ -1,16 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * CFI (Control Flow Integrity) error and slowpath handling
+ * Clang Control Flow Integrity (CFI) error and slowpath handling.
  *
- * Copyright (C) 2017 Google, Inc.
+ * Copyright (C) 2019 Google LLC
  */
 
 #include <linux/gfp.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/printk.h>
 #include <linux/ratelimit.h>
 #include <linux/rcupdate.h>
-#include <linux/spinlock.h>
 #include <asm/bug.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
@@ -25,12 +26,10 @@
 
 static inline void handle_cfi_failure(void *ptr)
 {
-#ifdef CONFIG_CFI_PERMISSIVE
-	WARN_RATELIMIT(1, "CFI failure (target: %pF):\n", ptr);
-#else
-	pr_err("CFI failure (target: %pF):\n", ptr);
-	BUG();
-#endif
+	if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
+		WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
+	else
+		panic("CFI failure (target: %pS)\n", ptr);
 }
 
 #ifdef CONFIG_MODULES
@@ -44,7 +43,7 @@ struct shadow_range {
 	unsigned long max_page;
 };
 
-#define SHADOW_ORDER	1
+#define SHADOW_ORDER	2
 #define SHADOW_PAGES	(1 << SHADOW_ORDER)
 #define SHADOW_SIZE \
 	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
@@ -57,8 +56,8 @@ struct cfi_shadow {
 	u16 shadow[SHADOW_SIZE];
 };
 
-static DEFINE_SPINLOCK(shadow_update_lock);
-static struct cfi_shadow __rcu *cfi_shadow __read_mostly = NULL;
+static DEFINE_MUTEX(shadow_update_lock);
+static struct cfi_shadow __rcu *cfi_shadow __read_mostly;
 
 static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
 {
@@ -79,7 +78,8 @@ static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
 static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
 					  int index)
 {
-	BUG_ON(index < 0 || index >= SHADOW_SIZE);
+	if (unlikely(index < 0 || index >= SHADOW_SIZE))
+		return 0;
 
 	if (unlikely(s->shadow[index] == SHADOW_INVALID))
 		return 0;
@@ -90,7 +90,8 @@ static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
 static inline unsigned long shadow_to_page(const struct cfi_shadow *s,
 					   int index)
 {
-	BUG_ON(index < 0 || index >= SHADOW_SIZE);
+	if (unlikely(index < 0 || index >= SHADOW_SIZE))
+		return 0;
 
 	return (s->r.min_page + index) << PAGE_SHIFT;
 }
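Throughout these helpers the shadow is a flat array of u16 slots, one per module page: ptr_to_shadow() maps an address to a slot and shadow_to_page() above is its inverse. A minimal sketch of the forward mapping, reconstructed from those two helpers (the function name is made up):

/* Sketch: the page-granular mapping implied by shadow_to_page() above.
 * An address maps to slot (ptr >> PAGE_SHIFT) - r.min_page; each slot
 * holds the index of the page containing the module's __cfi_check. */
static int sketch_ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
	unsigned long page = ptr >> PAGE_SHIFT;

	if (page < s->r.min_page || page - s->r.min_page >= SHADOW_SIZE)
		return -1;	/* Not addressable with the shadow */

	return (int)(page - s->r.min_page);
}

With SHADOW_ORDER raised from 1 to 2, the shadow grows from two to four pages, roughly doubling the module address range the fast path can cover before falling back to the slower module lookup.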
@@ -138,7 +139,8 @@ static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
 	unsigned long check = (unsigned long)mod->cfi_check;
 	int check_index = ptr_to_shadow(s, check);
 
-	BUG_ON((check & PAGE_MASK) != check);	/* Must be page aligned */
+	if (unlikely((check & PAGE_MASK) != check))
+		return;	/* Must be page aligned */
 
 	if (check_index < 0)
 		return;	/* Module not addressable with shadow */
@@ -151,9 +153,10 @@ static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
 	/* For each page, store the check function index in the shadow */
 	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
 		int index = ptr_to_shadow(s, ptr);
+
 		if (index >= 0) {
-			/* Assume a page only contains code for one module */
-			BUG_ON(s->shadow[index] != SHADOW_INVALID);
+			/* Each page must only contain one module */
+			WARN_ON(s->shadow[index] != SHADOW_INVALID);
 			s->shadow[index] = (u16)check_index;
 		}
 	}
@@ -172,6 +175,7 @@ static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod)
 
 	for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
 		int index = ptr_to_shadow(s, ptr);
+
 		if (index >= 0)
 			s->shadow[index] = SHADOW_INVALID;
 	}
@@ -186,14 +190,12 @@ static void update_shadow(struct module *mod, unsigned long min_addr,
 	struct cfi_shadow *next = (struct cfi_shadow *)
 		__get_free_pages(GFP_KERNEL, SHADOW_ORDER);
 
-	BUG_ON(!next);
-
 	next->r.mod_min_addr = min_addr;
 	next->r.mod_max_addr = max_addr;
 	next->r.min_page = min_addr >> PAGE_SHIFT;
 	next->r.max_page = max_addr >> PAGE_SHIFT;
 
-	spin_lock(&shadow_update_lock);
+	mutex_lock(&shadow_update_lock);
 	prev = rcu_dereference_protected(cfi_shadow, 1);
 	prepare_next_shadow(prev, next);
 
@@ -201,7 +203,7 @@ static void update_shadow(struct module *mod, unsigned long min_addr,
 	set_memory_ro((unsigned long)next, SHADOW_PAGES);
 	rcu_assign_pointer(cfi_shadow, next);
 
-	spin_unlock(&shadow_update_lock);
+	mutex_unlock(&shadow_update_lock);
 	synchronize_rcu();
 
 	if (prev) {
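shadow_update_lock changes from a spinlock to a mutex because the write side sleeps: __get_free_pages(GFP_KERNEL, ...) can block, and set_memory_ro()/set_memory_rw() may sleep on some architectures, which is illegal with a spinlock held. A generic sketch of the constraint (not code from this file):

/* Sleeping with a spinlock held is a bug; with CONFIG_DEBUG_ATOMIC_SLEEP
 * the kernel prints "BUG: sleeping function called from invalid context". */
spin_lock(&lock);
set_memory_ro(addr, npages);	/* may sleep -> not allowed here */
spin_unlock(&lock);

mutex_lock(&lock);		/* sleepable lock */
set_memory_ro(addr, npages);	/* fine: still serialized, may sleep */
mutex_unlock(&lock);

Dropping BUG_ON(!next) is harmless in practice: a NULL next would fault on the very next assignment anyway.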
@@ -245,33 +247,36 @@ static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
 
 static inline cfi_check_fn find_module_cfi_check(void *ptr)
 {
+	cfi_check_fn f = CFI_CHECK_FN;
 	struct module *mod;
 
 	preempt_disable();
 	mod = __module_address((unsigned long)ptr);
+	if (mod)
+		f = mod->cfi_check;
 	preempt_enable();
 
-	if (mod)
-		return mod->cfi_check;
-
-	return CFI_CHECK_FN;
+	return f;
 }
 
 static inline cfi_check_fn find_cfi_check(void *ptr)
 {
-#ifdef CONFIG_CFI_CLANG_SHADOW
+	bool rcu;
 	cfi_check_fn f;
 
-	if (!rcu_access_pointer(cfi_shadow))
-		return CFI_CHECK_FN; /* No loaded modules */
+	rcu = rcu_is_watching();
+	if (!rcu)
+		rcu_nmi_enter();
 
+#ifdef CONFIG_CFI_CLANG_SHADOW
 	/* Look up the __cfi_check function to use */
-	rcu_read_lock();
-	f = ptr_to_check_fn(rcu_dereference(cfi_shadow), (unsigned long)ptr);
-	rcu_read_unlock();
+	rcu_read_lock_sched();
+	f = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow),
+			    (unsigned long)ptr);
+	rcu_read_unlock_sched();
 
 	if (f)
-		return f;
+		goto out;
 
 	/*
 	 * Fall back to find_module_cfi_check, which works also for a larger
@@ -279,7 +284,13 @@ static inline cfi_check_fn find_cfi_check(void *ptr)
 	 */
 #endif /* CONFIG_CFI_CLANG_SHADOW */
 
-	return find_module_cfi_check(ptr);
+	f = find_module_cfi_check(ptr);
+
+out:
+	if (!rcu)
+		rcu_nmi_exit();
+
+	return f;
 }
 
 void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
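The lookup side switches from rcu_read_lock() to rcu_read_lock_sched() and brackets the walk with rcu_nmi_enter()/rcu_nmi_exit() when RCU is not watching, so a CFI check arriving from idle, irq, or NMI-like context still sits in a valid read-side critical section while update_shadow() waits in synchronize_rcu(). A self-contained sketch of the pattern (names are invented; it assumes, as this file does, that rcu_nmi_enter()/rcu_nmi_exit() may be called here):

#include <linux/hardirq.h>
#include <linux/rcupdate.h>

struct cfg { int val; };
static struct cfg __rcu *cfg_slot;

/* Sketch: an RCU-sched read that stays valid even when RCU is not
 * watching the current CPU (e.g. in an NMI-like path). */
static int read_cfg(void)
{
	bool watching = rcu_is_watching();
	struct cfg *c;
	int val = 0;

	if (!watching)
		rcu_nmi_enter();

	rcu_read_lock_sched();
	c = rcu_dereference_sched(cfg_slot);
	if (c)
		val = c->val;
	rcu_read_unlock_sched();

	if (!watching)
		rcu_nmi_exit();

	return val;
}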
kernel/kallsyms.c
@@ -158,6 +158,26 @@ static unsigned long kallsyms_sym_address(int idx)
 	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
 }
 
+#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_THINLTO)
+/*
+ * LLVM appends a hash to static function names when ThinLTO and CFI are
+ * both enabled, which causes confusion and potentially breaks user space
+ * tools, so we will strip the postfix from expanded symbol names.
+ */
+static inline char *cleanup_symbol_name(char *s)
+{
+	char *res = NULL;
+
+	res = strrchr(s, '$');
+	if (res)
+		*res = '\0';
+
+	return res;
+}
+#else
+static inline char *cleanup_symbol_name(char *s) { return NULL; }
+#endif
+
 /* Lookup the address for this symbol. Returns 0 if not found. */
 unsigned long kallsyms_lookup_name(const char *name)
 {
@@ -170,6 +190,9 @@ unsigned long kallsyms_lookup_name(const char *name)
 
 		if (strcmp(namebuf, name) == 0)
 			return kallsyms_sym_address(i);
+
+		if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0)
+			return kallsyms_sym_address(i);
 	}
 	return module_kallsyms_lookup_name(name);
 }
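The net effect on lookups: a ThinLTO+CFI build can expand a static symbol foo into foo$ followed by a content hash, so kallsyms_lookup_name() now retries the comparison after truncating at the last '$'. A tiny illustration (the mangled name is made up):

/* Sketch: what cleanup_symbol_name() does to a ThinLTO-mangled name.
 * "foo$39cf2532" is a hypothetical example, not a real symbol. */
char namebuf[] = "foo$39cf2532";
char *res = strrchr(namebuf, '$');

if (res)
	*res = '\0';	/* namebuf now compares equal to "foo" */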
@@ -268,30 +291,6 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
 	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
 
-#ifdef CONFIG_CFI_CLANG
-/*
- * LLVM appends .cfi to function names when CONFIG_CFI_CLANG is enabled,
- * which causes confusion and potentially breaks user space tools, so we
- * will strip the postfix from expanded symbol names.
- */
-static inline void cleanup_symbol_name(char *s)
-{
-	char *res;
-
-#ifdef CONFIG_THINLTO
-	/* Filter out hashes from static functions */
-	res = strrchr(s, '$');
-	if (res)
-		*res = '\0';
-#endif
-
-	res = strrchr(s, '.');
-	if (res && !strcmp(res, ".cfi"))
-		*res = '\0';
-}
-#else
-static inline void cleanup_symbol_name(char *s) {}
-#endif
-
 /*
  * Lookup an address
  * - modname is set to NULL if it's in the kernel.
kernel/module.c
@@ -4235,10 +4235,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 static void cfi_init(struct module *mod)
 {
 #ifdef CONFIG_CFI_CLANG
-	preempt_disable();
+	rcu_read_lock_sched();
 	mod->cfi_check =
 		(cfi_check_fn)mod_find_symname(mod, CFI_CHECK_FN_NAME);
-	preempt_enable();
+	rcu_read_unlock_sched();
 	cfi_module_add(mod, module_addr_min, module_addr_max);
 #endif
 }
kernel/time/time.c
@@ -576,7 +576,7 @@ EXPORT_SYMBOL(ns_to_timespec64);
  * - all other values are converted to jiffies by either multiplying
  *   the input value by a factor or dividing it with a factor and
  *   handling any 32-bit overflows.
- *   for the details see __msecs_to_jiffies()
+ *   for the details see _msecs_to_jiffies()
  *
  * msecs_to_jiffies() checks for the passed in value being a constant
  * via __builtin_constant_p() allowing gcc to eliminate most of the
kernel/trace/Makefile
@@ -28,6 +28,8 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE
 GCOV_PROFILE := y
 endif
 
+CFLAGS_bpf_trace.o := -I$(src)
+
 CFLAGS_trace_benchmark.o := -I$(src)
 CFLAGS_trace_events_filter.o := -I$(src)
kernel/trace/bpf_trace.c
@@ -11,12 +11,16 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/kprobes.h>
+#include <linux/spinlock.h>
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
 
 #include "trace_probe.h"
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include "bpf_trace.h"
+
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -156,6 +160,30 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 	return &bpf_probe_write_user_proto;
 }
 
+static DEFINE_RAW_SPINLOCK(trace_printk_lock);
+
+#define BPF_TRACE_PRINTK_SIZE	1024
+
+static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
+{
+	static char buf[BPF_TRACE_PRINTK_SIZE];
+	unsigned long flags;
+	va_list ap;
+	int ret;
+
+	raw_spin_lock_irqsave(&trace_printk_lock, flags);
+	va_start(ap, fmt);
+	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+	/* vsnprintf() will not append null for zero-length strings */
+	if (ret == 0)
+		buf[0] = '\0';
+	trace_bpf_trace_printk(buf);
+	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+
+	return ret;
+}
+
 /*
  * Only limited trace_printk() conversion specifiers allowed:
  * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
@@ -246,8 +274,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
  */
 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
 #define __BPF_TP(...)						\
-	__trace_printk(0 /* Fake ip */,				\
-		       fmt, ##__VA_ARGS__)
+	bpf_do_trace_printk(fmt, ##__VA_ARGS__)
 
 #define __BPF_ARG1_TP(...)						\
 	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
@@ -284,10 +311,15 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 {
 	/*
-	 * this program might be calling bpf_trace_printk,
-	 * so allocate per-cpu printk buffers
+	 * This program might be calling bpf_trace_printk,
+	 * so enable the associated bpf_trace/bpf_trace_printk event.
+	 * Repeat this each time as it is possible a user has
+	 * disabled bpf_trace_printk events.  By loading a program
+	 * calling bpf_trace_printk() however the user has expressed
+	 * the intent to see such events.
 	 */
-	trace_printk_init_buffers();
+	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
+		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 
 	return &bpf_trace_printk_proto;
 }
kernel/trace/bpf_trace.h (new file, 34 lines)
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf_trace
+
+#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_BPF_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bpf_trace_printk,
+
+	TP_PROTO(const char *bpf_string),
+
+	TP_ARGS(bpf_string),
+
+	TP_STRUCT__entry(
+		__string(bpf_string, bpf_string)
+	),
+
+	TP_fast_assign(
+		__assign_str(bpf_string, bpf_string);
+	),
+
+	TP_printk("%s", __get_str(bpf_string))
+);
+
+#endif /* _TRACE_BPF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE bpf_trace
+
+#include <trace/define_trace.h>
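With this event defined, bpf_trace_printk() output flows through an ordinary tracepoint instead of the raw trace_printk() buffer, so it can be toggled from tracefs (events/bpf_trace/bpf_trace_printk/enable) and read from trace_pipe like any other event; bpf_get_trace_printk_proto() above re-enables it on every program load. A sketch of a BPF program that would feed the event, written against common libbpf conventions (the probe target and section name are illustrative, not from this commit):

/* Sketch: minimal kprobe program calling bpf_trace_printk(); each call
 * now emits one bpf_trace/bpf_trace_printk trace event. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_open")
int trace_open(void *ctx)
{
	char fmt[] = "do_sys_open fired\n";

	bpf_trace_printk(fmt, sizeof(fmt));
	return 0;
}

char _license[] SEC("license") = "GPL";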
kernel/trace/trace_event_perf.c
@@ -354,10 +354,16 @@ void perf_uprobe_destroy(struct perf_event *p_event)
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
 	struct trace_event_call *tp_event = p_event->tp_event;
+	struct hw_perf_event *hwc = &p_event->hw;
 
 	if (!(flags & PERF_EF_START))
 		p_event->hw.state = PERF_HES_STOPPED;
 
+	if (is_sampling_event(p_event)) {
+		hwc->last_period = hwc->sample_period;
+		perf_swevent_set_period(p_event);
+	}
+
 	/*
 	 * If TRACE_REG_PERF_ADD returns false; no custom action was performed
 	 * and we need to take the default action of enqueueing our event on
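The perf_trace_add() change re-arms the software-event sample period each time a sampling event is added, so the event resumes counting against its configured period. is_sampling_event() is simply a check for a user-requested period; paraphrasing its definition in include/linux/perf_event.h:

/* An event is a sampling event iff the user asked for a sample period. */
static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}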