ANDROID: Update CFI patchset from android11-5.4

Change-Id: Iff956ce7c2b89d3a442b0b81deba0da323f5d18b
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
Author:    Alexander Winkowski
Date:      2025-03-29 14:50:48 +00:00
Committer: basamaryan
Commit:    fa0cd620e3 (parent b891b37591)

7 changed files with 83 additions and 71 deletions

@@ -1,16 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* CFI (Control Flow Integrity) error and slowpath handling
* Clang Control Flow Integrity (CFI) error and slowpath handling.
*
* Copyright (C) 2017 Google, Inc.
* Copyright (C) 2019 Google LLC
*/
#include <linux/gfp.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
@@ -25,12 +26,10 @@
static inline void handle_cfi_failure(void *ptr)
{
#ifdef CONFIG_CFI_PERMISSIVE
WARN_RATELIMIT(1, "CFI failure (target: %pF):\n", ptr);
#else
pr_err("CFI failure (target: %pF):\n", ptr);
BUG();
#endif
if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
else
panic("CFI failure (target: %pS)\n", ptr);
}
#ifdef CONFIG_MODULES
@@ -44,7 +43,7 @@ struct shadow_range {
unsigned long max_page;
};
#define SHADOW_ORDER 1
#define SHADOW_ORDER 2
#define SHADOW_PAGES (1 << SHADOW_ORDER)
#define SHADOW_SIZE \
((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
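
For scale, the hunk above bumps SHADOW_ORDER from 1 to 2, doubling the shadow allocation from two pages to four. A quick standalone check of the resulting arithmetic (assuming 4 KiB pages and 64-bit longs, and mirroring struct shadow_range from the four fields update_shadow() assigns further down; purely illustrative):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* assumed 4 KiB pages */

struct shadow_range {			/* the fields update_shadow() fills in */
	unsigned long mod_min_addr, mod_max_addr;
	unsigned long min_page, max_page;
};

#define SHADOW_ORDER	2
#define SHADOW_PAGES	(1UL << SHADOW_ORDER)
#define SHADOW_SIZE \
	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(unsigned short))

int main(void)
{
	/* One u16 slot per module page, so coverage is SHADOW_SIZE pages. */
	printf("slots: %zu, module range covered: ~%zu MiB\n",
	       (size_t)SHADOW_SIZE,
	       ((size_t)(SHADOW_SIZE << PAGE_SHIFT)) >> 20);
	return 0;
}
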
@@ -57,8 +56,8 @@ struct cfi_shadow {
u16 shadow[SHADOW_SIZE];
};
static DEFINE_SPINLOCK(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly = NULL;
static DEFINE_MUTEX(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;
static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
@@ -79,7 +78,8 @@ static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
int index)
{
BUG_ON(index < 0 || index >= SHADOW_SIZE);
if (unlikely(index < 0 || index >= SHADOW_SIZE))
return 0;
if (unlikely(s->shadow[index] == SHADOW_INVALID))
return 0;
@@ -90,7 +90,8 @@ static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
static inline unsigned long shadow_to_page(const struct cfi_shadow *s,
int index)
{
BUG_ON(index < 0 || index >= SHADOW_SIZE);
if (unlikely(index < 0 || index >= SHADOW_SIZE))
return 0;
return (s->r.min_page + index) << PAGE_SHIFT;
}
@@ -138,7 +139,8 @@ static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
unsigned long check = (unsigned long)mod->cfi_check;
int check_index = ptr_to_shadow(s, check);
BUG_ON((check & PAGE_MASK) != check); /* Must be page aligned */
if (unlikely((check & PAGE_MASK) != check))
return; /* Must be page aligned */
if (check_index < 0)
return; /* Module not addressable with shadow */
@@ -151,9 +153,10 @@ static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
/* For each page, store the check function index in the shadow */
for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
int index = ptr_to_shadow(s, ptr);
if (index >= 0) {
/* Assume a page only contains code for one module */
BUG_ON(s->shadow[index] != SHADOW_INVALID);
/* Each page must only contain one module */
WARN_ON(s->shadow[index] != SHADOW_INVALID);
s->shadow[index] = (u16)check_index;
}
}
@@ -172,6 +175,7 @@ static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod)
for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
int index = ptr_to_shadow(s, ptr);
if (index >= 0)
s->shadow[index] = SHADOW_INVALID;
}
@@ -186,14 +190,12 @@ static void update_shadow(struct module *mod, unsigned long min_addr,
struct cfi_shadow *next = (struct cfi_shadow *)
__get_free_pages(GFP_KERNEL, SHADOW_ORDER);
BUG_ON(!next);
next->r.mod_min_addr = min_addr;
next->r.mod_max_addr = max_addr;
next->r.min_page = min_addr >> PAGE_SHIFT;
next->r.max_page = max_addr >> PAGE_SHIFT;
spin_lock(&shadow_update_lock);
mutex_lock(&shadow_update_lock);
prev = rcu_dereference_protected(cfi_shadow, 1);
prepare_next_shadow(prev, next);
@@ -201,7 +203,7 @@ static void update_shadow(struct module *mod, unsigned long min_addr,
set_memory_ro((unsigned long)next, SHADOW_PAGES);
rcu_assign_pointer(cfi_shadow, next);
spin_unlock(&shadow_update_lock);
mutex_unlock(&shadow_update_lock);
synchronize_rcu();
if (prev) {
@@ -245,33 +247,36 @@ static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
static inline cfi_check_fn find_module_cfi_check(void *ptr)
{
cfi_check_fn f = CFI_CHECK_FN;
struct module *mod;
preempt_disable();
mod = __module_address((unsigned long)ptr);
if (mod)
f = mod->cfi_check;
preempt_enable();
if (mod)
return mod->cfi_check;
return CFI_CHECK_FN;
return f;
}
static inline cfi_check_fn find_cfi_check(void *ptr)
{
#ifdef CONFIG_CFI_CLANG_SHADOW
bool rcu;
cfi_check_fn f;
if (!rcu_access_pointer(cfi_shadow))
return CFI_CHECK_FN; /* No loaded modules */
rcu = rcu_is_watching();
if (!rcu)
rcu_nmi_enter();
#ifdef CONFIG_CFI_CLANG_SHADOW
/* Look up the __cfi_check function to use */
rcu_read_lock();
f = ptr_to_check_fn(rcu_dereference(cfi_shadow), (unsigned long)ptr);
rcu_read_unlock();
rcu_read_lock_sched();
f = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow),
(unsigned long)ptr);
rcu_read_unlock_sched();
if (f)
return f;
goto out;
/*
* Fall back to find_module_cfi_check, which works also for a larger
@@ -279,7 +284,13 @@ static inline cfi_check_fn find_cfi_check(void *ptr)
*/
#endif /* CONFIG_CFI_CLANG_SHADOW */
return find_module_cfi_check(ptr);
f = find_module_cfi_check(ptr);
out:
if (!rcu)
rcu_nmi_exit();
return f;
}
void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
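
The cfi.c hunks end at the cfi_slowpath_handler() declaration; the handler body itself is not part of this diff. Below is a rough, purely illustrative userspace model of how the pieces named above fit together, with fprintf()/abort() standing in for WARN_RATELIMIT()/panic() and a hard-coded stub in place of the shadow/module lookup:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag);

static const bool cfi_permissive = true;	/* models CONFIG_CFI_PERMISSIVE */

static void handle_cfi_failure(void *ptr)
{
	if (cfi_permissive)
		fprintf(stderr, "CFI failure (target: %p)\n", ptr);	/* WARN_RATELIMIT() */
	else
		abort();						/* panic() */
}

/* Stub: the kernel resolves this via the cfi_shadow or __module_address(). */
static cfi_check_fn find_cfi_check(void *ptr)
{
	(void)ptr;
	return NULL;	/* pretend the target is not in any checked module */
}

static void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
	cfi_check_fn check = find_cfi_check(ptr);

	if (check)
		check(id, ptr, diag);		/* let the module's __cfi_check decide */
	else
		handle_cfi_failure(ptr);	/* no check function for this target */
}

int main(void)
{
	int dummy;

	cfi_slowpath_handler(0, &dummy, NULL);
	return 0;
}
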

@@ -158,6 +158,26 @@ static unsigned long kallsyms_sym_address(int idx)
return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}
#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_THINLTO)
/*
* LLVM appends a hash to static function names when ThinLTO and CFI are
* both enabled, which causes confusion and potentially breaks user space
* tools, so we will strip the postfix from expanded symbol names.
*/
static inline char *cleanup_symbol_name(char *s)
{
char *res = NULL;
res = strrchr(s, '$');
if (res)
*res = '\0';
return res;
}
#else
static inline char *cleanup_symbol_name(char *s) { return NULL; }
#endif
/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
@@ -170,6 +190,9 @@ unsigned long kallsyms_lookup_name(const char *name)
if (strcmp(namebuf, name) == 0)
return kallsyms_sym_address(i);
if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0)
return kallsyms_sym_address(i);
}
return module_kallsyms_lookup_name(name);
}
@@ -268,30 +291,6 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
!!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}
#ifdef CONFIG_CFI_CLANG
/*
* LLVM appends .cfi to function names when CONFIG_CFI_CLANG is enabled,
* which causes confusion and potentially breaks user space tools, so we
* will strip the postfix from expanded symbol names.
*/
static inline void cleanup_symbol_name(char *s)
{
char *res;
#ifdef CONFIG_THINLTO
/* Filter out hashes from static functions */
res = strrchr(s, '$');
if (res)
*res = '\0';
#endif
res = strrchr(s, '.');
if (res && !strcmp(res, ".cfi"))
*res = '\0';
}
#else
static inline void cleanup_symbol_name(char *s) {}
#endif
/*
* Lookup an address
* - modname is set to NULL if it's in the kernel.


@@ -4235,10 +4235,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
static void cfi_init(struct module *mod)
{
#ifdef CONFIG_CFI_CLANG
preempt_disable();
rcu_read_lock_sched();
mod->cfi_check =
(cfi_check_fn)mod_find_symname(mod, CFI_CHECK_FN_NAME);
preempt_enable();
rcu_read_unlock_sched();
cfi_module_add(mod, module_addr_min, module_addr_max);
#endif
}