mirror of
https://github.com/LineageOS/android_kernel_fxtec_sm6115.git
synced 2026-04-03 09:33:24 +00:00
[ Upstream commit 9cf9aa7b0acfde7545c1a1d912576e9bab28dc6f ]
There is a critical race condition in kprobe initialization that can lead to
a NULL pointer dereference and kernel crash.
[1135630.084782] Unable to handle kernel paging request at virtual address 0000710a04630000
...
[1135630.260314] pstate: 404003c9 (nZcv DAIF +PAN -UAO)
[1135630.269239] pc : kprobe_perf_func+0x30/0x260
[1135630.277643] lr : kprobe_dispatcher+0x44/0x60
[1135630.286041] sp : ffffaeff4977fa40
[1135630.293441] x29: ffffaeff4977fa40 x28: ffffaf015340e400
[1135630.302837] x27: 0000000000000000 x26: 0000000000000000
[1135630.312257] x25: ffffaf029ed108a8 x24: ffffaf015340e528
[1135630.321705] x23: ffffaeff4977fc50 x22: ffffaeff4977fc50
[1135630.331154] x21: 0000000000000000 x20: ffffaeff4977fc50
[1135630.340586] x19: ffffaf015340e400 x18: 0000000000000000
[1135630.349985] x17: 0000000000000000 x16: 0000000000000000
[1135630.359285] x15: 0000000000000000 x14: 0000000000000000
[1135630.368445] x13: 0000000000000000 x12: 0000000000000000
[1135630.377473] x11: 0000000000000000 x10: 0000000000000000
[1135630.386411] x9 : 0000000000000000 x8 : 0000000000000000
[1135630.395252] x7 : 0000000000000000 x6 : 0000000000000000
[1135630.403963] x5 : 0000000000000000 x4 : 0000000000000000
[1135630.412545] x3 : 0000710a04630000 x2 : 0000000000000006
[1135630.421021] x1 : ffffaeff4977fc50 x0 : 0000710a04630000
[1135630.429410] Call trace:
[1135630.434828] kprobe_perf_func+0x30/0x260
[1135630.441661] kprobe_dispatcher+0x44/0x60
[1135630.448396] aggr_pre_handler+0x70/0xc8
[1135630.454959] kprobe_breakpoint_handler+0x140/0x1e0
[1135630.462435] brk_handler+0xbc/0xd8
[1135630.468437] do_debug_exception+0x84/0x138
[1135630.475074] el1_dbg+0x18/0x8c
[1135630.480582] security_file_permission+0x0/0xd0
[1135630.487426] vfs_write+0x70/0x1c0
[1135630.493059] ksys_write+0x5c/0xc8
[1135630.498638] __arm64_sys_write+0x24/0x30
[1135630.504821] el0_svc_common+0x78/0x130
[1135630.510838] el0_svc_handler+0x38/0x78
[1135630.516834] el0_svc+0x8/0x1b0
kernel/trace/trace_kprobe.c: 1308
0xffff3df8995039ec <kprobe_perf_func+0x2c>: ldr x21, [x24,#120]
include/linux/compiler.h: 294
0xffff3df8995039f0 <kprobe_perf_func+0x30>: ldr x1, [x21,x0]
kernel/trace/trace_kprobe.c
1308: head = this_cpu_ptr(call->perf_events);
1309: if (hlist_empty(head))
1310: return 0;
crash> struct trace_event_call -o
struct trace_event_call {
...
[120] struct hlist_head *perf_events; //(call->perf_event)
...
}
crash> struct trace_event_call ffffaf015340e528
struct trace_event_call {
...
perf_events = 0xffff0ad5fa89f088, //this value is correct, but x21 = 0
...
}
Race Condition Analysis:
The race occurs between kprobe activation and perf_events initialization:
CPU0 CPU1
==== ====
perf_kprobe_init
perf_trace_event_init
tp_event->perf_events = list;(1)
tp_event->class->reg (2)← KPROBE ACTIVE
Debug exception triggers
...
kprobe_dispatcher
kprobe_perf_func (tk->tp.flags & TP_FLAG_PROFILE)
head = this_cpu_ptr(call->perf_events)(3)
(perf_events is still NULL)
Problem:
1. CPU0 executes (1) assigning tp_event->perf_events = list
2. CPU0 executes (2) enabling kprobe functionality via class->reg()
3. CPU1 triggers and reaches kprobe_dispatcher
4. CPU1 checks TP_FLAG_PROFILE - condition passes (step 2 completed)
5. CPU1 calls kprobe_perf_func() and crashes at (3) because
call->perf_events is still NULL
CPU1 sees that kprobe functionality is enabled but does not see that
perf_events has been assigned.
Add pairing read and write memory barriers to guarantee that if CPU1
sees that kprobe functionality is enabled, it must also see that
perf_events has been assigned.
Link: https://lore.kernel.org/all/20251001022025.44626-1-chenyuan_fl@163.com/
Fixes: 50d7805607 ("tracing/kprobes: Add probe handler dispatcher to support perf and ftrace concurrent use")
Cc: stable@vger.kernel.org
Signed-off-by: Yuan Chen <chenyuan@kylinos.cn>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
[ Dropped ftrace changes + context ]
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[uli: backport to 4.19]
Signed-off-by: Ulrich Hecht <uli@kernel.org>
441 lines
12 KiB
C
441 lines
12 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Common header file for probe-based Dynamic events.
|
|
*
|
|
* This code was copied from kernel/trace/trace_kprobe.h written by
|
|
* Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
|
|
*
|
|
* Updates to make this generic:
|
|
* Copyright (C) IBM Corporation, 2010-2011
|
|
* Author: Srikar Dronamraju
|
|
*/
|
|
|
|
#include <linux/seq_file.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/tracefs.h>
|
|
#include <linux/types.h>
|
|
#include <linux/string.h>
|
|
#include <linux/ctype.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/perf_event.h>
|
|
#include <linux/kprobes.h>
|
|
#include <linux/stringify.h>
|
|
#include <linux/limits.h>
|
|
#include <linux/uaccess.h>
|
|
#include <asm/bitsperlong.h>
|
|
|
|
#include "trace.h"
|
|
#include "trace_output.h"
|
|
|
|
#define MAX_TRACE_ARGS 128
|
|
#define MAX_ARGSTR_LEN 63
|
|
#define MAX_STRING_SIZE PATH_MAX
|
|
|
|
/* Reserved field names */
|
|
#define FIELD_STRING_IP "__probe_ip"
|
|
#define FIELD_STRING_RETIP "__probe_ret_ip"
|
|
#define FIELD_STRING_FUNC "__probe_func"
|
|
|
|
#undef DEFINE_FIELD
|
|
#define DEFINE_FIELD(type, item, name, is_signed) \
|
|
do { \
|
|
ret = trace_define_field(event_call, #type, name, \
|
|
offsetof(typeof(field), item), \
|
|
sizeof(field.item), is_signed, \
|
|
FILTER_OTHER); \
|
|
if (ret) \
|
|
return ret; \
|
|
} while (0)
|
|
|
|
|
|
/* Flags for trace_probe */
|
|
#define TP_FLAG_TRACE 1
|
|
#define TP_FLAG_PROFILE 2
|
|
#define TP_FLAG_REGISTERED 4
|
|
|
|
|
|
/* data_rloc: data relative location, compatible with u32 */
|
|
#define make_data_rloc(len, roffs) \
|
|
(((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
|
|
#define get_rloc_len(dl) ((u32)(dl) >> 16)
|
|
#define get_rloc_offs(dl) ((u32)(dl) & 0xffff)
|
|
|
|
/*
|
|
* Convert data_rloc to data_loc:
|
|
* data_rloc stores the offset from data_rloc itself, but data_loc
|
|
* stores the offset from event entry.
|
|
*/
|
|
#define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs))
|
|
|
|
/* Resolve a data_rloc stored at @dl to the address of its payload. */
static nokprobe_inline void *get_rloc_data(u32 *dl)
{
	u32 offs = get_rloc_offs(*dl);

	return (u8 *)dl + offs;
}
|
|
|
|
/* For data_loc conversion */
|
|
static nokprobe_inline void *get_loc_data(u32 *dl, void *ent)
|
|
{
|
|
return (u8 *)ent + get_rloc_offs(*dl);
|
|
}
|
|
|
|
/* Data fetch function type */
|
|
typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
|
|
/* Printing function type */
|
|
typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *);
|
|
|
|
/* Fetch types */
|
|
enum {
|
|
FETCH_MTD_reg = 0,
|
|
FETCH_MTD_stack,
|
|
FETCH_MTD_retval,
|
|
FETCH_MTD_comm,
|
|
FETCH_MTD_memory,
|
|
FETCH_MTD_symbol,
|
|
FETCH_MTD_deref,
|
|
FETCH_MTD_bitfield,
|
|
FETCH_MTD_file_offset,
|
|
FETCH_MTD_END,
|
|
};
|
|
|
|
/* Fetch type information table: one entry per supported argument type */
struct fetch_type {
	const char		*name;		/* Name of type */
	size_t			size;		/* Byte size of type */
	int			is_signed;	/* Signed flag */
	print_type_func_t	print;		/* Print functions */
	const char		*fmt;		/* Format string */
	const char		*fmttype;	/* Name in format file */
	/* Fetch functions, indexed by FETCH_MTD_* method id */
	fetch_func_t		fetch[FETCH_MTD_END];
};
|
|
|
|
/* A bound fetch operation: function pointer plus its private data */
struct fetch_param {
	fetch_func_t	fn;	/* fetch method to invoke */
	void		*data;	/* opaque argument passed to fn */
};
|
|
|
|
/* For defining macros, define string/string_size types */
|
|
typedef u32 string;
|
|
typedef u32 string_size;
|
|
|
|
#define PRINT_TYPE_FUNC_NAME(type) print_type_##type
|
|
#define PRINT_TYPE_FMT_NAME(type) print_type_format_##type
|
|
|
|
/* Printing in basic type function template */
|
|
#define DECLARE_BASIC_PRINT_TYPE_FUNC(type) \
|
|
int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
|
|
void *data, void *ent); \
|
|
extern const char PRINT_TYPE_FMT_NAME(type)[]
|
|
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(u8);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(u16);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(u32);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(u64);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(s8);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(s16);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(s32);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(s64);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(x8);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(x16);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(x32);
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(x64);
|
|
|
|
DECLARE_BASIC_PRINT_TYPE_FUNC(string);
|
|
|
|
#define FETCH_FUNC_NAME(method, type) fetch_##method##_##type
|
|
|
|
/* Declare macro for basic types */
|
|
#define DECLARE_FETCH_FUNC(method, type) \
|
|
extern void FETCH_FUNC_NAME(method, type)(struct pt_regs *regs, \
|
|
void *data, void *dest)
|
|
|
|
#define DECLARE_BASIC_FETCH_FUNCS(method) \
|
|
DECLARE_FETCH_FUNC(method, u8); \
|
|
DECLARE_FETCH_FUNC(method, u16); \
|
|
DECLARE_FETCH_FUNC(method, u32); \
|
|
DECLARE_FETCH_FUNC(method, u64)
|
|
|
|
DECLARE_BASIC_FETCH_FUNCS(reg);
|
|
#define fetch_reg_string NULL
|
|
#define fetch_reg_string_size NULL
|
|
|
|
DECLARE_BASIC_FETCH_FUNCS(retval);
|
|
#define fetch_retval_string NULL
|
|
#define fetch_retval_string_size NULL
|
|
|
|
DECLARE_BASIC_FETCH_FUNCS(symbol);
|
|
DECLARE_FETCH_FUNC(symbol, string);
|
|
DECLARE_FETCH_FUNC(symbol, string_size);
|
|
|
|
DECLARE_BASIC_FETCH_FUNCS(deref);
|
|
DECLARE_FETCH_FUNC(deref, string);
|
|
DECLARE_FETCH_FUNC(deref, string_size);
|
|
|
|
DECLARE_BASIC_FETCH_FUNCS(bitfield);
|
|
#define fetch_bitfield_string NULL
|
|
#define fetch_bitfield_string_size NULL
|
|
|
|
/* comm only makes sense as a string */
|
|
#define fetch_comm_u8 NULL
|
|
#define fetch_comm_u16 NULL
|
|
#define fetch_comm_u32 NULL
|
|
#define fetch_comm_u64 NULL
|
|
DECLARE_FETCH_FUNC(comm, string);
|
|
DECLARE_FETCH_FUNC(comm, string_size);
|
|
|
|
/*
|
|
* Define macro for basic types - we don't need to define s* types, because
|
|
* we have to care only about bitwidth at recording time.
|
|
*/
|
|
#define DEFINE_BASIC_FETCH_FUNCS(method) \
|
|
DEFINE_FETCH_##method(u8) \
|
|
DEFINE_FETCH_##method(u16) \
|
|
DEFINE_FETCH_##method(u32) \
|
|
DEFINE_FETCH_##method(u64)
|
|
|
|
/* Default (unsigned long) fetch type */
|
|
#define __DEFAULT_FETCH_TYPE(t) x##t
|
|
#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
|
|
#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
|
|
#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
|
|
|
|
#define ASSIGN_FETCH_FUNC(method, type) \
|
|
[FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type)
|
|
|
|
#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \
|
|
{.name = _name, \
|
|
.size = _size, \
|
|
.is_signed = sign, \
|
|
.print = PRINT_TYPE_FUNC_NAME(ptype), \
|
|
.fmt = PRINT_TYPE_FMT_NAME(ptype), \
|
|
.fmttype = _fmttype, \
|
|
.fetch = { \
|
|
ASSIGN_FETCH_FUNC(reg, ftype), \
|
|
ASSIGN_FETCH_FUNC(stack, ftype), \
|
|
ASSIGN_FETCH_FUNC(retval, ftype), \
|
|
ASSIGN_FETCH_FUNC(comm, ftype), \
|
|
ASSIGN_FETCH_FUNC(memory, ftype), \
|
|
ASSIGN_FETCH_FUNC(symbol, ftype), \
|
|
ASSIGN_FETCH_FUNC(deref, ftype), \
|
|
ASSIGN_FETCH_FUNC(bitfield, ftype), \
|
|
ASSIGN_FETCH_FUNC(file_offset, ftype), \
|
|
} \
|
|
}
|
|
|
|
#define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \
|
|
__ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype)
|
|
|
|
/* If ptype is an alias of atype, use this macro (show atype in format) */
|
|
#define ASSIGN_FETCH_TYPE_ALIAS(ptype, atype, ftype, sign) \
|
|
__ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #atype)
|
|
|
|
#define ASSIGN_FETCH_TYPE_END {}
|
|
|
|
#define FETCH_TYPE_STRING 0
|
|
#define FETCH_TYPE_STRSIZE 1
|
|
|
|
#ifdef CONFIG_KPROBE_EVENTS
|
|
struct symbol_cache;
|
|
unsigned long update_symbol_cache(struct symbol_cache *sc);
|
|
void free_symbol_cache(struct symbol_cache *sc);
|
|
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
|
|
bool trace_kprobe_on_func_entry(struct trace_event_call *call);
|
|
bool trace_kprobe_error_injectable(struct trace_event_call *call);
|
|
#else
|
|
/* uprobes do not support symbol fetch methods */
|
|
#define fetch_symbol_u8 NULL
|
|
#define fetch_symbol_u16 NULL
|
|
#define fetch_symbol_u32 NULL
|
|
#define fetch_symbol_u64 NULL
|
|
#define fetch_symbol_string NULL
|
|
#define fetch_symbol_string_size NULL
|
|
|
|
/* Empty placeholder: uprobe events do not use a symbol cache. */
struct symbol_cache {
};
|
|
/* Stub for !CONFIG_KPROBE_EVENTS: no symbol cache, nothing to update. */
static inline unsigned long __used update_symbol_cache(struct symbol_cache *sc)
{
	return 0;
}
|
|
|
|
/* Stub for !CONFIG_KPROBE_EVENTS: no symbol cache, nothing to free. */
static inline void __used free_symbol_cache(struct symbol_cache *sc)
{
}
|
|
|
|
/* Stub for !CONFIG_KPROBE_EVENTS: symbol caches are never allocated. */
static inline struct symbol_cache * __used
alloc_symbol_cache(const char *sym, long offset)
{
	return NULL;
}
|
|
|
|
/* Stub for !CONFIG_KPROBE_EVENTS: no kprobe can sit on a function entry. */
static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	return false;
}
|
|
|
|
/* Stub for !CONFIG_KPROBE_EVENTS: no kprobe event is error-injectable. */
static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	return false;
}
|
|
#endif /* CONFIG_KPROBE_EVENTS */
|
|
|
|
/* Describes one argument of a probe event and how to fetch/print it */
struct probe_arg {
	struct fetch_param	fetch;		/* how to fetch the value */
	struct fetch_param	fetch_size;	/* set only for dynamic (string) args */
	unsigned int		offset;		/* Offset from argument entry */
	const char		*name;		/* Name of this argument */
	const char		*comm;		/* Command of this argument */
	const struct fetch_type	*type;		/* Type of this argument */
};
|
|
|
|
/*
 * Common state shared by kprobe- and uprobe-based dynamic events.
 *
 * flags holds the TP_FLAG_* bits; handler-side readers use
 * trace_probe_load_flag() (acquire) and enablers use
 * trace_probe_set_flag() (release) so that a handler which observes
 * an enabled flag also observes the initialization done before it was
 * set (see those helpers below).
 */
struct trace_probe {
	unsigned int			flags;	/* For TP_FLAG_* */
	struct trace_event_class	class;
	struct trace_event_call		call;
	struct list_head		files;	/* list of event_file_link */
	ssize_t				size;	/* trace entry size */
	unsigned int			nr_args;	/* number of entries in args[] */
	struct probe_arg		args[];	/* flexible array member */
};
|
|
|
|
/* Links one trace_event_file to the probe it is attached to */
struct event_file_link {
	struct trace_event_file	*file;	/* attached event file */
	struct list_head	list;	/* node in trace_probe::files */
};
|
|
|
|
/*
 * Read the probe flags with acquire semantics.
 *
 * Pairs with the smp_store_release() in trace_probe_set_flag(): a probe
 * handler that observes TP_FLAG_PROFILE/TP_FLAG_TRACE here is guaranteed
 * to also observe the initialization performed before the flag was set
 * (e.g. the call->perf_events assignment), closing the race where a
 * kprobe fires on another CPU while perf_events is still NULL.
 */
static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
{
	return smp_load_acquire(&tp->flags);
}
|
|
|
|
/*
 * Test whether @flag is set in @tp->flags, using the acquire-ordered
 * flag load so handler-side checks pair with trace_probe_set_flag().
 */
static inline bool trace_probe_test_flag(struct trace_probe *tp,
					 unsigned int flag)
{
	return (trace_probe_load_flag(tp) & flag) != 0;
}
|
|
|
|
/*
 * Set a TP_FLAG_* bit with release semantics.
 *
 * The smp_store_release() pairs with the smp_load_acquire() in
 * trace_probe_load_flag(): any initialization performed before this
 * store becomes visible to a CPU that subsequently observes the new
 * flag, so a probe handler cannot run against half-initialized state.
 */
static inline void trace_probe_set_flag(struct trace_probe *tp,
					unsigned int flag)
{
	smp_store_release(&tp->flags, tp->flags | flag);
}
|
|
|
|
/*
 * Clear a TP_FLAG_* bit.
 *
 * Plain (unordered) read-modify-write: no barrier is needed on the
 * disable side. NOTE(review): assumes callers serialize flag updates
 * externally (e.g. under event_mutex) -- confirm against call sites.
 */
static inline void trace_probe_clear_flag(struct trace_probe *tp,
					  unsigned int flag)
{
	tp->flags &= ~flag;
}
|
|
|
|
static inline bool trace_probe_is_enabled(struct trace_probe *tp)
|
|
{
|
|
return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
|
|
}
|
|
|
|
static inline bool trace_probe_is_registered(struct trace_probe *tp)
|
|
{
|
|
return !!(tp->flags & TP_FLAG_REGISTERED);
|
|
}
|
|
|
|
static nokprobe_inline void call_fetch(struct fetch_param *fprm,
|
|
struct pt_regs *regs, void *dest)
|
|
{
|
|
return fprm->fn(regs, fprm->data, dest);
|
|
}
|
|
|
|
/*
 * Check that @name is usable as an event/group/field name, i.e. it
 * matches the C-identifier pattern [_A-Za-z][_A-Za-z0-9]*.
 *
 * The characters are cast to unsigned char before being passed to the
 * ctype helpers so that a high-bit (negative) char cannot index the
 * classification table out of range (CERT STR37-C).
 *
 * Returns true for a valid name, false otherwise (including for an
 * empty string).
 */
static inline bool is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return false;
	while (*++name != '\0') {
		if (!isalpha((unsigned char)*name) &&
		    !isdigit((unsigned char)*name) && *name != '_')
			return false;
	}
	return true;
}
|
|
|
|
/*
 * Look up the event_file_link wrapping @file in @tp's list of attached
 * trace event files.
 *
 * Returns the matching link, or NULL if @file is not attached to @tp.
 * NOTE(review): the list walk is unlocked here; presumably the caller
 * holds whatever lock protects tp->files -- confirm at call sites.
 */
static inline struct event_file_link *
find_event_file_link(struct trace_probe *tp, struct trace_event_file *file)
{
	struct event_file_link *link;

	list_for_each_entry(link, &tp->files, list)
		if (link->file == file)
			return link;

	return NULL;
}
|
|
|
|
extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
|
|
struct probe_arg *parg, bool is_return, bool is_kprobe,
|
|
const struct fetch_type *ftbl);
|
|
|
|
extern int traceprobe_conflict_field_name(const char *name,
|
|
struct probe_arg *args, int narg);
|
|
|
|
extern void traceprobe_update_arg(struct probe_arg *arg);
|
|
extern void traceprobe_free_probe_arg(struct probe_arg *arg);
|
|
|
|
extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
|
|
|
|
/* Sum up total data length for dynamic arraies (strings) */
|
|
static nokprobe_inline int
|
|
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
|
|
{
|
|
int i, ret = 0;
|
|
u32 len;
|
|
|
|
for (i = 0; i < tp->nr_args; i++)
|
|
if (unlikely(tp->args[i].fetch_size.fn)) {
|
|
call_fetch(&tp->args[i].fetch_size, regs, &len);
|
|
ret += len;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Store the value of each probe argument into the trace entry buffer.
 *
 * @ent_size: size of the event entry header that precedes @data
 * @tp:       probe whose arguments are fetched
 * @regs:     registers captured at the probe hit
 * @data:     start of the argument area inside the entry
 * @maxlen:   space remaining after the fixed-size area for dynamic data
 *
 * Fixed-size arguments are fetched directly into their slot at
 * data + offset. Dynamic arguments (strings) first record a data_rloc
 * (max length + offset relative to the slot) in their slot, fetch the
 * payload into the area past the fixed-size region, then convert the
 * rloc in place to a data_loc relative to the event entry.
 */
static nokprobe_inline void
store_trace_args(int ent_size, struct trace_probe *tp, struct pt_regs *regs,
		 u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;	/* running offset of the next dynamic payload */
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				      ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
|
|
|
|
extern int set_print_fmt(struct trace_probe *tp, bool is_return);
|
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
extern struct trace_event_call *
|
|
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
|
|
bool is_return);
|
|
extern void destroy_local_trace_kprobe(struct trace_event_call *event_call);
|
|
|
|
extern struct trace_event_call *
|
|
create_local_trace_uprobe(char *name, unsigned long offs, bool is_return);
|
|
extern void destroy_local_trace_uprobe(struct trace_event_call *event_call);
|
|
#endif
|