--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4783,7 +4783,7 @@ union bpf_attr {
*
* u64 bpf_get_func_ip(void *ctx)
* Description
- * Get address of the traced function (for tracing programs).
+ * Get address of the traced function (for tracing and kprobe programs).
* Return
* Address of the traced function.
*/
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5979,6 +5979,8 @@ static bool has_get_func_ip(struct bpf_verifier_env *env)
return -ENOTSUPP;
}
return 0;
+ } else if (type == BPF_PROG_TYPE_KPROBE) {
+ return 0;
}
verbose(env, "func %s#%d not supported for program type %d\n",
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -961,6 +961,18 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
+{
+ return trace_current_kprobe_addr();
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
+ .func = bpf_get_func_ip_kprobe,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1092,6 +1104,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_override_return:
return &bpf_override_return_proto;
#endif
+ case BPF_FUNC_get_func_ip:
+ return &bpf_get_func_ip_proto_kprobe;
default:
return bpf_tracing_func_proto(func_id, prog);
}
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1570,6 +1570,18 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
}
#ifdef CONFIG_PERF_EVENTS
+/* Used by bpf get_func_ip helper */
+DEFINE_PER_CPU(u64, current_kprobe_addr) = 0;
+
+u64 trace_current_kprobe_addr(void)
+{
+ return *this_cpu_ptr(&current_kprobe_addr);
+}
+
+static void trace_current_kprobe_set(struct trace_kprobe *tk)
+{
+ __this_cpu_write(current_kprobe_addr, (u64) tk->rp.kp.addr);
+}
/* Kprobe profile handler */
static int
@@ -1585,6 +1597,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
unsigned long orig_ip = instruction_pointer(regs);
int ret;
+ trace_current_kprobe_set(tk);
ret = trace_call_bpf(call, regs);
/*
@@ -1631,8 +1644,11 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
int size, __size, dsize;
int rctx;
- if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
- return;
+ if (bpf_prog_array_valid(call)) {
+ trace_current_kprobe_set(tk);
+ if (!trace_call_bpf(call, regs))
+ return;
+ }
head = this_cpu_ptr(call->perf_events);
if (hlist_empty(head))
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -199,6 +199,7 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
#ifdef CONFIG_KPROBE_EVENTS
bool trace_kprobe_on_func_entry(struct trace_event_call *call);
bool trace_kprobe_error_injectable(struct trace_event_call *call);
+u64 trace_current_kprobe_addr(void);
#else
static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
@@ -209,6 +210,10 @@ static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
return false;
}
+static inline u64 trace_current_kprobe_addr(void)
+{
+ return 0;
+}
#endif /* CONFIG_KPROBE_EVENTS */
struct probe_arg {
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4783,7 +4783,7 @@ union bpf_attr {
*
* u64 bpf_get_func_ip(void *ctx)
* Description
- * Get address of the traced function (for tracing programs).
+ * Get address of the traced function (for tracing and kprobe programs).
* Return
* Address of the traced function.
*/
Adding bpf_get_func_ip helper for BPF_PROG_TYPE_KPROBE programs,
so it's now possible to call bpf_get_func_ip from both kprobe and
kretprobe programs.

Taking the caller's address from 'struct kprobe::addr', which is
defined for both kprobe and kretprobe.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 include/uapi/linux/bpf.h       |  2 +-
 kernel/bpf/verifier.c          |  2 ++
 kernel/trace/bpf_trace.c       | 14 ++++++++++++++
 kernel/trace/trace_kprobe.c    | 20 ++++++++++++++++++--
 kernel/trace/trace_probe.h     |  5 +++++
 tools/include/uapi/linux/bpf.h |  2 +-
 6 files changed, 41 insertions(+), 4 deletions(-)
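For reference, a minimal usage sketch (not part of the patch): a kprobe
and a kretprobe program reading the probed function's address via the
new helper. The target function do_unlinkat and all program names here
are arbitrary examples, and it assumes a libbpf whose generated helper
definitions include bpf_get_func_ip. Note the proto is gpl_only, so the
program needs a GPL-compatible license section.

  // SPDX-License-Identifier: GPL-2.0
  /* Usage sketch only; do_unlinkat is an arbitrary example target. */
  #include <linux/ptrace.h>
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  char LICENSE[] SEC("license") = "GPL";

  SEC("kprobe/do_unlinkat")
  int kprobe_func_ip(struct pt_regs *ctx)
  {
          /* entry probe: address of the probed function */
          __u64 addr = bpf_get_func_ip(ctx);

          bpf_printk("kprobe: func ip 0x%llx", addr);
          return 0;
  }

  SEC("kretprobe/do_unlinkat")
  int kretprobe_func_ip(struct pt_regs *ctx)
  {
          /* return probe: same address, taken from struct kprobe::addr */
          __u64 addr = bpf_get_func_ip(ctx);

          bpf_printk("kretprobe: func ip 0x%llx", addr);
          return 0;
  }

Both programs should report the same address, since the entry and
return probes share 'struct kprobe::addr' for the probed function.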