@@ -243,6 +243,18 @@ int bpf__unprobe(struct bpf_object *obj)
return ret;
}
+int bpf__load(struct bpf_object *obj)
+{
+ int err;
+
+ err = bpf_object__load(obj);
+ if (err) {
+ pr_debug("bpf: load objects failed\n");
+ return err;
+ }
+ return 0;
+}
+
#define bpf__strerror_head(err, buf, size) \
char sbuf[STRERR_BUFSIZE], *emsg;\
if (!size)\
@@ -275,3 +287,13 @@ int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
bpf__strerror_end(buf, size);
return 0;
}
+
+int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
+ int err, char *buf, size_t size)
+{
+ bpf__strerror_head(err, buf, size);
+ bpf__strerror_entry(EINVAL, "%s: Are you root and running a CONFIG_BPF_SYSCALL kernel?",
+ emsg)
+ bpf__strerror_end(buf, size);
+ return 0;
+}
@@ -23,6 +23,9 @@ int bpf__unprobe(struct bpf_object *obj);
int bpf__strerror_probe(struct bpf_object *obj, int err,
char *buf, size_t size);
+int bpf__load(struct bpf_object *obj);
+int bpf__strerror_load(struct bpf_object *obj, int err,
+ char *buf, size_t size);
#else
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused)
@@ -35,6 +38,7 @@ static inline void bpf__clear(void) { }
static inline int bpf__probe(struct bpf_object *obj __maybe_unused) { return 0;}
static inline int bpf__unprobe(struct bpf_object *obj __maybe_unused) { return 0;}
+static inline int bpf__load(struct bpf_object *obj __maybe_unused) { return 0; }
static inline int
__bpf_strerror(char *buf, size_t size)
@@ -55,5 +59,12 @@ bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
{
return __bpf_strerror(buf, size);
}
+
+static inline int bpf__strerror_load(struct bpf_object *obj __maybe_unused,
+ int err __maybe_unused,
+ char *buf, size_t size)
+{
+ return __bpf_strerror(buf, size);
+}
#endif
#endif
@@ -556,6 +556,12 @@ int parse_events_load_bpf_obj(struct parse_events_evlist *data,
registered_unprobe_atexit = true;
}
+ err = bpf__load(obj);
+ if (err) {
+ bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
+ goto errout;
+ }
+
/*
* Temporary add a dummy event here so we can check whether
* basic bpf loader works. Following patches will replace
This patch utilizes bpf_object__load() provided by libbpf to load all
objects into the kernel.

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David Ahern <dsahern@gmail.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kaixu Xia <xiakaixu@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: http://lkml.kernel.org/n/ebpf-6yw9eg0ej3l4jnqhinngkw86@git.kernel.org
---
 tools/perf/util/bpf-loader.c   | 22 ++++++++++++++++++++++
 tools/perf/util/bpf-loader.h   | 11 +++++++++++
 tools/perf/util/parse-events.c |  6 ++++++
 3 files changed, 39 insertions(+)
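
For quick reference (this is not a hunk of the patch), below is a minimal
sketch of how the new bpf__load()/bpf__strerror_load() pair is meant to be
driven, mirroring the parse-events.c hunk above. The helper name
load_and_report(), the header paths and the 128-byte buffer size are
illustrative assumptions, not part of this patch.

/*
 * Usage sketch only, not part of the patch. Assumes an obj already
 * returned by bpf__prepare_load() and probed with bpf__probe().
 */
#include "bpf-loader.h"
#include "debug.h"		/* pr_debug() */

static int load_and_report(struct bpf_object *obj)
{
	char errbuf[128];	/* arbitrary size for this sketch */
	int err;

	err = bpf__load(obj);	/* wraps libbpf's bpf_object__load() */
	if (err) {
		/*
		 * On EINVAL this yields the "Are you root and running a
		 * CONFIG_BPF_SYSCALL kernel?" hint from bpf__strerror_load().
		 */
		bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
		pr_debug("bpf: %s\n", errbuf);
		return err;
	}
	return 0;
}

Under the hood, bpf_object__load() pushes every program in the object into
the kernel through the bpf() syscall, which is why the EINVAL hint points at
root privileges and CONFIG_BPF_SYSCALL.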