@@ -427,12 +427,22 @@ program first::
prog_fd = bpf_program__fd(hid_skel->progs.attach_prog);
err = bpf_prog_test_run_opts(prog_fd, &tattrs);
- return err;
+ if (err)
+ return err;
+
+ return args.retval; /* the fd of the created bpf_link */
}
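For reference, the ``attach_prog`` entry point run above through
``bpf_prog_test_run_opts()`` is a plain ``SEC("syscall")`` program. A minimal
sketch, assuming an ``attach_prog_args`` context layout similar to the in-tree
examples (the field names here are illustrative, not mandated by this patch)::

  struct attach_prog_args {
	int prog_fd;
	unsigned int hid;
	int retval;
  };

  SEC("syscall")
  int attach_prog(struct attach_prog_args *ctx)
  {
	/* store the returned bpf_link fd (or error) where userspace
	 * reads it back as args.retval
	 */
	ctx->retval = hid_bpf_attach_prog(ctx->hid,
					  ctx->prog_fd,
					  0);
	return 0;
  }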
Our userspace program can now listen to notifications on the ring buffer, and
is awaken only when the value changes.
+When the userspace program no longer needs to listen to events, it can simply
+close the fd returned by :c:func:`attach_filter`, which tells the kernel to
+detach the program from the HID device.
+
+Of course, in other use cases, the userspace program can also pin the fd to the
+BPF filesystem through a call to :c:func:`bpf_obj_pin`, as with any bpf_link.
+
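As a rough illustration only (the ``attach_filter()`` arguments and the pin
path below are assumptions, not part of this patch), a program that wants the
attachment to outlive its own process can pin the link fd before closing it::

  int err, link_fd;

  link_fd = attach_filter(hid_skel, hid_id);
  if (link_fd < 0)
	return link_fd;

  /* the pin holds a reference on the bpf_link, so the program stays
   * attached to the HID device even after our fd is closed
   */
  err = bpf_obj_pin(link_fd, "/sys/fs/bpf/my_hid_filter");
  close(link_fd);
  if (err)
	return err;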
Controlling the device
----------------------
@@ -249,7 +249,9 @@ int hid_bpf_reconnect(struct hid_device *hdev)
* @prog_fd: an fd in the user process representing the program to attach
* @flags: any logical OR combination of &enum hid_bpf_attach_flags
*
- * @returns %0 on success, an error code otherwise.
+ * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
+ * Closing this fd will detach the program from the HID device (unless the bpf_link
+ * is pinned to the BPF filesystem).
*/
/* called from syscall */
noinline int
@@ -257,7 +259,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
struct hid_device *hdev;
struct device *dev;
- int err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
+ int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
if (!hid_bpf_ops)
return -EINVAL;
@@ -283,17 +285,19 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
return err;
}
- err = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
- if (err)
- return err;
+ fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
+ if (fd < 0)
+ return fd;
if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
err = hid_bpf_reconnect(hdev);
- if (err)
+ if (err) {
+ close_fd(fd);
return err;
+ }
}
- return 0;
+ return fd;
}
/**
@@ -322,16 +322,6 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
if (err)
goto out;
- /*
- * The program has been safely inserted, decrement the reference count
- * so it doesn't interfere with the number of actual user handles.
- * This is safe to do because:
- * - we overrite the put_ptr in the prog fd map
- * - we also have a cleanup function that monitors when a program gets
- * released and we manually do the cleanup in the prog fd map
- */
- bpf_prog_sub(prog, 1);
-
/* return the index */
err = index;
@@ -365,11 +355,43 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
return prog_type;
}
+static void hid_bpf_link_release(struct bpf_link *link)
+{
+ struct hid_bpf_link *hid_link =
+ container_of(link, struct hid_bpf_link, link);
+
+ __clear_bit(hid_link->index, jmp_table.enabled);
+ schedule_work(&release_work);
+}
+
+static void hid_bpf_link_dealloc(struct bpf_link *link)
+{
+ struct hid_bpf_link *hid_link =
+ container_of(link, struct hid_bpf_link, link);
+
+ kfree(hid_link);
+}
+
+static void hid_bpf_link_show_fdinfo(const struct bpf_link *link,
+ struct seq_file *seq)
+{
+ seq_printf(seq,
+ "attach_type:\tHID-BPF\n");
+}
+
+static const struct bpf_link_ops hid_bpf_link_lops = {
+ .release = hid_bpf_link_release,
+ .dealloc = hid_bpf_link_dealloc,
+ .show_fdinfo = hid_bpf_link_show_fdinfo,
+};
+
/* called from syscall */
noinline int
__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
int prog_fd, __u32 flags)
{
+ struct bpf_link_primer link_primer;
+ struct hid_bpf_link *link;
struct bpf_prog *prog = NULL;
struct hid_bpf_prog_entry *prog_entry;
int cnt, err = -EINVAL, prog_idx = -1;
@@ -381,23 +403,32 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
mutex_lock(&hid_bpf_attach_lock);
+ link = kzalloc(sizeof(*link), GFP_USER);
+ if (!link) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
+ &hid_bpf_link_lops, prog);
+
/* do not attach too many programs to a given HID device */
cnt = hid_bpf_program_count(hdev, NULL, prog_type);
if (cnt < 0) {
err = cnt;
- goto out_unlock;
+ goto err_unlock;
}
if (cnt >= hid_bpf_max_programs(prog_type)) {
err = -E2BIG;
- goto out_unlock;
+ goto err_unlock;
}
prog_idx = hid_bpf_insert_prog(prog_fd, prog);
/* if the jmp table is full, abort */
if (prog_idx < 0) {
err = prog_idx;
- goto out_unlock;
+ goto err_unlock;
}
if (flags & HID_BPF_FLAG_INSERT_HEAD) {
@@ -418,16 +449,26 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
/* finally store the index in the device list */
err = hid_bpf_populate_hdev(hdev, prog_type);
- if (err)
+ if (err) {
hid_bpf_release_prog_at(prog_idx);
+ goto err_unlock;
+ }
+
+ link->index = prog_idx;
+
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+ goto err_unlock;
- out_unlock:
mutex_unlock(&hid_bpf_attach_lock);
- /* we only use prog as a key in the various tables, so we don't need to actually
- * increment the ref count.
- */
+ return bpf_link_settle(&link_primer);
+
+ err_unlock:
+ mutex_unlock(&hid_bpf_attach_lock);
+
bpf_prog_put(prog);
+ kfree(link);
return err;
}
@@ -460,36 +501,10 @@ void __hid_bpf_destroy_device(struct hid_device *hdev)
void call_hid_bpf_prog_put_deferred(struct work_struct *work)
{
- struct bpf_prog_aux *aux;
- struct bpf_prog *prog;
- bool found = false;
- int i;
-
- aux = container_of(work, struct bpf_prog_aux, work);
- prog = aux->prog;
-
- /* we don't need locking here because the entries in the progs table
- * are stable:
- * if there are other users (and the progs entries might change), we
- * would simply not have been called.
- */
- for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
- if (jmp_table.progs[i] == prog) {
- __clear_bit(i, jmp_table.enabled);
- found = true;
- }
- }
-
- if (found)
- /* schedule release of all detached progs */
- schedule_work(&release_work);
-}
-
-static void hid_bpf_prog_fd_array_put_ptr(void *ptr)
-{
+	/* kept around for patch readability, to be dropped in the next commit */
}
-#define HID_BPF_PROGS_COUNT 2
+#define HID_BPF_PROGS_COUNT 1
static struct bpf_link *links[HID_BPF_PROGS_COUNT];
static struct entrypoints_bpf *skel;
@@ -528,8 +543,6 @@ void hid_bpf_free_links_and_skel(void)
idx++; \
} while (0)
-static struct bpf_map_ops hid_bpf_prog_fd_maps_ops;
-
int hid_bpf_preload_skel(void)
{
int err, idx = 0;
@@ -548,14 +561,7 @@ int hid_bpf_preload_skel(void)
goto out;
}
- /* our jump table is stealing refs, so we should not decrement on removal of elements */
- hid_bpf_prog_fd_maps_ops = *jmp_table.map->ops;
- hid_bpf_prog_fd_maps_ops.map_fd_put_ptr = hid_bpf_prog_fd_array_put_ptr;
-
- jmp_table.map->ops = &hid_bpf_prog_fd_maps_ops;
-
ATTACH_AND_STORE_LINK(hid_tail_call);
- ATTACH_AND_STORE_LINK(hid_bpf_prog_put_deferred);
return 0;
out:
@@ -3,6 +3,7 @@
#ifndef __HID_BPF_H
#define __HID_BPF_H
+#include <linux/bpf.h>
#include <linux/spinlock.h>
#include <uapi/linux/hid.h>
@@ -138,6 +139,12 @@ struct hid_bpf {
spinlock_t progs_lock; /* protects RCU update of progs */
};
+/* specific HID-BPF link when a program is attached to a device */
+struct hid_bpf_link {
+ struct bpf_link link;
+ int index;
+};
+
#ifdef CONFIG_HID_BPF
u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
u32 *size, int interrupt);
Previously, HID-BPF was relying on a bpf tracing program to be notified when a program was released from userspace. This is error prone, as LLVM sometimes inline the function and sometimes not. So instead of messing up with the bpf prog ref count, we can use the bpf_link concept which actually matches exactly what we want: - a bpf_link represents the fact that a given program is attached to a given HID device - as long as the bpf_link has fd opened (either by the userspace program still being around or by pinning the bpf object in the bpffs), the program stays attached to the HID device - once every user has closed the fd, we get called by hid_bpf_link_release() that we no longer have any users, and we can disconnect the program to the device in 2 passes: first atomically clear the bit saying that the link is active, and then calling release_work in a scheduled work item. This solves entirely the problems of BPF tracing not showing up and is definitely cleaner. Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> --- Documentation/hid/hid-bpf.rst | 12 ++- drivers/hid/bpf/hid_bpf_dispatch.c | 18 +++-- drivers/hid/bpf/hid_bpf_jmp_table.c | 116 +++++++++++++++------------- include/linux/hid_bpf.h | 7 ++ 4 files changed, 90 insertions(+), 63 deletions(-)