@@ -528,6 +528,10 @@ struct sock {
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *sk_bpf_storage;
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ /* Inlined storage used by SK_STORAGE maps created with BPF_F_SHARED_LOCAL_STORAGE */
+ u8 bpf_shared_local_storage[CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE];
+#endif
#endif
struct rcu_head sk_rcu;
};
@@ -1210,6 +1210,12 @@ enum {
/* Create a map that is suitable to be an inner map with dynamic max entries */
BPF_F_INNER_MAP = (1U << 12),
+
+/* Use the CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE bytes inlined directly into
+ * struct sock instead of dynamically allocated local storage. The map's
+ * value_size must fit in that buffer. Ignored for non-SK_STORAGE maps.
+ */
+ BPF_F_SHARED_LOCAL_STORAGE = (1U << 13),
};
/* Flags for BPF_PROG_QUERY. */
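For illustration only, not part of the patch: with this uapi header in place, a libbpf-style BPF object could opt a socket storage map into the inlined buffer by setting the new flag in its map definition. The map and value type names below are made up; the value type must fit in CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE or map creation fails with -E2BIG (see the alloc_check hunk further down).

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical per-connection counters; must fit in the configured size. */
struct conn_stats {
	__u64 rx_bytes;
	__u64 tx_bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	/* SK_STORAGE maps always require BPF_F_NO_PREALLOC. */
	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_SHARED_LOCAL_STORAGE);
	__type(key, int);
	__type(value, struct conn_stats);
} conn_stats_map SEC(".maps");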
@@ -35,6 +35,17 @@ config BPF_SYSCALL
Enable the bpf() system call that allows to manipulate BPF programs
and maps via file descriptors.
+config BPF_SHARED_LOCAL_STORAGE_SIZE
+ int "BPF Socket Local Storage Optimization Buffer Size"
+ depends on BPF_SYSCALL
+ default 0
+ help
+ Number of bytes to pre-allocate in every struct sock for BPF socket
+ local storage maps created with BPF_F_SHARED_LOCAL_STORAGE. Such maps
+ read and write this inlined buffer directly instead of allocating
+ storage on first use, which is faster but costs memory in every
+ socket. Set to 0 (the default) to disable the feature.
+
config BPF_JIT
bool "Enable BPF Just In Time compiler"
depends on BPF
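Sizing note, again only a sketch: assuming a kernel built with CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE=64 (a made-up value), any value type used with BPF_F_SHARED_LOCAL_STORAGE has to fit in that per-socket buffer, which a BPF object can check at build time.

#include <linux/types.h>

/* Assumed to mirror the kernel's CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE=64. */
#define SHARED_SK_STORAGE_SIZE 64

struct conn_stats {
	__u64 rx_bytes;
	__u64 tx_bytes;
};

/* Oversized values would be rejected at map creation time with -E2BIG. */
_Static_assert(sizeof(struct conn_stats) <= SHARED_SK_STORAGE_SIZE,
	       "value must fit in CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE");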
@@ -12,7 +12,8 @@
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
-#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
+#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK \
+ (BPF_F_NO_PREALLOC | BPF_F_CLONE | BPF_F_SHARED_LOCAL_STORAGE)
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
@@ -92,6 +92,16 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap, NULL);
}
+static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
+{
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ if ((attr->map_flags & BPF_F_SHARED_LOCAL_STORAGE) &&
+     attr->value_size > CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE)
+ return -E2BIG;
+#endif
+ return bpf_local_storage_map_alloc_check(attr);
+}
+
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
struct bpf_local_storage_map *smap;
@@ -119,6 +129,14 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ if (map->map_flags & BPF_F_SHARED_LOCAL_STORAGE) {
+         void *data = sock->sk->bpf_shared_local_storage;
+
+         sockfd_put(sock);
+         return data;
+ }
+#endif
sdata = bpf_sk_storage_lookup(sock->sk, map, true);
sockfd_put(sock);
return sdata ? sdata->data : NULL;
@@ -137,6 +155,16 @@ static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ /* Test the map's creation flags, not the update flags argument. */
+ if (map->map_flags & BPF_F_SHARED_LOCAL_STORAGE) {
+         /* Only value_size bytes were copied in from userspace. */
+         memcpy(sock->sk->bpf_shared_local_storage, value,
+                map->value_size);
+         sockfd_put(sock);
+         return 0;
+ }
+#endif
sdata = bpf_local_storage_update(
sock->sk, (struct bpf_local_storage_map *)map, value,
map_flags);
@@ -155,6 +183,15 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ if (map->map_flags & BPF_F_SHARED_LOCAL_STORAGE) {
+         /* "Deleting" shared storage just clears the inlined buffer. */
+         memset(sock->sk->bpf_shared_local_storage, 0,
+                sizeof(sock->sk->bpf_shared_local_storage));
+         sockfd_put(sock);
+         return 0;
+ }
+#endif
err = bpf_sk_storage_del(sock->sk, map);
sockfd_put(sock);
return err;
@@ -261,6 +298,16 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL;
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ if (map->map_flags & BPF_F_SHARED_LOCAL_STORAGE) {
+         /* The inlined buffer always exists; an init value or F_CREATE is not supported. */
+         if (unlikely(value || (flags & BPF_SK_STORAGE_GET_F_CREATE)))
+                 return (unsigned long)NULL;
+
+         return (unsigned long)sk->bpf_shared_local_storage;
+ }
+#endif
+
sdata = bpf_sk_storage_lookup(sk, map, true);
if (sdata)
return (unsigned long)sdata->data;
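Another hedged sketch, reusing the hypothetical conn_stats_map definition (and its includes) from the earlier example: because the hunk above rejects an init value and BPF_SK_STORAGE_GET_F_CREATE for shared maps, a program simply asks for the pointer with NULL and 0 and writes through it.

SEC("sockops")
int count_tx(struct bpf_sock_ops *ctx)
{
	struct bpf_sock *sk = ctx->sk;
	struct conn_stats *stats;

	if (!sk)
		return 1;

	/* NULL init value and 0 flags: the inlined buffer always exists. */
	stats = bpf_sk_storage_get(&conn_stats_map, sk, NULL, 0);
	if (stats)
		stats->tx_bytes += 1;

	return 1;
}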
@@ -291,6 +338,14 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
if (!sk || !sk_fullsock(sk))
return -EINVAL;
+#if CONFIG_BPF_SHARED_LOCAL_STORAGE_SIZE > 0
+ if (map->map_flags & BPF_F_SHARED_LOCAL_STORAGE) {
+ memset(sk->bpf_shared_local_storage, 0,
+ sizeof(sk->bpf_shared_local_storage));
+ return 0;
+ }
+#endif
+
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
int err;
@@ -336,7 +391,7 @@ bpf_sk_storage_ptr(void *owner)
static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
- .map_alloc_check = bpf_local_storage_map_alloc_check,
+ .map_alloc_check = bpf_sk_storage_map_alloc_check,
.map_alloc = bpf_sk_storage_map_alloc,
.map_free = bpf_sk_storage_map_free,
.map_get_next_key = notsupp_get_next_key,
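Finally, a userspace sketch of the fd-based paths patched above, using the standard libbpf wrappers. map_fd is assumed to refer to an SK_STORAGE map created with BPF_F_SHARED_LOCAL_STORAGE, and struct conn_stats to match its value type; neither comes from the patch itself.

#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <bpf/bpf.h>

struct conn_stats {
	__u64 rx_bytes;
	__u64 tx_bytes;
};

static int touch_shared_storage(int map_fd)
{
	struct conn_stats val = { .rx_bytes = 1 };
	int sock_fd, err;

	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
	if (sock_fd < 0)
		return -errno;

	/* Writes straight into sk->bpf_shared_local_storage. */
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY);
	if (!err)
		/* Reads the same inlined buffer back. */
		err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (!err)
		/* Delete simply zeroes the inlined buffer. */
		err = bpf_map_delete_elem(map_fd, &sock_fd);

	close(sock_fd);
	return err;
}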