Message ID | 20200116052511.8557-6-honnappa.nagarahalli@arm.com |
---|---|
State | Superseded |
Series | lib/ring: APIs to support custom element size |
On Thu, Jan 16, 2020 at 6:25 AM Honnappa Nagarahalli
<honnappa.nagarahalli@arm.com> wrote:
>
> The freelist and external bucket indices are 32b. Using rings
> that use 32b element sizes will save memory.
>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> ---
>  lib/librte_hash/rte_cuckoo_hash.c | 94 ++++++++++++++++---------------
>  lib/librte_hash/rte_cuckoo_hash.h |  2 +-
>  2 files changed, 50 insertions(+), 46 deletions(-)
> [snip]
> diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
> index fb19bb27d..345de6bf9 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.h
> +++ b/lib/librte_hash/rte_cuckoo_hash.h
> @@ -124,7 +124,7 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
>
>  struct lcore_cache {
>          unsigned len; /**< Cache len */
> -        void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
> +        uint32_t objs[LCORE_CACHE_SIZE]; /**< Cache objects */

This triggers a warning in ABI checks:

1 function with some indirect sub-type change:

  [C]'function int32_t rte_hash_add_key(const rte_hash*, void*)' at rte_cuckoo_hash.c:1118:1 has some indirect sub-type changes:
    parameter 1 of type 'const rte_hash*' has sub-type changes:
      in pointed to type 'const rte_hash':
        in unqualified underlying type 'struct rte_hash' at rte_cuckoo_hash.h:160:1:
          type size hasn't changed
          1 data member change:
            type of 'lcore_cache* rte_hash::local_free_slots' changed:
              in pointed to type 'struct lcore_cache' at rte_cuckoo_hash.h:125:1:
                type size changed from 4608 to 2560 (in bits)
                1 data member change:
                  type of 'void* lcore_cache::objs[64]' changed:
                    array element type 'void*' changed:
                      entity changed from 'void*' to 'typedef uint32_t' at stdint-uintn.h:26:1
                      type size changed from 64 to 32 (in bits)
                      type name changed from 'void*[64]' to 'uint32_t[64]'
                    array type size changed from 4096 to 2048
                    and offset changed from 64 to 32 (in bits) (by -32 bits)

As far as I can see, the local_free_slots field in rte_hash is supposed
to be internal and should just be hidden from users.

lib/librte_hash/rte_cuckoo_hash.c:  h->local_free_slots = rte_zmalloc_socket(NULL,
lib/librte_hash/rte_cuckoo_hash.c:  rte_free(h->local_free_slots);
lib/librte_hash/rte_cuckoo_hash.c:  cached_cnt += h->local_free_slots[i].len;
lib/librte_hash/rte_cuckoo_hash.c:  h->local_free_slots[i].len = 0;
lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
lib/librte_hash/rte_cuckoo_hash.h:  struct lcore_cache *local_free_slots;

Not sure how users could make use of this.
But the abi check flags this as a breakage since this type was exported.

I can see three options:
- we stick to our "no abi breakage" policy, this change is postponed to
  the next ABI breakage, and at the same time, we hide this type and
  inspect the rest of the rte_hash API to avoid new issues in the future,
- we duplicate structures and API by using function versioning to keep
  the exact rte_hash v20.0 ABI and a v20.0.1 ABI with the resized and
  cleaned structures,
- we override the ABI freeze here by ruling that this was an internal
  structure that users should not access (ugh..)

Seeing how this is an optimisation, my preference goes to the first option.


--
David Marchand
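For context on the numbers in the report: assuming 64-bit pointers, a 64-byte cache line, and LCORE_CACHE_SIZE of 64 (which is what the abidiff output shows), the objs array shrinks from 64 * 8 B = 512 B (4096 bits) to 64 * 4 B = 256 B (2048 bits), and the cache-line-aligned struct lcore_cache (len plus objs, rounded up to whole cache lines) goes from 576 B (4608 bits) to 320 B (2560 bits), matching the reported sizes.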
<snip>

> On Thu, Jan 16, 2020 at 6:25 AM Honnappa Nagarahalli
> <honnappa.nagarahalli@arm.com> wrote:
> >
> > The freelist and external bucket indices are 32b. Using rings that use
> > 32b element sizes will save memory.
> >
> > Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > ---
> >  lib/librte_hash/rte_cuckoo_hash.c | 94 ++++++++++++++++---------------
> >  lib/librte_hash/rte_cuckoo_hash.h |  2 +-
> >  2 files changed, 50 insertions(+), 46 deletions(-)
> >
> [snip]
> >
> > diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
> > index fb19bb27d..345de6bf9 100644
> > --- a/lib/librte_hash/rte_cuckoo_hash.h
> > +++ b/lib/librte_hash/rte_cuckoo_hash.h
> > @@ -124,7 +124,7 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
> >
> >  struct lcore_cache {
> >          unsigned len; /**< Cache len */
> > -        void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
> > +        uint32_t objs[LCORE_CACHE_SIZE]; /**< Cache objects */
>
> This triggers a warning in ABI checks:
>
> 1 function with some indirect sub-type change:
>
>   [C]'function int32_t rte_hash_add_key(const rte_hash*, void*)' at rte_cuckoo_hash.c:1118:1 has some indirect sub-type changes:
>     parameter 1 of type 'const rte_hash*' has sub-type changes:
>       in pointed to type 'const rte_hash':
>         in unqualified underlying type 'struct rte_hash' at rte_cuckoo_hash.h:160:1:
>           type size hasn't changed
>           1 data member change:
>             type of 'lcore_cache* rte_hash::local_free_slots' changed:
>               in pointed to type 'struct lcore_cache' at rte_cuckoo_hash.h:125:1:
>                 type size changed from 4608 to 2560 (in bits)
>                 1 data member change:
>                   type of 'void* lcore_cache::objs[64]' changed:
>                     array element type 'void*' changed:
>                       entity changed from 'void*' to 'typedef uint32_t' at stdint-uintn.h:26:1
>                       type size changed from 64 to 32 (in bits)
>                       type name changed from 'void*[64]' to 'uint32_t[64]'
>                     array type size changed from 4096 to 2048
>                     and offset changed from 64 to 32 (in bits) (by -32 bits)
>
> As far as I can see, the local_free_slots field in rte_hash is supposed to be
> internal and should just be hidden from users.
>
> lib/librte_hash/rte_cuckoo_hash.c:  h->local_free_slots = rte_zmalloc_socket(NULL,
> lib/librte_hash/rte_cuckoo_hash.c:  rte_free(h->local_free_slots);
> lib/librte_hash/rte_cuckoo_hash.c:  cached_cnt += h->local_free_slots[i].len;
> lib/librte_hash/rte_cuckoo_hash.c:  h->local_free_slots[i].len = 0;
> lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
> lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
> lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
> lib/librte_hash/rte_cuckoo_hash.h:  struct lcore_cache *local_free_slots;
>
> Not sure how users could make use of this.
> But the abi check flags this as a breakage since this type was exported.

I think this is a false positive.

Users include the 'rte_hash.h' file, which does not define the structure; it only has the forward declaration 'struct rte_hash'. The actual structure is defined in 'rte_cuckoo_hash.h', which is not included by the user. So, the application does not have visibility into 'struct rte_hash' as defined in 'rte_cuckoo_hash.h'.

The 'rte_hash_create' API returns a pointer to the 'struct rte_hash', and all the APIs are non-inline and just take this pointer as the argument. So, the 'struct rte_hash' as defined in 'rte_cuckoo_hash.h' is not used by the user. You can take a look at the 'check_bucket' function in test_hash_readwrite_lf.c; it is written the way it is precisely because the test case cannot access 'struct rte_hash' from 'rte_cuckoo_hash.h'.

> I can see three options:
> - we stick to our "no abi breakage" policy, this change is postponed to the next
>   ABI breakage, and at the same time, we hide this type and inspect the rest of
>   the rte_hash API to avoid new issues in the future,
> - we duplicate structures and API by using function versioning to keep the
>   exact rte_hash v20.0 ABI and a v20.0.1 ABI with the resized and cleaned
>   structures,
> - we override the ABI freeze here by ruling that this was an internal structure
>   that users should not access (ugh..)
>
> Seeing how this is an optimisation, my preference goes to the first option.
>
> --
> David Marchand
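To make the layering concrete, here is a minimal sketch of the public/private header split described above. It is not the actual DPDK headers: the field list is trimmed and illustrative, and only the LCORE_CACHE_SIZE value (64) and the local_free_slots member are taken from the report in this thread. Because applications only ever hold the opaque pointer and call non-inline functions, resizing struct lcore_cache changes nothing they can observe.

    #include <stdint.h>

    /* ---- public header (cf. rte_hash.h): what applications include ---- */
    struct rte_hash;              /* opaque handle: layout not exposed here */
    struct rte_hash_parameters;   /* defined in the public header in DPDK */

    struct rte_hash *rte_hash_create(const struct rte_hash_parameters *params);
    int32_t rte_hash_add_key(const struct rte_hash *h, const void *key);

    /* ---- private header (cf. rte_cuckoo_hash.h): seen only by the library ---- */
    #define LCORE_CACHE_SIZE 64   /* value matching the abidiff report above */

    struct lcore_cache {
            unsigned int len;                /* number of cached free slots */
            uint32_t objs[LCORE_CACHE_SIZE]; /* 32b slot indices after this patch */
    };                                       /* __rte_cache_aligned in the real header */

    struct rte_hash {                        /* heavily trimmed, fields illustrative */
            /* ... */
            struct lcore_cache *local_free_slots; /* internal per-lcore free-slot cache */
            /* ... */
    };

With this split, an ABI checker only reports a user-visible break if it is pointed at the public headers; the change to the private layout is invisible to applications.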
On Fri, Jan 17, 2020 at 9:54 PM Honnappa Nagarahalli
<Honnappa.Nagarahalli@arm.com> wrote:
>
> <snip>
>
> > On Thu, Jan 16, 2020 at 6:25 AM Honnappa Nagarahalli
> > <honnappa.nagarahalli@arm.com> wrote:
> > >
> > > The freelist and external bucket indices are 32b. Using rings that use
> > > 32b element sizes will save memory.
> > >
> > > Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > > Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> > > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > > ---
> > >  lib/librte_hash/rte_cuckoo_hash.c | 94 ++++++++++++++++---------------
> > >  lib/librte_hash/rte_cuckoo_hash.h |  2 +-
> > >  2 files changed, 50 insertions(+), 46 deletions(-)
> > >
> > [snip]
> > >
> > > diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
> > > index fb19bb27d..345de6bf9 100644
> > > --- a/lib/librte_hash/rte_cuckoo_hash.h
> > > +++ b/lib/librte_hash/rte_cuckoo_hash.h
> > > @@ -124,7 +124,7 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
> > >
> > >  struct lcore_cache {
> > >          unsigned len; /**< Cache len */
> > > -        void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
> > > +        uint32_t objs[LCORE_CACHE_SIZE]; /**< Cache objects */
> >
> > This triggers a warning in ABI checks:
> >
> > 1 function with some indirect sub-type change:
> >
> >   [C]'function int32_t rte_hash_add_key(const rte_hash*, void*)' at rte_cuckoo_hash.c:1118:1 has some indirect sub-type changes:
> >     parameter 1 of type 'const rte_hash*' has sub-type changes:
> >       in pointed to type 'const rte_hash':
> >         in unqualified underlying type 'struct rte_hash' at rte_cuckoo_hash.h:160:1:
> >           type size hasn't changed
> >           1 data member change:
> >             type of 'lcore_cache* rte_hash::local_free_slots' changed:
> >               in pointed to type 'struct lcore_cache' at rte_cuckoo_hash.h:125:1:
> >                 type size changed from 4608 to 2560 (in bits)
> >                 1 data member change:
> >                   type of 'void* lcore_cache::objs[64]' changed:
> >                     array element type 'void*' changed:
> >                       entity changed from 'void*' to 'typedef uint32_t' at stdint-uintn.h:26:1
> >                       type size changed from 64 to 32 (in bits)
> >                       type name changed from 'void*[64]' to 'uint32_t[64]'
> >                     array type size changed from 4096 to 2048
> >                     and offset changed from 64 to 32 (in bits) (by -32 bits)
> >
> > As far as I can see, the local_free_slots field in rte_hash is supposed to be
> > internal and should just be hidden from users.
> >
> > lib/librte_hash/rte_cuckoo_hash.c:  h->local_free_slots = rte_zmalloc_socket(NULL,
> > lib/librte_hash/rte_cuckoo_hash.c:  rte_free(h->local_free_slots);
> > lib/librte_hash/rte_cuckoo_hash.c:  cached_cnt += h->local_free_slots[i].len;
> > lib/librte_hash/rte_cuckoo_hash.c:  h->local_free_slots[i].len = 0;
> > lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
> > lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
> > lib/librte_hash/rte_cuckoo_hash.c:  cached_free_slots = &h->local_free_slots[lcore_id];
> > lib/librte_hash/rte_cuckoo_hash.h:  struct lcore_cache *local_free_slots;
> >
> > Not sure how users could make use of this.
> > But the abi check flags this as a breakage since this type was exported.
>
> I think this is a false positive.
>
> Users include 'rte_hash.h' file which does not define the structure. It just has the declaration 'struct rte_hash'. The actual structure is defined in 'rte_cuckoo_hash.h'. But this is not included by the user. So, the application does not have visibility into 'struct rte_hash' as defined in 'rte_cuckoo_hash.h'.
>
> The 'rte_hash_create' API returns a pointer to the 'struct rte_hash'. All the APIs are non-inline and just take this pointer as the argument. So, the 'struct rte_hash' as defined in 'rte_cuckoo_hash.h' is not used by the user.

Indeed, it seems properly hidden.
Scratching the rest of the mail.

Looked at abidiff, I can see it takes a public headers directory to
filter the ABI changes.
Need to make this work.


--
David Marchand
>-----Original Message-----
>> > Not sure how users could make use of this.
>> > But the abi check flags this as a breakage since this type was exported.
>> I think this is a false positive.
>>
>> Users include 'rte_hash.h' file which does not define the structure. It just has the declaration 'struct rte_hash'. The actual structure is
>> defined in 'rte_cuckoo_hash.h'. But this is not included by the user. So, the application does not have visibility into 'struct rte_hash' as
>> defined in 'rte_cuckoo_hash.h'.
>>
>> The 'rte_hash_create' API returns a pointer to the 'struct rte_hash'. All the APIs are non-inline and just take this pointer as the
>> argument. So, the 'struct rte_hash' as defined in 'rte_cuckoo_hash.h' is not used by the user.
>
>Indeed, it seems properly hidden.
>Scratching the rest of the mail.
>
>Looked at abidiff, I can see it takes a public headers directory to
>filter the ABI changes.
>Need to make this work.
>
>
>--
>David Marchand

[Wang, Yipeng]
Hi, Honnappa,

I read the new API definitions, and this patch to rte_hash looks good to me. It passed the unit tests as well.
I also agree with you that the internals of rte_hash are hidden from users.
As long as the false warning from the ABI check script is not a concern:

Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 87a4c01f2..6c292b6f8 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -24,7 +24,7 @@
 #include <rte_cpuflags.h>
 #include <rte_rwlock.h>
 #include <rte_spinlock.h>
-#include <rte_ring.h>
+#include <rte_ring_elem.h>
 #include <rte_compat.h>
 #include <rte_vect.h>
 #include <rte_tailq.h>
@@ -136,7 +136,6 @@ rte_hash_create(const struct rte_hash_parameters *params)
         char ring_name[RTE_RING_NAMESIZE];
         char ext_ring_name[RTE_RING_NAMESIZE];
         unsigned num_key_slots;
-        unsigned i;
         unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
         unsigned int ext_table_support = 0;
         unsigned int readwrite_concur_support = 0;
@@ -145,6 +144,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
         uint32_t *ext_bkt_to_free = NULL;
         uint32_t *tbl_chng_cnt = NULL;
         unsigned int readwrite_concur_lf_support = 0;
+        uint32_t i;
 
         rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
 
@@ -213,8 +213,8 @@ rte_hash_create(const struct rte_hash_parameters *params)
 
         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
         /* Create ring (Dummy slot index is not enqueued) */
-        r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
-                        params->socket_id, 0);
+        r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
+                        rte_align32pow2(num_key_slots), params->socket_id, 0);
         if (r == NULL) {
                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
                 goto err;
@@ -227,7 +227,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 
         if (ext_table_support) {
                 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
                                                         params->name);
-                r_ext = rte_ring_create(ext_ring_name,
+                r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
                                 rte_align32pow2(num_buckets + 1),
                                 params->socket_id, 0);
@@ -295,7 +295,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
          * for next bucket
          */
         for (i = 1; i <= num_buckets; i++)
-                rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
+                rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));
 
         if (readwrite_concur_lf_support) {
                 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
@@ -434,7 +434,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 
         /* Populate free slots ring. Entry zero is reserved for key misses. */
         for (i = 1; i < num_key_slots; i++)
-                rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
+                rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));
 
         te->data = (void *) h;
         TAILQ_INSERT_TAIL(hash_list, te, next);
@@ -598,13 +598,13 @@ rte_hash_reset(struct rte_hash *h)
                 tot_ring_cnt = h->entries;
 
         for (i = 1; i < tot_ring_cnt + 1; i++)
-                rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
+                rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));
 
         /* Repopulate the free ext bkt ring. */
         if (h->ext_table_support) {
                 for (i = 1; i <= h->num_buckets; i++)
-                        rte_ring_sp_enqueue(h->free_ext_bkts,
-                                                (void *)((uintptr_t) i));
+                        rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
+                                                        sizeof(uint32_t));
         }
 
         if (h->use_local_cache) {
@@ -623,13 +623,14 @@ rte_hash_reset(struct rte_hash *h)
 static inline void
 enqueue_slot_back(const struct rte_hash *h,
                 struct lcore_cache *cached_free_slots,
-                void *slot_id)
+                uint32_t slot_id)
 {
         if (h->use_local_cache) {
                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
                 cached_free_slots->len++;
         } else
-                rte_ring_sp_enqueue(h->free_slots, slot_id);
+                rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
+                                                sizeof(uint32_t));
 }
 
 /* Search a key from bucket and update its data.
@@ -923,9 +924,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
         uint32_t prim_bucket_idx, sec_bucket_idx;
         struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
         struct rte_hash_key *new_k, *keys = h->key_store;
-        void *slot_id = NULL;
-        void *ext_bkt_id = NULL;
-        uint32_t new_idx, bkt_id;
+        uint32_t slot_id;
+        uint32_t ext_bkt_id;
         int ret;
         unsigned n_slots;
         unsigned lcore_id;
@@ -968,8 +968,9 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                 /* Try to get a free slot from the local cache */
                 if (cached_free_slots->len == 0) {
                         /* Need to get another burst of free slots from global ring */
-                        n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
+                        n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
                                         cached_free_slots->objs,
+                                        sizeof(uint32_t),
                                         LCORE_CACHE_SIZE, NULL);
                         if (n_slots == 0) {
                                 return -ENOSPC;
@@ -982,13 +983,13 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                 cached_free_slots->len--;
                 slot_id = cached_free_slots->objs[cached_free_slots->len];
         } else {
-                if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
+                if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
+                                                sizeof(uint32_t)) != 0) {
                         return -ENOSPC;
                 }
         }
 
-        new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
-        new_idx = (uint32_t)((uintptr_t) slot_id);
+        new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
         /* The store to application data (by the application) at *data should
          * not leak after the store of pdata in the key store. i.e. pdata is
          * the guard variable. Release the application data to the readers.
@@ -1001,9 +1002,9 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
 
         /* Find an empty slot and insert */
         ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
-                                        short_sig, new_idx, &ret_val);
+                                        short_sig, slot_id, &ret_val);
         if (ret == 0)
-                return new_idx - 1;
+                return slot_id - 1;
         else if (ret == 1) {
                 enqueue_slot_back(h, cached_free_slots, slot_id);
                 return ret_val;
@@ -1011,9 +1012,9 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
 
         /* Primary bucket full, need to make space for new entry */
         ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
-                                short_sig, prim_bucket_idx, new_idx, &ret_val);
+                                short_sig, prim_bucket_idx, slot_id, &ret_val);
         if (ret == 0)
-                return new_idx - 1;
+                return slot_id - 1;
         else if (ret == 1) {
                 enqueue_slot_back(h, cached_free_slots, slot_id);
                 return ret_val;
@@ -1021,10 +1022,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
 
         /* Also search secondary bucket to get better occupancy */
         ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
-                                short_sig, sec_bucket_idx, new_idx, &ret_val);
 
+                                short_sig, sec_bucket_idx, slot_id, &ret_val);
         if (ret == 0)
-                return new_idx - 1;
+                return slot_id - 1;
         else if (ret == 1) {
                 enqueue_slot_back(h, cached_free_slots, slot_id);
                 return ret_val;
@@ -1067,10 +1068,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                                  * and key.
                                  */
                                 __atomic_store_n(&cur_bkt->key_idx[i],
-                                                 new_idx,
+                                                 slot_id,
                                                  __ATOMIC_RELEASE);
                                 __hash_rw_writer_unlock(h);
-                                return new_idx - 1;
+                                return slot_id - 1;
                         }
                 }
         }
@@ -1078,26 +1079,26 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
         /* Failed to get an empty entry from extendable buckets. Link a new
          * extendable bucket. We first get a free bucket from ring.
          */
-        if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
+        if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
+                                                sizeof(uint32_t)) != 0) {
                 ret = -ENOSPC;
                 goto failure;
         }
 
-        bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
         /* Use the first location of the new bucket */
-        (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
+        (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
         /* Store to signature and key should not leak after
          * the store to key_idx. i.e. key_idx is the guard variable
          * for signature and key.
          */
-        __atomic_store_n(&(h->buckets_ext[bkt_id]).key_idx[0],
-                         new_idx,
+        __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
+                         slot_id,
                          __ATOMIC_RELEASE);
         /* Link the new bucket to sec bucket linked list */
         last = rte_hash_get_last_bkt(sec_bkt);
-        last->next = &h->buckets_ext[bkt_id];
+        last->next = &h->buckets_ext[ext_bkt_id - 1];
         __hash_rw_writer_unlock(h);
-        return new_idx - 1;
+        return slot_id - 1;
 
 failure:
         __hash_rw_writer_unlock(h);
@@ -1373,8 +1374,9 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
                 /* Cache full, need to free it. */
                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
                         /* Need to enqueue the free slots in global ring. */
-                        n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
+                        n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
                                                 cached_free_slots->objs,
+                                                sizeof(uint32_t),
                                                 LCORE_CACHE_SIZE, NULL);
                         ERR_IF_TRUE((n_slots == 0),
                                 "%s: could not enqueue free slots in global ring\n",
@@ -1383,11 +1385,11 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
                 }
                 /* Put index of new free slot in cache. */
                 cached_free_slots->objs[cached_free_slots->len] =
-                                (void *)((uintptr_t)bkt->key_idx[i]);
+                                bkt->key_idx[i];
                 cached_free_slots->len++;
         } else {
-                rte_ring_sp_enqueue(h->free_slots,
-                                (void *)((uintptr_t)bkt->key_idx[i]));
+                rte_ring_sp_enqueue_elem(h->free_slots,
+                                &bkt->key_idx[i], sizeof(uint32_t));
         }
 }
 
@@ -1551,7 +1553,8 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
                          */
                         h->ext_bkt_to_free[ret] = index;
                 else
-                        rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
+                        rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
+                                                        sizeof(uint32_t));
         }
         __hash_rw_writer_unlock(h);
         return ret;
@@ -1614,7 +1617,8 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
                 uint32_t index = h->ext_bkt_to_free[position];
                 if (index) {
                         /* Recycle empty ext bkt to free list. */
-                        rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
+                        rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
+                                                        sizeof(uint32_t));
                         h->ext_bkt_to_free[position] = 0;
                 }
         }
@@ -1625,19 +1629,19 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
                 /* Cache full, need to free it. */
                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
                         /* Need to enqueue the free slots in global ring. */
-                        n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
+                        n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
                                                 cached_free_slots->objs,
+                                                sizeof(uint32_t),
                                                 LCORE_CACHE_SIZE, NULL);
                         RETURN_IF_TRUE((n_slots == 0), -EFAULT);
                         cached_free_slots->len -= n_slots;
                 }
                 /* Put index of new free slot in cache.
                  */
-                cached_free_slots->objs[cached_free_slots->len] =
-                                                (void *)((uintptr_t)key_idx);
+                cached_free_slots->objs[cached_free_slots->len] = key_idx;
                 cached_free_slots->len++;
         } else {
-                rte_ring_sp_enqueue(h->free_slots,
-                                (void *)((uintptr_t)key_idx));
+                rte_ring_sp_enqueue_elem(h->free_slots, &key_idx,
+                                                sizeof(uint32_t));
         }
         return 0;
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
index fb19bb27d..345de6bf9 100644
--- a/lib/librte_hash/rte_cuckoo_hash.h
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -124,7 +124,7 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
 
 struct lcore_cache {
         unsigned len; /**< Cache len */
-        void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
+        uint32_t objs[LCORE_CACHE_SIZE]; /**< Cache objects */
 } __rte_cache_aligned;
 
 /* Structure that stores key-value pair */
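For readers who have not used the rte_ring_elem API that the patch switches to, the sketch below shows the pattern in isolation: a ring created with a 4-byte element size, filled with free-slot indices, and drained in bursts into a per-lcore cache. It is a stand-alone illustration, not code from the patch; NUM_SLOTS, CACHE_SIZE and the function names are made up, while the ring calls are the ones used above.

    #include <stdint.h>

    #include <rte_common.h>      /* rte_align32pow2() */
    #include <rte_ring_elem.h>   /* *_elem ring API used by the patch */

    #define NUM_SLOTS  1024      /* illustrative table size */
    #define CACHE_SIZE 64        /* illustrative, mirrors LCORE_CACHE_SIZE */

    static struct rte_ring *free_slots;

    /* Call after rte_eal_init(). Creates a ring of 32-bit indices instead of
     * pointers, which is where the memory saving comes from on 64-bit targets.
     */
    static int
    freelist_init(int socket_id)
    {
            uint32_t i;

            free_slots = rte_ring_create_elem("EX_free_slots", sizeof(uint32_t),
                                              rte_align32pow2(NUM_SLOTS),
                                              socket_id, 0);
            if (free_slots == NULL)
                    return -1;

            /* Index 0 stays reserved (key-miss marker), so enqueue 1..NUM_SLOTS-1,
             * which exactly fits the usable capacity of a power-of-two ring.
             */
            for (i = 1; i < NUM_SLOTS; i++)
                    rte_ring_sp_enqueue_elem(free_slots, &i, sizeof(uint32_t));

            return 0;
    }

    /* Refill a per-lcore cache with up to CACHE_SIZE 32-bit indices in one burst. */
    static unsigned int
    freelist_refill(uint32_t cache[CACHE_SIZE])
    {
            return rte_ring_mc_dequeue_burst_elem(free_slots, cache,
                                                  sizeof(uint32_t),
                                                  CACHE_SIZE, NULL);
    }

Before this series the same freelists stored indices cast to void *, so each ring entry and each per-lcore cache slot occupied 8 bytes on 64-bit systems; with a 4-byte element size those structures are half as large, which is the memory saving claimed in the commit message.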