Message ID | 1424451575-1205-1-git-send-email-bill.fischofer@linaro.org
---|---
State | New
Headers | show
v2 just sent that does this. Thanks. On Wed, Feb 25, 2015 at 8:45 AM, Savolainen, Petri (Nokia - FI/Espoo) < petri.savolainen@nokia.com> wrote: > > > > -----Original Message----- > > From: lng-odp-bounces@lists.linaro.org [mailto:lng-odp- > > bounces@lists.linaro.org] On Behalf Of ext Bill Fischofer > > Sent: Friday, February 20, 2015 7:00 PM > > To: lng-odp@lists.linaro.org > > Subject: [lng-odp] [PATCH] linux-generic: pools: switch to simple locks > > for buf/blk synchronization > > > > Resolve ABA issue with a simple use of locks. The performance hit is > > negligible due to the existing use of local buffer caching. > > > > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > > --- > > platform/linux-generic/include/odp_pool_internal.h | 74 > +++++++++-------- > > ----- > > platform/linux-generic/odp_pool.c | 8 +-- > > 2 files changed, 33 insertions(+), 49 deletions(-) > > > > diff --git a/platform/linux-generic/include/odp_pool_internal.h > > b/platform/linux-generic/include/odp_pool_internal.h > > index 3d1497a..f60c313 100644 > > --- a/platform/linux-generic/include/odp_pool_internal.h > > +++ b/platform/linux-generic/include/odp_pool_internal.h > > @@ -80,8 +80,12 @@ typedef struct local_cache_t { > > struct pool_entry_s { > > #ifdef POOL_USE_TICKETLOCK > > odp_ticketlock_t lock ODP_ALIGNED_CACHE; > > + odp_ticketlock_t buf_lock; > > + odp_ticketlock_t blk_lock; > > #else > > odp_spinlock_t lock ODP_ALIGNED_CACHE; > > + odp_spinlock_t buf_lock; > > + odp_spinlock_t blk_lock; > > #endif > > > > char name[ODP_POOL_NAME_LEN]; > > @@ -107,8 +111,8 @@ struct pool_entry_s { > > size_t pool_size; > > uint32_t buf_align; > > uint32_t buf_stride; > > - _odp_atomic_ptr_t buf_freelist; > > - _odp_atomic_ptr_t blk_freelist; > > + odp_buffer_hdr_t *buf_freelist; > > + void *blk_freelist; > > odp_atomic_u32_t bufcount; > > odp_atomic_u32_t blkcount; > > odp_atomic_u64_t bufallocs; > > @@ -164,38 +168,30 @@ extern void *pool_entry_ptr[]; > > > > static inline void 
*get_blk(struct pool_entry_s *pool) > > { > > - void *oldhead, *myhead, *newhead; > > + void *myhead; > > + POOL_LOCK(&pool->blk_lock); > > > > - oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, > > _ODP_MEMMODEL_ACQ); > > + myhead = pool->blk_freelist; > > > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - if (odp_unlikely(myhead == NULL)) > > - break; > > - newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + > 1); > > - } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); > > - > > - if (odp_unlikely(myhead == NULL)) > > + if (odp_unlikely(myhead == NULL)) { > > odp_atomic_inc_u64(&pool->blkempty); > > I'd move atomic inc/dec out side of the critical section (consistently). > Atomic operations may spin, which would lead to holding the lock a long > time. Other option is use the same lock for these counters. > > -Petri > > > > - else > > + } else { > > + pool->blk_freelist = ((odp_buf_blk_t *)myhead)->next; > > odp_atomic_dec_u32(&pool->blkcount); > > + } > > > > + POOL_UNLOCK(&pool->blk_lock); > > return (void *)myhead; > > } > > > > static inline void ret_blk(struct pool_entry_s *pool, void *block) > > { > > - void *oldhead, *myhead, *myblock; > > + POOL_LOCK(&pool->blk_lock); > > > > - oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, > > _ODP_MEMMODEL_ACQ); > > + ((odp_buf_blk_t *)block)->next = pool->blk_freelist; > > + pool->blk_freelist = block; > > > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - ((odp_buf_blk_t *)block)->next = myhead; > > - myblock = odp_retag(block, tag + 1); > > - } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); > > + POOL_UNLOCK(&pool->blk_lock); > > > > odp_atomic_inc_u32(&pool->blkcount); > > odp_atomic_inc_u64(&pool->blkfrees); > > @@ -203,21 +199,17 @@ static inline void ret_blk(struct pool_entry_s > > *pool, void *block) > > > > static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) > > { > > - 
odp_buffer_hdr_t *oldhead, *myhead, *newhead; > > - > > - oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, > > _ODP_MEMMODEL_ACQ); > > + odp_buffer_hdr_t *myhead; > > + POOL_LOCK(&pool->buf_lock); > > > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - if (odp_unlikely(myhead == NULL)) > > - break; > > - newhead = odp_retag(myhead->next, tag + 1); > > - } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); > > + myhead = pool->buf_freelist; > > > > if (odp_unlikely(myhead == NULL)) { > > odp_atomic_inc_u64(&pool->bufempty); > > + POOL_UNLOCK(&pool->buf_lock); > > } else { > > + pool->buf_freelist = myhead->next; > > + POOL_UNLOCK(&pool->buf_lock); > > uint64_t bufcount = > > odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; > > > > @@ -228,7 +220,6 @@ static inline odp_buffer_hdr_t *get_buf(struct > > pool_entry_s *pool) > > } > > > > odp_atomic_inc_u64(&pool->bufallocs); > > - myhead->next = myhead; /* Mark buffer allocated */ > > myhead->allocator = odp_thread_id(); > > } > > > > @@ -237,10 +228,6 @@ static inline odp_buffer_hdr_t *get_buf(struct > > pool_entry_s *pool) > > > > static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t > > *buf) > > { > > - odp_buffer_hdr_t *oldhead, *myhead, *mybuf; > > - > > - buf->allocator = ODP_FREEBUF; /* Mark buffer free */ > > - > > if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) { > > while (buf->segcount > 0) { > > if (buffer_is_secure(buf) || pool_is_secure(pool)) > > @@ -251,14 +238,11 @@ static inline void ret_buf(struct pool_entry_s > > *pool, odp_buffer_hdr_t *buf) > > buf->size = 0; > > } > > > > - oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, > > _ODP_MEMMODEL_ACQ); > > - > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - buf->next = myhead; > > - mybuf = odp_retag(buf, tag + 1); > > - } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); > > + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ 
> > + POOL_LOCK(&pool->buf_lock); > > + buf->next = pool->buf_freelist; > > + pool->buf_freelist = buf; > > + POOL_UNLOCK(&pool->buf_lock); > > > > uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + > > 1; > > > > diff --git a/platform/linux-generic/odp_pool.c b/platform/linux- > > generic/odp_pool.c > > index 8da3801..382cd71 100644 > > --- a/platform/linux-generic/odp_pool.c > > +++ b/platform/linux-generic/odp_pool.c > > @@ -82,6 +82,8 @@ int odp_pool_init_global(void) > > /* init locks */ > > pool_entry_t *pool = &pool_tbl->pool[i]; > > POOL_LOCK_INIT(&pool->s.lock); > > + POOL_LOCK_INIT(&pool->s.buf_lock); > > + POOL_LOCK_INIT(&pool->s.blk_lock); > > pool->s.pool_hdl = pool_index_to_handle(i); > > pool->s.pool_id = i; > > pool_entry_ptr[i] = pool; > > @@ -302,10 +304,8 @@ odp_pool_t odp_pool_create(const char *name, > > pool->s.pool_mdata_addr = mdata_base_addr; > > > > pool->s.buf_stride = buf_stride; > > - _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL, > > - _ODP_MEMMODEL_RLX); > > - _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL, > > - _ODP_MEMMODEL_RLX); > > + pool->s.buf_freelist = NULL; > > + pool->s.blk_freelist = NULL; > > > > /* Initialization will increment these to their target > vals */ > > odp_atomic_store_u32(&pool->s.bufcount, 0); > > -- > > 2.1.0 > > > > > > _______________________________________________ > > lng-odp mailing list > > lng-odp@lists.linaro.org > > http://lists.linaro.org/mailman/listinfo/lng-odp >
OK, v3 submitted with those macros deleted. Bill On Wed, Feb 25, 2015 at 9:16 AM, Savolainen, Petri (Nokia - FI/Espoo) < petri.savolainen@nokia.com> wrote: > That was quick J That’s OK in v2. > > > > There’s actually another thing I missed to mention. If the lock-free > algorithm was removed for good, it would be good to remove all the related > (dead) code and definitions. For example, these seems redundant now: > > > > #define odp_cs() > > #define odp_tag() > > #define odp_detag() > > #define odp_retag() > > > > With the redundant stuff removed, > > Reviewed-by: Petri Savolainen <petri.savolainen@linaro.org> > > > > -Petri > > > > > > > > > > *From:* ext Bill Fischofer [mailto:bill.fischofer@linaro.org] > *Sent:* Wednesday, February 25, 2015 4:57 PM > *To:* Savolainen, Petri (Nokia - FI/Espoo) > *Cc:* lng-odp@lists.linaro.org > *Subject:* Re: [lng-odp] [PATCH] linux-generic: pools: switch to simple > locks for buf/blk synchronization > > > > v2 just sent that does this. Thanks. > > > > On Wed, Feb 25, 2015 at 8:45 AM, Savolainen, Petri (Nokia - FI/Espoo) < > petri.savolainen@nokia.com> wrote: > > > > > -----Original Message----- > > From: lng-odp-bounces@lists.linaro.org [mailto:lng-odp- > > bounces@lists.linaro.org] On Behalf Of ext Bill Fischofer > > Sent: Friday, February 20, 2015 7:00 PM > > To: lng-odp@lists.linaro.org > > Subject: [lng-odp] [PATCH] linux-generic: pools: switch to simple locks > > for buf/blk synchronization > > > > Resolve ABA issue with a simple use of locks. The performance hit is > > negligible due to the existing use of local buffer caching. 
> > > > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > > --- > > platform/linux-generic/include/odp_pool_internal.h | 74 > +++++++++-------- > > ----- > > platform/linux-generic/odp_pool.c | 8 +-- > > 2 files changed, 33 insertions(+), 49 deletions(-) > > > > diff --git a/platform/linux-generic/include/odp_pool_internal.h > > b/platform/linux-generic/include/odp_pool_internal.h > > index 3d1497a..f60c313 100644 > > --- a/platform/linux-generic/include/odp_pool_internal.h > > +++ b/platform/linux-generic/include/odp_pool_internal.h > > @@ -80,8 +80,12 @@ typedef struct local_cache_t { > > struct pool_entry_s { > > #ifdef POOL_USE_TICKETLOCK > > odp_ticketlock_t lock ODP_ALIGNED_CACHE; > > + odp_ticketlock_t buf_lock; > > + odp_ticketlock_t blk_lock; > > #else > > odp_spinlock_t lock ODP_ALIGNED_CACHE; > > + odp_spinlock_t buf_lock; > > + odp_spinlock_t blk_lock; > > #endif > > > > char name[ODP_POOL_NAME_LEN]; > > @@ -107,8 +111,8 @@ struct pool_entry_s { > > size_t pool_size; > > uint32_t buf_align; > > uint32_t buf_stride; > > - _odp_atomic_ptr_t buf_freelist; > > - _odp_atomic_ptr_t blk_freelist; > > + odp_buffer_hdr_t *buf_freelist; > > + void *blk_freelist; > > odp_atomic_u32_t bufcount; > > odp_atomic_u32_t blkcount; > > odp_atomic_u64_t bufallocs; > > @@ -164,38 +168,30 @@ extern void *pool_entry_ptr[]; > > > > static inline void *get_blk(struct pool_entry_s *pool) > > { > > - void *oldhead, *myhead, *newhead; > > + void *myhead; > > + POOL_LOCK(&pool->blk_lock); > > > > - oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, > > _ODP_MEMMODEL_ACQ); > > + myhead = pool->blk_freelist; > > > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - if (odp_unlikely(myhead == NULL)) > > - break; > > - newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + > 1); > > - } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); > > - > > - if (odp_unlikely(myhead == NULL)) > > + if (odp_unlikely(myhead == NULL)) { > > 
odp_atomic_inc_u64(&pool->blkempty); > > I'd move atomic inc/dec out side of the critical section (consistently). > Atomic operations may spin, which would lead to holding the lock a long > time. Other option is use the same lock for these counters. > > -Petri > > > > > - else > > + } else { > > + pool->blk_freelist = ((odp_buf_blk_t *)myhead)->next; > > odp_atomic_dec_u32(&pool->blkcount); > > + } > > > > + POOL_UNLOCK(&pool->blk_lock); > > return (void *)myhead; > > } > > > > static inline void ret_blk(struct pool_entry_s *pool, void *block) > > { > > - void *oldhead, *myhead, *myblock; > > + POOL_LOCK(&pool->blk_lock); > > > > - oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, > > _ODP_MEMMODEL_ACQ); > > + ((odp_buf_blk_t *)block)->next = pool->blk_freelist; > > + pool->blk_freelist = block; > > > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - ((odp_buf_blk_t *)block)->next = myhead; > > - myblock = odp_retag(block, tag + 1); > > - } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); > > + POOL_UNLOCK(&pool->blk_lock); > > > > odp_atomic_inc_u32(&pool->blkcount); > > odp_atomic_inc_u64(&pool->blkfrees); > > @@ -203,21 +199,17 @@ static inline void ret_blk(struct pool_entry_s > > *pool, void *block) > > > > static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) > > { > > - odp_buffer_hdr_t *oldhead, *myhead, *newhead; > > - > > - oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, > > _ODP_MEMMODEL_ACQ); > > + odp_buffer_hdr_t *myhead; > > + POOL_LOCK(&pool->buf_lock); > > > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - if (odp_unlikely(myhead == NULL)) > > - break; > > - newhead = odp_retag(myhead->next, tag + 1); > > - } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); > > + myhead = pool->buf_freelist; > > > > if (odp_unlikely(myhead == NULL)) { > > odp_atomic_inc_u64(&pool->bufempty); > > + POOL_UNLOCK(&pool->buf_lock); > > } else { > > + 
pool->buf_freelist = myhead->next; > > + POOL_UNLOCK(&pool->buf_lock); > > uint64_t bufcount = > > odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; > > > > @@ -228,7 +220,6 @@ static inline odp_buffer_hdr_t *get_buf(struct > > pool_entry_s *pool) > > } > > > > odp_atomic_inc_u64(&pool->bufallocs); > > - myhead->next = myhead; /* Mark buffer allocated */ > > myhead->allocator = odp_thread_id(); > > } > > > > @@ -237,10 +228,6 @@ static inline odp_buffer_hdr_t *get_buf(struct > > pool_entry_s *pool) > > > > static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t > > *buf) > > { > > - odp_buffer_hdr_t *oldhead, *myhead, *mybuf; > > - > > - buf->allocator = ODP_FREEBUF; /* Mark buffer free */ > > - > > if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) { > > while (buf->segcount > 0) { > > if (buffer_is_secure(buf) || pool_is_secure(pool)) > > @@ -251,14 +238,11 @@ static inline void ret_buf(struct pool_entry_s > > *pool, odp_buffer_hdr_t *buf) > > buf->size = 0; > > } > > > > - oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, > > _ODP_MEMMODEL_ACQ); > > - > > - do { > > - size_t tag = odp_tag(oldhead); > > - myhead = odp_detag(oldhead); > > - buf->next = myhead; > > - mybuf = odp_retag(buf, tag + 1); > > - } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); > > + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ > > + POOL_LOCK(&pool->buf_lock); > > + buf->next = pool->buf_freelist; > > + pool->buf_freelist = buf; > > + POOL_UNLOCK(&pool->buf_lock); > > > > uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + > > 1; > > > > diff --git a/platform/linux-generic/odp_pool.c b/platform/linux- > > generic/odp_pool.c > > index 8da3801..382cd71 100644 > > --- a/platform/linux-generic/odp_pool.c > > +++ b/platform/linux-generic/odp_pool.c > > @@ -82,6 +82,8 @@ int odp_pool_init_global(void) > > /* init locks */ > > pool_entry_t *pool = &pool_tbl->pool[i]; > > POOL_LOCK_INIT(&pool->s.lock); > > + 
POOL_LOCK_INIT(&pool->s.buf_lock); > > + POOL_LOCK_INIT(&pool->s.blk_lock); > > pool->s.pool_hdl = pool_index_to_handle(i); > > pool->s.pool_id = i; > > pool_entry_ptr[i] = pool; > > @@ -302,10 +304,8 @@ odp_pool_t odp_pool_create(const char *name, > > pool->s.pool_mdata_addr = mdata_base_addr; > > > > pool->s.buf_stride = buf_stride; > > - _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL, > > - _ODP_MEMMODEL_RLX); > > - _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL, > > - _ODP_MEMMODEL_RLX); > > + pool->s.buf_freelist = NULL; > > + pool->s.blk_freelist = NULL; > > > > /* Initialization will increment these to their target > vals */ > > odp_atomic_store_u32(&pool->s.bufcount, 0); > > -- > > 2.1.0 > > > > > > > _______________________________________________ > > lng-odp mailing list > > lng-odp@lists.linaro.org > > http://lists.linaro.org/mailman/listinfo/lng-odp > > >
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h index 3d1497a..f60c313 100644 --- a/platform/linux-generic/include/odp_pool_internal.h +++ b/platform/linux-generic/include/odp_pool_internal.h @@ -80,8 +80,12 @@ typedef struct local_cache_t { struct pool_entry_s { #ifdef POOL_USE_TICKETLOCK odp_ticketlock_t lock ODP_ALIGNED_CACHE; + odp_ticketlock_t buf_lock; + odp_ticketlock_t blk_lock; #else odp_spinlock_t lock ODP_ALIGNED_CACHE; + odp_spinlock_t buf_lock; + odp_spinlock_t blk_lock; #endif char name[ODP_POOL_NAME_LEN]; @@ -107,8 +111,8 @@ struct pool_entry_s { size_t pool_size; uint32_t buf_align; uint32_t buf_stride; - _odp_atomic_ptr_t buf_freelist; - _odp_atomic_ptr_t blk_freelist; + odp_buffer_hdr_t *buf_freelist; + void *blk_freelist; odp_atomic_u32_t bufcount; odp_atomic_u32_t blkcount; odp_atomic_u64_t bufallocs; @@ -164,38 +168,30 @@ extern void *pool_entry_ptr[]; static inline void *get_blk(struct pool_entry_s *pool) { - void *oldhead, *myhead, *newhead; + void *myhead; + POOL_LOCK(&pool->blk_lock); - oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ); + myhead = pool->blk_freelist; - do { - size_t tag = odp_tag(oldhead); - myhead = odp_detag(oldhead); - if (odp_unlikely(myhead == NULL)) - break; - newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + 1); - } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); - - if (odp_unlikely(myhead == NULL)) + if (odp_unlikely(myhead == NULL)) { odp_atomic_inc_u64(&pool->blkempty); - else + } else { + pool->blk_freelist = ((odp_buf_blk_t *)myhead)->next; odp_atomic_dec_u32(&pool->blkcount); + } + POOL_UNLOCK(&pool->blk_lock); return (void *)myhead; } static inline void ret_blk(struct pool_entry_s *pool, void *block) { - void *oldhead, *myhead, *myblock; + POOL_LOCK(&pool->blk_lock); - oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ); + ((odp_buf_blk_t *)block)->next = pool->blk_freelist; + 
pool->blk_freelist = block; - do { - size_t tag = odp_tag(oldhead); - myhead = odp_detag(oldhead); - ((odp_buf_blk_t *)block)->next = myhead; - myblock = odp_retag(block, tag + 1); - } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); + POOL_UNLOCK(&pool->blk_lock); odp_atomic_inc_u32(&pool->blkcount); odp_atomic_inc_u64(&pool->blkfrees); @@ -203,21 +199,17 @@ static inline void ret_blk(struct pool_entry_s *pool, void *block) static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) { - odp_buffer_hdr_t *oldhead, *myhead, *newhead; - - oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ); + odp_buffer_hdr_t *myhead; + POOL_LOCK(&pool->buf_lock); - do { - size_t tag = odp_tag(oldhead); - myhead = odp_detag(oldhead); - if (odp_unlikely(myhead == NULL)) - break; - newhead = odp_retag(myhead->next, tag + 1); - } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); + myhead = pool->buf_freelist; if (odp_unlikely(myhead == NULL)) { odp_atomic_inc_u64(&pool->bufempty); + POOL_UNLOCK(&pool->buf_lock); } else { + pool->buf_freelist = myhead->next; + POOL_UNLOCK(&pool->buf_lock); uint64_t bufcount = odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; @@ -228,7 +220,6 @@ static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) } odp_atomic_inc_u64(&pool->bufallocs); - myhead->next = myhead; /* Mark buffer allocated */ myhead->allocator = odp_thread_id(); } @@ -237,10 +228,6 @@ static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf) { - odp_buffer_hdr_t *oldhead, *myhead, *mybuf; - - buf->allocator = ODP_FREEBUF; /* Mark buffer free */ - if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) { while (buf->segcount > 0) { if (buffer_is_secure(buf) || pool_is_secure(pool)) @@ -251,14 +238,11 @@ static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf) buf->size = 0; } - oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, 
_ODP_MEMMODEL_ACQ); - - do { - size_t tag = odp_tag(oldhead); - myhead = odp_detag(oldhead); - buf->next = myhead; - mybuf = odp_retag(buf, tag + 1); - } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ + POOL_LOCK(&pool->buf_lock); + buf->next = pool->buf_freelist; + pool->buf_freelist = buf; + POOL_UNLOCK(&pool->buf_lock); uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + 1; diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 8da3801..382cd71 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -82,6 +82,8 @@ int odp_pool_init_global(void) /* init locks */ pool_entry_t *pool = &pool_tbl->pool[i]; POOL_LOCK_INIT(&pool->s.lock); + POOL_LOCK_INIT(&pool->s.buf_lock); + POOL_LOCK_INIT(&pool->s.blk_lock); pool->s.pool_hdl = pool_index_to_handle(i); pool->s.pool_id = i; pool_entry_ptr[i] = pool; @@ -302,10 +304,8 @@ odp_pool_t odp_pool_create(const char *name, pool->s.pool_mdata_addr = mdata_base_addr; pool->s.buf_stride = buf_stride; - _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL, - _ODP_MEMMODEL_RLX); - _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL, - _ODP_MEMMODEL_RLX); + pool->s.buf_freelist = NULL; + pool->s.blk_freelist = NULL; /* Initialization will increment these to their target vals */ odp_atomic_store_u32(&pool->s.bufcount, 0);
Resolve ABA issue with a simple use of locks. The performance hit is negligible due to the existing use of local buffer caching. Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> --- platform/linux-generic/include/odp_pool_internal.h | 74 +++++++++------------- platform/linux-generic/odp_pool.c | 8 +-- 2 files changed, 33 insertions(+), 49 deletions(-)