@@ -119,6 +119,7 @@ typedef struct odp_buffer_hdr_t {
struct {
uint32_t zeroized:1; /* Zeroize buf data on free */
uint32_t hdrdata:1; /* Data is in buffer hdr */
+ uint32_t sustain:1; /* Sustain order */
};
} flags;
int16_t allocator; /* allocating thread id */
@@ -376,7 +376,7 @@ int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
} else {
odp_buffer_hdr_t *reorder_prev = NULL;
- while (buf_hdr->order > reorder_buf->order) {
+ while (buf_hdr->order >= reorder_buf->order) {
reorder_prev = reorder_buf;
reorder_buf = reorder_buf->next;
if (!reorder_buf)
@@ -404,7 +404,8 @@ int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
}
/* We're in order, so account for this and proceed with enq */
- origin_qe->s.order_out++;
+ if (!buf_hdr->flags.sustain)
+ origin_qe->s.order_out++;
/* if this element is linked, restore the linked chain */
buf_tail = buf_hdr->link;
@@ -494,8 +495,9 @@ int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
reorder_prev = reorder_buf;
}
+ if (!reorder_buf->flags.sustain)
+ release_count++;
reorder_buf = next_buf;
- release_count++;
} else if (!reorder_buf->target_qe) {
if (reorder_prev)
reorder_prev->next = next_buf;
@@ -694,6 +696,7 @@ odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
buf_hdr->origin_qe = queue;
buf_hdr->order = queue->s.order_in++;
+ buf_hdr->flags.sustain = 0;
} else {
buf_hdr->origin_qe = NULL;
}
@@ -740,6 +743,7 @@ int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
buf_hdr[i]->origin_qe = queue;
buf_hdr[i]->order = queue->s.order_in++;
+ buf_hdr[i]->flags.sustain = 0;
} else {
buf_hdr[i]->origin_qe = NULL;
}
@@ -808,8 +812,8 @@ void odp_queue_param_init(odp_queue_param_t *params)
memset(params, 0, sizeof(odp_queue_param_t));
}
-/* This routine exists here rather than in odp_schedule
- * because it operates on queue interenal structures
+/* These routines exist here rather than in odp_schedule
+ * because they operate on queue internal structures
*/
int odp_schedule_release_ordered(odp_event_t ev)
{
@@ -831,7 +835,8 @@ int odp_schedule_release_ordered(odp_event_t ev)
*/
if (buf_hdr->order <= origin_qe->s.order_out + 1) {
buf_hdr->origin_qe = NULL;
- origin_qe->s.order_out++;
+ if (!buf_hdr->flags.sustain)
+ origin_qe->s.order_out++;
/* check if this release allows us to unblock waiters */
reorder_buf = origin_qe->s.reorder_head;
@@ -891,3 +896,29 @@ int odp_schedule_release_ordered(odp_event_t ev)
UNLOCK(&origin_qe->s.lock);
return 0;
}
+
+int odp_schedule_order_copy(odp_event_t src_event, odp_event_t dst_event)
+{
+ odp_buffer_hdr_t *src =
+ odp_buf_to_hdr(odp_buffer_from_event(src_event));
+ odp_buffer_hdr_t *dst =
+ odp_buf_to_hdr(odp_buffer_from_event(dst_event));
+ queue_entry_t *origin_qe = src->origin_qe;
+
+ if (!origin_qe || dst->origin_qe)
+ return -1;
+
+ LOCK(&origin_qe->s.lock);
+
+ if (src->origin_qe != origin_qe) {
+ UNLOCK(&origin_qe->s.lock);
+ return -1;
+ }
+
+ dst->origin_qe = origin_qe;
+ dst->order = src->order;
+ src->flags.sustain = 1;
+
+ UNLOCK(&origin_qe->s.lock);
+ return 0;
+}
@@ -746,3 +746,27 @@ int odp_schedule_group_count(odp_schedule_group_t group)
void odp_schedule_prefetch(int num ODP_UNUSED)
{
}
+
+int odp_schedule_order_sustain(odp_event_t ev)
+{
+ odp_buffer_hdr_t *buf_hdr =
+ odp_buf_to_hdr(odp_buffer_from_event(ev));
+
+ if (buf_hdr->origin_qe)
+ return buf_hdr->flags.sustain;
+
+ return -1;
+}
+
+int odp_schedule_order_sustain_set(odp_event_t ev, odp_bool_t sustain)
+{
+ odp_buffer_hdr_t *buf_hdr =
+ odp_buf_to_hdr(odp_buffer_from_event(ev));
+
+ if (buf_hdr->origin_qe && (sustain & 1) == sustain) {
+ buf_hdr->flags.sustain = sustain;
+ return 0;
+ }
+
+ return -1;
+}
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 .../linux-generic/include/odp_buffer_internal.h |  1 +
 platform/linux-generic/odp_queue.c              | 43 +++++++++++++++++++---
 platform/linux-generic/odp_schedule.c           | 24 ++++++++++++
 3 files changed, 62 insertions(+), 6 deletions(-)