@@ -95,6 +95,7 @@ typedef struct {
odp_schedule_group_info_t *);
void (*schedule_order_lock)(unsigned);
void (*schedule_order_unlock)(unsigned);
+ void (*schedule_order_unlock_lock)(unsigned);
} schedule_api_t;
@@ -79,6 +79,7 @@ typedef struct reorder_window {
uint32_t tail;
uint32_t turn;
uint32_t olock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ /* Ordered lock most recently acquired in this window */
+ uint32_t lock_index;
uint16_t lock_count;
/* Reorder contexts in this window */
reorder_context_t *ring[RWIN_SIZE];
@@ -163,6 +163,7 @@ typedef struct {
int stash_num; /**< Number of stashed enqueue operations */
uint8_t in_order; /**< Order status */
lock_called_t lock_called; /**< States of ordered locks */
+ uint32_t lock_index; /**< Ordered lock last acquired */
/** Storage for stashed enqueue operations */
ordered_stash_t stash[MAX_ORDERED_STASH];
} ordered;
@@ -1121,6 +1122,7 @@ static void schedule_order_lock(unsigned lock_index)
if (lock_seq == sched_local.ordered.ctx) {
sched_local.ordered.lock_called.u8[lock_index] = 1;
+ sched_local.ordered.lock_index = lock_index;
return;
}
odp_cpu_pause();
@@ -1141,9 +1143,17 @@ static void schedule_order_unlock(unsigned lock_index)
ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
+ sched_local.ordered.lock_index =
+ sched->queue[queue_index].order_lock_count + 1;
odp_atomic_store_rel_u64(ord_lock, sched_local.ordered.ctx + 1);
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ schedule_order_unlock(sched_local.ordered.lock_index);
+ schedule_order_lock(lock_index);
+}
+
static void schedule_pause(void)
{
sched_local.pause = 1;
@@ -1429,5 +1439,6 @@ const schedule_api_t schedule_default_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock
};
@@ -129,3 +129,8 @@ void odp_schedule_order_unlock(unsigned lock_index)
{
return sched_api->schedule_order_unlock(lock_index);
}
+
+void odp_schedule_order_unlock_lock(uint32_t lock_index)
+{
+ sched_api->schedule_order_unlock_lock(lock_index);
+}
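
(For reference, a minimal usage sketch of the new call from an application worker; this is not part of the patch. It assumes the source queue is ORDERED and was created with at least two ordered locks (sched.lock_count >= 2), and process_stage1()/process_stage2() are hypothetical stand-ins for per-event work.)

#include <odp_api.h>

/* Hypothetical application helpers, placeholders for real per-event work. */
static void process_stage1(odp_event_t ev) { (void)ev; }
static void process_stage2(odp_event_t ev) { (void)ev; }

static void worker_loop(void)
{
	while (1) {
		odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);

		if (ev == ODP_EVENT_INVALID)
			continue;

		odp_schedule_order_lock(0);	/* enter critical section 0 in order */
		process_stage1(ev);
		/* Release lock 0 (its index is tracked by the scheduler) and
		 * acquire lock 1 in one call instead of unlock + lock. */
		odp_schedule_order_unlock_lock(1);
		process_stage2(ev);
		odp_schedule_order_unlock(1);
	}
}
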
@@ -223,6 +223,7 @@ struct sched_thread_local {
int stash_num; /**< Number of stashed enqueue operations */
uint8_t in_order; /**< Order status */
lock_called_t lock_called; /**< States of ordered locks */
+ uint32_t lock_index; /**< Ordered lock last acquired */
/** Storage for stashed enqueue operations */
ordered_stash_t stash[MAX_ORDERED_STASH];
} ordered;
@@ -1273,6 +1274,7 @@ static void schedule_order_lock(unsigned lock_index)
if (lock_seq == thread_local.ordered.ctx) {
thread_local.ordered.lock_called.u8[lock_index] = 1;
+ thread_local.ordered.lock_index = lock_index;
return;
}
odp_cpu_pause();
@@ -1293,9 +1295,17 @@ static void schedule_order_unlock(unsigned lock_index)
ODP_ASSERT(thread_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
+ thread_local.ordered.lock_index =
+ sched->queues[queue_index].lock_count + 1;
odp_atomic_store_rel_u64(ord_lock, thread_local.ordered.ctx + 1);
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ schedule_order_unlock(thread_local.ordered.lock_index);
+ schedule_order_lock(lock_index);
+}
+
static unsigned schedule_max_ordered_locks(void)
{
return CONFIG_QUEUE_MAX_ORD_LOCKS;
@@ -1368,7 +1378,8 @@ const schedule_api_t schedule_iquery_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock
};
static void thread_set_interest(sched_thread_local_t *thread,
@@ -1007,6 +1007,8 @@ static void schedule_order_lock(unsigned lock_index)
monitor32(&rctx->rwin->olock[lock_index],
__ATOMIC_ACQUIRE) != rctx->sn)
doze();
}
+ rctx->rwin->lock_index = lock_index;
}
@@ -1025,9 +1027,23 @@ static void schedule_order_unlock(unsigned lock_index)
atomic_store_release(&rctx->rwin->olock[lock_index],
rctx->sn + 1,
/*readonly=*/false);
+ rctx->rwin->lock_index = rctx->rwin->lock_count + 1;
rctx->olock_flags |= 1U << lock_index;
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ struct reorder_context *rctx;
+
+ rctx = sched_ts->rctx;
+ if (odp_unlikely(rctx == NULL || rctx->rwin == NULL)) {
+ ODP_ERR("Invalid call to odp_schedule_order_unlock_lock\n");
+ return;
+ }
+ schedule_order_unlock(rctx->rwin->lock_index);
+ schedule_order_lock(lock_index);
+}
+
static void schedule_release_atomic(void)
{
sched_scalable_thread_state_t *ts;
@@ -1978,4 +1994,5 @@ const schedule_api_t schedule_scalable_api = {
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
.schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
};
@@ -819,6 +819,11 @@ static void schedule_order_unlock(unsigned lock_index)
(void)lock_index;
}
+static void schedule_order_unlock_lock(unsigned lock_index)
+{
+ (void)lock_index;
+}
+
static void order_lock(void)
{
}
@@ -868,5 +873,6 @@ const schedule_api_t schedule_sp_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock
};