@@ -44,6 +44,11 @@ extern "C" {
#define ODP_CONFIG_SCHED_PRIOS 8
/**
+ * Number of scheduling groups
+ */
+#define ODP_CONFIG_SCHED_GRPS 16
+
+/**
* Maximum number of packet IO resources
*/
#define ODP_CONFIG_PKTIO_ENTRIES 64
@@ -43,8 +43,12 @@ typedef int odp_schedule_sync_t;
typedef int odp_schedule_group_t;
+/* These must be kept in sync with thread_globals_t in odp_thread.c */
+#define ODP_SCHED_GROUP_INVALID -1
#define ODP_SCHED_GROUP_ALL 0
#define ODP_SCHED_GROUP_WORKER 1
+#define ODP_SCHED_GROUP_CONTROL 2
+#define ODP_SCHED_GROUP_NAMED 3
#define ODP_SCHED_GROUP_NAME_LEN 32
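
For reference (not part of this patch): an application binds a queue to a group via the sched.group field of its creation parameters. A minimal sketch, assuming the three-argument odp_queue_create() of this ODP release and the usual scheduling defaults:

	odp_queue_param_t qparam;

	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_WORKER; /* worker threads only */

	odp_queue_t q = odp_queue_create("q", ODP_QUEUE_TYPE_SCHED, &qparam);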
@@ -23,6 +23,8 @@
#include <odp_queue_internal.h>
#include <odp_packet_io_internal.h>
+/* Mask with every thread bit set; see odp_schedule_group_leave() */
+odp_thrmask_t sched_mask_all;
+
/* Number of schedule commands.
* One per scheduled queue and packet interface */
#define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
@@ -48,6 +50,11 @@ typedef struct {
odp_pool_t pool;
odp_shm_t shm;
uint32_t pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
+ odp_spinlock_t grp_lock;
+ struct {
+ char name[ODP_SCHED_GROUP_NAME_LEN];
+ odp_thrmask_t *mask;
+ } sched_grp[ODP_CONFIG_SCHED_GRPS];
} sched_t;
/* Schedule command */
@@ -87,6 +94,9 @@ static sched_t *sched;
/* Thread local scheduler context */
static __thread sched_local_t sched_local;
+/* Internal routine to get the address of a scheduler group's thread mask */
+odp_thrmask_t *thread_sched_grp_mask(int index);
+
static void sched_local_init(void)
{
int i;
@@ -163,6 +173,15 @@ int odp_schedule_init_global(void)
}
}
+ odp_spinlock_init(&sched->grp_lock);
+
+ for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
+ memset(&sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
+ sched->sched_grp[i].mask = thread_sched_grp_mask(i);
+ }
+
+ odp_thrmask_setall(&sched_mask_all);
+
ODP_DBG("done\n");
return 0;
@@ -466,6 +485,18 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
}
qe = sched_cmd->qe;
+		if (qe->s.param.sched.group > ODP_SCHED_GROUP_ALL &&
+		    !odp_thrmask_isset(
+			    sched->sched_grp[qe->s.param.sched.group].mask,
+			    thr)) {
+			/* This thread is not eligible for work from this
+			 * queue, so re-enqueue the schedule command for an
+			 * eligible thread to pick up and keep scheduling.
+			 */
+			if (odp_queue_enq(pri_q, ev))
+				ODP_ABORT("schedule failed\n");
+			continue;
+		}
+
num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);
if (num < 0) {
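
The test above reduces to a small predicate. An equivalent helper, purely illustrative and not added by this patch, would read:

	/* A thread may take work from a queue iff the queue belongs to
	 * ODP_SCHED_GROUP_ALL or the thread is set in the group's mask. */
	static inline int thr_is_eligible(queue_entry_t *qe, int thr)
	{
		int grp = qe->s.param.sched.group;

		return grp <= ODP_SCHED_GROUP_ALL ||
		       odp_thrmask_isset(sched->sched_grp[grp].mask, thr);
	}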
@@ -587,3 +618,128 @@ int odp_schedule_num_prio(void)
{
return ODP_CONFIG_SCHED_PRIOS;
}
+
+odp_schedule_group_t odp_schedule_group_create(const char *name,
+ const odp_thrmask_t *mask)
+{
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+ int i;
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+ for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		if (sched->sched_grp[i].name[0] == 0) {
+			/* Copy at most NAME_LEN - 1 bytes; the terminating
+			 * NUL is guaranteed by the memset at init/destroy. */
+			strncpy(sched->sched_grp[i].name, name,
+				ODP_SCHED_GROUP_NAME_LEN - 1);
+ odp_thrmask_copy(sched->sched_grp[i].mask, mask);
+ group = (odp_schedule_group_t)i;
+ break;
+ }
+ }
+
+ odp_spinlock_unlock(&sched->grp_lock);
+ return group;
+}
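
Typical use, as a sketch (worker_id and handle_error() are hypothetical; any valid thread id, e.g. from odp_thread_id(), works):

	odp_thrmask_t mask;
	odp_schedule_group_t grp;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, worker_id);
	grp = odp_schedule_group_create("rx-group", &mask);
	if (grp == ODP_SCHED_GROUP_INVALID)
		handle_error(); /* every named slot is already in use */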
+
+int odp_schedule_group_destroy(odp_schedule_group_t group)
+{
+ int ret;
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+	if (group < ODP_CONFIG_SCHED_GRPS &&
+	    group >= ODP_SCHED_GROUP_NAMED &&
+ sched->sched_grp[group].name[0] != 0) {
+ odp_thrmask_zero(sched->sched_grp[group].mask);
+ memset(&sched->sched_grp[group].name, 0,
+ ODP_SCHED_GROUP_NAME_LEN);
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ odp_spinlock_unlock(&sched->grp_lock);
+ return ret;
+}
+
+odp_schedule_group_t odp_schedule_group_lookup(const char *name)
+{
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+ int i;
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+ for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+		if (sched->sched_grp[i].name[0] != 0 &&
+		    strcmp(name, sched->sched_grp[i].name) == 0) {
+ group = (odp_schedule_group_t)i;
+ break;
+ }
+ }
+
+ odp_spinlock_unlock(&sched->grp_lock);
+ return group;
+}
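
Note that create and lookup each take grp_lock independently, so two threads racing through a lookup-then-create sequence can still end up with a duplicated name; callers wanting lookup-or-create semantics must serialize that externally. The lookup side of the pattern:

	odp_schedule_group_t grp = odp_schedule_group_lookup("rx-group");

	if (grp == ODP_SCHED_GROUP_INVALID)
		grp = odp_schedule_group_create("rx-group", &mask);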
+
+int odp_schedule_group_join(odp_schedule_group_t group,
+ const odp_thrmask_t *mask)
+{
+ int ret;
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+ if (group < ODP_CONFIG_SCHED_GRPS &&
+ group >= ODP_SCHED_GROUP_NAMED &&
+ sched->sched_grp[group].name[0] != 0) {
+ odp_thrmask_or(sched->sched_grp[group].mask,
+ sched->sched_grp[group].mask,
+ mask);
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ odp_spinlock_unlock(&sched->grp_lock);
+ return ret;
+}
+
+int odp_schedule_group_leave(odp_schedule_group_t group,
+ const odp_thrmask_t *mask)
+{
+ int ret;
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+ if (group < ODP_CONFIG_SCHED_GRPS &&
+ group >= ODP_SCHED_GROUP_NAMED &&
+ sched->sched_grp[group].name[0] != 0) {
+ odp_thrmask_t leavemask;
+
+ odp_thrmask_xor(&leavemask, mask, &sched_mask_all);
+ odp_thrmask_and(sched->sched_grp[group].mask,
+ sched->sched_grp[group].mask,
+ &leavemask);
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ odp_spinlock_unlock(&sched->grp_lock);
+ return ret;
+}
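
The complement here is built with XOR against sched_mask_all, presumably because the thrmask API offers no dedicated NOT operation: since sched_mask_all has every thread bit set, mask ^ sched_mask_all == ~mask, so the pair of calls computes

	group_mask = group_mask & ~leave_mask;

which clears exactly the leaving threads and nothing else.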
+
+int odp_schedule_group_count(odp_schedule_group_t group)
+{
+ int ret;
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+ if (group < ODP_CONFIG_SCHED_GRPS &&
+ group >= ODP_SCHED_GROUP_NAMED &&
+ sched->sched_grp[group].name[0] != 0)
+ ret = odp_thrmask_count(sched->sched_grp[group].mask);
+ else
+ ret = -1;
+
+ odp_spinlock_unlock(&sched->grp_lock);
+ return ret;
+}
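
A round trip through these calls, as a sketch (grp as returned by odp_schedule_group_create() above):

	odp_thrmask_t me;

	odp_thrmask_zero(&me);
	odp_thrmask_set(&me, odp_thread_id());

	odp_schedule_group_join(grp, &me);  /* no-op if already a member */
	ODP_DBG("members: %d\n", odp_schedule_group_count(grp));
	odp_schedule_group_leave(grp, &me);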
@@ -32,9 +32,15 @@ typedef struct {
typedef struct {
thread_state_t thr[ODP_CONFIG_MAX_THREADS];
- odp_thrmask_t all;
- odp_thrmask_t worker;
- odp_thrmask_t control;
+ union {
+		/* Field order must match the ODP_SCHED_GROUP_* values
+		 * defined in schedule_types.h */
+ struct {
+ odp_thrmask_t all;
+ odp_thrmask_t worker;
+ odp_thrmask_t control;
+ };
+ odp_thrmask_t sched_grp_mask[ODP_CONFIG_SCHED_GRPS];
+ };
uint32_t num;
uint32_t num_worker;
uint32_t num_control;
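
The union relies on ODP_SCHED_GROUP_ALL, _WORKER and _CONTROL being 0, 1 and 2. A compile-time guard would make the "kept in sync" comments enforceable; an illustrative sketch, assuming C11 _Static_assert is acceptable to the build:

	_Static_assert(ODP_SCHED_GROUP_ALL == 0 &&
		       ODP_SCHED_GROUP_WORKER == 1 &&
		       ODP_SCHED_GROUP_CONTROL == 2,
		       "predefined groups must match thread_globals_t");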
@@ -53,6 +59,7 @@ static __thread thread_state_t *this_thread;
int odp_thread_init_global(void)
{
odp_shm_t shm;
+ int i;
shm = odp_shm_reserve("odp_thread_globals",
sizeof(thread_globals_t),
@@ -65,13 +72,19 @@ int odp_thread_init_global(void)
memset(thread_globals, 0, sizeof(thread_globals_t));
odp_spinlock_init(&thread_globals->lock);
- odp_thrmask_zero(&thread_globals->all);
- odp_thrmask_zero(&thread_globals->worker);
- odp_thrmask_zero(&thread_globals->control);
+
+ for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++)
+ odp_thrmask_zero(&thread_globals->sched_grp_mask[i]);
return 0;
}
+/* Internal accessor for the scheduler; declared here and locally in
+ * odp_schedule.c rather than in a shared internal header. */
+odp_thrmask_t *thread_sched_grp_mask(int index);
+odp_thrmask_t *thread_sched_grp_mask(int index)
+{
+ return &thread_globals->sched_grp_mask[index];
+}
+
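
Because of the union above, the accessor also resolves the predefined groups; for example (illustrative):

	/* Index ODP_SCHED_GROUP_WORKER aliases the worker mask: */
	assert(thread_sched_grp_mask(ODP_SCHED_GROUP_WORKER) ==
	       &thread_globals->worker);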
int odp_thread_term_global(void)
{
int ret;
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 include/odp/api/config.h                          |   5 +
 .../include/odp/plat/schedule_types.h             |   4 +
 platform/linux-generic/odp_schedule.c             | 156 +++++++++++++++++++++
 platform/linux-generic/odp_thread.c               |  25 +++-
 4 files changed, 184 insertions(+), 6 deletions(-)