Message ID | 1440796737-8636-7-git-send-email-bill.fischofer@linaro.org
---|---
State | Accepted |
Commit | 26722c0ce4f4edbe36bd259aabe7d42a90d4aa6e |
On 2015-08-28 16:18, Bill Fischofer wrote:
> Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>

[...]

> +/* Internal routine to get scheduler thread mask addrs */
> +odp_thrmask_t *thread_sched_grp_mask(int index);

Should not forward declare the same function in two C files; put the
declaration in a header and include that from both odp_schedule.c and
odp_thread.c.

nit: the new names mix "grp" and "group" in this patch.

Cheers,
Anders
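A minimal sketch of the single shared declaration suggested above; the header
name and include path are illustrative assumptions, not something present in
the tree or in this patch:

/* Hypothetical shared internal header, e.g.
 * platform/linux-generic/include/odp_schedule_internal.h (name is illustrative)
 */
#ifndef ODP_SCHEDULE_INTERNAL_H_
#define ODP_SCHEDULE_INTERNAL_H_

#include <odp/thrmask.h>  /* for odp_thrmask_t; exact include path may differ */

/* Declared once here; defined in odp_thread.c, used by odp_schedule.c. */
odp_thrmask_t *thread_sched_grp_mask(int index);

#endif

Both odp_schedule.c and odp_thread.c would then include this header instead of
repeating the prototype locally.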
diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h b/platform/linux-generic/include/odp/plat/schedule_types.h
index 87f9c11..c48b652 100644
--- a/platform/linux-generic/include/odp/plat/schedule_types.h
+++ b/platform/linux-generic/include/odp/plat/schedule_types.h
@@ -43,8 +43,11 @@ typedef int odp_schedule_sync_t;

 typedef int odp_schedule_group_t;

+/* These must be kept in sync with thread_globals_t in odp_thread.c */
+#define ODP_SCHED_GROUP_INVALID -1
 #define ODP_SCHED_GROUP_ALL     0
 #define ODP_SCHED_GROUP_WORKER  1
+#define ODP_SCHED_GROUP_CONTROL 2

 #define ODP_SCHED_GROUP_NAME_LEN 32

diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 5d32c81..b00e959 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -23,6 +23,8 @@
 #include <odp_queue_internal.h>
 #include <odp_packet_io_internal.h>

+odp_thrmask_t sched_mask_all;
+
 /* Number of schedule commands.
  * One per scheduled queue and packet interface */
 #define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
@@ -40,6 +42,8 @@ typedef uint8_t pri_mask_t;
 _ODP_STATIC_ASSERT((8*sizeof(pri_mask_t)) >= QUEUES_PER_PRIO,
                    "pri_mask_t_is_too_small");

+/* Internal: Start of named groups in group mask arrays */
+#define _ODP_SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)

 typedef struct {
         odp_queue_t pri_queue[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
@@ -48,6 +52,11 @@ typedef struct {
         odp_pool_t pool;
         odp_shm_t shm;
         uint32_t pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
+        odp_spinlock_t grp_lock;
+        struct {
+                char name[ODP_SCHED_GROUP_NAME_LEN];
+                odp_thrmask_t *mask;
+        } sched_grp[ODP_CONFIG_SCHED_GRPS];
 } sched_t;

 /* Schedule command */
@@ -87,6 +96,9 @@ static sched_t *sched;
 /* Thread local scheduler context */
 static __thread sched_local_t sched_local;

+/* Internal routine to get scheduler thread mask addrs */
+odp_thrmask_t *thread_sched_grp_mask(int index);
+
 static void sched_local_init(void)
 {
         int i;
@@ -123,6 +135,7 @@ int odp_schedule_init_global(void)

         memset(sched, 0, sizeof(sched_t));

+        odp_pool_param_init(&params);
         params.buf.size = sizeof(sched_cmd_t);
         params.buf.align = 0;
         params.buf.num = NUM_SCHED_CMD;
@@ -163,6 +176,15 @@ int odp_schedule_init_global(void)
                 }
         }

+        odp_spinlock_init(&sched->grp_lock);
+
+        for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
+                memset(&sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
+                sched->sched_grp[i].mask = thread_sched_grp_mask(i);
+        }
+
+        odp_thrmask_setall(&sched_mask_all);
+
         ODP_DBG("done\n");

         return 0;
@@ -433,6 +455,7 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
         sched_cmd_t *sched_cmd;
         queue_entry_t *qe;
         int num;
+        int qe_grp;

         if (id >= QUEUES_PER_PRIO)
                 id = 0;
@@ -465,7 +488,19 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
                                 continue;
                         }

-                        qe = sched_cmd->qe;
+                        qe     = sched_cmd->qe;
+                        qe_grp = qe->s.param.sched.group;
+
+                        if (qe_grp > ODP_SCHED_GROUP_ALL &&
+                            !odp_thrmask_isset(sched->sched_grp[qe_grp].mask,
+                                               thr)) {
+                                /* This thread is not eligible for work from
+                                 * this queue, so continue scheduling it.
+                                 */
+                                if (odp_queue_enq(pri_q, ev))
+                                        ODP_ABORT("schedule failed\n");
+                                continue;
+                        }
                         num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);

                         if (num < 0) {
@@ -587,3 +622,131 @@ int odp_schedule_num_prio(void)
 {
         return ODP_CONFIG_SCHED_PRIOS;
 }
+
+odp_schedule_group_t odp_schedule_group_create(const char *name,
+                                               const odp_thrmask_t *mask)
+{
+        odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+        int i;
+
+        odp_spinlock_lock(&sched->grp_lock);
+
+        for (i = _ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+                if (sched->sched_grp[i].name[0] == 0) {
+                        strncpy(sched->sched_grp[i].name, name,
+                                ODP_SCHED_GROUP_NAME_LEN - 1);
+                        odp_thrmask_copy(sched->sched_grp[i].mask, mask);
+                        group = (odp_schedule_group_t)i;
+                        break;
+                }
+        }
+
+        odp_spinlock_unlock(&sched->grp_lock);
+        return group;
+}
+
+int odp_schedule_group_destroy(odp_schedule_group_t group)
+{
+        int ret;
+
+        odp_spinlock_lock(&sched->grp_lock);
+
+        if (group < ODP_CONFIG_SCHED_GRPS &&
+            group > _ODP_SCHED_GROUP_NAMED &&
+            sched->sched_grp[group].name[0] != 0) {
+                odp_thrmask_zero(sched->sched_grp[group].mask);
+                memset(&sched->sched_grp[group].name, 0,
+                       ODP_SCHED_GROUP_NAME_LEN);
+                ret = 0;
+        } else {
+                ret = -1;
+        }
+
+        odp_spinlock_unlock(&sched->grp_lock);
+        return ret;
+}
+
+odp_schedule_group_t odp_schedule_group_lookup(const char *name)
+{
+        odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+        int i;
+
+        odp_spinlock_lock(&sched->grp_lock);
+
+        for (i = _ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
+                if (strcmp(name, sched->sched_grp[i].name) == 0) {
+                        group = (odp_schedule_group_t)i;
+                        break;
+                }
+        }
+
+        odp_spinlock_unlock(&sched->grp_lock);
+        return group;
+}
+
+int odp_schedule_group_join(odp_schedule_group_t group,
+                            const odp_thrmask_t *mask)
+{
+        int ret;
+
+        odp_spinlock_lock(&sched->grp_lock);
+
+        if (group < ODP_CONFIG_SCHED_GRPS &&
+            group >= _ODP_SCHED_GROUP_NAMED &&
+            sched->sched_grp[group].name[0] != 0) {
+                odp_thrmask_or(sched->sched_grp[group].mask,
+                               sched->sched_grp[group].mask,
+                               mask);
+                ret = 0;
+        } else {
+                ret = -1;
+        }
+
+        odp_spinlock_unlock(&sched->grp_lock);
+        return ret;
+}
+
+int odp_schedule_group_leave(odp_schedule_group_t group,
+                             const odp_thrmask_t *mask)
+{
+        int ret;
+
+        odp_spinlock_lock(&sched->grp_lock);
+
+        if (group < ODP_CONFIG_SCHED_GRPS &&
+            group >= _ODP_SCHED_GROUP_NAMED &&
+            sched->sched_grp[group].name[0] != 0) {
+                odp_thrmask_t leavemask;
+
+                odp_thrmask_xor(&leavemask, mask, &sched_mask_all);
+                odp_thrmask_and(sched->sched_grp[group].mask,
+                                sched->sched_grp[group].mask,
+                                &leavemask);
+                ret = 0;
+        } else {
+                ret = -1;
+        }
+
+        odp_spinlock_unlock(&sched->grp_lock);
+        return ret;
+}
+
+int odp_schedule_group_thrmask(odp_schedule_group_t group,
+                               odp_thrmask_t *thrmask)
+{
+        int ret;
+
+        odp_spinlock_lock(&sched->grp_lock);
+
+        if (group < ODP_CONFIG_SCHED_GRPS &&
+            group >= _ODP_SCHED_GROUP_NAMED &&
+            sched->sched_grp[group].name[0] != 0) {
+                *thrmask = *sched->sched_grp[group].mask;
+                ret = 0;
+        } else {
+                ret = -1;
+        }
+
+        odp_spinlock_unlock(&sched->grp_lock);
+        return ret;
+}
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index 9905c78..770c64e 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -32,9 +32,15 @@ typedef struct {

 typedef struct {
         thread_state_t thr[ODP_CONFIG_MAX_THREADS];
-        odp_thrmask_t all;
-        odp_thrmask_t worker;
-        odp_thrmask_t control;
+        union {
+                /* struct order must be kept in sync with schedule_types.h */
+                struct {
+                        odp_thrmask_t all;
+                        odp_thrmask_t worker;
+                        odp_thrmask_t control;
+                };
+                odp_thrmask_t sched_grp_mask[ODP_CONFIG_SCHED_GRPS];
+        };
         uint32_t num;
         uint32_t num_worker;
         uint32_t num_control;
@@ -53,6 +59,7 @@ static __thread thread_state_t *this_thread;
 int odp_thread_init_global(void)
 {
         odp_shm_t shm;
+        int i;

         shm = odp_shm_reserve("odp_thread_globals",
                               sizeof(thread_globals_t),
@@ -65,13 +72,19 @@ int odp_thread_init_global(void)

         memset(thread_globals, 0, sizeof(thread_globals_t));
         odp_spinlock_init(&thread_globals->lock);
-        odp_thrmask_zero(&thread_globals->all);
-        odp_thrmask_zero(&thread_globals->worker);
-        odp_thrmask_zero(&thread_globals->control);
+
+        for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++)
+                odp_thrmask_zero(&thread_globals->sched_grp_mask[i]);

         return 0;
 }

+odp_thrmask_t *thread_sched_grp_mask(int index);
+odp_thrmask_t *thread_sched_grp_mask(int index)
+{
+        return &thread_globals->sched_grp_mask[index];
+}
+
 int odp_thread_term_global(void)
 {
         int ret;
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 .../include/odp/plat/schedule_types.h |   3 +
 platform/linux-generic/odp_schedule.c | 165 ++++++++++++++++++++-
 platform/linux-generic/odp_thread.c   |  25 +++-
 3 files changed, 186 insertions(+), 7 deletions(-)
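For context, a rough usage sketch of the group API this patch adds. It is
illustrative only: the pool, queue, and group names are made up, ODP is
assumed to be initialized on the calling thread, and queue creation follows
the three-argument signature used by this ODP release.

#include <string.h>
#include <odp.h>

/* Create a named group containing only the calling thread and attach a
 * scheduled queue to it, so only group members receive its events. */
static odp_queue_t make_group_queue(void)
{
        odp_thrmask_t mask;
        odp_schedule_group_t grp;
        odp_queue_param_t qparam;

        odp_thrmask_zero(&mask);
        odp_thrmask_set(&mask, odp_thread_id());

        grp = odp_schedule_group_create("grp_example", &mask);
        if (grp == ODP_SCHED_GROUP_INVALID)
                return ODP_QUEUE_INVALID;

        memset(&qparam, 0, sizeof(qparam));
        qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
        qparam.sched.sync  = ODP_SCHED_SYNC_NONE;
        qparam.sched.group = grp;

        return odp_queue_create("queue_example", ODP_QUEUE_TYPE_SCHED, &qparam);
}

Other threads can later be added with odp_schedule_group_join() and removed
with odp_schedule_group_leave(), as implemented in the patch above.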