Message ID | 1447129211-9095-9-git-send-email-bill.fischofer@linaro.org |
---|---|
State | Superseded |
Headers | show |
Nice test. It would be good to have the same test but w/o ordered queues. That could help to narrow down issues in fewer steps. On 10.11.15 06:20, Bill Fischofer wrote: > Add a "chaos" test variant to the scheduler CUnit tests. This test > stresses the scheduler by circulating events among parallel, atomic, > and ordered queues to verify that the scheduler can handle arbitrary > looping paths without deadlock. > > Suggested-by: Carl Wallen <carl.wallen@nokia.com> > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > --- > test/validation/scheduler/scheduler.c | 192 ++++++++++++++++++++++++++++++++++ > test/validation/scheduler/scheduler.h | 1 + > 2 files changed, 193 insertions(+) > > diff --git a/test/validation/scheduler/scheduler.c b/test/validation/scheduler/scheduler.c > index 042d7b4..c483fdd 100644 > --- a/test/validation/scheduler/scheduler.c > +++ b/test/validation/scheduler/scheduler.c > @@ -39,6 +39,12 @@ > #define MAGIC1 0xdeadbeef > #define MAGIC2 0xcafef00d > > +#define CHAOS_NUM_QUEUES 6 > +#define CHAOS_NUM_BUFS_PER_QUEUE 6 > +#define CHAOS_NUM_ROUNDS 50000 > +#define CHAOS_NUM_EVENTS (CHAOS_NUM_QUEUES * CHAOS_NUM_BUFS_PER_QUEUE) > +#define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000) > + > /* Test global variables */ > typedef struct { > int num_workers; > @@ -47,6 +53,11 @@ typedef struct { > int buf_count_cpy; > odp_ticketlock_t lock; > odp_spinlock_t atomic_lock; > + struct { > + odp_queue_t handle; > + char name[ODP_QUEUE_NAME_LEN]; > + } chaos_q[CHAOS_NUM_QUEUES]; > + int chaos_pending_event_count; > } test_globals_t; > > typedef struct { > @@ -74,6 +85,11 @@ typedef struct { > uint64_t lock_sequence[ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE]; > } queue_context; > > +typedef struct { > + uint64_t evno; > + uint64_t seqno; > +} chaos_buf; > + > odp_pool_t pool; > odp_pool_t queue_ctx_pool; > > @@ -381,6 +397,181 @@ void scheduler_test_groups(void) > CU_ASSERT_FATAL(odp_pool_destroy(p) == 0); > } > > +static void *chaos_thread(void *arg) > +{ > + uint64_t i; > + int rc; 
> + chaos_buf *cbuf; > + odp_event_t ev; > + odp_queue_t from; > + thread_args_t *args = (thread_args_t *)arg; > + test_globals_t *globals = args->globals; > + int me = odp_thread_id(); > + > + if (CHAOS_DEBUG) > + printf("Chaos thread %d starting...\n", me); > + > + /* Wait for all threads to start */ > + odp_barrier_wait(&globals->barrier); > + > + /* Run the test */ > + for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) { > + ev = odp_schedule(&from, ODP_SCHED_WAIT); > + CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); > + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); > + CU_ASSERT_FATAL(cbuf != NULL); > + if (CHAOS_DEBUG) > + printf("Thread %d received event %lu seq %lu " > + "from Q %s, sending to Q %s\n", > + me, cbuf->evno, cbuf->seqno, > + globals-> > + chaos_q[(uint64_t)odp_queue_context(from)].name, > + globals-> > + chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name); > + > + rc = odp_queue_enq( > + globals-> > + chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle, > + ev); > + CU_ASSERT(rc == 0); > + } > + > + if (CHAOS_DEBUG) > + printf("Thread %d completed %d rounds...terminating\n", > + odp_thread_id(), CHAOS_NUM_EVENTS); > + > + /* Thread complete--drain locally cached scheduled events */ > + odp_schedule_pause(); > + > + while (globals->chaos_pending_event_count > 0) { > + ev = odp_schedule(&from, ODP_SCHED_NO_WAIT); > + if (ev == ODP_EVENT_INVALID) > + break; > + globals->chaos_pending_event_count--; > + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); > + if (CHAOS_DEBUG) > + printf("Thread %d drained event %lu seq %lu " > + "from Q %s\n", > + odp_thread_id(), cbuf->evno, cbuf->seqno, > + globals-> > + chaos_q[(uint64_t)odp_queue_context(from)].name); > + odp_event_free(ev); > + } > + > + return NULL; > +} > + > +void scheduler_test_chaos(void) > +{ > + odp_pool_t pool; > + odp_pool_param_t params; > + odp_queue_param_t qp; > + odp_buffer_t buf; > + chaos_buf *cbuf; > + odp_event_t ev; > + test_globals_t *globals; > + thread_args_t *args; > + 
odp_shm_t shm; > + odp_queue_t from; > + int i, rc; > + odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE, > + ODP_SCHED_SYNC_ATOMIC, > + ODP_SCHED_SYNC_ORDERED}; > + const unsigned num_sync = (sizeof(sync) / sizeof(sync[0])); > + const char *const qtypes[] = {"parallel", "atomic", "ordered"}; > + > + /* Set up the scheduling environment */ > + shm = odp_shm_lookup(GLOBALS_SHM_NAME); > + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); > + globals = odp_shm_addr(shm); > + CU_ASSERT_PTR_NOT_NULL_FATAL(shm); > + > + shm = odp_shm_lookup(SHM_THR_ARGS_NAME); > + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); > + args = odp_shm_addr(shm); > + CU_ASSERT_PTR_NOT_NULL_FATAL(args); > + > + args->globals = globals; > + args->cu_thr.numthrds = globals->num_workers; > + > + odp_queue_param_init(&qp); > + odp_pool_param_init(¶ms); > + params.buf.size = sizeof(chaos_buf); > + params.buf.align = 0; > + params.buf.num = CHAOS_NUM_EVENTS; > + params.type = ODP_POOL_BUFFER; > + > + pool = odp_pool_create("sched_chaos_pool", ¶ms); > + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); > + qp.sched.prio = ODP_SCHED_PRIO_DEFAULT; > + > + for (i = 0; i < CHAOS_NUM_QUEUES; i++) { > + qp.sched.sync = sync[i % num_sync]; > + snprintf(globals->chaos_q[i].name, > + sizeof(globals->chaos_q[i].name), > + "chaos queue %d - %s", i, > + qtypes[i % num_sync]); > + globals->chaos_q[i].handle = > + odp_queue_create(globals->chaos_q[i].name, > + ODP_QUEUE_TYPE_SCHED, > + &qp); > + CU_ASSERT_FATAL(globals->chaos_q[i].handle != > + ODP_QUEUE_INVALID); > + rc = odp_queue_context_set(globals->chaos_q[i].handle, > + (void *)(uint64_t)i); > + CU_ASSERT_FATAL(rc == 0); > + } > + > + /* Now populate the queues with the initial seed elements */ > + for (i = 0; i < CHAOS_NUM_EVENTS; i++) { > + buf = odp_buffer_alloc(pool); > + CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID); > + cbuf = odp_buffer_addr(buf); > + cbuf->evno = i; > + cbuf->seqno = 0; > + rc = odp_queue_enq( > + globals->chaos_q[i % CHAOS_NUM_QUEUES].handle, > + 
odp_buffer_to_event(buf)); > + CU_ASSERT_FATAL(rc == 0); > + globals->chaos_pending_event_count++; > + } > + > + /* Run the test */ > + odp_cunit_thread_create(chaos_thread, &args->cu_thr); > + odp_cunit_thread_exit(&args->cu_thr); > + > + if (CHAOS_DEBUG) > + printf("Thread %d returning from chaos threads..cleaning up\n", > + odp_thread_id()); > + > + /* Cleanup: Drain queues, free events */ > + while (globals->chaos_pending_event_count-- > 0) { > + ev = odp_schedule(&from, ODP_SCHED_WAIT); > + CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); > + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); > + if (CHAOS_DEBUG) > + printf("Draining event %lu seq %lu from Q %s...\n", > + cbuf->evno, > + cbuf->seqno, > + globals-> > + chaos_q[(uint64_t)odp_queue_context(from)].name); > + odp_event_free(ev); > + } > + > + odp_schedule_release_ordered(); > + > + for (i = 0; i < CHAOS_NUM_QUEUES; i++) { > + if (CHAOS_DEBUG) > + printf("Destroying queue %s\n", > + globals->chaos_q[i].name); > + rc = odp_queue_destroy(globals->chaos_q[i].handle); > + CU_ASSERT(rc == 0); > + } > + > + rc = odp_pool_destroy(pool); > + CU_ASSERT(rc == 0); > +} > + > static void *schedule_common_(void *arg) > { > thread_args_t *args = (thread_args_t *)arg; > @@ -1265,6 +1456,7 @@ odp_testinfo_t scheduler_suite[] = { > ODP_TEST_INFO(scheduler_test_num_prio), > ODP_TEST_INFO(scheduler_test_queue_destroy), > ODP_TEST_INFO(scheduler_test_groups), > + ODP_TEST_INFO(scheduler_test_chaos), > ODP_TEST_INFO(scheduler_test_1q_1t_n), > ODP_TEST_INFO(scheduler_test_1q_1t_a), > ODP_TEST_INFO(scheduler_test_1q_1t_o), > diff --git a/test/validation/scheduler/scheduler.h b/test/validation/scheduler/scheduler.h > index c869e41..bba79aa 100644 > --- a/test/validation/scheduler/scheduler.h > +++ b/test/validation/scheduler/scheduler.h > @@ -14,6 +14,7 @@ void scheduler_test_wait_time(void); > void scheduler_test_num_prio(void); > void scheduler_test_queue_destroy(void); > void scheduler_test_groups(void); > +void 
scheduler_test_chaos(void); > void scheduler_test_1q_1t_n(void); > void scheduler_test_1q_1t_a(void); > void scheduler_test_1q_1t_o(void); >
Feel free to enhance it further :) On Tuesday, November 10, 2015, Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org> wrote: > nice test > it good to have same but w/o ordered queues. > It can allow to figure out issues with less step. > > On 10.11.15 06:20, Bill Fischofer wrote: > >> Add a "chaos" test variant to the scheduler CUnit tests. This test >> stresses the scheduler by circulating events among parallel, atomic, >> and ordered queues to verify that the scheduler can handle arbitrary >> looping paths without deadlock. >> >> Suggested-by: Carl Wallen <carl.wallen@nokia.com> >> Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> >> --- >> test/validation/scheduler/scheduler.c | 192 >> ++++++++++++++++++++++++++++++++++ >> test/validation/scheduler/scheduler.h | 1 + >> 2 files changed, 193 insertions(+) >> >> diff --git a/test/validation/scheduler/scheduler.c >> b/test/validation/scheduler/scheduler.c >> index 042d7b4..c483fdd 100644 >> --- a/test/validation/scheduler/scheduler.c >> +++ b/test/validation/scheduler/scheduler.c >> @@ -39,6 +39,12 @@ >> #define MAGIC1 0xdeadbeef >> #define MAGIC2 0xcafef00d >> >> +#define CHAOS_NUM_QUEUES 6 >> +#define CHAOS_NUM_BUFS_PER_QUEUE 6 >> +#define CHAOS_NUM_ROUNDS 50000 >> +#define CHAOS_NUM_EVENTS (CHAOS_NUM_QUEUES * CHAOS_NUM_BUFS_PER_QUEUE) >> +#define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000) >> + >> /* Test global variables */ >> typedef struct { >> int num_workers; >> @@ -47,6 +53,11 @@ typedef struct { >> int buf_count_cpy; >> odp_ticketlock_t lock; >> odp_spinlock_t atomic_lock; >> + struct { >> + odp_queue_t handle; >> + char name[ODP_QUEUE_NAME_LEN]; >> + } chaos_q[CHAOS_NUM_QUEUES]; >> + int chaos_pending_event_count; >> } test_globals_t; >> >> typedef struct { >> @@ -74,6 +85,11 @@ typedef struct { >> uint64_t lock_sequence[ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE]; >> } queue_context; >> >> +typedef struct { >> + uint64_t evno; >> + uint64_t seqno; >> +} chaos_buf; >> + >> odp_pool_t pool; >> odp_pool_t 
queue_ctx_pool; >> >> @@ -381,6 +397,181 @@ void scheduler_test_groups(void) >> CU_ASSERT_FATAL(odp_pool_destroy(p) == 0); >> } >> >> +static void *chaos_thread(void *arg) >> +{ >> + uint64_t i; >> + int rc; >> + chaos_buf *cbuf; >> + odp_event_t ev; >> + odp_queue_t from; >> + thread_args_t *args = (thread_args_t *)arg; >> + test_globals_t *globals = args->globals; >> + int me = odp_thread_id(); >> + >> + if (CHAOS_DEBUG) >> + printf("Chaos thread %d starting...\n", me); >> + >> + /* Wait for all threads to start */ >> + odp_barrier_wait(&globals->barrier); >> + >> + /* Run the test */ >> + for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) { >> + ev = odp_schedule(&from, ODP_SCHED_WAIT); >> + CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); >> + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); >> + CU_ASSERT_FATAL(cbuf != NULL); >> + if (CHAOS_DEBUG) >> + printf("Thread %d received event %lu seq %lu " >> + "from Q %s, sending to Q %s\n", >> + me, cbuf->evno, cbuf->seqno, >> + globals-> >> + >> chaos_q[(uint64_t)odp_queue_context(from)].name, >> + globals-> >> + chaos_q[cbuf->seqno % >> CHAOS_NUM_QUEUES].name); >> + >> + rc = odp_queue_enq( >> + globals-> >> + chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle, >> + ev); >> + CU_ASSERT(rc == 0); >> + } >> + >> + if (CHAOS_DEBUG) >> + printf("Thread %d completed %d rounds...terminating\n", >> + odp_thread_id(), CHAOS_NUM_EVENTS); >> + >> + /* Thread complete--drain locally cached scheduled events */ >> + odp_schedule_pause(); >> + >> + while (globals->chaos_pending_event_count > 0) { >> + ev = odp_schedule(&from, ODP_SCHED_NO_WAIT); >> + if (ev == ODP_EVENT_INVALID) >> + break; >> + globals->chaos_pending_event_count--; >> + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); >> + if (CHAOS_DEBUG) >> + printf("Thread %d drained event %lu seq %lu " >> + "from Q %s\n", >> + odp_thread_id(), cbuf->evno, cbuf->seqno, >> + globals-> >> + >> chaos_q[(uint64_t)odp_queue_context(from)].name); >> + odp_event_free(ev); >> + } 
>> + >> + return NULL; >> +} >> + >> +void scheduler_test_chaos(void) >> +{ >> + odp_pool_t pool; >> + odp_pool_param_t params; >> + odp_queue_param_t qp; >> + odp_buffer_t buf; >> + chaos_buf *cbuf; >> + odp_event_t ev; >> + test_globals_t *globals; >> + thread_args_t *args; >> + odp_shm_t shm; >> + odp_queue_t from; >> + int i, rc; >> + odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE, >> + ODP_SCHED_SYNC_ATOMIC, >> + ODP_SCHED_SYNC_ORDERED}; >> + const unsigned num_sync = (sizeof(sync) / sizeof(sync[0])); >> + const char *const qtypes[] = {"parallel", "atomic", "ordered"}; >> + >> + /* Set up the scheduling environment */ >> + shm = odp_shm_lookup(GLOBALS_SHM_NAME); >> + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); >> + globals = odp_shm_addr(shm); >> + CU_ASSERT_PTR_NOT_NULL_FATAL(shm); >> + >> + shm = odp_shm_lookup(SHM_THR_ARGS_NAME); >> + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); >> + args = odp_shm_addr(shm); >> + CU_ASSERT_PTR_NOT_NULL_FATAL(args); >> + >> + args->globals = globals; >> + args->cu_thr.numthrds = globals->num_workers; >> + >> + odp_queue_param_init(&qp); >> + odp_pool_param_init(¶ms); >> + params.buf.size = sizeof(chaos_buf); >> + params.buf.align = 0; >> + params.buf.num = CHAOS_NUM_EVENTS; >> + params.type = ODP_POOL_BUFFER; >> + >> + pool = odp_pool_create("sched_chaos_pool", ¶ms); >> + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); >> + qp.sched.prio = ODP_SCHED_PRIO_DEFAULT; >> + >> + for (i = 0; i < CHAOS_NUM_QUEUES; i++) { >> + qp.sched.sync = sync[i % num_sync]; >> + snprintf(globals->chaos_q[i].name, >> + sizeof(globals->chaos_q[i].name), >> + "chaos queue %d - %s", i, >> + qtypes[i % num_sync]); >> + globals->chaos_q[i].handle = >> + odp_queue_create(globals->chaos_q[i].name, >> + ODP_QUEUE_TYPE_SCHED, >> + &qp); >> + CU_ASSERT_FATAL(globals->chaos_q[i].handle != >> + ODP_QUEUE_INVALID); >> + rc = odp_queue_context_set(globals->chaos_q[i].handle, >> + (void *)(uint64_t)i); >> + CU_ASSERT_FATAL(rc == 0); >> + } >> + >> + /* Now populate the 
queues with the initial seed elements */ >> + for (i = 0; i < CHAOS_NUM_EVENTS; i++) { >> + buf = odp_buffer_alloc(pool); >> + CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID); >> + cbuf = odp_buffer_addr(buf); >> + cbuf->evno = i; >> + cbuf->seqno = 0; >> + rc = odp_queue_enq( >> + globals->chaos_q[i % CHAOS_NUM_QUEUES].handle, >> + odp_buffer_to_event(buf)); >> + CU_ASSERT_FATAL(rc == 0); >> + globals->chaos_pending_event_count++; >> + } >> + >> + /* Run the test */ >> + odp_cunit_thread_create(chaos_thread, &args->cu_thr); >> + odp_cunit_thread_exit(&args->cu_thr); >> + >> + if (CHAOS_DEBUG) >> + printf("Thread %d returning from chaos threads..cleaning >> up\n", >> + odp_thread_id()); >> + >> + /* Cleanup: Drain queues, free events */ >> + while (globals->chaos_pending_event_count-- > 0) { >> + ev = odp_schedule(&from, ODP_SCHED_WAIT); >> + CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); >> + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); >> + if (CHAOS_DEBUG) >> + printf("Draining event %lu seq %lu from Q >> %s...\n", >> + cbuf->evno, >> + cbuf->seqno, >> + globals-> >> + >> chaos_q[(uint64_t)odp_queue_context(from)].name); >> + odp_event_free(ev); >> + } >> + >> + odp_schedule_release_ordered(); >> + >> + for (i = 0; i < CHAOS_NUM_QUEUES; i++) { >> + if (CHAOS_DEBUG) >> + printf("Destroying queue %s\n", >> + globals->chaos_q[i].name); >> + rc = odp_queue_destroy(globals->chaos_q[i].handle); >> + CU_ASSERT(rc == 0); >> + } >> + >> + rc = odp_pool_destroy(pool); >> + CU_ASSERT(rc == 0); >> +} >> + >> static void *schedule_common_(void *arg) >> { >> thread_args_t *args = (thread_args_t *)arg; >> @@ -1265,6 +1456,7 @@ odp_testinfo_t scheduler_suite[] = { >> ODP_TEST_INFO(scheduler_test_num_prio), >> ODP_TEST_INFO(scheduler_test_queue_destroy), >> ODP_TEST_INFO(scheduler_test_groups), >> + ODP_TEST_INFO(scheduler_test_chaos), >> ODP_TEST_INFO(scheduler_test_1q_1t_n), >> ODP_TEST_INFO(scheduler_test_1q_1t_a), >> ODP_TEST_INFO(scheduler_test_1q_1t_o), >> diff --git 
a/test/validation/scheduler/scheduler.h >> b/test/validation/scheduler/scheduler.h >> index c869e41..bba79aa 100644 >> --- a/test/validation/scheduler/scheduler.h >> +++ b/test/validation/scheduler/scheduler.h >> @@ -14,6 +14,7 @@ void scheduler_test_wait_time(void); >> void scheduler_test_num_prio(void); >> void scheduler_test_queue_destroy(void); >> void scheduler_test_groups(void); >> +void scheduler_test_chaos(void); >> void scheduler_test_1q_1t_n(void); >> void scheduler_test_1q_1t_a(void); >> void scheduler_test_1q_1t_o(void); >> >> > -- > Regards, > Ivan Khoronzhuk >
diff --git a/test/validation/scheduler/scheduler.c b/test/validation/scheduler/scheduler.c index 042d7b4..c483fdd 100644 --- a/test/validation/scheduler/scheduler.c +++ b/test/validation/scheduler/scheduler.c @@ -39,6 +39,12 @@ #define MAGIC1 0xdeadbeef #define MAGIC2 0xcafef00d +#define CHAOS_NUM_QUEUES 6 +#define CHAOS_NUM_BUFS_PER_QUEUE 6 +#define CHAOS_NUM_ROUNDS 50000 +#define CHAOS_NUM_EVENTS (CHAOS_NUM_QUEUES * CHAOS_NUM_BUFS_PER_QUEUE) +#define CHAOS_DEBUG (CHAOS_NUM_ROUNDS < 1000) + /* Test global variables */ typedef struct { int num_workers; @@ -47,6 +53,11 @@ typedef struct { int buf_count_cpy; odp_ticketlock_t lock; odp_spinlock_t atomic_lock; + struct { + odp_queue_t handle; + char name[ODP_QUEUE_NAME_LEN]; + } chaos_q[CHAOS_NUM_QUEUES]; + int chaos_pending_event_count; } test_globals_t; typedef struct { @@ -74,6 +85,11 @@ typedef struct { uint64_t lock_sequence[ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE]; } queue_context; +typedef struct { + uint64_t evno; + uint64_t seqno; +} chaos_buf; + odp_pool_t pool; odp_pool_t queue_ctx_pool; @@ -381,6 +397,181 @@ void scheduler_test_groups(void) CU_ASSERT_FATAL(odp_pool_destroy(p) == 0); } +static void *chaos_thread(void *arg) +{ + uint64_t i; + int rc; + chaos_buf *cbuf; + odp_event_t ev; + odp_queue_t from; + thread_args_t *args = (thread_args_t *)arg; + test_globals_t *globals = args->globals; + int me = odp_thread_id(); + + if (CHAOS_DEBUG) + printf("Chaos thread %d starting...\n", me); + + /* Wait for all threads to start */ + odp_barrier_wait(&globals->barrier); + + /* Run the test */ + for (i = 0; i < CHAOS_NUM_ROUNDS * CHAOS_NUM_EVENTS; i++) { + ev = odp_schedule(&from, ODP_SCHED_WAIT); + CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); + CU_ASSERT_FATAL(cbuf != NULL); + if (CHAOS_DEBUG) + printf("Thread %d received event %lu seq %lu " + "from Q %s, sending to Q %s\n", + me, cbuf->evno, cbuf->seqno, + globals-> + 
chaos_q[(uint64_t)odp_queue_context(from)].name, + globals-> + chaos_q[cbuf->seqno % CHAOS_NUM_QUEUES].name); + + rc = odp_queue_enq( + globals-> + chaos_q[cbuf->seqno++ % CHAOS_NUM_QUEUES].handle, + ev); + CU_ASSERT(rc == 0); + } + + if (CHAOS_DEBUG) + printf("Thread %d completed %d rounds...terminating\n", + odp_thread_id(), CHAOS_NUM_EVENTS); + + /* Thread complete--drain locally cached scheduled events */ + odp_schedule_pause(); + + while (globals->chaos_pending_event_count > 0) { + ev = odp_schedule(&from, ODP_SCHED_NO_WAIT); + if (ev == ODP_EVENT_INVALID) + break; + globals->chaos_pending_event_count--; + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); + if (CHAOS_DEBUG) + printf("Thread %d drained event %lu seq %lu " + "from Q %s\n", + odp_thread_id(), cbuf->evno, cbuf->seqno, + globals-> + chaos_q[(uint64_t)odp_queue_context(from)].name); + odp_event_free(ev); + } + + return NULL; +} + +void scheduler_test_chaos(void) +{ + odp_pool_t pool; + odp_pool_param_t params; + odp_queue_param_t qp; + odp_buffer_t buf; + chaos_buf *cbuf; + odp_event_t ev; + test_globals_t *globals; + thread_args_t *args; + odp_shm_t shm; + odp_queue_t from; + int i, rc; + odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE, + ODP_SCHED_SYNC_ATOMIC, + ODP_SCHED_SYNC_ORDERED}; + const unsigned num_sync = (sizeof(sync) / sizeof(sync[0])); + const char *const qtypes[] = {"parallel", "atomic", "ordered"}; + + /* Set up the scheduling environment */ + shm = odp_shm_lookup(GLOBALS_SHM_NAME); + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); + globals = odp_shm_addr(shm); + CU_ASSERT_PTR_NOT_NULL_FATAL(shm); + + shm = odp_shm_lookup(SHM_THR_ARGS_NAME); + CU_ASSERT_FATAL(shm != ODP_SHM_INVALID); + args = odp_shm_addr(shm); + CU_ASSERT_PTR_NOT_NULL_FATAL(args); + + args->globals = globals; + args->cu_thr.numthrds = globals->num_workers; + + odp_queue_param_init(&qp); + odp_pool_param_init(¶ms); + params.buf.size = sizeof(chaos_buf); + params.buf.align = 0; + params.buf.num = CHAOS_NUM_EVENTS; + 
params.type = ODP_POOL_BUFFER; + + pool = odp_pool_create("sched_chaos_pool", ¶ms); + CU_ASSERT_FATAL(pool != ODP_POOL_INVALID); + qp.sched.prio = ODP_SCHED_PRIO_DEFAULT; + + for (i = 0; i < CHAOS_NUM_QUEUES; i++) { + qp.sched.sync = sync[i % num_sync]; + snprintf(globals->chaos_q[i].name, + sizeof(globals->chaos_q[i].name), + "chaos queue %d - %s", i, + qtypes[i % num_sync]); + globals->chaos_q[i].handle = + odp_queue_create(globals->chaos_q[i].name, + ODP_QUEUE_TYPE_SCHED, + &qp); + CU_ASSERT_FATAL(globals->chaos_q[i].handle != + ODP_QUEUE_INVALID); + rc = odp_queue_context_set(globals->chaos_q[i].handle, + (void *)(uint64_t)i); + CU_ASSERT_FATAL(rc == 0); + } + + /* Now populate the queues with the initial seed elements */ + for (i = 0; i < CHAOS_NUM_EVENTS; i++) { + buf = odp_buffer_alloc(pool); + CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID); + cbuf = odp_buffer_addr(buf); + cbuf->evno = i; + cbuf->seqno = 0; + rc = odp_queue_enq( + globals->chaos_q[i % CHAOS_NUM_QUEUES].handle, + odp_buffer_to_event(buf)); + CU_ASSERT_FATAL(rc == 0); + globals->chaos_pending_event_count++; + } + + /* Run the test */ + odp_cunit_thread_create(chaos_thread, &args->cu_thr); + odp_cunit_thread_exit(&args->cu_thr); + + if (CHAOS_DEBUG) + printf("Thread %d returning from chaos threads..cleaning up\n", + odp_thread_id()); + + /* Cleanup: Drain queues, free events */ + while (globals->chaos_pending_event_count-- > 0) { + ev = odp_schedule(&from, ODP_SCHED_WAIT); + CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); + cbuf = odp_buffer_addr(odp_buffer_from_event(ev)); + if (CHAOS_DEBUG) + printf("Draining event %lu seq %lu from Q %s...\n", + cbuf->evno, + cbuf->seqno, + globals-> + chaos_q[(uint64_t)odp_queue_context(from)].name); + odp_event_free(ev); + } + + odp_schedule_release_ordered(); + + for (i = 0; i < CHAOS_NUM_QUEUES; i++) { + if (CHAOS_DEBUG) + printf("Destroying queue %s\n", + globals->chaos_q[i].name); + rc = odp_queue_destroy(globals->chaos_q[i].handle); + CU_ASSERT(rc == 0); + } + 
+ rc = odp_pool_destroy(pool); + CU_ASSERT(rc == 0); +} + static void *schedule_common_(void *arg) { thread_args_t *args = (thread_args_t *)arg; @@ -1265,6 +1456,7 @@ odp_testinfo_t scheduler_suite[] = { ODP_TEST_INFO(scheduler_test_num_prio), ODP_TEST_INFO(scheduler_test_queue_destroy), ODP_TEST_INFO(scheduler_test_groups), + ODP_TEST_INFO(scheduler_test_chaos), ODP_TEST_INFO(scheduler_test_1q_1t_n), ODP_TEST_INFO(scheduler_test_1q_1t_a), ODP_TEST_INFO(scheduler_test_1q_1t_o), diff --git a/test/validation/scheduler/scheduler.h b/test/validation/scheduler/scheduler.h index c869e41..bba79aa 100644 --- a/test/validation/scheduler/scheduler.h +++ b/test/validation/scheduler/scheduler.h @@ -14,6 +14,7 @@ void scheduler_test_wait_time(void); void scheduler_test_num_prio(void); void scheduler_test_queue_destroy(void); void scheduler_test_groups(void); +void scheduler_test_chaos(void); void scheduler_test_1q_1t_n(void); void scheduler_test_1q_1t_a(void); void scheduler_test_1q_1t_o(void);
Add a "chaos" test variant to the scheduler CUnit tests. This test stresses the scheduler by circulating events among parallel, atomic, and ordered queues to verify that the scheduler can handle arbitrary looping paths without deadlock. Suggested-by: Carl Wallen <carl.wallen@nokia.com> Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> --- test/validation/scheduler/scheduler.c | 192 ++++++++++++++++++++++++++++++++++ test/validation/scheduler/scheduler.h | 1 + 2 files changed, 193 insertions(+)