Commit 6b97196b by Marcus Winter

mtapi_c: fixed handling of tasks executed during a wait operation

parent 36b43e9f
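Before this change, a task picked up while a thread was waiting inside a blocking operation was executed directly via embb_mtapi_task_execute in embb_mtapi_scheduler_execute_task_or_yield, bypassing the queue and group bookkeeping and the completion callback that the worker loop performs. The commit extracts that handling from the worker loop into a new helper, embb_mtapi_scheduler_execute_task, and routes both the worker loop and embb_mtapi_scheduler_execute_task_or_yield through it (see the scheduler .c and .h hunks below).

For orientation only, a minimal sketch of how a wait path could use the helper. The function name wait_for_task_sketch is made up for illustration, the required internal mtapi_c headers are assumed to be included, and real wait code would also have to handle error and cancellation states:

/* Illustrative sketch, not the actual wait implementation. */
static void wait_for_task_sketch(
  embb_mtapi_scheduler_t * scheduler,
  embb_mtapi_node_t * node,
  embb_mtapi_thread_context_t * thread_context,
  embb_mtapi_task_t * task) {
  /* spin until the task reports completion (error/cancel handling omitted;
     assumes task->state is an embb_atomic_int holding an mtapi_task_state_t) */
  while (MTAPI_TASK_COMPLETED != embb_atomic_load_int(&task->state)) {
    /* help the scheduler instead of just spinning; after this commit any task
       picked up here goes through embb_mtapi_scheduler_execute_task, so queue
       and group accounting and complete_func are no longer skipped */
    embb_mtapi_scheduler_execute_task_or_yield(scheduler, node, thread_context);
  }
}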
@@ -213,86 +213,12 @@ embb_mtapi_thread_context_t * embb_mtapi_scheduler_get_current_thread_context(
return context;
}
-void embb_mtapi_scheduler_execute_task_or_yield(
-  embb_mtapi_scheduler_t * that,
+mtapi_boolean_t embb_mtapi_scheduler_execute_task(
+  embb_mtapi_task_t * task,
embb_mtapi_node_t * node,
embb_mtapi_thread_context_t * thread_context) {
assert(MTAPI_NULL != that);
assert(MTAPI_NULL != node);
if (NULL != thread_context) {
embb_mtapi_task_t* new_task = embb_mtapi_scheduler_get_next_task(
that, node, thread_context);
/* if there was work, execute it */
if (MTAPI_NULL != new_task) {
embb_mtapi_task_context_t task_context;
embb_mtapi_task_context_initialize_with_thread_context_and_task(
&task_context, thread_context, new_task);
embb_mtapi_task_execute(new_task, &task_context);
} else {
embb_thread_yield();
}
} else {
embb_thread_yield();
}
}
embb_mtapi_scheduler_worker_func_t *
embb_mtapi_scheduler_worker_func(embb_mtapi_scheduler_t * that) {
EMBB_UNUSED(that);
assert(MTAPI_NULL != that);
/* Currently just returns embb_mtapi_scheduler_worker,
but could return any custom worker function, e.g. depending
on scheduler->mode.
*/
return &embb_mtapi_scheduler_worker;
}
int embb_mtapi_scheduler_worker(void * arg) {
embb_mtapi_thread_context_t * thread_context =
(embb_mtapi_thread_context_t*)arg;
embb_mtapi_task_context_t task_context;
embb_mtapi_node_t * node;
embb_duration_t sleep_duration;
int err;
int counter = 0;
embb_mtapi_log_trace(
"embb_mtapi_scheduler_worker() called for thread %d on core %d\n",
thread_context->worker_index, thread_context->core_num);
assert(MTAPI_NULL != thread_context);
err = embb_tss_create(&thread_context->tss_id);
if (EMBB_SUCCESS != err) {
/* report error to scheduler */
embb_atomic_store_int(&thread_context->run, -1);
return MTAPI_FALSE;
}
/* node is initialized here, otherwise the worker would not run */
node = thread_context->node;
embb_tss_set(&(thread_context->tss_id), thread_context);
embb_duration_set_milliseconds(&sleep_duration, 10);
/* signal that we're up & running */
embb_atomic_store_int(&thread_context->run, 1);
/* potentially wait for node to come up completely */
while (MTAPI_FALSE == embb_atomic_load_int(&node->is_scheduler_running)) {
embb_thread_yield();
}
/* do work while not requested to stop */
while (embb_atomic_load_int(&thread_context->run)) {
/* try to get work */
embb_mtapi_task_t * task = embb_mtapi_scheduler_get_next_task(
node->scheduler, node, thread_context);
/* check if there was work */
if (MTAPI_NULL != task) {
mtapi_boolean_t result = MTAPI_FALSE;
embb_mtapi_queue_t * local_queue = MTAPI_NULL;
embb_mtapi_group_t * local_group = MTAPI_NULL;
embb_mtapi_action_t * local_action = MTAPI_NULL;
@@ -333,7 +259,7 @@ int embb_mtapi_scheduler_worker(void * arg) {
embb_mtapi_queue_task_finished(local_queue);
}
}
-counter = 0;
+result = MTAPI_TRUE;
break;
case MTAPI_TASK_RETAINED:
@@ -379,6 +305,89 @@ int embb_mtapi_scheduler_worker(void * arg) {
if (MTAPI_NULL != task->attributes.complete_func) {
task->attributes.complete_func(task->handle, MTAPI_NULL);
}
return result;
}
void embb_mtapi_scheduler_execute_task_or_yield(
embb_mtapi_scheduler_t * that,
embb_mtapi_node_t * node,
embb_mtapi_thread_context_t * thread_context) {
assert(MTAPI_NULL != that);
assert(MTAPI_NULL != node);
if (NULL != thread_context) {
embb_mtapi_task_t* new_task = embb_mtapi_scheduler_get_next_task(
that, node, thread_context);
/* if there was work, execute it */
if (MTAPI_NULL != new_task) {
embb_mtapi_scheduler_execute_task(new_task, node, thread_context);
} else {
embb_thread_yield();
}
} else {
embb_thread_yield();
}
}
embb_mtapi_scheduler_worker_func_t *
embb_mtapi_scheduler_worker_func(embb_mtapi_scheduler_t * that) {
EMBB_UNUSED(that);
assert(MTAPI_NULL != that);
/* Currently just returns embb_mtapi_scheduler_worker,
but could return any custom worker function, e.g. depending
on scheduler->mode.
*/
return &embb_mtapi_scheduler_worker;
}
int embb_mtapi_scheduler_worker(void * arg) {
embb_mtapi_thread_context_t * thread_context =
(embb_mtapi_thread_context_t*)arg;
embb_mtapi_node_t * node;
embb_duration_t sleep_duration;
int err;
int counter = 0;
embb_mtapi_log_trace(
"embb_mtapi_scheduler_worker() called for thread %d on core %d\n",
thread_context->worker_index, thread_context->core_num);
assert(MTAPI_NULL != thread_context);
err = embb_tss_create(&thread_context->tss_id);
if (EMBB_SUCCESS != err) {
/* report error to scheduler */
embb_atomic_store_int(&thread_context->run, -1);
return MTAPI_FALSE;
}
/* node is initialized here, otherwise the worker would not run */
node = thread_context->node;
embb_tss_set(&(thread_context->tss_id), thread_context);
embb_duration_set_milliseconds(&sleep_duration, 10);
/* signal that we're up & running */
embb_atomic_store_int(&thread_context->run, 1);
/* potentially wait for node to come up completely */
while (MTAPI_FALSE == embb_atomic_load_int(&node->is_scheduler_running)) {
embb_thread_yield();
}
/* do work while not requested to stop */
while (embb_atomic_load_int(&thread_context->run)) {
/* try to get work */
embb_mtapi_task_t * task = embb_mtapi_scheduler_get_next_task(
node->scheduler, node, thread_context);
/* check if there was work */
if (MTAPI_NULL != task) {
if (embb_mtapi_scheduler_execute_task(task, node, thread_context)) {
counter = 0;
}
} else if (counter < 1024) {
/* spin and yield for a while before going to sleep */
embb_thread_yield();
......
@@ -138,6 +138,15 @@ embb_mtapi_thread_context_t * embb_mtapi_scheduler_get_current_thread_context(
embb_mtapi_scheduler_t * that);
/**
* Executes the given task if the thread context is valid.
* \memberof embb_mtapi_scheduler_struct
*/
mtapi_boolean_t embb_mtapi_scheduler_execute_task(
embb_mtapi_task_t * task,
embb_mtapi_node_t * node,
embb_mtapi_thread_context_t * thread_context);
/**
* Fetches and executes a single task if the thread context is valid,
* yields otherwise.
* \memberof embb_mtapi_scheduler_struct
......
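The new embb_mtapi_scheduler_execute_task returns an mtapi_boolean_t; in the worker loop above the return value is used to reset the idle counter, so MTAPI_TRUE appears to indicate that the task was actually processed in this call rather than retained. A hypothetical caller, purely for illustration (run_one_task_sketch is not part of the code base, and the internal mtapi_c headers are assumed to be included):

/* Hypothetical caller: run one known task in the current thread context and
   yield if it could not be processed here (e.g. because it was retained). */
static void run_one_task_sketch(
  embb_mtapi_task_t * task,
  embb_mtapi_node_t * node,
  embb_mtapi_thread_context_t * thread_context) {
  mtapi_boolean_t processed =
    embb_mtapi_scheduler_execute_task(task, node, thread_context);
  if (MTAPI_FALSE == processed) {
    embb_thread_yield();
  }
}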