Commit e9783817 by Marcus Winter

Merge branch 'embb613_reuse_main_thread' into embb620_detached_tasks

parents bcd0ad73 6b97196b
@@ -46,6 +46,9 @@ class Scheduler {
    int process_id,
    Action & action,
    embb::mtapi::ExecutionPolicy const & policy) = 0;
  virtual void Run(
    Action & action,
    embb::mtapi::ExecutionPolicy const & policy) = 0;
  virtual void WaitForSlice(int slice) = 0;
  virtual int GetSlices() = 0;
};
...
@@ -105,6 +105,16 @@ class SchedulerMTAPI : public Scheduler {
    group_[idx].Start(job_, &action, static_cast<void*>(NULL),
      task_attr);
  }
  virtual void Run(
    Action & action,
    embb::mtapi::ExecutionPolicy const & policy) {
    embb::mtapi::Node & node = embb::mtapi::Node::GetInstance();
    embb::mtapi::TaskAttributes task_attr;
    task_attr.SetPolicy(policy);
    embb::mtapi::Task task = node.Start(job_, &action, static_cast<void*>(NULL),
      task_attr);
    task.Wait();
  }
  virtual void Enqueue(
    int process_id,
    Action & action,
...
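Note: the new Run overload is the synchronous counterpart to Start and Enqueue; it launches a single MTAPI task and blocks on Task::Wait(). Below is a minimal sketch of the same start-then-wait pattern against the public MTAPI C++ API, assuming an initialized node and a registered job; the JOB_ID value, the umbrella header name, and the single-argument GetJob overload are assumptions, not taken from this commit:

#include <embb/mtapi/mtapi.h>

static const mtapi_job_id_t JOB_ID = 42;  // hypothetical job id

void RunSynchronously(void * arguments) {
  embb::mtapi::Node & node = embb::mtapi::Node::GetInstance();
  embb::mtapi::Job job = node.GetJob(JOB_ID);  // job must have an action attached
  embb::mtapi::TaskAttributes task_attr;
  task_attr.SetPolicy(embb::mtapi::ExecutionPolicy());
  // Start a single task and block until it has finished, the same
  // pattern SchedulerMTAPI::Run uses above.
  embb::mtapi::Task task =
    node.Start(job, arguments, static_cast<void*>(NULL), task_attr);
  task.Wait();
}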
@@ -43,6 +43,11 @@ class SchedulerSequential : public Scheduler {
    embb::mtapi::ExecutionPolicy const &) {
    action.RunSequential();
  }
  virtual void Run(
    Action & action,
    embb::mtapi::ExecutionPolicy const &) {
    action.RunSequential();
  }
  virtual void Enqueue(
    int,
    Action & action,
...
@@ -32,6 +32,7 @@
#include <embb/dataflow/internal/node.h>
#include <embb/dataflow/internal/outputs.h>
#include <embb/dataflow/internal/source_executor.h>
#include <embb/dataflow/internal/action.h>

namespace embb {
namespace dataflow {
@@ -67,7 +68,8 @@ class Source< Outputs<O1, O2, O3, O4, O5> >
  virtual bool Start(int clock) {
    if (not_done_) {
      Run(clock);
      Action act(this, clock);
      sched_->Run(act, embb::mtapi::ExecutionPolicy());
    }
    return not_done_;
  }
...
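Note: Start no longer executes the source body inline via Run(clock); it wraps the node in an Action and submits it through the installed scheduler, so the same source runs inline under SchedulerSequential and as a waited-on MTAPI task under SchedulerMTAPI. A stripped-down, self-contained illustration of that dispatch follows; all names are hypothetical stand-ins, not EMBB types:

#include <iostream>

class MyAction {
 public:
  explicit MyAction(int clock) : clock_(clock) {}
  void RunSequential() { std::cout << "tick " << clock_ << std::endl; }
 private:
  int clock_;
};

class MyScheduler {
 public:
  virtual ~MyScheduler() {}
  // Counterpart of Scheduler::Run above (policy parameter omitted).
  virtual void Run(MyAction & action) = 0;
};

class MyInlineScheduler : public MyScheduler {
 public:
  // Inline execution, like SchedulerSequential::Run.
  virtual void Run(MyAction & action) { action.RunSequential(); }
};

int main() {
  MyInlineScheduler inline_sched;
  MyScheduler * sched = &inline_sched;
  MyAction act(0);
  sched->Run(act);  // an MTAPI-backed scheduler would start a task and wait here
  return 0;
}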
@@ -150,11 +150,12 @@ SimpleTest::SimpleTest() {
#define MTAPI_DOMAIN_ID 1
#define MTAPI_NODE_ID 1

void SimpleTest::TestBasic() {
void SimpleTest::TrySimple(bool reuse_main_thread) {
  // All available cores
  embb::base::CoreSet core_set(true);
  embb::mtapi::NodeAttributes node_attr;
  node_attr
    .SetReuseMainThread(reuse_main_thread ? MTAPI_TRUE : MTAPI_FALSE)
    .SetCoreAffinity(core_set)
    .SetMaxQueues(2);
  embb::mtapi::Node::Initialize(
@@ -162,7 +163,7 @@ void SimpleTest::TestBasic() {
    MTAPI_NODE_ID,
    node_attr);

  for (int ii = 0; ii < 10000; ii++) {
  for (int ii = 0; ii < 1000; ii++) {
    ArraySink<TEST_COUNT> asink;
    MyNetwork network(NUM_SLICES);
    MyConstantSource constant(network, 4);
@@ -225,3 +226,7 @@ void SimpleTest::TestBasic() {
  PT_EXPECT(embb_get_bytes_allocated() == 0);
}

void SimpleTest::TestBasic() {
  TrySimple(false);
  TrySimple(true);
}
@@ -35,6 +35,8 @@ class SimpleTest : public partest::TestCase {
 private:
  void TestBasic();
  void TrySimple(bool reuse_main_thread);
};

#endif // DATAFLOW_CPP_TEST_DATAFLOW_CPP_TEST_SIMPLE_H_
@@ -213,86 +213,12 @@ embb_mtapi_thread_context_t * embb_mtapi_scheduler_get_current_thread_context(
  return context;
}

void embb_mtapi_scheduler_execute_task_or_yield(
  embb_mtapi_scheduler_t * that,
mtapi_boolean_t embb_mtapi_scheduler_execute_task(
  embb_mtapi_task_t * task,
  embb_mtapi_node_t * node,
  embb_mtapi_thread_context_t * thread_context) {
  assert(MTAPI_NULL != that);
  assert(MTAPI_NULL != node);
  if (NULL != thread_context) {
    embb_mtapi_task_t* new_task = embb_mtapi_scheduler_get_next_task(
      that, node, thread_context);
    /* if there was work, execute it */
    if (MTAPI_NULL != new_task) {
      embb_mtapi_task_context_t task_context;
      embb_mtapi_task_context_initialize_with_thread_context_and_task(
        &task_context, thread_context, new_task);
      embb_mtapi_task_execute(new_task, &task_context);
    } else {
      embb_thread_yield();
    }
  } else {
    embb_thread_yield();
  }
}

embb_mtapi_scheduler_worker_func_t *
embb_mtapi_scheduler_worker_func(embb_mtapi_scheduler_t * that) {
  EMBB_UNUSED(that);

  assert(MTAPI_NULL != that);

  /* Currently just returns embb_mtapi_scheduler_worker,
     but could return any custom worker function, e.g. depending
     on scheduler->mode.
  */
  return &embb_mtapi_scheduler_worker;
}

int embb_mtapi_scheduler_worker(void * arg) {
  embb_mtapi_thread_context_t * thread_context =
    (embb_mtapi_thread_context_t*)arg;
  embb_mtapi_task_context_t task_context;
  embb_mtapi_node_t * node;
  embb_duration_t sleep_duration;
  int err;
  int counter = 0;

  embb_mtapi_log_trace(
    "embb_mtapi_scheduler_worker() called for thread %d on core %d\n",
    thread_context->worker_index, thread_context->core_num);

  assert(MTAPI_NULL != thread_context);

  err = embb_tss_create(&thread_context->tss_id);
  if (EMBB_SUCCESS != err) {
    /* report error to scheduler */
    embb_atomic_store_int(&thread_context->run, -1);
    return MTAPI_FALSE;
  }

  /* node is initialized here, otherwise the worker would not run */
  node = thread_context->node;
  embb_tss_set(&(thread_context->tss_id), thread_context);
  embb_duration_set_milliseconds(&sleep_duration, 10);

  /* signal that we're up & running */
  embb_atomic_store_int(&thread_context->run, 1);
  /* potentially wait for node to come up completely */
  while (MTAPI_FALSE == embb_atomic_load_int(&node->is_scheduler_running)) {
    embb_thread_yield();
  }

  /* do work while not requested to stop */
  while (embb_atomic_load_int(&thread_context->run)) {
    /* try to get work */
    embb_mtapi_task_t * task = embb_mtapi_scheduler_get_next_task(
      node->scheduler, node, thread_context);
    /* check if there was work */
    if (MTAPI_NULL != task) {
      mtapi_boolean_t result = MTAPI_FALSE;
      embb_mtapi_queue_t * local_queue = MTAPI_NULL;
      embb_mtapi_group_t * local_group = MTAPI_NULL;
      embb_mtapi_action_t * local_action = MTAPI_NULL;
@@ -333,7 +259,7 @@ int embb_mtapi_scheduler_worker(void * arg) {
          embb_mtapi_queue_task_finished(local_queue);
        }
      }
      counter = 0;
      result = MTAPI_TRUE;
      break;
    case MTAPI_TASK_RETAINED:
@@ -379,6 +305,89 @@ int embb_mtapi_scheduler_worker(void * arg) {
  if (MTAPI_NULL != task->attributes.complete_func) {
    task->attributes.complete_func(task->handle, MTAPI_NULL);
  }

  return result;
}

void embb_mtapi_scheduler_execute_task_or_yield(
  embb_mtapi_scheduler_t * that,
  embb_mtapi_node_t * node,
  embb_mtapi_thread_context_t * thread_context) {
  assert(MTAPI_NULL != that);
  assert(MTAPI_NULL != node);
  if (NULL != thread_context) {
    embb_mtapi_task_t* new_task = embb_mtapi_scheduler_get_next_task(
      that, node, thread_context);
    /* if there was work, execute it */
    if (MTAPI_NULL != new_task) {
      embb_mtapi_scheduler_execute_task(new_task, node, thread_context);
    } else {
      embb_thread_yield();
    }
  } else {
    embb_thread_yield();
  }
}

embb_mtapi_scheduler_worker_func_t *
embb_mtapi_scheduler_worker_func(embb_mtapi_scheduler_t * that) {
  EMBB_UNUSED(that);

  assert(MTAPI_NULL != that);

  /* Currently just returns embb_mtapi_scheduler_worker,
     but could return any custom worker function, e.g. depending
     on scheduler->mode.
  */
  return &embb_mtapi_scheduler_worker;
}

int embb_mtapi_scheduler_worker(void * arg) {
  embb_mtapi_thread_context_t * thread_context =
    (embb_mtapi_thread_context_t*)arg;
  embb_mtapi_node_t * node;
  embb_duration_t sleep_duration;
  int err;
  int counter = 0;

  embb_mtapi_log_trace(
    "embb_mtapi_scheduler_worker() called for thread %d on core %d\n",
    thread_context->worker_index, thread_context->core_num);

  assert(MTAPI_NULL != thread_context);

  err = embb_tss_create(&thread_context->tss_id);
  if (EMBB_SUCCESS != err) {
    /* report error to scheduler */
    embb_atomic_store_int(&thread_context->run, -1);
    return MTAPI_FALSE;
  }

  /* node is initialized here, otherwise the worker would not run */
  node = thread_context->node;
  embb_tss_set(&(thread_context->tss_id), thread_context);
  embb_duration_set_milliseconds(&sleep_duration, 10);

  /* signal that we're up & running */
  embb_atomic_store_int(&thread_context->run, 1);
  /* potentially wait for node to come up completely */
  while (MTAPI_FALSE == embb_atomic_load_int(&node->is_scheduler_running)) {
    embb_thread_yield();
  }

  /* do work while not requested to stop */
  while (embb_atomic_load_int(&thread_context->run)) {
    /* try to get work */
    embb_mtapi_task_t * task = embb_mtapi_scheduler_get_next_task(
      node->scheduler, node, thread_context);
    /* check if there was work */
    if (MTAPI_NULL != task) {
      if (embb_mtapi_scheduler_execute_task(task, node, thread_context)) {
        counter = 0;
      }
      if (MTAPI_TRUE == task->attributes.is_detached) {
        embb_mtapi_task_delete(task, node->task_pool);
      }
...
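Note: the last two lines above are the detached-task half of this merge. Nothing can ever wait on a detached task, so the worker that executed it deletes it from the task pool immediately. For reference, here is a sketch of starting a detached task through the standard MTAPI C interface (the header path is an assumption and may differ in EMBB; error handling is omitted; job must be a valid job handle):

#include <mtapi.h>  /* standard MTAPI C header; path may differ in EMBB */

void start_detached_task(mtapi_job_hndl_t job, void * args, mtapi_size_t size) {
  mtapi_status_t status;
  mtapi_task_attributes_t attr;
  mtapi_taskattr_init(&attr, &status);
  /* mark the task as detached: its handle cannot be waited on */
  mtapi_boolean_t detached = MTAPI_TRUE;
  mtapi_taskattr_set(&attr, MTAPI_TASK_DETACHED,
    &detached, sizeof(detached), &status);
  /* fire and forget; the scheduler reclaims the task after execution */
  mtapi_task_start(MTAPI_TASK_ID_NONE, job, args, size,
    MTAPI_NULL, 0, &attr, MTAPI_GROUP_NONE, &status);
}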
@@ -138,6 +138,15 @@ embb_mtapi_thread_context_t * embb_mtapi_scheduler_get_current_thread_context(
  embb_mtapi_scheduler_t * that);

/**
 * Executes the given task if the thread context is valid.
 * \memberof embb_mtapi_scheduler_struct
 */
mtapi_boolean_t embb_mtapi_scheduler_execute_task(
  embb_mtapi_task_t * task,
  embb_mtapi_node_t * node,
  embb_mtapi_thread_context_t * thread_context);

/**
 * Fetches and executes a single task if the thread context is valid,
 * yields otherwise.
 * \memberof embb_mtapi_scheduler_struct
...
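Note: splitting task execution out of the worker loop is what both call sites above rely on: embb_mtapi_scheduler_execute_task reports through its return value whether a task actually ran, and the worker resets its idle counter only in that case before falling back to yield and, eventually, sleep. A self-contained plain C++ sketch of that contract (all names hypothetical, standing in for the MTAPI internals):

#include <atomic>
#include <chrono>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

std::deque<std::function<void()> > task_queue;  // stands in for the task pool
std::mutex queue_mutex;

// Counterpart of embb_mtapi_scheduler_execute_task: run one task if one is
// available and report via the return value whether any work was done.
bool ExecuteTask() {
  std::function<void()> task;
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    if (task_queue.empty()) return false;
    task = task_queue.front();
    task_queue.pop_front();
  }
  task();
  return true;
}

// Counterpart of the embb_mtapi_scheduler_worker loop: only a successful
// execution resets the idle counter that drives the yield/sleep back-off.
void WorkerLoop(std::atomic<bool> & run) {
  int counter = 0;
  while (run.load()) {
    if (ExecuteTask()) {
      counter = 0;                       // had work: stay hot
    } else if (++counter < 1024) {
      std::this_thread::yield();         // briefly idle: just yield
    } else {
      std::this_thread::sleep_for(
        std::chrono::milliseconds(10));  // long idle: back off
    }
  }
}

int main() {
  std::atomic<bool> run(true);
  std::thread worker(WorkerLoop, std::ref(run));
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    task_queue.push_back([] { /* task body */ });
  }
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  run.store(false);
  worker.join();
  return 0;
}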
@@ -43,6 +43,8 @@ class NodeAttributes {
 public:
  /**
   * Constructs a NodeAttributes object.
   *
   * \waitfree
   */
  NodeAttributes() {
    mtapi_status_t status;
@@ -52,6 +54,8 @@ class NodeAttributes {
  /**
   * Copies a NodeAttributes object.
   *
   * \waitfree
   */
  NodeAttributes(
    NodeAttributes const & other /**< The NodeAttributes to copy. */
@@ -62,6 +66,8 @@ class NodeAttributes {
  /**
   * Copies a NodeAttributes object.
   *
   * \waitfree
   */
  void operator=(
    NodeAttributes const & other /**< The NodeAttributes to copy. */
@@ -233,6 +239,22 @@ class NodeAttributes {
  }

  /**
   * Enables or disables the reuse of the main thread as a worker.
   *
   * \returns Reference to this object.
   * \notthreadsafe
   */
  NodeAttributes & SetReuseMainThread(
    mtapi_boolean_t reuse
    ) {
    mtapi_status_t status;
    mtapi_nodeattr_set(&attributes_, MTAPI_NODE_REUSE_MAIN_THREAD,
      &reuse, sizeof(reuse), &status);
    internal::CheckStatus(status);
    return *this;
  }

  /**
   * Returns the internal representation of this object.
   * Allows for interoperability with the C interface.
   *
...
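Note: with MTAPI_NODE_REUSE_MAIN_THREAD enabled, the thread that calls Node::Initialize participates in task execution instead of sitting idle. A minimal sketch mirroring the test above, using domain and node ids of 1 as in the MTAPI_DOMAIN_ID/MTAPI_NODE_ID defines; the umbrella header name is an assumption:

#include <embb/mtapi/mtapi.h>

int main() {
  embb::base::CoreSet core_set(true);  // all available cores
  embb::mtapi::NodeAttributes node_attr;
  node_attr
    .SetReuseMainThread(MTAPI_TRUE)    // main thread doubles as a worker
    .SetCoreAffinity(core_set)
    .SetMaxQueues(2);
  embb::mtapi::Node::Initialize(1, 1, node_attr);  // domain id, node id
  // ... start tasks or run a dataflow network here ...
  embb::mtapi::Node::Finalize();
  return 0;
}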