Commit 46ac2bd4 by Bernhard Gatzhammer

Merge branch 'development' of https://github.com/siemens/embb into embb349_terms_callable

parents 0682909e e9ad6754
@@ -37,42 +37,45 @@ namespace internal {
 template<typename ValueType>
 class ValueComparisonFunction{
  public:
-  explicit ValueComparisonFunction(const ValueType &value)
-  :value_(value) {}
-  ValueComparisonFunction(const ValueComparisonFunction &other)
-  :value_(other.value_) {}
+  explicit ValueComparisonFunction(const ValueType& value)
+  : value_(value) {}
+  ValueComparisonFunction(const ValueComparisonFunction& other)
+  : value_(other.value_) {}
 
   template<typename ElementType>
   int operator()(ElementType element) {
-    if(element == value_)
+    if (element == value_) {
       return 1;
-    else
+    } else {
       return 0;
+    }
   }
 
  private:
   const ValueType &value_;
 
-  ValueComparisonFunction &operator=(const ValueComparisonFunction &other);
+  ValueComparisonFunction &operator=(
+    const ValueComparisonFunction& other);
 };
 
 template<typename Function>
 class FunctionComparisonFunction{
  public:
   explicit FunctionComparisonFunction(Function function)
-  :function_(function) {}
+  : function_(function) {}
   FunctionComparisonFunction(const FunctionComparisonFunction &other)
-  :function_(other.function_) {}
+  : function_(other.function_) {}
 
   template<typename ElementType>
   int operator()(ElementType element) {
-    if(function_(element))
+    if (function_(element)) {
       return 1;
-    else
+    } else {
       return 0;
+    }
   }
 
  private:
   Function function_;
 
-  FunctionComparisonFunction &operator=(const FunctionComparisonFunction &
-    other);
+  FunctionComparisonFunction &operator=(
+    const FunctionComparisonFunction& other);
 };
 }  // namespace internal
...
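As a usage sketch (not part of this commit): the two internal functors above back the public Count and CountIf entry points that the tests further down exercise. The umbrella header path, the overloads without an explicit policy, and prior MTAPI node initialization are assumptions here.

// Hypothetical example, not part of the commit.
#include <embb/algorithms/algorithms.h>
#include <cstddef>
#include <vector>

inline bool IsEvenValue(int value) { return value % 2 == 0; }

inline void CountSketch(std::vector<int>& values) {
  // Count wraps the value in ValueComparisonFunction internally:
  std::ptrdiff_t tens =
      embb::algorithms::Count(values.begin(), values.end(), 10);
  // CountIf wraps the predicate in FunctionComparisonFunction:
  std::ptrdiff_t evens =
      embb::algorithms::CountIf(values.begin(), values.end(), &IsEvenValue);
  (void)tens;
  (void)evens;
}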
@@ -45,44 +45,54 @@ class ForEachFunctor {
   /**
    * Constructs a for-each functor with arguments.
    */
-  ForEachFunctor(RAI first, RAI last, Function unary,
-    const embb::mtapi::ExecutionPolicy& policy, size_t block_size)
-    : first_(first), last_(last), unary_(unary), policy_(policy),
-      block_size_(block_size) {
+  ForEachFunctor(size_t chunk_first, size_t chunk_last, Function unary,
+                 const embb::mtapi::ExecutionPolicy& policy,
+                 const BlockSizePartitioner<RAI>& partitioner)
+  : chunk_first_(chunk_first), chunk_last_(chunk_last),
+    unary_(unary), policy_(policy), partitioner_(partitioner) {
   }
 
   void Action(mtapi::TaskContext&) {
-    size_t distance = static_cast<size_t>(std::distance(first_, last_));
-    if (distance == 0) return;
-    if (distance <= block_size_) {  // leaf case -> do work
-      for (RAI curIter(first_); curIter != last_; ++curIter) {
-        unary_(*curIter);
+    if (chunk_first_ == chunk_last_) {
+      // Leaf case, recursed to single chunk. Do work on chunk:
+      ChunkDescriptor<RAI> chunk = partitioner_[chunk_first_];
+      RAI first = chunk.GetFirst();
+      RAI last = chunk.GetLast();
+      for (RAI it = first; it != last; ++it) {
+        unary_(*it);
       }
-    } else {  // recurse further
-      ChunkPartitioner<RAI> partitioner(first_, last_, 2);
-      ForEachFunctor<RAI, Function> functorL(partitioner[0].GetFirst(),
-        partitioner[0].GetLast(), unary_, policy_, block_size_);
-      ForEachFunctor<RAI, Function> functorR(partitioner[1].GetFirst(),
-        partitioner[1].GetLast(), unary_, policy_, block_size_);
-
-      mtapi::Node& node = mtapi::Node::GetInstance();
-      mtapi::Task taskL = node.Spawn(mtapi::Action(base::MakeFunction(
-        functorL, &ForEachFunctor<RAI, Function>::Action),
-        policy_));
-      mtapi::Task taskR = node.Spawn(mtapi::Action(base::MakeFunction(
-        functorR, &ForEachFunctor<RAI, Function>::Action),
-        policy_));
-      taskL.Wait(MTAPI_INFINITE);
-      taskR.Wait(MTAPI_INFINITE);
+    } else {
+      // Recurse further:
+      size_t chunk_split_index = (chunk_first_ + chunk_last_) / 2;
+      // Split chunks into left / right branches:
+      self_t functor_l(chunk_first_,
+                       chunk_split_index,
+                       unary_, policy_, partitioner_);
+      self_t functor_r(chunk_split_index + 1,
+                       chunk_last_,
+                       unary_, policy_, partitioner_);
+      mtapi::Task task_l = mtapi::Node::GetInstance().Spawn(
+        mtapi::Action(
+          base::MakeFunction(functor_l, &self_t::Action),
+          policy_));
+      mtapi::Task task_r = mtapi::Node::GetInstance().Spawn(
+        mtapi::Action(
+          base::MakeFunction(functor_r, &self_t::Action),
+          policy_));
+      task_l.Wait(MTAPI_INFINITE);
+      task_r.Wait(MTAPI_INFINITE);
     }
   }
 
  private:
-  RAI first_;
-  RAI last_;
+  typedef ForEachFunctor<RAI, Function> self_t;
+
+ private:
+  size_t chunk_first_;
+  size_t chunk_last_;
   Function unary_;
   const embb::mtapi::ExecutionPolicy& policy_;
-  size_t block_size_;
+  const BlockSizePartitioner<RAI>& partitioner_;
 
   /**
    * Disables assignment.
@@ -95,21 +105,31 @@ void ForEachRecursive(RAI first, RAI last, Function unary,
     const embb::mtapi::ExecutionPolicy& policy, size_t block_size) {
   typedef typename std::iterator_traits<RAI>::difference_type difference_type;
   difference_type distance = std::distance(first, last);
-  assert(distance > 0);
+  if (distance == 0) {
+    return;
+  }
+  unsigned int num_cores = policy.GetCoreCount();
+  if (num_cores == 0) {
+    EMBB_THROW(embb::base::ErrorException, "No cores in execution policy");
+  }
   mtapi::Node& node = mtapi::Node::GetInstance();
   // Determine actually used block size
   if (block_size == 0) {
-    block_size = (static_cast<size_t>(distance) / node.GetCoreCount());
+    block_size = (static_cast<size_t>(distance) / num_cores);
     if (block_size == 0) {
       block_size = 1;
     }
   }
-  // Perform check of task number sufficiency
+  // Check task number sufficiency
   if (((distance / block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
-    EMBB_THROW(embb::base::ErrorException, "Not enough MTAPI tasks available "
-      "to perform the parallel foreach loop");
+    EMBB_THROW(embb::base::ErrorException,
+               "Not enough MTAPI tasks available for parallel foreach");
   }
-  ForEachFunctor<RAI, Function> functor(first, last, unary, policy, block_size);
+  BlockSizePartitioner<RAI> partitioner(first, last, block_size);
+  ForEachFunctor<RAI, Function> functor(0,
+                                        partitioner.Size() - 1,
+                                        unary, policy, partitioner);
   mtapi::Task task = node.Spawn(mtapi::Action(
     base::MakeFunction(functor,
       &ForEachFunctor<RAI, Function>::Action),
@@ -127,7 +147,7 @@ void ForEachIteratorCheck(RAI first, RAI last, Function unary,
 }  // namespace internal
 
 template<typename RAI, typename Function>
-void ForEach(RAI first, RAI last, Function unary,
+void ForEach(RAI first, const RAI last, Function unary,
              const embb::mtapi::ExecutionPolicy& policy, size_t block_size) {
   typename std::iterator_traits<RAI>::iterator_category category;
   internal::ForEachIteratorCheck(first, last, unary, policy, block_size,
...
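As a quick usage sketch (not part of the commit): the refactoring above changes only the internal chunking, so callers still invoke ForEach as in for_each_test.cc further down. The umbrella header path and the overload without an explicit policy are assumptions.

// Hypothetical example, not part of the commit.
#include <embb/algorithms/algorithms.h>
#include <vector>

struct Square {
  void operator()(int& value) const { value = value * value; }
};

inline void SquareAll(std::vector<int>& values) {
  // With block_size omitted, the implementation derives the chunk size from
  // the execution policy's core count, as ForEachRecursive above shows.
  embb::algorithms::ForEach(values.begin(), values.end(), Square());
}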
@@ -48,57 +48,111 @@ class MergeSortFunctor {
  public:
   typedef typename std::iterator_traits<RAI>::value_type value_type;
 
-  MergeSortFunctor(RAI first, RAI last, RAITemp temporary_first,
-    ComparisonFunction comparison, const embb::mtapi::ExecutionPolicy& policy,
-    size_t block_size, const RAI& global_first, int depth)
-    : first_(first), last_(last), temp_first_(temporary_first),
-      comparison_(comparison), policy_(policy), block_size_(block_size),
+  MergeSortFunctor(size_t chunk_first, size_t chunk_last,
+                   RAITemp temporary_first, ComparisonFunction comparison,
+                   const embb::mtapi::ExecutionPolicy& policy,
+                   const BlockSizePartitioner<RAI>& partitioner,
+                   const RAI& global_first, int depth)
+  : chunk_first_(chunk_first), chunk_last_(chunk_last),
+    temp_first_(temporary_first),
+    comparison_(comparison), policy_(policy), partitioner_(partitioner),
     global_first_(global_first), depth_(depth) {
   }
 
-  void Action(mtapi::TaskContext& context) {
-    typedef typename std::iterator_traits<RAI>::difference_type difference_type;
-    size_t distance = static_cast<size_t>(std::distance(first_, last_));
-    if (distance <= 1) {
-      if(!CloneBackToInput() && distance != 0) {
-        RAITemp temp_first = temp_first_;
-        temp_first += std::distance(global_first_, first_);
-        *temp_first = *first_;
-      }
-      return;
-    }
-    internal::ChunkPartitioner<RAI> partitioner(first_, last_, 2);
-    MergeSortFunctor<RAI, RAITemp, ComparisonFunction> functorL(
-      partitioner[0].GetFirst(), partitioner[0].GetLast(), temp_first_,
-      comparison_, policy_, block_size_, global_first_, depth_ + 1);
-    MergeSortFunctor<RAI, RAITemp, ComparisonFunction> functorR(
-      partitioner[1].GetFirst(), partitioner[1].GetLast(), temp_first_,
-      comparison_, policy_, block_size_, global_first_, depth_ + 1);
-    if (distance <= block_size_) {
-      functorL.Action(context);
-      functorR.Action(context);
-    } else {
-      mtapi::Node& node = mtapi::Node::GetInstance();
-      mtapi::Task taskL = node.Spawn(mtapi::Action(base::MakeFunction(functorL,
-        &MergeSortFunctor<RAI, RAITemp, ComparisonFunction>::Action),
-        policy_));
-      mtapi::Task taskR = node.Spawn(mtapi::Action(base::MakeFunction(functorR,
-        &MergeSortFunctor<RAI, RAITemp, ComparisonFunction>::Action),
-        policy_));
-      taskL.Wait(MTAPI_INFINITE);
-      taskR.Wait(MTAPI_INFINITE);
-    }
-    if(CloneBackToInput()) {
-      difference_type first = std::distance(global_first_, functorL.first_);
-      difference_type mid = std::distance(global_first_, functorR.first_);
-      difference_type last = std::distance(global_first_, functorR.last_);
-      SerialMerge(temp_first_ + first, temp_first_ + mid,
        temp_first_ + last, functorL.first_, comparison_);
-    } else {
-      SerialMerge(functorL.first_, functorR.first_, functorR.last_,
-        temp_first_ + std::distance(global_first_, functorL.first_),
-        comparison_);
+  void Action(mtapi::TaskContext&) {
+    size_t chunk_split_index = (chunk_first_ + chunk_last_) / 2;
+    if (chunk_first_ == chunk_last_) {
+      // Leaf case: recurse into a single chunk's elements:
+      ChunkDescriptor<RAI> chunk = partitioner_[chunk_first_];
+      MergeSortChunk(chunk.GetFirst(), chunk.GetLast(), depth_);
+    } else {
+      // Recurse further, split chunks:
+      self_t functor_l(chunk_first_,
+                       chunk_split_index,
+                       temp_first_,
+                       comparison_, policy_, partitioner_,
+                       global_first_, depth_ + 1);
+      self_t functor_r(chunk_split_index + 1,
+                       chunk_last_,
+                       temp_first_,
+                       comparison_, policy_, partitioner_,
+                       global_first_, depth_ + 1);
+      mtapi::Node& node = mtapi::Node::GetInstance();
+      mtapi::Task task_l = node.Spawn(
+        mtapi::Action(
+          base::MakeFunction(functor_l, &self_t::Action),
+          policy_));
+      mtapi::Task task_r = node.Spawn(
+        mtapi::Action(
+          base::MakeFunction(functor_r, &self_t::Action),
+          policy_));
+      task_l.Wait(MTAPI_INFINITE);
+      task_r.Wait(MTAPI_INFINITE);
+      ChunkDescriptor<RAI> ck_f = partitioner_[chunk_first_];
+      ChunkDescriptor<RAI> ck_m = partitioner_[chunk_split_index + 1];
+      ChunkDescriptor<RAI> ck_l = partitioner_[chunk_last_];
+      if(CloneBackToInput(depth_)) {
+        // Merge from temp into input:
+        difference_type first = std::distance(global_first_, ck_f.GetFirst());
+        difference_type mid = std::distance(global_first_, ck_m.GetFirst());
+        difference_type last = std::distance(global_first_, ck_l.GetLast());
+        SerialMerge(temp_first_ + first, temp_first_ + mid, temp_first_ + last,
+                    ck_f.GetFirst(),
+                    comparison_);
+      } else {
+        // Merge from input into temp:
+        SerialMerge(ck_f.GetFirst(), ck_m.GetFirst(), ck_l.GetLast(),
+                    temp_first_ + std::distance(global_first_, ck_f.GetFirst()),
+                    comparison_);
+      }
+    }
+  }
+
+  /**
+   * Serial merge sort of elements within a single chunk.
+   */
+  void MergeSortChunk(RAI first,
+                      RAI last,
+                      int depth) {
+    size_t distance = static_cast<size_t>(
+      std::distance(first, last));
+    if (distance <= 1) {
+      // Leaf case:
+      if (!CloneBackToInput(depth) && distance != 0) {
+        RAITemp temp_first = temp_first_;
+        std::advance(temp_first, std::distance(global_first_, first));
+        *temp_first = *first;
+      }
+      return;
+    }
+    // Recurse further. Use binary split, ignoring chunk size as this
+    // recursion is serial and has leaf size 1:
+    ChunkPartitioner<RAI> partitioner(first, last, 2);
+    ChunkDescriptor<RAI> ck_l = partitioner[0];
+    ChunkDescriptor<RAI> ck_r = partitioner[1];
+    MergeSortChunk(
+      ck_l.GetFirst(),
+      ck_l.GetLast(),
+      depth + 1);
+    MergeSortChunk(
+      ck_r.GetFirst(),
+      ck_r.GetLast(),
+      depth + 1);
+    if (CloneBackToInput(depth)) {
+      // Merge from temp into input:
+      difference_type d_first = std::distance(global_first_, ck_l.GetFirst());
+      difference_type d_mid = std::distance(global_first_, ck_r.GetFirst());
+      difference_type d_last = std::distance(global_first_, ck_r.GetLast());
+      SerialMerge(
+        temp_first_ + d_first, temp_first_ + d_mid, temp_first_ + d_last,
+        ck_l.GetFirst(),
+        comparison_);
+    } else {
+      // Merge from input into temp:
+      SerialMerge(
+        ck_l.GetFirst(), ck_r.GetFirst(), ck_r.GetLast(),
+        temp_first_ + std::distance(global_first_, ck_l.GetFirst()),
+        comparison_);
     }
   }
@@ -109,17 +163,22 @@ class MergeSortFunctor {
    * \return \c true if the temporary data range is input and the array to be
    * sorted is output. \c false, if the other way around.
    */
-  bool CloneBackToInput() {
-    return depth_ % 2 == 0 ? true : false;
+  bool CloneBackToInput(int depth) {
+    return depth % 2 == 0 ? true : false;
   }
 
  private:
-  RAI first_;
-  RAI last_;
+  typedef MergeSortFunctor<RAI, RAITemp, ComparisonFunction> self_t;
+  typedef typename std::iterator_traits<RAI>::difference_type
+    difference_type;
+
+ private:
+  size_t chunk_first_;
+  size_t chunk_last_;
   RAITemp temp_first_;
   ComparisonFunction comparison_;
   const embb::mtapi::ExecutionPolicy& policy_;
-  size_t block_size_;
+  const BlockSizePartitioner<RAI>& partitioner_;
   const RAI& global_first_;
   int depth_;
@@ -166,29 +225,47 @@ void MergeSort(
   size_t block_size
   ) {
   typedef typename std::iterator_traits<RAI>::difference_type difference_type;
-  embb::mtapi::Node &node = embb::mtapi::Node::GetInstance();
-  difference_type distance = last - first;
-  assert(distance >= 0);
+  typedef internal::MergeSortFunctor<RAI, RAITemp, ComparisonFunction>
+    functor_t;
+  difference_type distance = std::distance(first, last);
+  if (distance == 0) {
+    EMBB_THROW(embb::base::ErrorException, "Distance for ForEach is 0");
+  }
+  unsigned int num_cores = policy.GetCoreCount();
+  if (num_cores == 0) {
+    EMBB_THROW(embb::base::ErrorException, "No cores in execution policy");
+  }
+  // Determine actually used block size
   if (block_size == 0) {
-    block_size = (static_cast<size_t>(distance) / node.GetCoreCount());
+    block_size = (static_cast<size_t>(distance) / num_cores);
     if (block_size == 0)
       block_size = 1;
   }
-  if (((distance/block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
+  // Check task number sufficiency
+  if (((distance / block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
     EMBB_THROW(embb::base::ErrorException,
-      "Not enough MTAPI tasks available to perform the merge sort");
+      "Not enough MTAPI tasks available to perform merge sort");
   }
-  internal::MergeSortFunctor<RAI, RAITemp, ComparisonFunction> functor(
-    first, last, temporary_first, comparison, policy, block_size, first, 0);
-  mtapi::Task task = node.Spawn(mtapi::Action(base::MakeFunction(functor,
-    &internal::MergeSortFunctor<RAI, RAITemp, ComparisonFunction>::Action),
+  internal::BlockSizePartitioner<RAI> partitioner(first, last, block_size);
+  functor_t functor(0,
+                    partitioner.Size() - 1,
+                    temporary_first,
+                    comparison,
+                    policy,
+                    partitioner,
+                    first,
+                    0);
+  mtapi::Task task = embb::mtapi::Node::GetInstance().Spawn(
+    mtapi::Action(
+      base::MakeFunction(functor, &functor_t::Action),
     policy));
   task.Wait(MTAPI_INFINITE);
 }
+// @NOTE: Why is there no type guard for RAI?
 
 }  // namespace algorithms
 }  // namespace embb
...
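Usage stays the same after this refactoring; a minimal sketch follows (not part of the commit; header path and the allocating overload are assumed from merge_sort_test.cc below).

// Hypothetical example, not part of the commit.
#include <embb/algorithms/algorithms.h>
#include <functional>
#include <vector>

inline void SortAscending(std::vector<int>& values) {
  // MergeSortAllocate provides the temporary range itself; the functor above
  // then alternates between input and buffer depending on recursion depth.
  embb::algorithms::MergeSortAllocate(values.begin(), values.end(),
                                      std::less<int>());
}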
@@ -71,8 +71,8 @@ const ChunkDescriptor<ForwardIterator>
   ForwardIterator last_new = first_new;
-  if (index == elements_count / chunkSize) {
-    std::advance(last_new, elements_count % chunkSize);
+  if (index >= chunks - 1) {
+    last_new = last;
   } else {
     std::advance(last_new, chunkSize);
   }
@@ -94,7 +94,7 @@ ChunkPartitioner<ForwardIterator>::ChunkPartitioner(ForwardIterator first,
   } else {
     // if no concrete chunk size was given, use number of cores...
     mtapi::Node& node = mtapi::Node::GetInstance();
-    size = node.GetCoreCount();
+    size = node.GetWorkerThreadCount();
   }
   elements_count = static_cast<size_t>(std::distance(first, last));
...
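For orientation, a sketch of the chunk-index contract the algorithm functors above rely on (not part of the commit; the internal namespace, the required include, and the const-ness of the accessors are assumptions):

// Hypothetical illustration, not part of the commit.
// Visits every chunk of a partitioned range; with the fix above, the last
// chunk always ends exactly at 'last'.
#include <cstddef>

template<typename RAI, typename Visitor>
void VisitChunks(embb::algorithms::internal::BlockSizePartitioner<RAI>&
                 partitioner, Visitor visit) {
  for (std::size_t i = 0; i < partitioner.Size(); ++i) {
    embb::algorithms::internal::ChunkDescriptor<RAI> chunk = partitioner[i];
    visit(chunk.GetFirst(), chunk.GetLast());
  }
}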
@@ -192,10 +192,17 @@ template <typename RAI, typename ComparisonFunction>
 void QuickSort(RAI first, RAI last, ComparisonFunction comparison,
                const embb::mtapi::ExecutionPolicy& policy, size_t block_size) {
   embb::mtapi::Node& node = embb::mtapi::Node::GetInstance();
-  typename std::iterator_traits<RAI>::difference_type distance = last - first;
-  assert(distance > 0);
+  typedef typename std::iterator_traits<RAI>::difference_type difference_type;
+  difference_type distance = std::distance(first, last);
+  if (distance <= 0) {
+    return;
+  }
+  unsigned int num_cores = policy.GetCoreCount();
+  if (num_cores == 0) {
+    EMBB_THROW(embb::base::ErrorException, "No cores in execution policy");
+  }
   if (block_size == 0) {
-    block_size = (static_cast<size_t>(distance) / node.GetCoreCount());
+    block_size = (static_cast<size_t>(distance) / num_cores);
     if (block_size == 0)
       block_size = 1;
   }
...
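A corresponding usage sketch (not part of the commit; with the change above an empty range now returns early instead of hitting an assertion):

// Hypothetical example, not part of the commit.
#include <embb/algorithms/algorithms.h>
#include <functional>
#include <vector>

inline void SortDescending(std::vector<int>& values) {
  // Sort in descending order, as in quick_sort_test.cc below.
  embb::algorithms::QuickSort(values.begin(), values.end(),
                              std::greater<int>());
}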
@@ -42,45 +42,55 @@ template<typename RAI, typename ReturnType, typename ReductionFunction,
   typename TransformationFunction>
 class ReduceFunctor {
  public:
-  ReduceFunctor(RAI first, RAI last, ReturnType neutral,
+  ReduceFunctor(size_t chunk_first, size_t chunk_last,
+                ReturnType neutral,
                 ReductionFunction reduction,
                 TransformationFunction transformation,
-                const embb::mtapi::ExecutionPolicy &policy, size_t block_size,
+                const embb::mtapi::ExecutionPolicy& policy,
+                const BlockSizePartitioner<RAI>& partitioner,
                 ReturnType& result)
-    :
-      first_(first), last_(last), neutral_(neutral), reduction_(reduction),
-      transformation_(transformation), policy_(policy),
-      block_size_(block_size), result_(result) {
+  : chunk_first_(chunk_first), chunk_last_(chunk_last), neutral_(neutral),
+    reduction_(reduction), transformation_(transformation), policy_(policy),
+    partitioner_(partitioner), result_(result) {
   }
 
   void Action(mtapi::TaskContext&) {
-    if (first_ == last_) {
-      return;
-    }
-    size_t distance = static_cast<size_t>(std::distance(first_, last_));
-    if (distance <= block_size_) {  // leaf case -> do work
+    if (chunk_first_ == chunk_last_) {
+      // Leaf case, recursed to single chunk. Do work on chunk:
+      ChunkDescriptor<RAI> chunk = partitioner_[chunk_first_];
+      RAI first = chunk.GetFirst();
+      RAI last = chunk.GetLast();
       ReturnType result(neutral_);
-      for (RAI iter = first_; iter != last_; ++iter) {
-        result = reduction_(result, transformation_(*iter));
+      for (RAI it = first; it != last; ++it) {
+        result = reduction_(result, transformation_(*it));
       }
       result_ = result;
-    } else {  // recurse further
-      internal::ChunkPartitioner<RAI> partitioner(first_, last_, 2);
+    } else {
+      // Recurse further:
+      size_t chunk_split_index = (chunk_first_ + chunk_last_) / 2;
+      // Split chunks into left / right branches:
       ReturnType result_l(neutral_);
       ReturnType result_r(neutral_);
-      ReduceFunctor functor_l(partitioner[0].GetFirst(),
-                              partitioner[0].GetLast(),
-                              neutral_, reduction_, transformation_, policy_,
-                              block_size_, result_l);
-      ReduceFunctor functor_r(partitioner[1].GetFirst(),
-                              partitioner[1].GetLast(),
-                              neutral_, reduction_, transformation_, policy_,
-                              block_size_, result_r);
-      mtapi::Node& node = mtapi::Node::GetInstance();
-      mtapi::Task task_l = node.Spawn(mtapi::Action(base::MakeFunction(
-        functor_l, &ReduceFunctor::Action), policy_));
-      mtapi::Task task_r = node.Spawn(mtapi::Action(base::MakeFunction(
-        functor_r, &ReduceFunctor::Action), policy_));
+      self_t functor_l(chunk_first_,
+                       chunk_split_index,
+                       neutral_, reduction_, transformation_, policy_,
+                       partitioner_,
+                       result_l);
+      self_t functor_r(chunk_split_index + 1,
+                       chunk_last_,
+                       neutral_, reduction_, transformation_, policy_,
+                       partitioner_,
+                       result_r);
+      mtapi::Task task_l = mtapi::Node::GetInstance().Spawn(
+        mtapi::Action(
+          base::MakeFunction(
+            functor_l, &self_t::Action),
+          policy_));
+      mtapi::Task task_r = mtapi::Node::GetInstance().Spawn(
+        mtapi::Action(
+          base::MakeFunction(
+            functor_r, &self_t::Action),
+          policy_));
       task_l.Wait(MTAPI_INFINITE);
       task_r.Wait(MTAPI_INFINITE);
       result_ = reduction_(result_l, result_r);
@@ -88,15 +98,23 @@ class ReduceFunctor {
   }
 
  private:
-  RAI first_;
-  RAI last_;
+  typedef ReduceFunctor<RAI, ReturnType,
+                        ReductionFunction,
+                        TransformationFunction> self_t;
+
+ private:
+  size_t chunk_first_;
+  size_t chunk_last_;
   ReturnType neutral_;
   ReductionFunction reduction_;
   TransformationFunction transformation_;
   const embb::mtapi::ExecutionPolicy& policy_;
-  size_t block_size_;
+  const BlockSizePartitioner<RAI>& partitioner_;
   ReturnType& result_;
 
+  /**
+   * Disables assignment and copy-construction.
+   */
   ReduceFunctor& operator=(const ReduceFunctor&);
   ReduceFunctor(const ReduceFunctor&);
 };
@@ -110,27 +128,40 @@ ReturnType ReduceRecursive(RAI first, RAI last, ReturnType neutral,
                            size_t block_size) {
   typedef typename std::iterator_traits<RAI>::difference_type difference_type;
   difference_type distance = std::distance(first, last);
-  assert(distance > 0);
+  if (distance == 0) {
+    EMBB_THROW(embb::base::ErrorException, "Distance for Reduce is 0");
+  }
+  unsigned int num_cores = policy.GetCoreCount();
+  if (num_cores == 0) {
+    EMBB_THROW(embb::base::ErrorException, "No cores in execution policy");
+  }
   mtapi::Node& node = mtapi::Node::GetInstance();
-  size_t used_block_size = block_size;
-  if (used_block_size == 0) {
-    used_block_size = static_cast<size_t>(distance) / node.GetCoreCount();
-    if (used_block_size == 0) used_block_size = 1;
+  // Determine actually used block size
+  if (block_size == 0) {
+    block_size = (static_cast<size_t>(distance) / num_cores);
+    if (block_size == 0) {
+      block_size = 1;
+    }
   }
-  if (((distance / used_block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
+  // Perform check of task number sufficiency
+  if (((distance / block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
     EMBB_THROW(embb::base::ErrorException,
                "Number of computation tasks required in reduction would "
                "exceed MTAPI maximum number of tasks");
   }
-  ReturnType result = neutral;
   typedef ReduceFunctor<RAI, ReturnType, ReductionFunction,
                         TransformationFunction> Functor;
-  Functor functor(first, last, neutral, reduction, transformation, policy,
-                  used_block_size, result);
-  mtapi::Task task = node.Spawn(mtapi::Action(base::MakeFunction(
+  BlockSizePartitioner<RAI> partitioner(first, last, block_size);
+  ReturnType result = neutral;
+  Functor functor(0,
+                  partitioner.Size() - 1,
+                  neutral,
+                  reduction, transformation,
+                  policy,
+                  partitioner,
+                  result);
+  mtapi::Task task = node.Spawn(
+    mtapi::Action(base::MakeFunction(
     functor, &Functor::Action), policy));
   task.Wait(MTAPI_INFINITE);
   return result;
...
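A usage sketch mirroring the call in reduce_test.cc below (not part of the commit; header path and the overload without an explicit policy are assumptions):

// Hypothetical example, not part of the commit.
#include <embb/algorithms/algorithms.h>
#include <functional>
#include <vector>

struct IdentityTransform {
  int operator()(int value) const { return value; }
};

inline int ParallelSum(std::vector<int>& values) {
  // Neutral element 0, reduction std::plus, identity transformation.
  return embb::algorithms::Reduce(values.begin(), values.end(), 0,
                                  std::plus<int>(), IdentityTransform());
}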
@@ -41,73 +41,79 @@ template<typename RAIIn, typename RAIOut, typename ReturnType,
   typename ScanFunction, typename TransformationFunction>
 class ScanFunctor {
  public:
-  ScanFunctor(RAIIn first, RAIIn last, RAIOut output_iterator,
+  ScanFunctor(size_t chunk_first, size_t chunk_last, RAIOut output_iterator,
               ReturnType neutral, ScanFunction scan,
               TransformationFunction transformation,
               const embb::mtapi::ExecutionPolicy& policy,
-              size_t block_size, ReturnType* tree_values, size_t node_id,
+              const BlockSizePartitioner<RAIIn>& partitioner,
+              ReturnType* tree_values, size_t node_id,
               bool going_down)
-    : policy_(policy), first_(first), last_(last),
+    : policy_(policy), chunk_first_(chunk_first), chunk_last_(chunk_last),
       output_iterator_(output_iterator), scan_(scan),
       transformation_(transformation),
-      neutral_(neutral), block_size_(block_size), tree_values_(tree_values),
+      neutral_(neutral), partitioner_(partitioner), tree_values_(tree_values),
       node_id_(node_id), parent_value_(neutral), is_first_pass_(going_down) {
   }
 
   void Action(mtapi::TaskContext&) {
-    if (first_ == last_) {
-      return;
-    }
-    size_t distance = static_cast<size_t>(std::distance(first_, last_));
-    if (distance <= block_size_) {  // leaf case -> do work
-      if (is_first_pass_) {
-        RAIIn iter_in = first_;
-        RAIOut iter_out = output_iterator_;
-        ReturnType result = transformation_(*first_);
+    if (chunk_first_ == chunk_last_) {
+      ChunkDescriptor<RAIIn> chunk = partitioner_[chunk_first_];
+      RAIIn iter_in = chunk.GetFirst();
+      RAIIn last_in = chunk.GetLast();
+      RAIOut iter_out = output_iterator_;
+      // leaf case -> do work
+      if (is_first_pass_) {
+        ReturnType result = transformation_(*iter_in);
         *iter_out = result;
         ++iter_in;
         ++iter_out;
-        while (iter_in != last_) {
+        for (; iter_in != last_in; ++iter_in, ++iter_out) {
           result = scan_(result, transformation_(*iter_in));
           *iter_out = result;
-          ++iter_in;
-          ++iter_out;
         }
         SetTreeValue(result);
-      } else {  // Second pass
-        RAIIn iter_in = first_;
-        RAIOut iter_out = output_iterator_;
-        while (iter_in != last_) {
+      } else {
+        // Second pass
+        for (; iter_in != last_in; ++iter_in, ++iter_out) {
           *iter_out = scan_(parent_value_, *iter_out);
-          ++iter_in;
-          ++iter_out;
         }
       }
     } else {
-      internal::ChunkPartitioner<RAIIn> partitioner(first_, last_, 2);
-      ScanFunctor functor_l(partitioner[0].GetFirst(), partitioner[0].GetLast(),
-                            output_iterator_, neutral_, scan_, transformation_,
-                            policy_, block_size_, tree_values_, node_id_,
-                            is_first_pass_);
-      ScanFunctor functor_r(partitioner[1].GetFirst(), partitioner[1].GetLast(),
-                            output_iterator_, neutral_, scan_, transformation_,
-                            policy_, block_size_, tree_values_, node_id_,
-                            is_first_pass_);
-      functor_l.SetID(1);
-      functor_r.SetID(2);
+      // recurse further
+      size_t chunk_split_index = (chunk_first_ + chunk_last_) / 2;
+      // Split chunks into left / right branches:
+      ScanFunctor functor_l(
+        chunk_first_, chunk_split_index,
+        output_iterator_, neutral_, scan_, transformation_,
+        policy_, partitioner_, tree_values_, node_id_,
+        is_first_pass_);
+      ScanFunctor functor_r(
+        chunk_split_index + 1, chunk_last_,
+        output_iterator_, neutral_, scan_, transformation_,
+        policy_, partitioner_, tree_values_, node_id_,
+        is_first_pass_);
+      functor_l.SetID(LEFT);
+      functor_r.SetID(RIGHT);
+      // Advance output iterator of right branch:
+      ChunkDescriptor<RAIIn> chunk_left = partitioner_[chunk_first_];
+      ChunkDescriptor<RAIIn> chunk_right = partitioner_[chunk_split_index + 1];
       std::advance(functor_r.output_iterator_,
-                   std::distance(functor_l.first_, functor_r.first_));
+                   std::distance(chunk_left.GetFirst(), chunk_right.GetFirst()));
       if (!is_first_pass_) {
         functor_l.parent_value_ = parent_value_;
         functor_r.parent_value_ = functor_l.GetTreeValue() + parent_value_;
       }
+      // Spawn tasks to recurse:
       mtapi::Node& node = mtapi::Node::GetInstance();
-      mtapi::Task task_l = node.Spawn(mtapi::Action(base::MakeFunction(
-        functor_l, &ScanFunctor::Action),
-        policy_));
-      mtapi::Task task_r = node.Spawn(mtapi::Action(base::MakeFunction(
-        functor_r, &ScanFunctor::Action),
-        policy_));
+      mtapi::Task task_l = node.Spawn(
+        mtapi::Action(
+          base::MakeFunction(functor_l, &ScanFunctor::Action),
+          policy_));
+      mtapi::Task task_r = node.Spawn(
+        mtapi::Action(
+          base::MakeFunction(functor_r, &ScanFunctor::Action),
+          policy_));
+      // Wait for tasks to complete:
       task_l.Wait(MTAPI_INFINITE);
       task_r.Wait(MTAPI_INFINITE);
       SetTreeValue(scan_(functor_l.GetTreeValue(), functor_r.GetTreeValue()));
@@ -123,23 +129,25 @@ class ScanFunctor {
   }
 
  private:
+  static const int LEFT = 1;
+  static const int RIGHT = 2;
+
   const embb::mtapi::ExecutionPolicy& policy_;
-  RAIIn first_;
-  RAIIn last_;
+  size_t chunk_first_;
+  size_t chunk_last_;
   RAIOut output_iterator_;
   ScanFunction scan_;
   TransformationFunction transformation_;
   ReturnType neutral_;
-  size_t block_size_;
+  const BlockSizePartitioner<RAIIn>& partitioner_;
   ReturnType* tree_values_;
   size_t node_id_;
   ReturnType parent_value_;
   bool is_first_pass_;
 
-  void SetID(int is_left) {
-    if (is_left == 1) {
+  void SetID(int branch) {
+    if (branch == LEFT) {
       node_id_ = 2 * node_id_ + 1;
-    } else if (is_left == 2) {
+    } else if (branch == RIGHT) {
       node_id_ = 2 * node_id_ + 2;
     }
   }
@@ -168,34 +176,43 @@ void ScanIteratorCheck(RAIIn first, RAIIn last, RAIOut output_iterator,
   if (distance <= 0) {
     return;
   }
-  mtapi::Node& node = mtapi::Node::GetInstance();
+  unsigned int num_cores = policy.GetCoreCount();
+  if (num_cores == 0) {
+    EMBB_THROW(embb::base::ErrorException, "No cores in execution policy");
+  }
   ReturnType values[MTAPI_NODE_MAX_TASKS_DEFAULT];
-  size_t used_block_size = block_size;
   if (block_size == 0) {
-    used_block_size = static_cast<size_t>(distance) / node.GetCoreCount();
-    if (used_block_size == 0) used_block_size = 1;
+    block_size = static_cast<size_t>(distance) / num_cores;
+    if (block_size == 0) {
+      block_size = 1;
+    }
   }
-  if (((distance / used_block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
+  if (((distance / block_size) * 2) + 1 > MTAPI_NODE_MAX_TASKS_DEFAULT) {
     EMBB_THROW(embb::base::ErrorException,
-               "Number of computation tasks required in scan "
-               "exceeds MTAPI maximum number of tasks");
+               "Not enough MTAPI tasks available for parallel scan");
   }
   // first pass. Calculates prefix sums for leaves and when recursion returns
   // it creates the tree.
   typedef ScanFunctor<RAIIn, RAIOut, ReturnType, ScanFunction,
                       TransformationFunction> Functor;
-  Functor functor_down(first, last, output_iterator, neutral, scan,
-                       transformation, policy, used_block_size, values, 0,
-                       true);
+  mtapi::Node& node = mtapi::Node::GetInstance();
+
+  BlockSizePartitioner<RAIIn> partitioner_down(first, last, block_size);
+  Functor functor_down(0, partitioner_down.Size() - 1, output_iterator,
                        neutral, scan, transformation, policy, partitioner_down,
                        values, 0, true);
   mtapi::Task task_down = node.Spawn(mtapi::Action(base::MakeFunction(
                           functor_down, &Functor::Action),
                           policy));
   task_down.Wait(MTAPI_INFINITE);
+
   // Second pass. Gives to each leaf the part of the prefix missing
-  Functor functor_up(first, last, output_iterator, neutral, scan,
-                     transformation, policy, used_block_size, values, 0, false);
+  BlockSizePartitioner<RAIIn> partitioner_up(first, last, block_size);
+  Functor functor_up(0, partitioner_up.Size() - 1, output_iterator,
                      neutral, scan, transformation, policy, partitioner_up,
                      values, 0, false);
   mtapi::Task task_up = node.Spawn(mtapi::Action(base::MakeFunction(
                         functor_up, &Functor::Action),
                         policy));
...
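And the matching sketch for Scan, following the call shape in scan_test.cc below (not part of the commit; the overload without an explicit policy is an assumption):

// Hypothetical example, not part of the commit.
#include <embb/algorithms/algorithms.h>
#include <functional>
#include <vector>

struct IdentityTransform {
  int operator()(int value) const { return value; }
};

inline void PrefixSums(std::vector<int>& input, std::vector<int>& output) {
  // output[i] becomes input[0] + ... + input[i], computed in the two passes
  // implemented by ScanFunctor above; output must hold at least input's size.
  embb::algorithms::Scan(input.begin(), input.end(), output.begin(), 0,
                         std::plus<int>(), IdentityTransform());
}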
@@ -60,7 +60,7 @@ CountTest::CountTest() {
 void CountTest::TestDataStructures() {
   using embb::algorithms::Count;
   const int size = 10;
-  int array[] = {10, 20, 30, 30, 20, 10, 10, 20, 20, 20};
+  int array[] = { 10, 20, 30, 30, 20, 10, 10, 20, 20, 20 };
   std::vector<int> vector(array, array + size);
   std::deque<int> deque(array, array + size);
   const std::vector<int> const_vector(array, array + size);
@@ -74,7 +74,7 @@ void CountTest::TestDataStructures() {
 void CountTest::TestCountIf() {
   using embb::algorithms::CountIf;
   const int size = 10;
-  int array[] = {10, 21, 30, 31, 20, 11, 10, 21, 20, 20};
+  int array[] = { 10, 21, 30, 31, 20, 11, 10, 21, 20, 20 };
   PT_EXPECT_EQ(CountIf(array, array + size, IsEven()), 6);
   PT_EXPECT_EQ(CountIf(array, array + size, &IsEvenFunction), 6);
 }
@@ -128,8 +128,6 @@ void CountTest::TestPolicy() {
   PT_EXPECT_EQ(Count(vector.begin(), vector.end(), 10, ExecutionPolicy()), 3);
   PT_EXPECT_EQ(Count(vector.begin(), vector.end(), 10, ExecutionPolicy(true)),
                3);
-  PT_EXPECT_EQ(Count(vector.begin(), vector.end(), 10, ExecutionPolicy(false)),
-               3);
   PT_EXPECT_EQ(Count(vector.begin(), vector.end(), 10,
                ExecutionPolicy(true, 1)), 3);
 }
...
@@ -207,12 +207,6 @@ void ForEachTest::TestPolicy() {
   }
 
   vector = init;
-  ForEach(vector.begin(), vector.end(), Square(), ExecutionPolicy(false));
-  for (size_t i = 0; i < count; i++) {
-    PT_EXPECT_EQ(vector[i], init[i]*init[i]);
-  }
-
-  vector = init;
   ForEach(vector.begin(), vector.end(), Square(), ExecutionPolicy(true, 1));
   for (size_t i = 0; i < count; i++) {
     PT_EXPECT_EQ(vector[i], init[i]*init[i]);
...
@@ -208,13 +208,6 @@ void MergeSortTest::TestPolicy() {
   vector = init;
   MergeSortAllocate(vector.begin(), vector.end(), std::less<int>(),
-                    ExecutionPolicy(false));
-  for (size_t i = 0; i < count; i++) {
-    PT_EXPECT_EQ(vector_copy[i], vector[i]);
-  }
-
-  vector = init;
-  MergeSortAllocate(vector.begin(), vector.end(), std::less<int>(),
                     ExecutionPolicy(true, 1));
   for (size_t i = 0; i < count; i++) {
     PT_EXPECT_EQ(vector_copy[i], vector[i]);
...
@@ -214,13 +214,6 @@ void QuickSortTest::TestPolicy() {
   vector = init;
   QuickSort(vector.begin(), vector.end(), std::greater<int>(),
-            ExecutionPolicy(false));
-  for (size_t i = 0; i < count; i++) {
-    PT_EXPECT_EQ(vector_copy[i], vector[i]);
-  }
-
-  vector = init;
-  QuickSort(vector.begin(), vector.end(), std::greater<int>(),
             ExecutionPolicy(true, 1));
   for (size_t i = 0; i < count; i++) {
     PT_EXPECT_EQ(vector_copy[i], vector[i]);
...
@@ -179,8 +179,6 @@ void ReduceTest::TestPolicy() {
                       Identity(), ExecutionPolicy()), sum);
   PT_EXPECT_EQ(Reduce(vector.begin(), vector.end(), 0, std::plus<int>(),
                       Identity(), ExecutionPolicy(true)), sum);
-  PT_EXPECT_EQ(Reduce(vector.begin(), vector.end(), 0,
-               std::plus<int>(), Identity(), ExecutionPolicy(false)), sum);
   PT_EXPECT_EQ(Reduce(vector.begin(), vector.end(), 0, std::plus<int>(),
                       Identity(), ExecutionPolicy(true, 1)), sum);
 }
...
@@ -284,15 +284,6 @@ void ScanTest::TestPolicy() {
   outputVector = init;
   Scan(vector.begin(), vector.end(), outputVector.begin(), 0, std::plus<int>(),
-       Identity(), ExecutionPolicy(false));
-  expected = 0;
-  for (size_t i = 0; i < count; i++) {
-    expected += vector[i];
-    PT_EXPECT_EQ(expected, outputVector[i]);
-  }
-
-  outputVector = init;
-  Scan(vector.begin(), vector.end(), outputVector.begin(), 0, std::plus<int>(),
        Identity(), ExecutionPolicy(true, 1));
   expected = 0;
   for (size_t i = 0; i < count; i++) {
...
@@ -175,16 +175,6 @@ class Exception : public std::exception {
   }
 
   /**
-   * Returns the error message.
-   * This is here for compatibility with std::exception.
-   *
-   * \return Pointer to error message
-   */
-  virtual const char* what() const throw() {
-    return What();
-  }
-
-  /**
    * Returns an integer code representing the exception.
    *
    * \return %Exception code
...
@@ -35,12 +35,10 @@ namespace embb {
 namespace base {
 namespace test {
-
 /**
  * Forward declaration for friending.
  */
 class ThreadSpecificStorageTest;
-
 }
 
 /**
...
@@ -43,8 +43,7 @@ class ThreadTest : public partest::TestCase {
   /**
    * Adds test methods.
    */
-  ThreadTest(); /**<
-    number of threads concurrently running test methods */
+  ThreadTest();
 
  private:
   /**
...
@@ -89,7 +89,7 @@ class Inputs<Slices, T1, embb::base::internal::Nil, embb::base::internal::Nil,
     const int idx = clock % Slices;
     if (count_[idx] == 0) {
       EMBB_THROW(embb::base::ErrorException,
-        "All inputs already fired for this clock.")
+        "All inputs already fired for this clock.");
     }
     if (--count_[idx] == 0) {
       count_[idx] = 1;
@@ -135,7 +135,7 @@ class Inputs<Slices, T1, T2, embb::base::internal::Nil,
     const int idx = clock % Slices;
     if (count_[idx] == 0) {
       EMBB_THROW(embb::base::ErrorException,
-        "All inputs already fired for this clock.")
+        "All inputs already fired for this clock.");
     }
     if (--count_[idx] == 0) {
       count_[idx] = 2;
@@ -185,7 +185,7 @@ class Inputs<Slices, T1, T2, T3, embb::base::internal::Nil,
     const int idx = clock % Slices;
     if (count_[idx] == 0) {
       EMBB_THROW(embb::base::ErrorException,
-        "All inputs already fired for this clock.")
+        "All inputs already fired for this clock.");
     }
     if (--count_[idx] == 0) {
       count_[idx] = 3;
@@ -238,7 +238,7 @@ class Inputs<Slices, T1, T2, T3, T4, embb::base::internal::Nil>
     const int idx = clock % Slices;
     if (count_[idx] == 0) {
       EMBB_THROW(embb::base::ErrorException,
-        "All inputs already fired for this clock.")
+        "All inputs already fired for this clock.");
     }
     if (--count_[idx] == 0) {
       count_[idx] = 4;
@@ -296,7 +296,7 @@ class Inputs
     const int idx = clock % Slices;
     if (count_[idx] == 0) {
       EMBB_THROW(embb::base::ErrorException,
-        "All inputs already fired for this clock.")
+        "All inputs already fired for this clock.");
     }
     if (--count_[idx] == 0) {
       count_[idx] = 5;
...
@@ -106,6 +106,13 @@ class ExecutionPolicy{
     );
 
   /**
+   * Returns the number of cores the policy is affine to.
+   *
+   * \return the number of cores
+   */
+  unsigned int GetCoreCount() const;
+
+  /**
    * Returns the affinity
    *
    * \return the affinity
...
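To show the intent of the new accessor (not part of the commit): the algorithm implementations above now derive the default block size from the policy rather than from the node, roughly as follows. The exception header path is an assumption.

// Hypothetical illustration, not part of the commit.
#include <embb/base/exceptions.h>
#include <embb/mtapi/execution_policy.h>
#include <cstddef>

inline std::size_t DeriveBlockSize(std::size_t distance,
    const embb::mtapi::ExecutionPolicy& policy) {
  unsigned int num_cores = policy.GetCoreCount();
  if (num_cores == 0) {
    EMBB_THROW(embb::base::ErrorException, "No cores in execution policy");
  }
  std::size_t block_size = distance / num_cores;
  return (block_size == 0) ? 1 : block_size;
}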
@@ -129,6 +129,15 @@ class Node {
   }
 
   /**
+   * Returns the number of worker threads.
+   * \return The number of worker threads.
+   * \waitfree
+   */
+  mtapi_uint_t GetWorkerThreadCount() const {
+    return worker_thread_count_;
+  }
+
+  /**
    * Creates a Group to launch \link Task Tasks \endlink in.
    * \return A reference to the created Group
    * \throws ErrorException if the Group object could not be constructed.
@@ -210,6 +219,7 @@ class Node {
     mtapi_task_context_t * context);
 
   mtapi_uint_t core_count_;
+  mtapi_uint_t worker_thread_count_;
   mtapi_action_hndl_t action_handle_;
   std::list<Queue*> queues_;
   std::list<Group*> groups_;
...
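For context (not part of the commit): the partitioner change above switches to this new worker-thread count, which reflects the node's configured core affinity rather than raw hardware concurrency. A hedged sketch of the intended use, with the header path assumed:

// Hypothetical illustration, not part of the commit.
#include <embb/mtapi/mtapi.h>
#include <cstddef>

inline std::size_t DefaultChunkCount() {
  embb::mtapi::Node& node = embb::mtapi::Node::GetInstance();
  // The worker thread count comes from the node attributes' core affinity
  // (see node.cc below) and may be smaller than GetCoreCount().
  return static_cast<std::size_t>(node.GetWorkerThreadCount());
}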
@@ -27,6 +27,7 @@
 #include <embb/mtapi/execution_policy.h>
 #include <embb/mtapi/mtapi.h>
 #include <embb/base/exceptions.h>
+#include <embb/base/c/internal/bitset.h>
 #include <cassert>
 
 namespace embb {
@@ -105,6 +106,10 @@ bool ExecutionPolicy::IsSetWorker(mtapi_uint_t worker) {
   return MTAPI_TRUE == aff;
 }
 
+unsigned int ExecutionPolicy::GetCoreCount() const {
+  return embb_bitset_count(&affinity_);
+}
+
 const mtapi_affinity_t &ExecutionPolicy::GetAffinity() const {
   return affinity_;
 }
...
@@ -75,6 +75,7 @@ Node::Node(
       "mtapi::Node could not initialize mtapi");
   }
   core_count_ = info.hardware_concurrency;
+  worker_thread_count_ = embb_core_set_count(&attr->core_affinity);
   action_handle_ = mtapi_action_create(MTAPI_CPP_TASK_JOB, action_func,
     MTAPI_NULL, 0, MTAPI_NULL, &status);
   if (MTAPI_SUCCESS != status) {
...