Commit 053ca488 by Christian Kern

Worked on review comments for ticket #523

parent a023d6e4
@@ -128,14 +128,20 @@ void embb_internal_thread_index_set_max(unsigned int max) {
  *embb_max_number_thread_indices() = max;
}

/**
 * \pre the calling thread is the only active thread
 *
 * \post the thread indices count and calling thread index is reset
 */
void embb_internal_thread_index_reset() {
  /** This function is only called in tests, usually when all other threads
   * except the main thread have terminated. However, the main thread may
   * still have its old index value stored in its thread local storage,
   * which might then be assigned additionally to another thread (as the
   * counter is reset), which may lead to hard to detect bugs. Therefore,
   * reset the thread local thread id here.
   */
  embb_internal_thread_index_var = UINT_MAX;
  embb_counter_init(embb_thread_index_counter());
}
\ No newline at end of file
@@ -30,7 +30,6 @@
namespace embb {
namespace containers {
namespace internal {
// Visual Studio complains that the return in the last line of this
// function is not reachable. This is true, as long as exceptions are enabled.
// Otherwise, the exception becomes an assertion and with disabling assertions,
@@ -39,372 +38,354 @@ namespace internal {
#pragma warning(push)
#pragma warning(disable:4702)
#endif
template< typename GuardType >
unsigned int HazardPointer< GuardType >::GetObjectLocalThreadIndex() {
  // first, get the EMBB native thread id.
  unsigned int embb_thread_index;

  int return_val = embb_internal_thread_index(&embb_thread_index);

  if (return_val != EMBB_SUCCESS) {
    EMBB_THROW(embb::base::ErrorException, "Could not get thread id");
  }

  // iterate over the mappings array
  for (unsigned int i = 0; i != max_accessors_count_; ++i) {
    // end of mappings? then we need to write our id
    if (thread_id_mapping_[i] == -1) {
      // try to CAS the initial value with our thread id
      int expected = -1;
      if (thread_id_mapping_[i].CompareAndSwap(expected,
        static_cast<int>(embb_thread_index))) {
        // successful, return our mapping
        return i;
      }
    }

    if (thread_id_mapping_[i] == static_cast<int>(embb_thread_index)) {
      // found our mapping!
      return i;
    }
  }

  // when we reach this point, we have too many accessors
  // (no mapping possible)
  EMBB_THROW(embb::base::ErrorException, "Too many accessors");

  return 0;
}
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif

template< typename GuardType >
void HazardPointer< GuardType >::RemoveGuard(int guard_position) {
  const unsigned int my_thread_id = GetObjectLocalThreadIndex();

  // check invariants...
  assert(guard_position < max_guards_per_thread_);
  assert(my_thread_id < max_accessors_count_);

  // set guard
  guards_[guard_position*max_accessors_count_ + my_thread_id] =
    undefined_guard_;
}
template< typename GuardType >
HazardPointer< GuardType >::HazardPointer(
  embb::base::Function<void, GuardType> freeGuardCallback,
  GuardType undefined_guard, int guardsPerThread, int accessors) :
  max_accessors_count_(accessors < 0 ?
    embb::base::Thread::GetThreadsMaxCount() : accessors),
  undefined_guard_(undefined_guard),
  max_guards_per_thread_(guardsPerThread),
  release_object_callback_(freeGuardCallback),
  thread_id_mapping_(static_cast<embb::base::Atomic<int>*>(
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)
      *max_accessors_count_))),
  guards_(static_cast<embb::base::Atomic< GuardType >*>
    (embb::base::Allocation::Allocate(
      sizeof(embb::base::Atomic< GuardType >) * max_guards_per_thread_ *
      max_accessors_count_))),
  thread_local_retired_lists_temp_(static_cast<GuardType*>
    (embb::base::Allocation::Allocate(
      sizeof(GuardType) * max_guards_per_thread_ * max_accessors_count_ *
      max_accessors_count_
    ))),
  thread_local_retired_lists_(static_cast<GuardType*>
    (embb::base::Allocation::Allocate(
      sizeof(GuardType) * max_guards_per_thread_ * max_accessors_count_ *
      max_accessors_count_
    ))) {
  const unsigned int count_guards =
    max_guards_per_thread_ * max_accessors_count_;

  const unsigned int count_ret_elements =
    count_guards * max_accessors_count_;

  for (unsigned int i = 0; i != max_accessors_count_; ++i) {
    // in-place new for each cell
    new (&thread_id_mapping_[i]) embb::base::Atomic < int >(-1);
  }

  for (unsigned int i = 0; i != count_guards; ++i) {
    // in-place new for each cell
    new (&guards_[i]) embb::base::Atomic < GuardType >(undefined_guard);
  }

  for (unsigned int i = 0; i != count_ret_elements; ++i) {
    // in-place new for each cell
    new (&thread_local_retired_lists_temp_[i]) GuardType(undefined_guard);
  }

  for (unsigned int i = 0; i != count_ret_elements; ++i) {
    // in-place new for each cell
    new (&thread_local_retired_lists_[i]) GuardType(undefined_guard);
  }
}

template< typename GuardType >
HazardPointer< GuardType >::~HazardPointer() {
  const unsigned int count_guards =
    max_guards_per_thread_ * max_accessors_count_;

  const unsigned int count_ret_elements =
    count_guards * max_accessors_count_;

  // Release references from all retired lists. Note that for this to work,
  // the data structure using hazard pointer has still to be active... So
  // first, the hazard pointer class shall be destructed, then the memory
  // management class (e.g. some pool). Otherwise, the hazard pointer class
  // would try to return memory to an already destructed memory manager.
  for (unsigned int i = 0; i != count_ret_elements; ++i) {
    GuardType pointerToFree =
      thread_local_retired_lists_[i];
    if (pointerToFree == undefined_guard_) {
      break;
    }
    release_object_callback_(pointerToFree);
  }

  for (unsigned int i = 0; i != max_accessors_count_; ++i) {
    thread_id_mapping_[i].~Atomic();
  }

  embb::base::Allocation::Free(thread_id_mapping_);

  for (unsigned int i = 0; i != count_guards; ++i) {
    guards_[i].~Atomic();
  }

  embb::base::Allocation::Free(guards_);

  for (unsigned int i = 0; i != count_ret_elements; ++i) {
    thread_local_retired_lists_temp_[i].~GuardType();
  }

  embb::base::Allocation::Free(thread_local_retired_lists_temp_);

  for (unsigned int i = 0; i != count_ret_elements; ++i) {
    thread_local_retired_lists_[i].~GuardType();
  }

  embb::base::Allocation::Free(thread_local_retired_lists_);
}
template< typename GuardType >
void HazardPointer< GuardType >::Guard(int guardPosition,
  GuardType guardedElement) {
  const unsigned int my_thread_id = GetObjectLocalThreadIndex();

  // check invariants...
  assert(guardPosition < max_guards_per_thread_);
  assert(my_thread_id < max_accessors_count_);

  // set guard
  guards_[guardPosition*max_accessors_count_ + my_thread_id] = guardedElement;
}

template< typename GuardType >
size_t HazardPointer< GuardType >::ComputeMaximumRetiredObjectCount(
  size_t guardsPerThread, int accessors) {
  unsigned int accessorCount = (accessors == -1 ?
    embb::base::Thread::GetThreadsMaxCount() :
    accessors);

  return static_cast<size_t>(
    guardsPerThread * accessorCount * accessorCount);
}

/**
 * Remark: it might be faster to just swap pointers for temp retired list and
 * retired list. However, with the current implementation (one array for all
 * retired and retired temp lists, respectively) this is not possible. This is
 * not changed until this copying accounts for a performance problem. The
 * copying is not the bottleneck currently.
 */
template< typename GuardType >
void HazardPointer< GuardType >::CopyRetiredList(GuardType* sourceList,
  GuardType* targetList, unsigned int retiredListSize,
  GuardType undefinedGuard) {
  bool done = false;
  for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
    if (!done) {
      GuardType guardToCopy = sourceList[ii];

      if (guardToCopy == undefinedGuard) {
        done = true;

        if (targetList[ii] == undefinedGuard) {
          // end of target list
          break;
        }
      }

      targetList[ii] = guardToCopy;
    }
    else {
      // we copied the whole source list, remaining values in the target
      // have to be zeroed.
      if (targetList[ii] == undefinedGuard) {
        // end of target list
        break;
      }
      else {
        targetList[ii] = undefinedGuard;
      }
    }
  }
}

template< typename GuardType >
void HazardPointer< GuardType >::UpdateRetiredList(GuardType* retired_list,
  GuardType* updated_retired_list, unsigned int retired_list_size,
  GuardType guarded_element, GuardType considered_hazard,
  GuardType undefined_guard) {
  // no hazard set here
  if (considered_hazard == undefined_guard)
    return;

  // if this hazard is currently in the union of
  // threadLocalRetiredLists and pointerToRetire, but not yet in
  // threadLocalRetiredListsTemp, add it to that list
  bool contained_in_union = false;

  // first iterate over our retired list
  for (unsigned int i = 0; i != retired_list_size; ++i) {
    // when reaching 0, we can stop iterating (end of the "list")
    if (retired_list[i] == 0)
      break;

    // the hazard is contained in the retired list... it shall go
    // into the temp list, if not already there
    if (retired_list[i] == considered_hazard) {
      contained_in_union = true;
      break;
    }
  }

  // the union also contains pointerToRetire
  if (!contained_in_union) {
    contained_in_union = (considered_hazard == guarded_element);
  }

  // add the pointer to temp. retired list, if not already there
  if (contained_in_union) {
    for (unsigned int ii = 0; ii != retired_list_size; ++ii) {
      // is it already there?
      if (updated_retired_list[ii] == considered_hazard)
        break;

      // end of the list
      if (updated_retired_list[ii] == undefined_guard) {
        // add hazard
        updated_retired_list[ii] = considered_hazard;

        // we are done here...
        break;
      }
    }
  }
}
template< typename GuardType >
void HazardPointer< GuardType >::EnqueueForDeletion(GuardType toRetire) {
  unsigned int my_thread_id = GetObjectLocalThreadIndex();

  // check for invariant
  assert(my_thread_id < max_accessors_count_);

  const unsigned int retired_list_size = max_accessors_count_ *
    max_guards_per_thread_;

  const unsigned int count_guards = max_accessors_count_ *
    max_guards_per_thread_;

  GuardType* retired_list =
    &thread_local_retired_lists_[my_thread_id * retired_list_size];

  GuardType* retired_list_temp =
    &thread_local_retired_lists_temp_[my_thread_id * retired_list_size];

  // wipe my temp. retired list...
  for (unsigned int i = 0; i < retired_list_size; ++i) {
    // the list is always filled from left to right, so on the first
    // undefinedGuard the remaining ones are also undefinedGuard...
    if (retired_list_temp[i] == undefined_guard_)
      break;

    retired_list_temp[i] = undefined_guard_;
  }

  // we test each hazard if it is in the union of retiredList and
  // guardedElement. If it is, it goes into the new retired list...
  for (unsigned int i = 0; i != count_guards; ++i) {
    // consider each currently active guard
    GuardType considered_hazard = guards_[i].Load();
    UpdateRetiredList(retired_list, retired_list_temp, retired_list_size,
      toRetire, considered_hazard, undefined_guard_);
  }

  int retired_list_size_signed = static_cast<int>(retired_list_size);
  assert(retired_list_size_signed >= 0);

  // now we created a new retired list... the elements that are "removed"
  // from the old retired list can be safely deleted now...
  for (int i = -1; i != retired_list_size_signed; ++i) {
    // we iterate over the current retired list... -1 is used as dummy element
    // in the iteration, to also iterate over the pointerToRetire, which is
    // logically also part of the current retired list...

    // end of the list, stop iterating
    if (i >= 0 && retired_list[i] == undefined_guard_)
      break;

    GuardType to_check_if_in_new_list = undefined_guard_;
    to_check_if_in_new_list = (i == -1 ? toRetire : retired_list[i]);

    // still in the new retired list?
    bool still_in_list = false;
    for (unsigned int ii = 0; ii != retired_list_size; ++ii) {
      // end of list
      if (retired_list_temp[ii] == undefined_guard_)
        break;

      if (to_check_if_in_new_list == retired_list_temp[ii]) {
        // still in list, cannot delete element!
        still_in_list = true;
        break;
      }
    }

    if (!still_in_list) {
      this->release_object_callback_(to_check_if_in_new_list);
    }
  }

  // copy the updated retired list (temp) to the retired list...
  CopyRetiredList(retired_list_temp, retired_list, retired_list_size,
    undefined_guard_);
}
} // namespace internal
} // namespace containers
} // namespace embb
...
@@ -53,7 +53,6 @@ class HazardPointerTest2;
namespace embb {
namespace containers {
namespace internal {
/**
 * This class contains a hazard pointer implementation following publication:
 *
@@ -61,7 +60,7 @@ namespace internal {
 * objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004)
 * : 491-504.
 *
 * Hazard pointers are a wait-free memory reclamation scheme for lock-free
 * algorithms. Loosely speaking, they act as a garbage collector. The release
 * of objects contained within the memory, managed by the hazard pointer
 * class, is intercepted and possibly delayed to avoid concurrency bugs.
@@ -107,111 +106,13 @@ namespace internal {
 * when objects shall be freed. In this implementation, we free whenever it is
 * possible to do so, as we want to keep the memory footprint as low as
 * possible. We also don't see a performance drop in the current algorithms
 * that are using hazard pointers, when not using a threshold.
 *
 * \tparam GuardType the type of the guards. Usually the pointer type of some
 * object to protect.
 */
template< typename GuardType >
class HazardPointer {
 public:
  /**
@@ -221,7 +122,7 @@ class HazardPointer {
   * guarantee at each point in time. More specifically, on top of the
   * guaranteed count of objects, he has to provide the additional count of
   * objects that can be (worst-case) contained in the retired lists and
   * therefore are not released yet. The summed size of all retired lists is
   * guardsPerThread * accessorCount * accessorCount, which is computed using
   * this function. So the result of this function denotes to the user, how
   * many objects he has to allocate additionally to the guaranteed count.
@@ -232,10 +133,10 @@ class HazardPointer {
    size_t guardsPerThread,
    /**<[IN] the count of guards per thread*/
    int accessors = -1
    /**<[IN] Number of accessors. Determines, how many threads will access
             the hazard pointer object. Default value -1 will allow the
             maximum amount of threads as defined with
             \c embb::base::Thread::GetThreadsMaxCount()*/
  );
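For illustration, the worst-case bound described above can be computed with a small standalone helper (a sketch, not part of the EMBB sources; the function name and the example numbers are made up):

#include <cstddef>

// With 1 guard per thread and 4 accessors the bound is 1 * 4 * 4 = 16, so a
// pool that shall always be able to hand out N objects needs N + 16 elements.
std::size_t MaxRetiredObjectCount(std::size_t guards_per_thread,
                                  std::size_t accessor_count) {
  return guards_per_thread * accessor_count * accessor_count;
}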
  /**
@@ -245,9 +146,9 @@ class HazardPointer {
   *
   * \memory We dynamically allocate the following:
   *
   * (sizeof(Atomic<int>) * accessors) + (sizeof(Atomic<GuardType>) *
   * guards_per_thread * accessors) + (2*sizeof(GuardType) *
   * guards_per_thread * accessors^2)
   *
   * The last addend is the dominant one, as accessorCount accounts
   * quadratically for it.
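As a rough worked example of this memory bound (illustration only; it assumes sizeof(GuardType) == 8 and sizeof(Atomic<int>) == 4, both platform dependent, and ignores allocator overhead and padding):

  // guards_per_thread = 1, accessors = 4:
  //   sizeof(Atomic<int>) * accessors             =  4 * 4          =  16 bytes
  //   sizeof(Atomic<GuardType>) * 1 * accessors   =  8 * 1 * 4      =  32 bytes
  //   2 * sizeof(GuardType) * 1 * accessors^2     =  2 * 8 * 1 * 16 = 256 bytes
  // roughly 304 bytes in total; the last addend dominates because it grows
  // quadratically with the number of accessors.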
@@ -277,29 +178,137 @@ class HazardPointer {
  ~HazardPointer();

  /**
   * Guards \c to_guard. If the guarded_element is passed to
   * \c EnqueueForDeletion, it is prevented from being released from now on.
   * The user must check that EnqueueForDeletion has not been called on
   * to_guard before the guarding took effect.
   *
   * \waitfree
   */
  void Guard(
    int guard_position,
    /**<[IN] position to place guard*/
    GuardType to_guard
    /**<[IN] element to guard*/
  );

  /**
   * Enqueue guarded element for deletion. If not guarded, it is deleted
   * immediately. If it is guarded, it is added to a thread local retired list,
   * and deleted in a subsequent call to \c EnqueueForDeletion, when no guard
   * is placed on it anymore.
   */
  void EnqueueForDeletion(
    GuardType guarded_element
    /**<[IN] element to logically delete*/
  );

  /**
   * Explicitly remove guard from thread local slot.
   *
   * \waitfree
   */
  void RemoveGuard(int guard_position);

 private:
  /**
   * HazardPointerTest2 is a white-box test, needing access to private members
   * of this class. So declaring it as friend.
   */
  friend class embb::containers::test::HazardPointerTest2;

  /**
   * This number determines the maximal amount of accessors (threads) that
   * will access this hazard pointer instance. Note that a thread once
   * accessing this object will be permanently counted as accessor, even if
   * it is not participating anymore. If too many threads access this object,
   * an exception is thrown.
   */
  unsigned int max_accessors_count_;

  /**
   * The guard value denoting "not guarded"
   */
  GuardType undefined_guard_;

  /**
   * The maximal count of guards that can be set per thread.
   */
  int max_guards_per_thread_;

  /**
   * The functor that is called to release an object. This is called by this
   * class, when it is safe to do so, i.e., no thread accesses this object
   * anymore.
   */
  embb::base::Function<void, GuardType> release_object_callback_;

  /**
   * Mapping from EMBB thread id to hazard pointer thread ids. Hazard pointer
   * thread ids are in range [0;accessor_count-1]. The position of an EMBB
   * thread id in that array determines the respective hazard pointer thread
   * id.
   */
  embb::base::Atomic<int>* thread_id_mapping_;

  /**
   * The hazard pointer guards, represented as array. Each thread has a fixed
   * set of slots (guardsPerThread) within this array.
   */
  embb::base::Atomic<GuardType>* guards_;

  /**
   * \see threadLocalRetiredLists documentation
   */
  GuardType* thread_local_retired_lists_temp_;

  /**
   * A list of lists, represented as single array. Each thread maintains a
   * list of retired pointers, that are objects that are logically released
   * but not yet freed because some thread placed a guard on them.
   */
  GuardType* thread_local_retired_lists_;

  /**
   * Each thread is assigned a thread index (starting with 0). Get the index
   * of the current thread. Note that this is not the global index, but a
   * hazard pointer class internal one. The user is free to define fewer
   * accessors than the amount of default threads. This is useful, as the
   * number of accessors accounts quadratically for the memory consumption,
   * so the user should have the possibility to avoid memory wastage when
   * only having a small, fixed-size number of accessors.
   *
   * @return current (hazard pointer object local) thread index
   */
  unsigned int GetObjectLocalThreadIndex();

  /**
   * Copy retired list \c sourceList to retired list \c targetList
   */
  static void CopyRetiredList(
    GuardType* source_list,
    /**<[IN] the source retired list*/
    GuardType* target_list,
    /**<[IN] the target retired list*/
    unsigned int single_retired_list_size,
    /**<[IN] the size of a thread local retired list*/
    GuardType undefined_guard
    /**<[IN] the undefined guard (usually the NULL pointer)*/
  );

  static void UpdateRetiredList(
    GuardType* retired_list,
    /**<[IN] the old retired list*/
    GuardType* updated_retired_list,
    /**<[IN] the updated retired list*/
    unsigned int retired_list_size,
    /**<[IN] the size of a thread local retired list*/
    GuardType to_retire,
    /**<[IN] the element to retire*/
    GuardType considered_hazard,
    /**<[IN] the currently considered hazard*/
    GuardType undefined_guard
    /**<[IN] the undefined guard (usually the NULL pointer)*/
  );
};
} // namespace internal
} // namespace containers
...
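The guard / re-check / retire pattern that this interface implies can be sketched as follows (illustration only, not part of this commit; the Node type, NodeOwner class, FreeNode callback, and the include paths are assumptions made for the example):

#include <embb/base/atomic.h>
#include <embb/base/function.h>
#include <embb/containers/internal/hazard_pointer.h>

struct Node { int value; };  // hypothetical node type

class NodeOwner {
 public:
  // The tests in this commit suppress MSVC warning 4355 for passing *this
  // to the callback in the initializer list; omitted here for brevity.
  NodeOwner()
    : free_callback_(*this, &NodeOwner::FreeNode),
      hp_(free_callback_, static_cast<Node*>(NULL), 1) {}

  void ReadSharedNode(embb::base::Atomic<Node*>& shared) {
    Node* node = shared.Load();
    if (node == NULL) return;
    hp_.Guard(0, node);            // announce the hazard in slot 0
    if (node != shared.Load()) {   // re-check: the node may already have been
      hp_.RemoveGuard(0);          // retired before the guard took effect
      return;
    }
    // ... node can be dereferenced safely here ...
    hp_.RemoveGuard(0);
  }

  void RetireNode(Node* node) {
    // released via FreeNode as soon as no accessor guards it anymore
    hp_.EnqueueForDeletion(node);
  }

 private:
  void FreeNode(Node* to_free) { delete to_free; }

  embb::base::Function<void, Node*> free_callback_;
  embb::containers::internal::HazardPointer<Node*> hp_;
};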
@@ -31,23 +31,22 @@
namespace embb {
namespace containers {
namespace test {
IntObjectTestPool::IntObjectTestPool(unsigned int pool_size) :
  poolSize(pool_size)
{
  simplePoolObjects = static_cast<int*>(
    embb::base::Allocation::Allocate(sizeof(int)*pool_size));

  simplePool = static_cast<embb::base::Atomic<int>*> (
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)*
    pool_size));

  for (unsigned int i = 0; i != pool_size; ++i) {
    // in-place new for each array cell
    new (&simplePool[i]) embb::base::Atomic<int>;
  }

  for (unsigned int i = 0; i != pool_size; ++i) {
    simplePool[i] = FREE_MARKER;
    simplePoolObjects[i] = 0;
  }
@@ -75,8 +74,8 @@ int* IntObjectTestPool::Allocate() {
  return 0;
}

void IntObjectTestPool::Release(int* object_pointer) {
  int cell = object_pointer - simplePoolObjects;
  simplePool[cell].Store(FREE_MARKER);
}
@@ -85,17 +84,17 @@ HazardPointerTest::HazardPointerTest() :
#pragma warning(push)
#pragma warning(disable:4355)
#endif
  delete_pointer_callback_(*this, &HazardPointerTest::DeletePointerCallback),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
  object_pool_(NULL),
  stack_(NULL),
  hazard_pointer_(NULL),
  n_threads_(static_cast<int>
    (partest::TestSuite::GetDefaultNumThreads())) {
  n_elements_per_thread_ = 100;
  n_elements_ = n_threads_*n_elements_per_thread_;
  embb::base::Function < void, embb::base::Atomic<int>* >
    deletePointerCallback(
    *this,
@@ -111,45 +110,45 @@ deletePointerCallback(*this, &HazardPointerTest::DeletePointerCallback),
    Pre(&HazardPointerTest::HazardPointerTest1Pre, this).
    Add(
      &HazardPointerTest::HazardPointerTest1ThreadMethod,
      this, static_cast<size_t>(n_threads_)).
    Post(&HazardPointerTest::HazardPointerTest1Post, this);
}
void HazardPointerTest::HazardPointerTest1Pre() {
  embb_internal_thread_index_reset();

  object_pool_ =
    embb::base::Allocation::
    New<embb::containers::ObjectPool< embb::base::Atomic<int> > >
    (static_cast<size_t>(n_elements_));

  stack_ = embb::base::Allocation::
    New<embb::containers::LockFreeStack< embb::base::Atomic<int>* > >
    (static_cast<size_t>(n_elements_));

  hazard_pointer_ = embb::base::Allocation::
    New<embb::containers::internal::HazardPointer < embb::base::Atomic<int>* > >
    (delete_pointer_callback_,
    static_cast<embb::base::Atomic<int>*>(NULL),
    1);
}

void HazardPointerTest::HazardPointerTest1Post() {
  embb::base::Allocation::Delete(hazard_pointer_);
  embb::base::Allocation::Delete(object_pool_);
  embb::base::Allocation::Delete(stack_);
}
void HazardPointerTest::HazardPointerTest1ThreadMethod() {
  unsigned int thread_index;
  embb_internal_thread_index(&thread_index);

  for (int i = 0; i != n_elements_per_thread_; ++i) {
    embb::base::Atomic<int>* allocated_object = object_pool_->Allocate(0);

    hazard_pointer_->Guard(0, allocated_object);

    bool success = stack_->TryPush(allocated_object);

    PT_ASSERT(success == true);
@@ -161,7 +160,7 @@ void HazardPointerTest::HazardPointerTest1ThreadMethod() {
    bool success_pop;

    while (
      (success_pop = stack_->TryPop(allocated_object_from_different_thread))
      == true
      && allocated_object_from_different_thread == allocated_object
      ) {
@@ -171,99 +170,100 @@ void HazardPointerTest::HazardPointerTest1ThreadMethod() {
        same = true;
        break;
      }
      bool success = stack_->TryPush(allocated_object_from_different_thread);
      PT_ASSERT(success == true);
    }
    PT_ASSERT(success_pop == true);
    allocated_object->Store(1);
    hazard_pointer_->EnqueueForDeletion(allocated_object);

    if (!same) {
      hazard_pointer_->Guard(0, allocated_object_from_different_thread);

      // if this holds, we were successful in guarding... otherwise we
      // were too late, because the pointer has already been added
      // to the retired list.
      if (*allocated_object_from_different_thread == 0) {
        // the pointer must not be deleted here!
        vector_mutex_.Lock();
        for (std::vector< embb::base::Atomic<int>* >::iterator
          it = deleted_vector_.begin();
          it != deleted_vector_.end();
          ++it) {
          PT_ASSERT(*it != allocated_object_from_different_thread);
        }
        vector_mutex_.Unlock();
      }

      hazard_pointer_->Guard(0, NULL);
    }
  }
}

void HazardPointerTest::DeletePointerCallback
(embb::base::Atomic<int>* to_delete) {
  vector_mutex_.Lock();
  deleted_vector_.push_back(to_delete);
  vector_mutex_.Unlock();
}
void HazardPointerTest2::DeletePointerCallback(int* to_delete) {
  test_pool_->Release(to_delete);
}

bool HazardPointerTest2::SetRelativeGuards() {
  unsigned int thread_index;
  embb_internal_thread_index(&thread_index);

  unsigned int my_begin = guards_per_phread_count_*thread_index;
  int guard_number = 0;
  unsigned int alreadyGuarded = 0;

  for (unsigned int i = my_begin; i != my_begin + guards_per_phread_count_;
    ++i) {
    if (shared_guarded_[i] != 0) {
      alreadyGuarded++;
      guard_number++;
      continue;
    }

    int * to_guard = shared_allocated_[i];
    if (to_guard) {
      hazard_pointer_->Guard(guard_number, to_guard);

      // changed in the meantime?
      if (to_guard == shared_allocated_[i].Load()) {
        // guard was successful. Communicate to other threads.
        shared_guarded_[i] = to_guard;
      }
      else {
        // reset the guard, couldn't guard...
        hazard_pointer_->RemoveGuard(guard_number);
      }
    }
    guard_number++;
  }
  return(alreadyGuarded == guards_per_phread_count_);
}
void HazardPointerTest2::HazardPointerTest2Master() {
  // while the hazard pointer guard array is not full
  int** allocatedLocal = static_cast<int**>(
    embb::base::Allocation::Allocate(sizeof(int*)*guaranteed_capacity_pool_));

  bool full = false;
  while (!full) {
    full = true;
    for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
      if (shared_guarded_[i] == 0) {
        full = false;
        break;
      }
    }

    // not all guards set
    for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
      allocatedLocal[i] = test_pool_->Allocate();
      shared_allocated_[i].Store(allocatedLocal[i]);
    }

    // set my hazards. We do not have to check, this must be successful
@@ -271,9 +271,9 @@ void HazardPointerTest2::HazardPointerTest2Master() {
    SetRelativeGuards();

    // free
    for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
      shared_allocated_[i].Store(0);
      hazard_pointer_->EnqueueForDeletion(allocatedLocal[i]);
    }
  }
@@ -289,54 +289,54 @@ void HazardPointerTest2::HazardPointerTest2Slave() {
void HazardPointerTest2::HazardPointerTest2Pre() {
  embb_internal_thread_index_reset();
  current_master_ = 0;
  sync1_ = 0;
  sync2_ = 0;

  // first the test pool has to be created
  test_pool_ = embb::base::Allocation::New<IntObjectTestPool>
    (pool_size_using_hazard_pointer_);

  // after the pool has been created, we create the hp class
  hazard_pointer_ = embb::base::Allocation::New <
    embb::containers::internal::HazardPointer<int*> >
    (delete_pointer_callback_, static_cast<int*>(NULL),
    static_cast<int>(guards_per_phread_count_), n_threads);

  shared_guarded_ = static_cast<embb::base::Atomic<int*>*>(
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int*>)*
    guaranteed_capacity_pool_)
    );

  for (unsigned int i = 0; i !=
    guaranteed_capacity_pool_; ++i) {
    // in-place new for each array cell
    new (&shared_guarded_[i]) embb::base::Atomic < int* > ;
  }

  shared_allocated_ = static_cast<embb::base::Atomic<int*>*>(
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int*>)*
    guaranteed_capacity_pool_)
    );

  for (unsigned int i = 0; i !=
    guaranteed_capacity_pool_; ++i) {
    // in-place new for each array cell
    new (&shared_allocated_[i]) embb::base::Atomic < int* > ;
  }

  for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
    shared_guarded_[i] = 0;
    shared_allocated_[i] = 0;
  }
}
void HazardPointerTest2::HazardPointerTest2Post() { void HazardPointerTest2::HazardPointerTest2Post() {
for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) { for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(n_threads)*
for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)* guards_per_phread_count_; ++i2) {
guardsPerThreadCount; ++i2) { if (hazard_pointer_->thread_local_retired_lists_
if (hazardPointer->threadLocalRetiredLists [i2 + i*n_threads*guards_per_phread_count_] == NULL) {
[i2 + i*nThreads*guardsPerThreadCount] == NULL) {
// all retired lists must be completely filled // all retired lists must be completely filled
PT_ASSERT(false); PT_ASSERT(false);
} }
...@@ -344,21 +344,21 @@ void HazardPointerTest2::HazardPointerTest2Post() { ...@@ -344,21 +344,21 @@ void HazardPointerTest2::HazardPointerTest2Post() {
} }
unsigned int checks = 0; unsigned int checks = 0;
for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) { for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)* for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(n_threads)*
guardsPerThreadCount; ++i2) { guards_per_phread_count_; ++i2) {
for (unsigned int j = 0; j != static_cast<unsigned int>(nThreads); ++j) { for (unsigned int j = 0; j != static_cast<unsigned int>(n_threads); ++j) {
for (unsigned int j2 = 0; j2 != static_cast<unsigned int>(nThreads)* for (unsigned int j2 = 0; j2 != static_cast<unsigned int>(n_threads)*
guardsPerThreadCount; ++j2) { guards_per_phread_count_; ++j2) {
if (i2 == j2 && i == j) if (i2 == j2 && i == j)
continue; continue;
// all retired elements have to be disjoint // all retired elements have to be disjoint
PT_ASSERT( PT_ASSERT(
hazardPointer->threadLocalRetiredLists hazard_pointer_->thread_local_retired_lists_
[i2 + i*nThreads*guardsPerThreadCount] != [i2 + i*n_threads*guards_per_phread_count_] !=
hazardPointer->threadLocalRetiredLists hazard_pointer_->thread_local_retired_lists_
[j2 + j*nThreads*guardsPerThreadCount] [j2 + j*n_threads*guards_per_phread_count_]
); );
checks++; checks++;
...@@ -370,16 +370,16 @@ void HazardPointerTest2::HazardPointerTest2Post() { ...@@ -370,16 +370,16 @@ void HazardPointerTest2::HazardPointerTest2Post() {
// sanity check on the count of expected comparisons. // sanity check on the count of expected comparisons.
PT_ASSERT( PT_ASSERT(
checks == checks ==
nThreads*nThreads*guardsPerThreadCount * n_threads*n_threads*guards_per_phread_count_ *
(nThreads*nThreads*guardsPerThreadCount - 1) (n_threads*n_threads*guards_per_phread_count_ - 1)
); );
std::vector< int* > additionallyAllocated; std::vector< int* > additionallyAllocated;
// we should be able to still allocate the guaranteed capacity of // we should be able to still allocate the guaranteed capacity of
// elements from the pool. // elements from the pool.
for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) { for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
int* allocated = testPool->Allocate(); int* allocated = test_pool_->Allocate();
// allocated is not allowed to be zero // allocated is not allowed to be zero
PT_ASSERT(allocated != NULL); PT_ASSERT(allocated != NULL);
...@@ -390,11 +390,11 @@ void HazardPointerTest2::HazardPointerTest2Post() { ...@@ -390,11 +390,11 @@ void HazardPointerTest2::HazardPointerTest2Post() {
} }
// the pool should now be empty // the pool should now be empty
PT_ASSERT(testPool->Allocate() == NULL); PT_ASSERT(test_pool_->Allocate() == NULL);
// release allocated elements... // release allocated elements...
for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) { for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
testPool->Release(additionallyAllocated[i]); test_pool_->Release(additionallyAllocated[i]);
} }
// the additionallyAllocated elements shall be disjoint // the additionallyAllocated elements shall be disjoint
...@@ -409,39 +409,39 @@ void HazardPointerTest2::HazardPointerTest2Post() { ...@@ -409,39 +409,39 @@ void HazardPointerTest2::HazardPointerTest2Post() {
// no allocated element should be in any retired list... // no allocated element should be in any retired list...
for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) { for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) {
for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) { for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)* for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(n_threads)*
guardsPerThreadCount; ++i2) { guards_per_phread_count_; ++i2) {
PT_ASSERT( PT_ASSERT(
hazardPointer->threadLocalRetiredLists hazard_pointer_->thread_local_retired_lists_
[i2 + i*nThreads*guardsPerThreadCount] != [i2 + i*n_threads*guards_per_phread_count_] !=
additionallyAllocated[a] additionallyAllocated[a]
); );
} }
} }
} }
for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) { for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
//in-place new for each array cell //in-place new for each array cell
sharedGuarded[i].~Atomic(); shared_guarded_[i].~Atomic();
} }
embb::base::Allocation::Free(sharedGuarded); embb::base::Allocation::Free(shared_guarded_);
for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) { for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
//in-place new for each array cell //in-place new for each array cell
sharedAllocated[i].~Atomic(); shared_allocated_[i].~Atomic();
} }
embb::base::Allocation::Free(sharedAllocated); embb::base::Allocation::Free(shared_allocated_);
embb::base::Allocation::Delete(hazardPointer); embb::base::Allocation::Delete(hazard_pointer_);
// after deleting the hazard pointer object, all retired pointers have // after deleting the hazard pointer object, all retired pointers have
// to be returned to the pool! // to be returned to the pool!
std::vector<int*> elementsInPool; std::vector<int*> elementsInPool;
int* nextElement; int* nextElement;
while ((nextElement = testPool->Allocate()) != NULL) { while ((nextElement = test_pool_->Allocate()) != NULL) {
for (unsigned int i = 0; i != elementsInPool.size(); ++i) { for (unsigned int i = 0; i != elementsInPool.size(); ++i) {
// all elements need to be disjoint // all elements need to be disjoint
PT_ASSERT(elementsInPool[i] != nextElement); PT_ASSERT(elementsInPool[i] != nextElement);
...@@ -451,85 +451,83 @@ void HazardPointerTest2::HazardPointerTest2Post() { ...@@ -451,85 +451,83 @@ void HazardPointerTest2::HazardPointerTest2Post() {
// all elements should have been returned by the hp object, so we should be // all elements should have been returned by the hp object, so we should be
// able to acquire all elements. // able to acquire all elements.
PT_ASSERT(elementsInPool.size() == poolSizeUsingHazardPointer); PT_ASSERT(elementsInPool.size() == pool_size_using_hazard_pointer_);
embb::base::Allocation::Delete(testPool); embb::base::Allocation::Delete(test_pool_);
} }
void HazardPointerTest2::HazardPointerTest2ThreadMethod() { void HazardPointerTest2::HazardPointerTest2ThreadMethod() {
for (;;) { for (;;) {
unsigned int threadIndex; unsigned int thread_index;
embb_internal_thread_index(&threadIndex); embb_internal_thread_index(&thread_index);
if (threadIndex == currentMaster) { if (thread_index == current_master_) {
HazardPointerTest2Master(); HazardPointerTest2Master();
} }
else { else {
HazardPointerTest2Slave(); HazardPointerTest2Slave();
} }
sync1.FetchAndAdd(1); sync1_.FetchAndAdd(1);
// wait until the cleanup thread signals that it has finished // wait until the cleanup thread signals that it has finished
while (sync1 != 0) { while (sync1_ != 0) {
int expected = nThreads; int expected = n_threads;
int desired = finishMarker; int desired = FINISH_MARKER;
// select the thread responsible for cleanup // select the thread responsible for cleanup
if (sync1.CompareAndSwap(expected, desired)) { if (sync1_.CompareAndSwap(expected, desired)) {
//wipe arrays! //wipe arrays!
for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) { for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
sharedGuarded[i] = 0; shared_guarded_[i] = 0;
sharedAllocated[i] = 0; shared_allocated_[i] = 0;
} }
// increase master // increase master
currentMaster.FetchAndAdd(1); current_master_.FetchAndAdd(1);
sync2 = 0; sync2_ = 0;
sync1.Store(0); sync1_.Store(0);
} }
} }
// wait for all threads to reach this position // wait for all threads to reach this position
sync2.FetchAndAdd(1); sync2_.FetchAndAdd(1);
while (sync2 != static_cast<unsigned int>(nThreads)) {} while (sync2_ != static_cast<unsigned int>(n_threads)) {}
// if each thread was master once, terminate. // if each thread was master once, terminate.
if (currentMaster == static_cast<unsigned int>(nThreads)) { if (current_master_ == static_cast<unsigned int>(n_threads)) {
return; return;
} }
} }
} }
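The loop above is a round-based scheme: in each round every thread plays either the master or the slave role, then all threads meet at a two-counter barrier. sync1_ counts arrivals; exactly one thread CASes it from n_threads to FINISH_MARKER, performs the cleanup (wiping the shared arrays, advancing current_master_), and releases the others by resetting both counters; sync2_ is a second barrier that keeps any thread from racing into the next round. A condensed standalone sketch of that barrier, using std::atomic and hypothetical names rather than the EMBB types:

#include <atomic>

// Simplified sketch of the two-counter round barrier used above
// (standalone illustration, not the EMBB test code itself).
std::atomic<int> sync1(0);
std::atomic<unsigned int> sync2(0);
const int kFinishMarker = -1;

void EndOfRound(int n_threads, void (*cleanup)()) {
  sync1.fetch_add(1);
  // Wait until the elected cleanup thread has reset sync1 to 0.
  while (sync1.load() != 0) {
    int expected = n_threads;
    // Exactly one thread wins this CAS and becomes the cleanup thread.
    if (sync1.compare_exchange_strong(expected, kFinishMarker)) {
      cleanup();         // wipe shared state, advance the master, ...
      sync2.store(0);
      sync1.store(0);    // releases the other threads from the wait loop
    }
  }
  // Second barrier: make sure nobody starts the next round early.
  sync2.fetch_add(1);
  while (sync2.load() != static_cast<unsigned int>(n_threads)) {}
}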
HazardPointerTest2::HazardPointerTest2() : HazardPointerTest2::HazardPointerTest2() :
nThreads(static_cast<int> n_threads(static_cast<int>
(partest::TestSuite::GetDefaultNumThreads())), (partest::TestSuite::GetDefaultNumThreads())),
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push) #pragma warning(push)
#pragma warning(disable:4355) #pragma warning(disable:4355)
#endif #endif
deletePointerCallback( delete_pointer_callback_(
*this, *this,
&HazardPointerTest2::DeletePointerCallback) &HazardPointerTest2::DeletePointerCallback)
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop) #pragma warning(pop)
#endif #endif
{ {
guardsPerThreadCount = 5; guards_per_phread_count_ = 5;
guaranteedCapacityPool = guardsPerThreadCount*nThreads; guaranteed_capacity_pool_ = guards_per_phread_count_*n_threads;
poolSizeUsingHazardPointer = guaranteedCapacityPool + pool_size_using_hazard_pointer_ = guaranteed_capacity_pool_ +
guardsPerThreadCount*nThreads*nThreads; guards_per_phread_count_*n_threads*n_threads;
embb::base::Thread::GetThreadsMaxCount(); embb::base::Thread::GetThreadsMaxCount();
CreateUnit("HazardPointerTestSimulateMemoryWorstCase"). CreateUnit("HazardPointerTestSimulateMemoryWorstCase").
Pre(&HazardPointerTest2::HazardPointerTest2Pre, this). Pre(&HazardPointerTest2::HazardPointerTest2Pre, this).
Add( Add(
&HazardPointerTest2::HazardPointerTest2ThreadMethod, &HazardPointerTest2::HazardPointerTest2ThreadMethod,
this, static_cast<size_t>(nThreads)). this, static_cast<size_t>(n_threads)).
Post(&HazardPointerTest2::HazardPointerTest2Post, this); Post(&HazardPointerTest2::HazardPointerTest2Post, this);
} }
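The sizing in the constructor encodes the hazard pointer worst case: the guaranteed capacity (guards_per_phread_count_ * n_threads) covers one pool element per guard, and on top of that each of the n_threads threads can have up to guards-per-thread * n_threads pointers sitting in its local retired list, not yet returned to the pool, which gives the quadratic term. A worked example with hypothetical fixed numbers (the real thread count comes from partest::TestSuite::GetDefaultNumThreads()):

// All values below are illustrative only.
enum {
  kThreads            = 4,
  kGuardsPerThread    = 5,
  kGuaranteedCapacity = kGuardsPerThread * kThreads,             // 20
  kWorstCaseRetired   = kGuardsPerThread * kThreads * kThreads,  // 80
  kRequiredPoolSize   = kGuaranteedCapacity + kWorstCaseRetired  // 100
};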
} // namespace test } // namespace test
} // namespace containers } // namespace containers
} // namespace embb } // namespace embb
\ No newline at end of file
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
namespace embb { namespace embb {
namespace containers { namespace containers {
namespace test { namespace test {
/** /**
* @brief A very simple wait-free object pool implementation, so that the tests * @brief A very simple wait-free object pool implementation, so that the tests
* are independent of the EMBB object pool implementation. * are independent of the EMBB object pool implementation.
...@@ -51,7 +50,7 @@ class IntObjectTestPool { ...@@ -51,7 +50,7 @@ class IntObjectTestPool {
static const int FREE_MARKER = 0; static const int FREE_MARKER = 0;
unsigned int poolSize; unsigned int poolSize;
IntObjectTestPool(unsigned int poolSize); IntObjectTestPool(unsigned int pool_size);
~IntObjectTestPool(); ~IntObjectTestPool();
...@@ -67,27 +66,10 @@ class IntObjectTestPool { ...@@ -67,27 +66,10 @@ class IntObjectTestPool {
* *
* @param objectPointer the object to be freed * @param object_pointer the object to be freed
*/ */
void Release(int* objectPointer); void Release(int* object_pointer);
}; };
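Only the declaration of IntObjectTestPool is visible in this hunk. For orientation, here is a minimal sketch of how such a wait-free pool can be built from the declared pieces: a marker array whose cells are claimed one by one with a single compare-and-swap. FREE_MARKER mirrors the constant above; the allocated marker, the member names, and the implementation as a whole are assumptions, not the EMBB test code:

#include <atomic>
#include <cstddef>

// Hypothetical sketch of a wait-free integer pool with the same interface shape.
class SimpleIntPool {
 public:
  static const int kFree = 0;       // corresponds to FREE_MARKER above
  static const int kAllocated = 1;  // assumed counterpart marker

  explicit SimpleIntPool(unsigned int pool_size)
      : size_(pool_size),
        markers_(new std::atomic<int>[pool_size]),
        objects_(new int[pool_size]) {
    for (unsigned int i = 0; i != size_; ++i) markers_[i] = kFree;
  }
  ~SimpleIntPool() {
    delete[] markers_;
    delete[] objects_;
  }

  // One bounded pass over the markers; the first successful CAS claims a cell.
  int* Allocate() {
    for (unsigned int i = 0; i != size_; ++i) {
      int expected = kFree;
      if (markers_[i].compare_exchange_strong(expected, kAllocated)) {
        return &objects_[i];
      }
    }
    return NULL;  // pool exhausted
  }

  // Recover the index from the pointer and mark the cell free again.
  void Release(int* object_pointer) {
    unsigned int index = static_cast<unsigned int>(object_pointer - objects_);
    markers_[index].store(kFree);
  }

 private:
  unsigned int size_;
  std::atomic<int>* markers_;
  int* objects_;
};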
class HazardPointerTest : public partest::TestCase { class HazardPointerTest : public partest::TestCase {
private:
embb::base::Function<void, embb::base::Atomic<int>*> deletePointerCallback;
// used only to allocate objects; the tests use the pointers, not the
// pointed-to contents
embb::containers::ObjectPool< embb::base::Atomic<int> >* objectPool;
// used to pass pointers between threads
embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack;
embb::base::Mutex vectorMutex;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>*
hazardPointer;
std::vector< embb::base::Atomic<int>* > deletedVector;
int nThreads;
int nElementsPerThread;
int nElements;
public: public:
/** /**
* Adds test methods. * Adds test methods.
...@@ -96,56 +78,71 @@ class HazardPointerTest : public partest::TestCase { ...@@ -96,56 +78,71 @@ class HazardPointerTest : public partest::TestCase {
void HazardPointerTest1Pre(); void HazardPointerTest1Pre();
void HazardPointerTest1Post(); void HazardPointerTest1Post();
void HazardPointerTest1ThreadMethod(); void HazardPointerTest1ThreadMethod();
void DeletePointerCallback(embb::base::Atomic<int>* toDelete); void DeletePointerCallback(embb::base::Atomic<int>* to_delete);
private:
embb::base::Function<void, embb::base::Atomic<int>*> delete_pointer_callback_;
// used only to allocate objects; the tests use the pointers, not the
// pointed-to contents
embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool_;
// used to pass pointers between threads
embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack_;
embb::base::Mutex vector_mutex_;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>*
hazard_pointer_;
std::vector< embb::base::Atomic<int>* > deleted_vector_;
int n_threads_;
int n_elements_per_thread_;
int n_elements_;
}; };
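Taken together, the members above describe the data flow of the first test: pointers are obtained from object_pool_, passed between threads via stack_, protected through hazard_pointer_, and every pointer that the hazard pointer scheme reports as safe to reclaim is recorded in deleted_vector_ under vector_mutex_. A generic standalone sketch of that "delete callback collects reclaimed pointers" pattern, using only std:: primitives and invented names (not the EMBB API):

#include <atomic>
#include <cstddef>
#include <mutex>
#include <vector>

// Hypothetical collector: the reclamation scheme calls OnSafeToDelete once a
// pointer is no longer guarded; the test can later inspect what was reclaimed.
class DeletedPointerCollector {
 public:
  void OnSafeToDelete(std::atomic<int>* pointer) {
    std::lock_guard<std::mutex> lock(mutex_);
    deleted_.push_back(pointer);
  }

  // A post-condition can check that every retired pointer was handed to the
  // callback exactly once.
  std::size_t DeletedCount() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return deleted_.size();
  }

 private:
  mutable std::mutex mutex_;
  std::vector<std::atomic<int>*> deleted_;
};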
class HazardPointerTest2 : public partest::TestCase { class HazardPointerTest2 : public partest::TestCase {
public:
void DeletePointerCallback(int* to_delete);
bool SetRelativeGuards();
void HazardPointerTest2Master();
void HazardPointerTest2Slave();
void HazardPointerTest2Pre();
void HazardPointerTest2Post();
void HazardPointerTest2ThreadMethod();
HazardPointerTest2();
private: private:
// number of threads participating in this test // number of threads participating in this test
int nThreads; int n_threads;
embb::base::Function<void, int*> deletePointerCallback; embb::base::Function<void, int*> delete_pointer_callback_;
// the thread index of the current master // the thread index of the current master
embb::base::Atomic<unsigned int> currentMaster; embb::base::Atomic<unsigned int> current_master_;
// variables used to synchronize the threads. At any point in time there is exactly // variables used to synchronize the threads. At any point in time there is exactly
// one master; the master rotates each round until every thread has been master once. // one master; the master rotates each round until every thread has been master once.
embb::base::Atomic<int> sync1; embb::base::Atomic<int> sync1_;
embb::base::Atomic<unsigned int> sync2; embb::base::Atomic<unsigned int> sync2_;
unsigned int guardsPerThreadCount; unsigned int guards_per_phread_count_;
unsigned int guaranteedCapacityPool; unsigned int guaranteed_capacity_pool_;
unsigned int poolSizeUsingHazardPointer; unsigned int pool_size_using_hazard_pointer_;
// The threads write here if they guarded an object successfully. Used to // The threads write here if they guarded an object successfully. Used to
// determine when all allocated objects were guarded successfully. // determine when all allocated objects were guarded successfully.
embb::base::Atomic<int*>* sharedGuarded; embb::base::Atomic<int*>* shared_guarded_;
// This array is used by the master to share the pointers it has // This array is used by the master to share the pointers it has
// allocated with the slaves. // allocated with the slaves.
embb::base::Atomic<int*>* sharedAllocated; embb::base::Atomic<int*>* shared_allocated_;
// Reference to the object pool // Reference to the object pool
IntObjectTestPool* testPool; IntObjectTestPool* test_pool_;
embb::containers::internal::HazardPointer<int*>* hazardPointer;
static const int finishMarker = -1;
public: embb::containers::internal::HazardPointer<int*>* hazard_pointer_;
void DeletePointerCallback(int* toDelete); static const int FINISH_MARKER = -1;
bool SetRelativeGuards();
void HazardPointerTest2Master();
void HazardPointerTest2Slave();
void HazardPointerTest2Pre();
void HazardPointerTest2Post();
void HazardPointerTest2ThreadMethod();
HazardPointerTest2();
}; };
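The shared_allocated_ / shared_guarded_ comments describe a publish/acknowledge handshake: the master publishes the pointers it allocated, and the slaves acknowledge each one after placing a guard on it, so the master can tell when every allocated object is guarded. A condensed standalone sketch of that handshake with hypothetical names and a fixed slot count (the actual acknowledgement scheme in the test may differ, e.g. slaves may write into any slot):

#include <atomic>
#include <cstddef>

// Hypothetical publish/acknowledge buffers, std::atomic only.
struct SharedHandshake {
  static const std::size_t kSlots = 20;  // e.g. guards_per_thread * n_threads
  std::atomic<int*> allocated[kSlots];
  std::atomic<int*> guarded[kSlots];

  SharedHandshake() {
    for (std::size_t i = 0; i != kSlots; ++i) {
      allocated[i].store(0);
      guarded[i].store(0);
    }
  }

  // Master: publish the allocated pointers, then wait for all acknowledgements.
  void PublishAndWaitForGuards(int* const pointers[kSlots]) {
    for (std::size_t i = 0; i != kSlots; ++i) allocated[i].store(pointers[i]);
    for (std::size_t i = 0; i != kSlots; ++i) {
      while (guarded[i].load() != pointers[i]) {}  // spin until slot i is guarded
    }
  }

  // Slave: acknowledge slot i after setting a hazard guard on its pointer.
  void AcknowledgeGuard(std::size_t i) {
    guarded[i].store(allocated[i].load());
  }
};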
} // namespace test } // namespace test
} // namespace containers } // namespace containers
} // namespace embb } // namespace embb