Commit a023d6e4 by Christian Kern

This resolves ticket #523 - rework hazard pointer:

   - Do not use lists
   - Remove HelpScan
   - Release memory to memory manager when destructed (before that, memory had to be released by the memory manager in its destructor).
   - Avoid stdlib algorithms
parent d36f1bdb
...@@ -129,5 +129,13 @@ void embb_internal_thread_index_set_max(unsigned int max) { ...@@ -129,5 +129,13 @@ void embb_internal_thread_index_set_max(unsigned int max) {
} }
void embb_internal_thread_index_reset() { void embb_internal_thread_index_reset() {
// This function is only called in tests, usually when all other threads
// except the main thread have terminated. However, the main thread still has
// potentially still stored its old index value in its thread local storage,
// which might be assigned additionally to another thread (as the counter is
// reset), which may lead to hard to detect bugs. Therefore, reset the thread
// local thread id here.
embb_internal_thread_index_var = UINT_MAX;
embb_counter_init(embb_thread_index_counter()); embb_counter_init(embb_thread_index_counter());
} }
...@@ -30,386 +30,381 @@ ...@@ -30,386 +30,381 @@
namespace embb { namespace embb {
namespace containers { namespace containers {
namespace internal { namespace internal {
template< typename ElementT >
FixedSizeList<ElementT>::FixedSizeList(size_t max_size) :
max_size(max_size),
size(0) {
elementsArray = static_cast<ElementT*>(
embb::base::Allocation::Allocate(sizeof(ElementT) *
max_size));
}
template< typename ElementT >
inline size_t FixedSizeList<ElementT>::GetSize() const {
return size;
}
template< typename ElementT >
inline size_t FixedSizeList<ElementT>::GetMaxSize() const {
return max_size;
}
template< typename ElementT >
inline void FixedSizeList<ElementT>::clear() {
size = 0;
}
template< typename ElementT > // Visual Studio is complaining, that the return in the last line of this
typename FixedSizeList<ElementT>::iterator // function is not reachable. This is true, as long as exceptions are enabled.
FixedSizeList<ElementT>::begin() const { // Otherwise, the exception becomes an assertion and with disabling assertions,
return &elementsArray[0]; // the code becomes reachable. So, disabling this warning.
} #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4702)
#endif
template< typename GuardType >
unsigned int HazardPointer< GuardType >::GetCurrentThreadIndex() {
template< typename ElementT > // first, get the EMBB native thread id.
typename FixedSizeList<ElementT>::iterator unsigned int embbThreadIndex;
FixedSizeList<ElementT>::end() const {
return &elementsArray[size];
}
template< typename ElementT > int return_val = embb_internal_thread_index(&embbThreadIndex);
FixedSizeList< ElementT > &
FixedSizeList<ElementT>::operator= (const FixedSizeList & other) {
size = 0;
if (max_size < other.size) { if (return_val != EMBB_SUCCESS) {
EMBB_THROW(embb::base::ErrorException, "Copy target to small"); EMBB_THROW(embb::base::ErrorException, "Could not get thread id");
} }
for (const_iterator it = other.begin(); it != other.end(); ++it) { // iterate over the mappings array
PushBack(*it); for (unsigned int i = 0; i != accessorCount; ++i) {
} // end of mappings? then we need to write our id
return *this; if (threadIdMapping[i] == -1) {
} // try to CAS the initial value with out thread id
int expected = -1;
if (threadIdMapping[i].CompareAndSwap(expected,
static_cast<int>(embbThreadIndex))) {
//successful, return our mapping
return i;
}
}
template< typename ElementT > if (threadIdMapping[i] == static_cast<int>(embbThreadIndex)) {
bool FixedSizeList<ElementT>::PushBack(ElementT const el) { // found our mapping!
if (size + 1 > max_size) { return i;
return false; }
} }
elementsArray[size] = el;
size++;
return true;
}
template< typename ElementT > // when we reach this point, we have too many accessors
FixedSizeList<ElementT>::~FixedSizeList() { // (no mapping possible)
embb::base::Allocation::Free(elementsArray); EMBB_THROW(embb::base::ErrorException, "Too many accessors");
}
template< typename GuardType > return 0;
bool HazardPointerThreadEntry<GuardType>::IsActive() {
return is_active;
} }
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
template< typename GuardType > template< typename GuardType >
bool HazardPointerThreadEntry<GuardType>::TryReserve() { void HazardPointer< GuardType >::RemoveGuard(int guardPosition){
bool expected = false; const unsigned int myThreadId = GetCurrentThreadIndex();
return is_active.CompareAndSwap(expected, true);
}
template< typename GuardType > // check invariants...
void HazardPointerThreadEntry<GuardType>::Deactivate() { assert(guardPosition < guardsPerThread && myThreadId < accessorCount);
is_active = false;
}
template< typename GuardType > // set guard
size_t HazardPointerThreadEntry<GuardType>::GetRetiredCounter() { guards[guardPosition*accessorCount + myThreadId] = undefinedGuard;
return retired_list.GetSize();
} }
template< typename GuardType > template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>:: HazardPointer< GuardType >::HazardPointer(
GetRetired() { embb::base::Function<void, GuardType> freeGuardCallback,
return retired_list; GuardType undefinedGuard, int guardsPerThread, int accessors) :
} accessorCount(accessors == -1 ?
embb::base::Thread::GetThreadsMaxCount() :
accessors),
undefinedGuard(undefinedGuard),
guardsPerThread(guardsPerThread),
freeGuardCallback(freeGuardCallback) {
threadIdMapping =
static_cast<embb::base::Atomic<int>*>(
embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)
*accessorCount));
for (unsigned int i = 0; i != accessorCount; ++i) {
//in-place new for each cell
new (&threadIdMapping[i]) embb::base::Atomic < int > ;
}
template< typename GuardType > guards = static_cast<embb::base::Atomic< GuardType >*>
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>:: (embb::base::Allocation::Allocate(
GetRetiredTemp() { sizeof(embb::base::Atomic< GuardType >) *
return retired_list_temp; guardsPerThread * accessorCount
} ));
template< typename GuardType > for (unsigned int i = 0; i != guardsPerThread * accessorCount; ++i) {
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>:: //in-place new for each cell
GetHazardTemp() { new (&guards[i]) embb::base::Atomic < GuardType > ;
return hazard_pointer_list_temp; }
}
template< typename GuardType > threadLocalRetiredListsTemp = static_cast<GuardType*>
void HazardPointerThreadEntry<GuardType>:: (embb::base::Allocation::Allocate(
SetRetired(internal::FixedSizeList< GuardType > const & retired_list) { sizeof(GuardType) *
this->retired_list = retired_list; guardsPerThread * accessorCount * accessorCount
} ));
for (unsigned int i = 0; i !=
guardsPerThread * accessorCount * accessorCount; ++i) {
//in-place new for each cell
new (&threadLocalRetiredListsTemp[i]) GuardType;
}
template< typename GuardType > threadLocalRetiredLists = static_cast<GuardType*>
HazardPointerThreadEntry<GuardType>::
HazardPointerThreadEntry(GuardType undefined_guard, int guards_per_thread,
size_t max_size_retired_list) :
#ifdef EMBB_DEBUG
who_is_scanning(-1),
#endif
undefined_guard(undefined_guard),
guards_per_thread(guards_per_thread),
max_size_retired_list(max_size_retired_list),
// initially, each potential thread is active... if that is not the case
// another thread could call "HelpScan", and block this thread in making
// progress.
// Still, threads can be leave the hazard pointer processing (deactivation),
// but this can only be done once, i.e., this is not revertable...
is_active(1),
retired_list(max_size_retired_list),
retired_list_temp(max_size_retired_list),
hazard_pointer_list_temp(embb::base::Thread::GetThreadsMaxCount() *
guards_per_thread) {
// Initialize guarded pointer list
guarded_pointers = static_cast<embb::base::Atomic<GuardType>*>
(embb::base::Allocation::Allocate( (embb::base::Allocation::Allocate(
sizeof(embb::base::Atomic<GuardType>)*guards_per_thread)); sizeof(GuardType) *
guardsPerThread * accessorCount * accessorCount
));
for (unsigned int i = 0; i !=
guardsPerThread * accessorCount * accessorCount; ++i) {
//in-place new for each cell
new (&threadLocalRetiredLists[i]) GuardType;
}
for (int i = 0; i != guards_per_thread; ++i) { // init guards and retired lists to the undefined guard
new (static_cast<void*>(&guarded_pointers[i])) for (unsigned int i = 0; i != static_cast<unsigned int>(guardsPerThread);
embb::base::Atomic<GuardType>(undefined_guard); ++i) {
for (unsigned int i2 = 0; i2 != accessorCount; ++i2) {
guards[i*accessorCount + i2] = undefinedGuard;
}
} }
}
template< typename GuardType > for (unsigned int j = 0; j != accessorCount; ++j) {
HazardPointerThreadEntry<GuardType>::~HazardPointerThreadEntry() { for (unsigned int i = 0; i != guardsPerThread*accessorCount; ++i) {
for (int i = 0; i != guards_per_thread; ++i) { threadLocalRetiredListsTemp
guarded_pointers[i].~Atomic(); [j*(accessorCount*guardsPerThread) + i] =
undefinedGuard;
threadLocalRetiredLists
[j*(accessorCount*guardsPerThread) + i] =
undefinedGuard;
}
} }
embb::base::Allocation::Free(guarded_pointers); for (unsigned int i = 0; i != accessorCount; ++i) {
//in-place new for each cell
threadIdMapping[i] = -1;
}
} }
template< typename GuardType > template< typename GuardType >
GuardType HazardPointerThreadEntry<GuardType>::GetGuard(int pos) const { HazardPointer< GuardType >::~HazardPointer() {
return guarded_pointers[pos];
}
template< typename GuardType > // Release references from all retired lists. Note that for this to work, the
void HazardPointerThreadEntry<GuardType>::AddRetired(GuardType pointerToGuard) { // data structure using hazard pointer has still to be active... So first, the
retired_list.PushBack(pointerToGuard); // hazard pointer class shall be destructed, then the memory management class
} // (e.g. some pool). Otherwise, the hazard pointer class would try to return
// memory to an already destructed memory manager.
for (unsigned int j = 0; j != accessorCount; ++j) {
for (unsigned int i = 0; i != accessorCount*guardsPerThread; ++i) {
GuardType pointerToFree =
threadLocalRetiredLists
[j * accessorCount * guardsPerThread + i];
if (pointerToFree == undefinedGuard) {
break;
}
freeGuardCallback(pointerToFree);
}
}
template< typename GuardType > for (unsigned int i = 0; i != accessorCount; ++i) {
void HazardPointerThreadEntry<GuardType>:: threadIdMapping[i].~Atomic();
GuardPointer(int guardNumber, GuardType pointerToGuard) { }
guarded_pointers[guardNumber] = pointerToGuard;
}
template< typename GuardType > embb::base::Allocation::Free(threadIdMapping);
void HazardPointerThreadEntry<GuardType>::SetActive(bool active) {
is_active = active;
}
template< typename GuardType > for (unsigned int i = 0; i != guardsPerThread * accessorCount; ++i) {
unsigned int HazardPointer< GuardType >::GetCurrentThreadIndex() { guards[i].~Atomic();
unsigned int thread_index; }
int return_val = embb_internal_thread_index(&thread_index);
if (return_val != EMBB_SUCCESS) embb::base::Allocation::Free(guards);
EMBB_THROW(embb::base::ErrorException, "Could not get thread id!");
return thread_index; for (unsigned int i = 0; i !=
} guardsPerThread * accessorCount * accessorCount; ++i) {
template< typename GuardType > threadLocalRetiredListsTemp[i].~GuardType();
bool HazardPointer< GuardType >::IsThresholdExceeded() { }
double retiredCounterLocThread =
static_cast<double>(GetHazardPointerElementForCurrentThread(). embb::base::Allocation::Free(threadLocalRetiredListsTemp);
GetRetiredCounter());
for (unsigned int i = 0; i !=
return (retiredCounterLocThread >= guardsPerThread * accessorCount * accessorCount; ++i) {
RETIRE_THRESHOLD * threadLocalRetiredLists[i].~GuardType();
static_cast<double>(active_hazard_pointer)* }
static_cast<double>(guards_per_thread));
embb::base::Allocation::Free(threadLocalRetiredLists);
} }
template< typename GuardType > template< typename GuardType >
size_t HazardPointer< GuardType >::GetActiveHazardPointers() { void HazardPointer< GuardType >::Guard(int guardPosition,
return active_hazard_pointer; GuardType guardedElement) {
const unsigned int myThreadId = GetCurrentThreadIndex();
// check invariants...
assert(guardPosition < guardsPerThread && myThreadId < accessorCount);
// set guard
guards[guardPosition*accessorCount + myThreadId] = guardedElement;
} }
template< typename GuardType > template< typename GuardType >
typename HazardPointer< GuardType >::HazardPointerThreadEntry_t & size_t HazardPointer< GuardType >::ComputeMaximumRetiredObjectCount(
HazardPointer< GuardType >::GetHazardPointerElementForCurrentThread() { size_t guardsPerThread, int accessors) {
// For each thread, there is a slot in the hazard pointer array.
// Initially, the active flag of a hazard pointer entry is false. unsigned int accessorCount = (accessors == -1 ?
// Only the respective thread changes the flag from true to false. embb::base::Thread::GetThreadsMaxCount() :
// This means that the current thread tells that he is about to accessors);
// stop operating, and the others are responsible for his retired
// list. return static_cast<size_t>(
guardsPerThread * accessorCount * accessorCount);
return hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
} }
template< typename GuardType > template< typename GuardType >
void HazardPointer< GuardType >::HelpScan() { void HazardPointer< GuardType >::CopyRetiredList(GuardType* sourceList,
// This is a little bit different than in the paper. In the paper, GuardType* targetList, unsigned int retiredListSize,
// the retired nodes from other threads are added to our retired list. GuardType undefinedGuard) {
// To be able to give a bound on memory consumption, we execute scan bool done = false;
// for those threads, without moving elements. The effect shall be for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
// the same. if (!done) {
GuardType guardToCopy = sourceList[ii];
for (size_t i = 0; i != hazard_pointers; ++i) {
// Try to find non active lists... if (guardToCopy == undefinedGuard) {
if (!hazard_pointer_thread_entry_array[i].IsActive() && done = true;
hazard_pointer_thread_entry_array[i].TryReserve()) {
// Here: grab retired things, first check if there are any... if (targetList[ii] == undefinedGuard) {
if (hazard_pointer_thread_entry_array[i].GetRetiredCounter() > 0) { // end of target list
Scan(&hazard_pointer_thread_entry_array[i]); break;
}
}
targetList[ii] = guardToCopy;
}
else {
// we copied the whole source list, remaining values in the target
// have to be zeroed.
if (targetList[ii] == undefinedGuard) {
// end of target list
break;
}
else {
targetList[ii] = undefinedGuard;
} }
// We are done, mark it as deactivated again
hazard_pointer_thread_entry_array[i].Deactivate();
} }
} }
} }
template< typename GuardType > template< typename GuardType >
void HazardPointer< GuardType >:: void HazardPointer< GuardType >::UpdateRetiredList(GuardType* retiredList,
Scan(HazardPointerThreadEntry_t* currentHazardPointerEntry) { GuardType* updatedRetiredList, unsigned int retiredListSize,
#ifdef EMBB_DEBUG GuardType guardedElement, GuardType consideredHazard,
// scan should only be executed by one thread at a time, otherwise we have GuardType undefinedGuard) {
// a bug... this assertions checks that
int expected = -1; // no hazard set here
if (!currentHazardPointerEntry->GetScanningThread().CompareAndSwap( if (consideredHazard == undefinedGuard)
expected, static_cast<int>(GetCurrentThreadIndex()))) { return;
assert(false);
} // if this hazard is currently in the union of
#endif // threadLocalRetiredLists and pointerToRetire, but not yet in
// In this function, we compute the intersection between local retired // threadLocalRetiredListsTemp, add it to that list
// pointers and all hazard pointers. This intersection cannot be deleted and bool containedInUnion = false;
// forms the new local retired pointers list.
// It is assumed that the union of all retired pointers contains no two // first iterate over our retired list
// pointers with the same value. However, the union of all hazard guards for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
// might. // when reaching 0, we can stop iterating (end of the "list")
if (retiredList[ii] == 0)
// Here, we store the temporary hazard pointers. We have to store them, break;
// as iterating multiple time over them might be expensive, as this
// atomic array is shared between threads. // the hazard is contained in the retired list... it shall go
currentHazardPointerEntry->GetHazardTemp().clear(); // into the temp list, if not already there
if (retiredList[ii] == consideredHazard) {
// Get all active hazard pointers! containedInUnion = true;
for (unsigned int i = 0; i != hazard_pointers; ++i) { break;
// Only consider guards of active threads
if (hazard_pointer_thread_entry_array[i].IsActive()) {
// For each guard in an hazard pointer entry
for (int pos = 0; pos != guards_per_thread; ++pos) {
GuardType guard = hazard_pointer_thread_entry_array[i].GetGuard(pos);
// UndefinedGuard means not guarded
if (guard == undefined_guard)
continue;
currentHazardPointerEntry->GetHazardTemp().PushBack(guard);
}
} }
} }
currentHazardPointerEntry->GetRetiredTemp().clear(); // the union also contains pointerToRetire
if (!containedInUnion) {
// Sort them, we will do a binary search on each entry from the retired list containedInUnion = (consideredHazard == guardedElement);
std::sort(
currentHazardPointerEntry->GetHazardTemp().begin(),
currentHazardPointerEntry->GetHazardTemp().end());
for (
EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME FixedSizeList< GuardType >::iterator
it = currentHazardPointerEntry->GetRetired().begin();
it != currentHazardPointerEntry->GetRetired().end(); ++it) {
if (false == ::std::binary_search(
currentHazardPointerEntry->GetHazardTemp().begin(),
currentHazardPointerEntry->GetHazardTemp().end(), *it)) {
this->free_guard_callback(*it);
} else {
currentHazardPointerEntry->GetRetiredTemp().PushBack(*it);
}
} }
currentHazardPointerEntry->SetRetired(
currentHazardPointerEntry->GetRetiredTemp());
#ifdef EMBB_DEBUG // add the pointer to temp. retired list, if not already there
currentHazardPointerEntry->GetScanningThread().Store(-1); if (containedInUnion) {
#endif for (unsigned int iii = 0; iii != retiredListSize; ++iii) {
}
template< typename GuardType > // is it already there?
size_t HazardPointer< GuardType >::GetRetiredListMaxSize() const { if (updatedRetiredList[iii] == consideredHazard)
return static_cast<size_t>(RETIRE_THRESHOLD * break;
static_cast<double>(embb::base::Thread::GetThreadsMaxCount()) *
static_cast<double>(guards_per_thread)) + 1;
}
template< typename GuardType > // end of the list
HazardPointer< GuardType >::HazardPointer( if (updatedRetiredList[iii] == undefinedGuard) {
embb::base::Function<void, GuardType> free_guard_callback,
GuardType undefined_guard, int guards_per_thread) : // add hazard
undefined_guard(undefined_guard), updatedRetiredList[iii] = consideredHazard;
guards_per_thread(guards_per_thread),
//initially, all potential hazard pointers are active... // we are done here...
active_hazard_pointer(embb::base::Thread::GetThreadsMaxCount()), break;
free_guard_callback(free_guard_callback) { }
hazard_pointers = embb::base::Thread::GetThreadsMaxCount(); }
hazard_pointer_thread_entry_array = static_cast<HazardPointerThreadEntry_t*>(
embb::base::Allocation::Allocate(sizeof(HazardPointerThreadEntry_t) *
hazard_pointers));
for (size_t i = 0; i != hazard_pointers; ++i) {
new (static_cast<void*>(&(hazard_pointer_thread_entry_array[i])))
HazardPointerThreadEntry_t(undefined_guard, guards_per_thread,
GetRetiredListMaxSize());
} }
} }
template< typename GuardType > template< typename GuardType >
HazardPointer< GuardType >::~HazardPointer() { void HazardPointer< GuardType >::EnqueueForDeletion(GuardType toRetire) {
for (size_t i = 0; i != hazard_pointers; ++i) {
hazard_pointer_thread_entry_array[i].~HazardPointerThreadEntry_t();
}
embb::base::Allocation::Free(static_cast < void* > unsigned int myThreadId = GetCurrentThreadIndex();
(hazard_pointer_thread_entry_array));
}
template< typename GuardType > // check for invariant
void HazardPointer< GuardType >::DeactivateCurrentThread() { assert(myThreadId < accessorCount);
HazardPointerThreadEntry_t* current_thread_entry =
&hazard_pointer_thread_entry_array[GetCurrentThreadIndex()]; unsigned int retiredListSize = accessorCount * guardsPerThread;
// Deactivating a non-active hazard pointer entry has no effect! GuardType* retiredList =
if (!current_thread_entry->IsActive()) { &threadLocalRetiredLists[myThreadId * retiredListSize];
return;
} else { GuardType* retiredListTemp =
current_thread_entry->SetActive(false); &threadLocalRetiredListsTemp[myThreadId * retiredListSize];
active_hazard_pointer--;
// wipe my temp. retired list...
for (unsigned int i = 0; i < retiredListSize; ++i) {
// the list is filled always from left to right, so occurring the first
// undefinedGuard, the remaining ones are also undefinedGuard...
if (retiredListTemp[i] == undefinedGuard)
break;
retiredListTemp[i] = undefinedGuard;
} }
}
template< typename GuardType > // we test each hazard if it is in the union of retiredList and
void HazardPointer< GuardType >::GuardPointer(int guardPosition, // guardedElement. If it is, it goes into the new retired list...
GuardType guardedElement) { for (unsigned int i = 0; i != accessorCount*guardsPerThread; ++i) {
GetHazardPointerElementForCurrentThread().GuardPointer( // consider each current active guard
guardPosition, guardedElement); GuardType consideredHazard = guards[i].Load();
} UpdateRetiredList(retiredList, retiredListTemp, retiredListSize,
toRetire, consideredHazard, undefinedGuard);
}
template< typename GuardType > // now we created a a new retired list... the elements that are "removed" from
void HazardPointer< GuardType >::EnqueuePointerForDeletion( // the old retired list can be safely deleted now...
GuardType guardedElement) { for (int ii = -1; ii != static_cast<int>(retiredListSize); ++ii) {
GetHazardPointerElementForCurrentThread().AddRetired(guardedElement); // we iterate over the current retired list... -1 is used as dummy element
if (IsThresholdExceeded()) { // in the iteration, to also iterate over the pointerToRetire, which is
HazardPointerThreadEntry_t* currentHazardPointerEntry = // logically also part of the current retired list...
&GetHazardPointerElementForCurrentThread();
// end of the list, stop iterating
if (ii >= 0 && retiredList[ii] == undefinedGuard)
break;
Scan(currentHazardPointerEntry); GuardType toCheckIfInNewList = undefinedGuard;
// Help deactivated threads to clean their retired nodes. toCheckIfInNewList = (ii == -1 ? toRetire : retiredList[ii]);
HelpScan();
// still in the new retired list?
bool stillInList = false;
for (unsigned int iii = 0; iii != retiredListSize; ++iii) {
// end of list
if (retiredListTemp[iii] == undefinedGuard)
break;
if (toCheckIfInNewList == retiredListTemp[iii]) {
// still in list, cannot delete!
stillInList = true;
break;
}
}
if (!stillInList) {
this->freeGuardCallback(toCheckIfInNewList);
}
} }
// copy the updated retired list (temp) to the retired list...
CopyRetiredList(retiredListTemp, retiredList, retiredListSize,
undefinedGuard);
} }
template<typename GuardType>
const double embb::containers::internal::HazardPointer<GuardType>::
RETIRE_THRESHOLD = 1.25f;
} // namespace internal } // namespace internal
} // namespace containers } // namespace containers
} // namespace embb } // namespace embb
......
...@@ -40,451 +40,217 @@ ...@@ -40,451 +40,217 @@
#define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename #define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename
#endif #endif
// forward declaration for white-box test, used in friend declaration of
// HazardPointer class.
namespace embb {
namespace containers{
namespace test {
class HazardPointerTest2;
}
}
}
namespace embb { namespace embb {
namespace containers { namespace containers {
namespace internal { namespace internal {
/** /**
* A list with fixed size, implemented as an array. Replaces std::vector that * This class contains a hazard pointer implementation following publication:
* was used in previous hazard pointer implementation.
* *
* Provides iterators, so we can apply algorithms from the STL. * Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free
* objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004)
* : 491-504.
* *
* \tparam ElementT Type of the elements contained in the list. * Hazard pointer are a wait-free memory reclamation scheme for lock-free
*/ * algorithms. Loosely speaking, they act as garbage collector. The release of
template< typename ElementT > * objects contained within the memory, managed by the hazard pointer class, is
class FixedSizeList { * intercepted and possibly delayed to avoid concurrency bugs.
private:
/**
* Capacity of the list
*/
size_t max_size;
/**
* Size of the list
*/
size_t size;
/**
* Pointer to the array containing the list
*/
ElementT* elementsArray;
/**
* Copy constructor not implemented. Would require dynamic memory allocation.
*/
FixedSizeList(
const FixedSizeList &
/**< [IN] Other list */);
public:
/**
* Definition of an iterator
*/
typedef ElementT * iterator;
/**
* Definition of a const iterator
*/
typedef const ElementT * const_iterator;
/**
* Constructor, initializes list with given capacity
*/
FixedSizeList(
size_t max_size
/**< [IN] Capacity of the list */);
/**
* Gets the current size of the list
*
* \return Size of the list
*/
inline size_t GetSize() const;
/**
* Gets the capacity of the list
*
* \return The capacity of the list
*/
inline size_t GetMaxSize() const;
/**
* Removes all elements from the list without changing the capacity
*/
inline void clear();
/**
* Iterator pointing to the first element
*
* \return Begin iterator
*/
iterator begin() const;
/**
* Iterator pointing beyond the last element
*
* \return End iterator
*/
iterator end() const;
/**
* Copies the elements of another list to this list. The capacity of
* this list has to be greater than or equal to the size of the other list.
*/
FixedSizeList & operator=(
const FixedSizeList & other
/**< [IN] Other list */);
/**
* Appends an element to the end of the list
*
* \return \c false if the operation was not successful because the list is
* full, otherwise \c true.
*/
bool PushBack(
ElementT const el
/**< [IN] Element to append to the list */);
/**
* Destructs the list.
*/
~FixedSizeList();
};
/**
* Hazard pointer entry for a single thread. Holds the actual guards that
* determine if the current thread is about to use the guarded pointer.
* Guarded pointers are protected and not deleted.
* *
* Moreover, the retired list for this thread is contained. It determines * Before accessing an object, threads announce their intention to do so (i.e.
* the pointers that have been allocated from this thread, but are not used * the intention to dereference the respective pointer) to the hazard pointer
* anymore by this thread. However, another thread could have a guard on it, * class. This is called guarding. From now on, the hazard pointer class will
* so the pointer cannot be deleted immediately. * prohibit the release or reuse of the guarded object. This is necessary, to
* assure that the object is not released or reused while it is accessed and to
* assure that it has not unnoticed changed (effectively avoiding the ABA
* problem).
* *
* For the scan operation, the intersection of the guarded pointers from all * Note that after guarding an object, a consecutive check that the object (i.e.
* threads and the retired list has to be computed. For this computation, we * its pointer) is still valid is necessary; the object release could already
* need thread local temporary lists which are also contained here. * have been started when guarding the object. Guarding is repeated, until this
* check eventually succeeds. Note that this "guard-and-check" loop makes the
* usage of the hazard pointer class lock-free, even though its implementation
* is wait-free.
* *
* \tparam GuardType The type of guard, usually a pointer. * Internally, guarding is realized by providing each thread slots, where
*/ * pointers can be placed that should not be freed (so called guards). When
template< typename GuardType > * trying to release an object, it is checked if the object's pointer is
class HazardPointerThreadEntry { * guarded, and if so this object is not released, but instead put into a
#ifdef EMBB_DEBUG * retired list for later release, when all guards for this object have been
* removed.
public:
embb::base::Atomic<int>& GetScanningThread() {
return who_is_scanning;
}
private:
embb::base::Atomic<int> who_is_scanning;
#endif
private:
/**
* Value of the undefined guard (means that no guard is set).
*/
GuardType undefined_guard;
/**
* The number of guards per thread. Determines the size of the guard array.
*/
int guards_per_thread;
/**
* The capacity of the retired list. It is determined by number of guards,
* retired threshold, and maximum number of threads.
*/
size_t max_size_retired_list;
/**
* Set to true if the current thread is active. Is used for a thread to
* signal that it is leaving. If a thread has left, the other threads are
* responsible for cleaning up its retired list.
*/
embb::base::Atomic< bool > is_active;
/**
* The guarded pointer of this thread, has size \c guard_per_thread.
*/
embb::base::Atomic< GuardType >* guarded_pointers;
/**
* The retired list of this thread, contains pointer that shall be released
* when no thread holds a guard on it anymore.
*/
FixedSizeList< GuardType > retired_list;
/**
* Temporary retired list, has same capacity as \c retired_list, It is used to
* compute the intersection of all guards and the \c retired list.
*/
FixedSizeList< GuardType > retired_list_temp;
/**
* Temporary guards list. Used to compute the intersection of all guards and
* the \c retired_list.
*/
FixedSizeList< GuardType > hazard_pointer_list_temp;
/**
* HazardPointerThreadEntry shall not be copied
*/
HazardPointerThreadEntry(const HazardPointerThreadEntry&);
/**
* HazardPointerThreadEntry shall not be assigned
*/
HazardPointerThreadEntry & operator= (const HazardPointerThreadEntry&);
public:
/**
* Checks if current thread is active (with respect to participating in hazard
* pointer management)
*
* \return \c true if the current thread is active, otherwise \c false.
*/
bool IsActive();
/**
* Tries to set the active flag to true (atomically). Used if the current
* thread is not active anymore as lock for another thread to help cleaning
* up hazard pointer.
*
* \return \c true if this thread was successful setting the active flag,
* otherwise \c false.
*/
bool TryReserve();
/**
* Deactivates current thread by atomically setting active flag to false.
*/
void Deactivate();
/**
* Gets the count of current retired pointer for the current thread.
*
* \return Count of current retired pointer
*/
size_t GetRetiredCounter();
/**
* Gets the retired list.
*
* \return Reference to \c retired_list
*/
FixedSizeList< GuardType >& GetRetired();
/**
* Gets the temporary retired list.
*
* \return Reference to \c retired_list_temp
*/
FixedSizeList< GuardType >& GetRetiredTemp();
/**
* Gets the temporary hazard pointer list.
*
* \return Reference to \c hazard_pointer_list_temp
*/
FixedSizeList< GuardType >& GetHazardTemp();
/**
* Sets the retired list.
*/
void SetRetired(
embb::containers::internal::FixedSizeList< GuardType > const & retired_list
/**< [IN] Retired list */);
/**
* Constructor
*/
HazardPointerThreadEntry(
GuardType undefined_guard,
/**< [IN] Value of the undefined guard (e.g. NULL) */
int guards_per_thread,
/**< [IN] Number of guards per thread */
size_t max_size_retired_list
/**< [IN] The capacity of the retired list(s) */);
/**
* Destructor
*
* Deallocate lists
*/
~HazardPointerThreadEntry();
/**
* Gets the guard at the specified position.
* Positions are numbered, beginning with 0.
*/
GuardType GetGuard(
int pos
/**< [IN] Position of the guard */) const;
/**
* Adds pointer to the retired list
*/
void AddRetired(
GuardType pointerToGuard
/**< [IN] Guard to retire */);
/**
* Guards pointer
*/
void GuardPointer(
int guardNumber,
/**< [IN] Position of guard */
GuardType pointerToGuard
/**<[IN] Pointer to guard */);
/**
* Sets the current thread active, i.e., announce that the thread
* participates in managing hazard pointer.
*/
void SetActive(
bool active
/**<[IN] \c true for active, \c false for inactive */);
};
/**
* HazardPointer implementation as presented in:
* *
* Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free * In contrast to the original implementation, our implementation consumes only
* objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004) * fixed-size memory. Note that the number of threads accessing the hazard
* : 491-504. * pointer object accounts quadratic for the memory consumption: managed objects
* are provided from outside and the number of accessors accounts quadric for
* the minimum count of those objects.
* *
* In contrast to the original implementation, our implementation only uses * Also in contrast to the original implementation, we do not provide a HelpScan
* fixed-size memory. There is a safe upper limit, hazard pointer are guaranteed * functionality, which gives threads the possibility, to not participate in the
* to not consume more memory. Memory is allocated solely at initialization. * garbage collection anymore: other threads will help to clean-up the objects
* protected by the exiting thread. The reason is, that the only use-case would
* be a crashing thread, not participating anymore. However, as the thread has
* to signal its exit himself, this is not possible to realize anyways. In the
* end, it is still guaranteed that all memory is properly returned (in the
* destructor).
* *
* Hazard pointers solve the ABA problem for lock-free algorithms. Before * Additionally, the original implementation holds a threshold, which determines
* accessing a pointer, threads announce that they want to access this pointer * when objects shall be freed. In this implementation, we free whenever it is
* and then check if the pointer is still valid. This announcement is done by * possibly to do so, as we want to keep the memory footprint as low as
* placing a guard. It is guaranteed that the pointer is not reused until all * possible. We also don't see a performance drop in the current algorithms that
* threads remove their guards to this pointer. Objects, these pointers are * are using hazard pointer, when not using a threshold.
* pointing to, can therefore not be deleted directly. Instead, these pointers *
* are put into a list for later deletion (retired list). Regularly, this list * \tparam GuardType the type of the guards. Usually the pointer type of some
* is processed to check which pointers can be deleted. If a pointer can be * object to protect.
* deleted, a callback function provided by the user is called. The user can
* then, e.g., free the respective object, so that the pointer can be safely
* reused.
*/ */
template< typename GuardType > template< typename GuardType >
class HazardPointer { class HazardPointer {
private: private:
/**
* Concrete hazard pointer entry type
*/
typedef HazardPointerThreadEntry < GuardType >
HazardPointerThreadEntry_t;
/** /**
* The guard value denoting "not guarding" * HazardPointerTest2 is a white-box test, needing access to private members
* of this class. So declaring it as friend.
*/ */
GuardType undefined_guard;
friend class embb::containers::test::HazardPointerTest2;
/** /**
* The capacity of the retired list (safe upper bound for retired list size) * The hazard pointer guards, represented as array. Each thread has a fixed
* set of slots (guardsPerThread) within this array.
*/ */
int retired_list_max_size; embb::base::Atomic<GuardType>* guards;
/** /**
* Guards that can be set per thread * \see threadLocalRetiredLists documentation
*/ */
int guards_per_thread; GuardType* threadLocalRetiredListsTemp;
/** /**
* Array of HazardPointerElements. Each thread is assigned to one. * A lists of lists, represented as single array. Each thread maintains a
* list of retired pointers, that are objects that are logically released
* but not released because some thread placed a guard on it.
*/ */
HazardPointerThreadEntry_t* hazard_pointer_thread_entry_array; GuardType* threadLocalRetiredLists;
/** /**
* The threshold, determines at which size of the retired list pointers * This number determines the amount of maximal accessors (threads) that
* are tried to be deleted. * will access this hazard pointer instance. Note that a thread once
* accessing this object will be permanently count as accessor, even if not
* participating anymore. If too many threads access this object, an
* assertion is thrown.
*/ */
static const double RETIRE_THRESHOLD; unsigned int accessorCount;
/** /**
* Each thread is assigned a thread index (starting with 0). * The guard value denoting "not guarded"
* Get the index of the current thread.
*/ */
static unsigned int GetCurrentThreadIndex(); GuardType undefinedGuard;
/** /**
* The number of hazard pointers currently active. * The count of guards that can be set per thread.
*/ */
size_t active_hazard_pointer; int guardsPerThread;
/** /**
* Count of all hazard pointers. * The functor that is called to release an object. This is called by this
* class, when it is safe to do so, i.e., no thread accesses this object
* anymore.
*/ */
size_t hazard_pointers; embb::base::Function<void, GuardType> freeGuardCallback;
/** /**
* The callback that is triggered when a retired guard can be * Mapping from EMBB thread id to internal thread ids Internal thread ids
* freed. Usually, the user will call a free here. * are in range [0;accesor_count-1]. The position of a EMBB thread id in
* that array determines the respective internal thread id.
*/ */
embb::base::Function<void, GuardType> free_guard_callback; embb::base::Atomic<int>* threadIdMapping;
/** /**
* Checks if the current size of the retired list exceeds the threshold, so * Each thread is assigned a thread index (starting with 0). Get the index of
* that each retired guard is checked for being not hazardous anymore. * the current thread. Note that this is not the global index, but an internal
* one. The user is free to define less accessors than the amount of default
* threads. This is useful, as the number of accessors accounts quadratic for
* the memory consumption, so the user should have the possibility to avoid
* memory wastage, when only having a small, fixed size, number of accessors.
* *
* \return \c true is threshold is exceeded, otherwise \c false. * @return current thread index
*/ */
bool IsThresholdExceeded(); unsigned int GetCurrentThreadIndex();
/** /**
* Gets the number of hazard pointe, currently active * Copy retired list \c sourceList to retired list \c targetList
* */
* \return Number of active hazard pointers static void CopyRetiredList(GuardType* sourceList,
*/ /**<[IN] the source retired list*/
size_t GetActiveHazardPointers(); GuardType* targetList,
/**<[IN] the target retired list*/
/** unsigned int singleRetiredListSize,
* Gets the hazard pointer entry for the current thread /**<[IN] the size of a thread local retired list*/
* GuardType undefinedGuard
* \return Hazard pointer entry for current thread /**<[IN] the undefined guard (usually the NULL pointer)*/
*/ );
HazardPointerThreadEntry_t&
GetHazardPointerElementForCurrentThread(); static void UpdateRetiredList(
GuardType* retiredList,
/** /**<[IN] the old retired list*/
* Threads might leave from participating in hazard pointer management. GuardType* updatedRetiredList,
* This method helps all those threads processing their retired list. /**<[IN] the updated retired list*/
*/ unsigned int retiredListSize,
void HelpScan(); /**<[IN] the size of a thread local retired list*/
GuardType toRetire,
/** /**<[IN] the element to retire*/
* Checks the retired list of a hazard pointer entry for elements of the GuardType consideredHazard,
* retired list that can be freed, and executes the delete callback for those /**<[IN] the currently considered hazard*/
* elements. GuardType undefinedGuard
*/ /**<[IN] the undefined guard (usually the NULL pointer)*/
void Scan( );
HazardPointerThreadEntry_t* currentHazardPointerEntry
/**<[IN] Hazard pointer entry that should be checked for elements that
can be deleted*/);
public: public:
/** /**
* Gets the capacity of one retired list * The user of the hazard pointer class has to provide the memory that is
* managed here. The user has to take into account, that releasing of memory
* might be delayed. He has therefore to provide more memory than he wants to
* guarantee at each point in time. More specific, on top of the guaranteed
* count of objects, he has to provide the additional count of objects that
* can be (worst-case) contained in the retired lists and therefore are not
* released yet. The size of all retired lists is guardsPerThread *
* accessorCount * accessorCount, which is computed using this function. So
* the result of function denotes to the user, how many objects he has to
* allocate additionally to the guaranteed count.
* *
* \waitfree * \waitfree
*/ */
size_t GetRetiredListMaxSize() const; static size_t ComputeMaximumRetiredObjectCount(
size_t guardsPerThread,
/**<[IN] the count of guards per thread*/
int accessors = -1
/**<[IN] Number of accessors. Determines, how many threads will access
the hazard pointer object. Default value -1 will allow the
maximum amount of threads as defined with
\c embb::base::Thread::GetThreadsMaxCount()*/
);
/** /**
* Initializes hazard pointer * Initializes the hazard pointer object
* *
* \notthreadsafe * \notthreadsafe
* *
* \memory * \memory We dynamically allocate the following:
* - Let \c t be the number of maximal threads determined by EMBB *
* - Let \c g be the number of guards per thread * (sizeof(Atomic<int>) * accessorCount) + (sizeof(Atomic<GuardType>) *
* - Let \c x be 1.25*t*g + 1 * guards_per_thread * accessorCount) + (2*sizeof(GuardType) *
* guards_per_thread * accessorCount^2)
* *
* We dynamically allocate \c x*(3*t+1) elements of size \c sizeof(void*). * The last addend is the dominant one, as accessorCount accounts
* quadratically for it.
*/ */
HazardPointer( HazardPointer(
embb::base::Function<void, GuardType> free_guard_callback, embb::base::Function<void, GuardType> free_guard_callback,
...@@ -492,35 +258,48 @@ class HazardPointer { ...@@ -492,35 +258,48 @@ class HazardPointer {
guard can be deleted */ guard can be deleted */
GuardType undefined_guard, GuardType undefined_guard,
/**<[IN] The guard value denoting "not guarded"*/ /**<[IN] The guard value denoting "not guarded"*/
int guards_per_thread int guards_per_thread,
/**<[IN] Number of guards per thread*/); /**<[IN] Number of guards per thread*/
int accessors = -1
/** /**<[IN] Number of accessors. Determines, how many threads will access
* Deallocates lists for hazard pointer management. Note that no objects this hazard pointer object. Default value -1 will allow the
* currently in the retired lists are deleted. This is the responsibility maximum amount of threads as defined with
* of the user. Usually, HazardPointer manages pointers of an object pool. \c embb::base::Thread::GetThreadsMaxCount()*/
* After destructing HazardPointer, the object pool is deleted, so that );
* everything is properly cleaned up.
/**
* Deallocates internal data structures. Additionally releases all objects
* currently held in the retired lists, using the release functor passed in
* the constructor.
*
* \notthreadsafe
*/ */
~HazardPointer(); ~HazardPointer();
/** /**
* Announces that the current thread stops participating in hazard pointer * Guards \c toGuard. If the guardedElement is passed to \c EnqueueForDeletion
* management. The other threads now take care of his retired list. * it is prevented from release from now on. The user must have a check, that
* EnqueueForDeletion has not been called on toGuard, before the guarding took
* effect.
* *
* \waitfree * \waitfree
*/ */
void DeactivateCurrentThread(); void Guard(int guardPosition, GuardType toGuard);
/** /**
* Guards \c guardedElement with the guard at position \c guardPosition * Enqueue a pointer for deletion. If not guarded, it is deleted immediately.
* If it is guarded, it is added to a thread local retired list, and deleted
* in a subsequent call to \c EnqueueForDeletion, when no guard is placed on
* it anymore.
*/ */
void GuardPointer(int guardPosition, GuardType guardedElement); void EnqueueForDeletion(GuardType guardedElement);
/** /**
* Enqueue a pointer for deletion. It is added to the retired list and * Explicitly remove guard from thread local slot.
* deleted when no thread accesses it anymore. *
* \waitfree
*/ */
void EnqueuePointerForDeletion(GuardType guardedElement); void RemoveGuard(int guardPosition);
}; };
} // namespace internal } // namespace internal
} // namespace containers } // namespace containers
......
...@@ -77,7 +77,12 @@ LockFreeMPMCQueue<Type, ValuePool>::~LockFreeMPMCQueue() { ...@@ -77,7 +77,12 @@ LockFreeMPMCQueue<Type, ValuePool>::~LockFreeMPMCQueue() {
template< typename Type, typename ValuePool > template< typename Type, typename ValuePool >
LockFreeMPMCQueue<Type, ValuePool>::LockFreeMPMCQueue(size_t capacity) : LockFreeMPMCQueue<Type, ValuePool>::LockFreeMPMCQueue(size_t capacity) :
capacity(capacity), capacity(capacity),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse. +1 for dummy node.
objectPool(
MPMCQueueNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(2) +
capacity + 1),
// Disable "this is used in base member initializer" warning. // Disable "this is used in base member initializer" warning.
// We explicitly want this. // We explicitly want this.
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
...@@ -89,13 +94,7 @@ delete_pointer_callback(*this, ...@@ -89,13 +94,7 @@ delete_pointer_callback(*this,
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop) #pragma warning(pop)
#endif #endif
hazardPointer(delete_pointer_callback, NULL, 2), hazardPointer(delete_pointer_callback, NULL, 2) {
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse. +1 for dummy node.
objectPool(
hazardPointer.GetRetiredListMaxSize()*
embb::base::Thread::GetThreadsMaxCount() +
capacity + 1) {
// Allocate dummy node to reduce the number of special cases to consider. // Allocate dummy node to reduce the number of special cases to consider.
internal::LockFreeMPMCQueueNode<Type>* dummyNode = objectPool.Allocate(); internal::LockFreeMPMCQueueNode<Type>* dummyNode = objectPool.Allocate();
// Initially, head and tail point to the dummy node. // Initially, head and tail point to the dummy node.
...@@ -120,7 +119,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryEnqueue(Type const& element) { ...@@ -120,7 +119,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryEnqueue(Type const& element) {
for (;;) { for (;;) {
my_tail = tail; my_tail = tail;
hazardPointer.GuardPointer(0, my_tail); hazardPointer.Guard(0, my_tail);
// Check if pointer is still valid after guarding. // Check if pointer is still valid after guarding.
if (my_tail != tail) { if (my_tail != tail) {
...@@ -163,12 +162,12 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) { ...@@ -163,12 +162,12 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
Type data; Type data;
for (;;) { for (;;) {
my_head = head; my_head = head;
hazardPointer.GuardPointer(0, my_head); hazardPointer.Guard(0, my_head);
if (my_head != head) continue; if (my_head != head) continue;
my_tail = tail; my_tail = tail;
my_next = my_head->GetNext(); my_next = my_head->GetNext();
hazardPointer.GuardPointer(1, my_next); hazardPointer.Guard(1, my_next);
if (head != my_head) continue; if (head != my_head) continue;
if (my_next == NULL) if (my_next == NULL)
...@@ -187,7 +186,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) { ...@@ -187,7 +186,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
break; break;
} }
hazardPointer.EnqueuePointerForDeletion(my_head); hazardPointer.EnqueueForDeletion(my_head);
element = data; element = data;
return true; return true;
} }
......
...@@ -81,13 +81,13 @@ capacity(capacity), ...@@ -81,13 +81,13 @@ capacity(capacity),
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop) #pragma warning(pop)
#endif #endif
hazardPointer(delete_pointer_callback, NULL, 1),
// Object pool, size with respect to the maximum number of retired nodes not // Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse: // eligible for reuse:
objectPool( objectPool(
hazardPointer.GetRetiredListMaxSize()* StackNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(1) +
embb::base::Thread::GetThreadsMaxCount() + capacity),
capacity) { hazardPointer(delete_pointer_callback, NULL, 1)
{
} }
template< typename Type, typename ValuePool > template< typename Type, typename ValuePool >
...@@ -128,7 +128,7 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) { ...@@ -128,7 +128,7 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
return false; return false;
// Guard top_cached // Guard top_cached
hazardPointer.GuardPointer(0, top_cached); hazardPointer.Guard(0, top_cached);
// Check if top is still top. If this is the case, it has not been // Check if top is still top. If this is the case, it has not been
// retired yet (because before retiring that thing, the retiring thread // retired yet (because before retiring that thing, the retiring thread
...@@ -144,16 +144,16 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) { ...@@ -144,16 +144,16 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
break; break;
} else { } else {
// We continue with the next and can unguard top_cached // We continue with the next and can unguard top_cached
hazardPointer.GuardPointer(0, NULL); hazardPointer.Guard(0, NULL);
} }
} }
Type data = top_cached->GetElement(); Type data = top_cached->GetElement();
// We don't need to read from this reference anymore, unguard it // We don't need to read from this reference anymore, unguard it
hazardPointer.GuardPointer(0, NULL); hazardPointer.Guard(0, NULL);
hazardPointer.EnqueuePointerForDeletion(top_cached); hazardPointer.EnqueueForDeletion(top_cached);
element = data; element = data;
return true; return true;
......
...@@ -113,8 +113,17 @@ class LockFreeMPMCQueue { ...@@ -113,8 +113,17 @@ class LockFreeMPMCQueue {
* least as many elements, maybe more. * least as many elements, maybe more.
*/ */
size_t capacity; size_t capacity;
// Do not change the ordering of class local variables.
// Important for initialization. /**
* The object pool, used for lock-free memory allocation.
*
* Warning: the objectPool has to be initialized before the hazardPointer
* object, to be sure that the hazardPointer object is destructed before the
* Pool as the hazardPointer object might return elements to the pool in its
* destructor. So the ordering of the members objectPool and hazardPointer is
* important here!
*/
ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
/** /**
* Callback to the method that is called by hazard pointers if a pointer is * Callback to the method that is called by hazard pointers if a pointer is
...@@ -124,15 +133,17 @@ class LockFreeMPMCQueue { ...@@ -124,15 +133,17 @@ class LockFreeMPMCQueue {
delete_pointer_callback; delete_pointer_callback;
/** /**
* The hazard pointer object, used for memory management. * Definition of the used hazard pointer type
*/ */
embb::containers::internal::HazardPointer typedef embb::containers::internal::HazardPointer
< internal::LockFreeMPMCQueueNode<Type>* > hazardPointer; < internal::LockFreeMPMCQueueNode<Type>* >
MPMCQueueNodeHazardPointer_t;
/** /**
* The object pool, used for lock-free memory allocation. * The hazard pointer object, used for memory management.
*/ */
ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool; MPMCQueueNodeHazardPointer_t hazardPointer;
/** /**
* Atomic pointer to the head node of the queue * Atomic pointer to the head node of the queue
......
...@@ -187,11 +187,6 @@ class LockFreeStack { ...@@ -187,11 +187,6 @@ class LockFreeStack {
delete_pointer_callback; delete_pointer_callback;
/** /**
* The hazard pointer object, used for memory management.
*/
internal::HazardPointer<internal::LockFreeStackNode<Type>*> hazardPointer;
/**
* The callback function, used to cleanup non-hazardous pointers. * The callback function, used to cleanup non-hazardous pointers.
* \see delete_pointer_callback * \see delete_pointer_callback
*/ */
...@@ -199,10 +194,27 @@ class LockFreeStack { ...@@ -199,10 +194,27 @@ class LockFreeStack {
/** /**
* The object pool, used for lock-free memory allocation. * The object pool, used for lock-free memory allocation.
*
* Warning: the objectPool has to be initialized before the hazardPointer
* object, to be sure that the hazardPointer object is destructed before the
* Pool as the hazardPointer object might return elements to the pool in its
* destructor. So the ordering of the members objectPool and hazardPointer is
* important here!
*/ */
ObjectPool< internal::LockFreeStackNode<Type>, ValuePool > objectPool; ObjectPool< internal::LockFreeStackNode<Type>, ValuePool > objectPool;
/** /**
* Definition of the used hazard pointer type
*/
typedef internal::HazardPointer < internal::LockFreeStackNode<Type>* >
StackNodeHazardPointer_t;
/**
* The hazard pointer object, used for memory management.
*/
StackNodeHazardPointer_t hazardPointer;
/**
* Atomic pointer to the top node of the stack (element that is popped next) * Atomic pointer to the top node of the stack (element that is popped next)
*/ */
embb::base::Atomic<internal::LockFreeStackNode<Type>*> top; embb::base::Atomic<internal::LockFreeStackNode<Type>*> top;
......
...@@ -31,24 +31,73 @@ ...@@ -31,24 +31,73 @@
namespace embb { namespace embb {
namespace containers { namespace containers {
namespace test { namespace test {
IntObjectTestPool::IntObjectTestPool(unsigned int poolSize) :
poolSize(poolSize)
{
  // Backing storage for the pool objects and their per-cell state flags.
  simplePoolObjects = static_cast<int*>(
    embb::base::Allocation::Allocate(sizeof(int)*poolSize));

  simplePool = static_cast<embb::base::Atomic<int>*> (
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)*
    poolSize));

  for (unsigned int cell = 0; cell != poolSize; ++cell) {
    // Construct each atomic in place, then mark the cell as free and
    // zero the associated object.
    new (&simplePool[cell]) embb::base::Atomic<int>;
    simplePool[cell] = FREE_MARKER;
    simplePoolObjects[cell] = 0;
  }
}
IntObjectTestPool::~IntObjectTestPool() {
  embb::base::Allocation::Free(simplePoolObjects);
  for (unsigned int i = 0; i != poolSize; ++i) {
    // Explicitly run the destructor of each in-place constructed atomic.
    simplePool[i].~Atomic();
  }
  embb::base::Allocation::Free(simplePool);
}
int* IntObjectTestPool::Allocate() {
  // Linear scan over the pool; try to claim the first free cell by
  // atomically swapping its marker from FREE to ALLOCATED.
  for (unsigned int cell = 0; cell != poolSize; ++cell) {
    int freeMarker = FREE_MARKER;
    bool claimed =
      simplePool[cell].CompareAndSwap(freeMarker, ALLOCATED_MARKER);
    if (claimed) {
      return &simplePoolObjects[cell];
    }
  }
  // Pool exhausted.
  return 0;
}
void IntObjectTestPool::Release(int* objectPointer) {
  // Map the object pointer back to its cell index and mark the cell free.
  int cellIndex = static_cast<int>(objectPointer - simplePoolObjects);
  simplePool[cellIndex].Store(FREE_MARKER);
}
HazardPointerTest::HazardPointerTest() : HazardPointerTest::HazardPointerTest() :
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push) #pragma warning(push)
#pragma warning(disable:4355) #pragma warning(disable:4355)
#endif #endif
delete_pointer_callback(*this, &HazardPointerTest::DeletePointerCallback), deletePointerCallback(*this, &HazardPointerTest::DeletePointerCallback),
#ifdef EMBB_PLATFORM_COMPILER_MSVC #ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop) #pragma warning(pop)
#endif #endif
object_pool(NULL), objectPool(NULL),
stack(NULL), stack(NULL),
hp(NULL), hazardPointer(NULL),
n_threads(static_cast<int> nThreads(static_cast<int>
(partest::TestSuite::GetDefaultNumThreads())) { (partest::TestSuite::GetDefaultNumThreads())) {
n_elements_per_thread = 100; nElementsPerThread = 100;
n_elements = n_threads*n_elements_per_thread; nElements = nThreads*nElementsPerThread;
embb::base::Function < void, embb::base::Atomic<int>* > embb::base::Function < void, embb::base::Atomic<int>* >
delete_pointer_callback( deletePointerCallback(
*this, *this,
&HazardPointerTest::DeletePointerCallback); &HazardPointerTest::DeletePointerCallback);
...@@ -59,39 +108,46 @@ n_threads(static_cast<int> ...@@ -59,39 +108,46 @@ n_threads(static_cast<int>
// placed, the pointer is not allowed to be deleted until the second thread // placed, the pointer is not allowed to be deleted until the second thread
// removes this guard. // removes this guard.
CreateUnit("HazardPointerTestThatGuardWorks"). CreateUnit("HazardPointerTestThatGuardWorks").
Pre(&HazardPointerTest::HazardPointerTest1_Pre, this). Pre(&HazardPointerTest::HazardPointerTest1Pre, this).
Add( Add(
&HazardPointerTest::HazardPointerTest1_ThreadMethod, &HazardPointerTest::HazardPointerTest1ThreadMethod,
this, static_cast<size_t>(n_threads)). this, static_cast<size_t>(nThreads)).
Post(&HazardPointerTest::HazardPointerTest1_Post, this); Post(&HazardPointerTest::HazardPointerTest1Post, this);
} }
void HazardPointerTest::HazardPointerTest1_Pre() { void HazardPointerTest::HazardPointerTest1Pre() {
embb_internal_thread_index_reset(); embb_internal_thread_index_reset();
object_pool = new embb::containers::ObjectPool< embb::base::Atomic<int> >
(static_cast<size_t>(n_elements)); objectPool =
stack = new embb::containers::LockFreeStack< embb::base::Atomic<int>* > embb::base::Allocation::
(static_cast<size_t>(n_elements)); New<embb::containers::ObjectPool< embb::base::Atomic<int> > >
hp = new embb::containers::internal::HazardPointer< embb::base::Atomic<int>*> (static_cast<size_t>(nElements));
(delete_pointer_callback,
NULL, stack = embb::base::Allocation::
New<embb::containers::LockFreeStack< embb::base::Atomic<int>* > >
(static_cast<size_t>(nElements));
hazardPointer = embb::base::Allocation::
New<embb::containers::internal::HazardPointer < embb::base::Atomic<int>* > >
(deletePointerCallback,
static_cast<embb::base::Atomic<int>*>(NULL),
1); 1);
} }
void HazardPointerTest::HazardPointerTest1_Post() { void HazardPointerTest::HazardPointerTest1Post() {
delete object_pool; embb::base::Allocation::Delete(hazardPointer);
delete stack; embb::base::Allocation::Delete(objectPool);
delete hp; embb::base::Allocation::Delete(stack);
} }
void HazardPointerTest::HazardPointerTest1_ThreadMethod() { void HazardPointerTest::HazardPointerTest1ThreadMethod() {
unsigned int thread_index; unsigned int thread_index;
embb_internal_thread_index(&thread_index); embb_internal_thread_index(&thread_index);
for (int i = 0; i != n_elements_per_thread; ++i) { for (int i = 0; i != nElementsPerThread; ++i) {
embb::base::Atomic<int>* allocated_object = object_pool->Allocate(0); embb::base::Atomic<int>* allocated_object = objectPool->Allocate(0);
hp->GuardPointer(0, allocated_object); hazardPointer->Guard(0, allocated_object);
bool success = stack->TryPush(allocated_object); bool success = stack->TryPush(allocated_object);
...@@ -120,36 +176,360 @@ void HazardPointerTest::HazardPointerTest1_ThreadMethod() { ...@@ -120,36 +176,360 @@ void HazardPointerTest::HazardPointerTest1_ThreadMethod() {
} }
PT_ASSERT(success_pop == true); PT_ASSERT(success_pop == true);
allocated_object->Store(1); allocated_object->Store(1);
hp->EnqueuePointerForDeletion(allocated_object); hazardPointer->EnqueueForDeletion(allocated_object);
if (!same) { if (!same) {
hp->GuardPointer(0, allocated_object_from_different_thread); hazardPointer->Guard(0, allocated_object_from_different_thread);
// if this holds, we were successful in guarding... otherwise we // if this holds, we were successful in guarding... otherwise we
// were to late, because the pointer has already been added // were to late, because the pointer has already been added
// to the retired list. // to the retired list.
if (*allocated_object_from_different_thread == 0) { if (*allocated_object_from_different_thread == 0) {
// the pointer must not be deleted here! // the pointer must not be deleted here!
vector_mutex.Lock(); vectorMutex.Lock();
for (std::vector< embb::base::Atomic<int>* >::iterator for (std::vector< embb::base::Atomic<int>* >::iterator
it = deleted_vector.begin(); it = deletedVector.begin();
it != deleted_vector.end(); it != deletedVector.end();
++it) { ++it) {
PT_ASSERT(*it != allocated_object_from_different_thread); PT_ASSERT(*it != allocated_object_from_different_thread);
} }
vector_mutex.Unlock(); vectorMutex.Unlock();
} }
hp->GuardPointer(0, NULL); hazardPointer->Guard(0, NULL);
} }
} }
} }
void HazardPointerTest::DeletePointerCallback void HazardPointerTest::DeletePointerCallback
(embb::base::Atomic<int>* to_delete) { (embb::base::Atomic<int>* to_delete) {
vector_mutex.Lock(); vectorMutex.Lock();
deleted_vector.push_back(to_delete); deletedVector.push_back(to_delete);
vector_mutex.Unlock(); vectorMutex.Unlock();
}
// Invoked by the hazard pointer instance when an enqueued object is no
// longer guarded by any thread; return it to the test pool for reuse.
void HazardPointerTest2::DeletePointerCallback(int* toDelete) {
  testPool->Release(toDelete);
}
// Tries to place a guard on each object published in this thread's slice of
// the shared arrays. Each thread owns guardsPerThreadCount consecutive slots
// starting at guardsPerThreadCount * threadIndex. A guard only "counts" if
// the published pointer is unchanged after guarding (standard hazard pointer
// validation step); successfully placed guards are announced via
// sharedGuarded so other threads can observe progress.
//
// Returns true if and only if all slots of this thread were already guarded
// on entry (i.e., this thread has nothing left to do).
bool HazardPointerTest2::SetRelativeGuards() {
  unsigned int threadIndex;
  embb_internal_thread_index(&threadIndex);

  // First slot of this thread's slice in the shared arrays.
  unsigned int my_begin = guardsPerThreadCount*threadIndex;
  int guardNumber = 0;
  unsigned int alreadyGuarded = 0;

  for (unsigned int i = my_begin; i != my_begin + guardsPerThreadCount; ++i){
    if (sharedGuarded[i] != 0) {
      // This slot is already guarded; just account for it.
      alreadyGuarded++;
      guardNumber++;
      continue;
    }

    int * toGuard = sharedAllocated[i];
    if (toGuard) {
      hazardPointer->Guard(guardNumber, toGuard);

      // changed in the meantime?
      if (toGuard == sharedAllocated[i].Load()) {
        // guard was successful. Communicate to other threads.
        sharedGuarded[i] = toGuard;
      }
      else {
        // reset the guard, couldn't guard...
        hazardPointer->RemoveGuard(guardNumber);
      }
    }
    guardNumber++;
  }
  return(alreadyGuarded == guardsPerThreadCount);
}
// Master role: repeatedly allocates objects from the test pool, publishes
// them via sharedAllocated, places its own relative guards, and then retires
// the objects again — until every slot in the shared guard array has been
// guarded by some thread. This drives the retired lists of all participants
// toward their maximum fill, which the Post step verifies.
void HazardPointerTest2::HazardPointerTest2Master() {
  // while the hazard pointer guard array is not full
  int** allocatedLocal = static_cast<int**>(
    embb::base::Allocation::Allocate(sizeof(int*)*guaranteedCapacityPool));

  bool full = false;
  while (!full) {
    // Check whether every shared slot is guarded by now.
    full = true;
    for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
      if (sharedGuarded[i] == 0) {
        full = false;
        break;
      }
    }

    // not all guards set
    for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
      allocatedLocal[i] = testPool->Allocate();
      sharedAllocated[i].Store(allocatedLocal[i]);
    }

    // set my hazards. We do not have to check, this must be successful
    // here.
    SetRelativeGuards();

    // free
    for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
      sharedAllocated[i].Store(0);
      hazardPointer->EnqueueForDeletion(allocatedLocal[i]);
    }
  }
  embb::base::Allocation::Free(allocatedLocal);
}
void HazardPointerTest2::HazardPointerTest2Slave() {
  // Ensure this thread has an internal index assigned before guarding
  // (embb_internal_thread_index assigns one as a side effect).
  unsigned int slaveThreadIndex;
  embb_internal_thread_index(&slaveThreadIndex);

  // Spin until all guard slots of this thread have been placed.
  while (!SetRelativeGuards()) {}
}
void HazardPointerTest2::HazardPointerTest2Pre() {
  embb_internal_thread_index_reset();
  currentMaster = 0;
  sync1 = 0;
  sync2 = 0;

  // The test pool must exist before the hazard pointer instance, since the
  // delete callback returns objects to the pool.
  testPool = embb::base::Allocation::New<IntObjectTestPool>(
    poolSizeUsingHazardPointer);

  // after the pool has been created, we create the hp class
  hazardPointer = embb::base::Allocation::New <
    embb::containers::internal::HazardPointer<int*> >
    (deletePointerCallback, static_cast<int*>(NULL),
    static_cast<int>(guardsPerThreadCount), nThreads);

  // Shared array announcing successfully placed guards.
  sharedGuarded = static_cast<embb::base::Atomic<int*>*>(
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int*>)*
    guaranteedCapacityPool)
    );
  for (unsigned int cell = 0; cell != guaranteedCapacityPool; ++cell) {
    // in-place construct each atomic, then start with "no guard"
    new (&sharedGuarded[cell]) embb::base::Atomic < int* >;
    sharedGuarded[cell] = 0;
  }

  // Shared array publishing currently allocated objects.
  sharedAllocated = static_cast<embb::base::Atomic<int*>*>(
    embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int*>)*
    guaranteedCapacityPool)
    );
  for (unsigned int cell = 0; cell != guaranteedCapacityPool; ++cell) {
    // in-place construct each atomic, then start with "nothing allocated"
    new (&sharedAllocated[cell]) embb::base::Atomic < int* >;
    sharedAllocated[cell] = 0;
  }
}
// Verifies the worst-case memory invariants after all threads finished:
// every retired list is completely filled with pairwise-disjoint pointers,
// the guaranteed capacity can still be allocated from the pool, and after
// destroying the hazard pointer instance ALL pointers are back in the pool
// (the hazard pointer implementation releases its memory on destruction).
void HazardPointerTest2::HazardPointerTest2Post() {
  // Retired lists are laid out as one flat array: each thread owns a
  // contiguous segment of nThreads*guardsPerThreadCount entries.
  for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) {
    for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)*
      guardsPerThreadCount; ++i2) {
      if (hazardPointer->threadLocalRetiredLists
        [i2 + i*nThreads*guardsPerThreadCount] == NULL) {
        // all retired lists must be completely filled
        PT_ASSERT(false);
      }
    }
  }
  unsigned int checks = 0;
  // Compare every retired entry against every other one (skipping the
  // comparison of an entry with itself).
  for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) {
    for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)*
      guardsPerThreadCount; ++i2) {
      for (unsigned int j = 0; j != static_cast<unsigned int>(nThreads); ++j) {
        for (unsigned int j2 = 0; j2 != static_cast<unsigned int>(nThreads)*
          guardsPerThreadCount; ++j2) {
          if (i2 == j2 && i == j)
            continue;
          // all retired elements have to be disjoint
          PT_ASSERT(
            hazardPointer->threadLocalRetiredLists
            [i2 + i*nThreads*guardsPerThreadCount] !=
            hazardPointer->threadLocalRetiredLists
            [j2 + j*nThreads*guardsPerThreadCount]
          );
          checks++;
        }
      }
    }
  }
  // sanity check on the count of expected comparisons: with
  // N = nThreads*nThreads*guardsPerThreadCount entries we expect N*(N-1).
  PT_ASSERT(
    checks ==
    nThreads*nThreads*guardsPerThreadCount *
    (nThreads*nThreads*guardsPerThreadCount - 1)
  );
  std::vector< int* > additionallyAllocated;
  // we should be able to still allocate the guaranteed capacity of
  // elements from the pool.
  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    int* allocated = testPool->Allocate();
    // allocated is not allowed to be zero
    PT_ASSERT(allocated != NULL);
    // push to vector, to check if elements are disjunctive and to release
    // afterwards.
    additionallyAllocated.push_back(allocated);
  }
  // the pool should now be empty
  PT_ASSERT(testPool->Allocate() == NULL);
  // release allocated elements...
  for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
    testPool->Release(additionallyAllocated[i]);
  }
  // the additionallyAllocated elements shall be pairwise disjoint
  for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
    for (unsigned int i2 = 0; i2 != additionallyAllocated.size(); ++i2) {
      if (i == i2)
        continue;
      PT_ASSERT(additionallyAllocated[i] !=
        additionallyAllocated[i2]);
    }
  }
  // no allocated element should be in any retired list...
  for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) {
    for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) {
      for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)*
        guardsPerThreadCount; ++i2) {
        PT_ASSERT(
          hazardPointer->threadLocalRetiredLists
          [i2 + i*nThreads*guardsPerThreadCount] !=
          additionallyAllocated[a]
        );
      }
    }
  }
  // explicit destructor call for each in-place constructed cell
  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    sharedGuarded[i].~Atomic();
  }
  embb::base::Allocation::Free(sharedGuarded);
  // explicit destructor call for each in-place constructed cell
  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    sharedAllocated[i].~Atomic();
  }
  embb::base::Allocation::Free(sharedAllocated);
  embb::base::Allocation::Delete(hazardPointer);
  // after deleting the hazard pointer object, all retired pointers have
  // to be returned to the pool!
  std::vector<int*> elementsInPool;
  int* nextElement;
  while ((nextElement = testPool->Allocate()) != NULL) {
    for (unsigned int i = 0; i != elementsInPool.size(); ++i) {
      // all elements need to be disjoint
      PT_ASSERT(elementsInPool[i] != nextElement);
    }
    elementsInPool.push_back(nextElement);
  }
  // all elements should have been returned by the hp object, so we should be
  // able to acquire all elements.
  PT_ASSERT(elementsInPool.size() == poolSizeUsingHazardPointer);
  embb::base::Allocation::Delete(testPool);
}
// Entry point executed by every thread. The test runs in rounds: in each
// round exactly one thread (currentMaster) acts as master while the others
// act as slaves; after the round one thread is elected via CAS to wipe the
// shared arrays and advance the master. The method returns once every
// thread has been master exactly once.
void HazardPointerTest2::HazardPointerTest2ThreadMethod() {
  for (;;) {
    unsigned int threadIndex;
    embb_internal_thread_index(&threadIndex);
    if (threadIndex == currentMaster) {
      HazardPointerTest2Master();
    }
    else {
      HazardPointerTest2Slave();
    }
    sync1.FetchAndAdd(1);
    // wait until cleanup thread signals to be finished
    while (sync1 != 0) {
      int expected = nThreads;
      int desired = finishMarker;
      // select thread, responsible for cleanup: the CAS succeeds for
      // exactly one thread, once all nThreads threads incremented sync1
      if (sync1.CompareAndSwap(expected, desired)) {
        //wipe arrays!
        for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
          sharedGuarded[i] = 0;
          sharedAllocated[i] = 0;
        }
        // increase master
        currentMaster.FetchAndAdd(1);
        // releasing sync1 (to 0) lets the other threads leave this loop,
        // so sync2 must be reset before that
        sync2 = 0;
        sync1.Store(0);
      }
    }
    // wait for all threads to reach this position (barrier on sync2)
    sync2.FetchAndAdd(1);
    while (sync2 != static_cast<unsigned int>(nThreads)) {}
    // if each thread was master once, terminate.
    if (currentMaster == static_cast<unsigned int>(nThreads)) {
      return;
    }
  }
}
// Computes the test dimensions and registers the worst-case memory test
// unit, running HazardPointerTest2ThreadMethod on nThreads threads.
HazardPointerTest2::HazardPointerTest2() :
  nThreads(static_cast<int>
  (partest::TestSuite::GetDefaultNumThreads())),
// C4355 ("'this' used in base member initializer list") is raised because
// the callback is bound to *this before construction completed; this is
// safe here, as the callback is not invoked during construction.
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4355)
#endif
  deletePointerCallback(
  *this,
  &HazardPointerTest2::DeletePointerCallback)
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
  {
  guardsPerThreadCount = 5;
  // capacity the hazard pointer implementation must guarantee: every
  // thread may hold guardsPerThreadCount guards at the same time
  guaranteedCapacityPool = guardsPerThreadCount*nThreads;
  // pool sized for the worst case: the guaranteed capacity plus the
  // maximum number of retired-but-not-yet-freed pointers
  poolSizeUsingHazardPointer = guaranteedCapacityPool +
    guardsPerThreadCount*nThreads*nThreads;
  // NOTE(review): return value unused — presumably called for its side
  // effect of initializing the thread subsystem; confirm.
  embb::base::Thread::GetThreadsMaxCount();
  CreateUnit("HazardPointerTestSimulateMemoryWorstCase").
    Pre(&HazardPointerTest2::HazardPointerTest2Pre, this).
    Add(
    &HazardPointerTest2::HazardPointerTest2ThreadMethod,
    this, static_cast<size_t>(nThreads)).
    Post(&HazardPointerTest2::HazardPointerTest2Post, this);
}
} // namespace test } // namespace test
} // namespace containers } // namespace containers
} // namespace embb } // namespace embb
...@@ -36,33 +36,116 @@ ...@@ -36,33 +36,116 @@
namespace embb { namespace embb {
namespace containers { namespace containers {
namespace test { namespace test {
/**
 * @brief a very simple wait-free object pool implementation to have tests
 * being independent of the EMBB object pool implementation.
 *
 * Owns two raw arrays (the int elements and one atomic marker per element);
 * each marker is either ALLOCATED_MARKER or FREE_MARKER.
 */
class IntObjectTestPool {
 private:
  // backing storage for the pool elements
  int* simplePoolObjects;
  // per-element state markers (ALLOCATED_MARKER / FREE_MARKER)
  embb::base::Atomic<int>* simplePool;

 public:
  static const int ALLOCATED_MARKER = 1;
  static const int FREE_MARKER = 0;

  // number of elements managed by this pool
  unsigned int poolSize;

  /**
   * Create a pool managing \c poolSize elements.
   *
   * @param poolSize the number of elements in the pool
   */
  explicit IntObjectTestPool(unsigned int poolSize);

  ~IntObjectTestPool();

  /**
   * Allocate object from the pool
   *
   * @return the allocated object, or NULL if the pool is exhausted
   */
  int* Allocate();

  /**
   * Return an element to the pool
   *
   * @param objectPointer the object to be freed
   */
  void Release(int* objectPointer);
};
class HazardPointerTest : public partest::TestCase { class HazardPointerTest : public partest::TestCase {
private: private:
embb::base::Function<void, embb::base::Atomic<int>*> delete_pointer_callback; embb::base::Function<void, embb::base::Atomic<int>*> deletePointerCallback;
//used to allocate random stuff, we will just use the pointers, not the //used to allocate random stuff, we will just use the pointers, not the
//contents //contents
embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool; embb::containers::ObjectPool< embb::base::Atomic<int> >* objectPool;
//used to move pointer between threads //used to move pointer between threads
embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack; embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack;
embb::base::Mutex vector_mutex; embb::base::Mutex vectorMutex;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>* hp; embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>*
std::vector< embb::base::Atomic<int>* > deleted_vector; hazardPointer;
int n_threads; std::vector< embb::base::Atomic<int>* > deletedVector;
int n_elements_per_thread; int nThreads;
int n_elements; int nElementsPerThread;
int nElements;
public: public:
/** /**
* Adds test methods. * Adds test methods.
*/ */
HazardPointerTest(); HazardPointerTest();
void HazardPointerTest1_Pre(); void HazardPointerTest1Pre();
void HazardPointerTest1_Post(); void HazardPointerTest1Post();
void HazardPointerTest1_ThreadMethod(); void HazardPointerTest1ThreadMethod();
void DeletePointerCallback(embb::base::Atomic<int>* to_delete); void DeletePointerCallback(embb::base::Atomic<int>* toDelete);
};
// Stress test simulating the worst-case memory consumption of the hazard
// pointer implementation; see the test methods in the .cc file.
class HazardPointerTest2 : public partest::TestCase {
 private:
  // number of threads, participating in that test
  int nThreads;
  // callback handed to the hazard pointer instance; returns retired
  // pointers to the test pool
  embb::base::Function<void, int*> deletePointerCallback;
  // the thread id of the master
  embb::base::Atomic<unsigned int> currentMaster;
  // variables, to synchronize threads. At each point in time, one master,
  // the master changes each round until each thread was assigned master once.
  embb::base::Atomic<int> sync1;
  embb::base::Atomic<unsigned int> sync2;
  // number of guards each thread holds simultaneously
  unsigned int guardsPerThreadCount;
  // number of elements the hazard pointer instance must be able to guard
  // at the same time (guardsPerThreadCount * nThreads)
  unsigned int guaranteedCapacityPool;
  // total size of the test pool, covering the worst-case number of
  // retired-but-not-yet-freed pointers
  unsigned int poolSizeUsingHazardPointer;
  // The threads write here, if they guarded an object successfully. Used to
  // determine when all allocated objects were guarded successfully.
  embb::base::Atomic<int*>* sharedGuarded;
  // This array is used by the master, to communicate and share what he has
  // allocated with the slaves.
  embb::base::Atomic<int*>* sharedAllocated;
  // Reference to the object pool
  IntObjectTestPool* testPool;
  // the hazard pointer instance under test
  embb::containers::internal::HazardPointer<int*>* hazardPointer;
  // value stored in sync1 by the thread elected for round cleanup
  static const int finishMarker = -1;

 public:
  void DeletePointerCallback(int* toDelete);
  bool SetRelativeGuards();
  void HazardPointerTest2Master();
  void HazardPointerTest2Slave();
  void HazardPointerTest2Pre();
  void HazardPointerTest2Post();
  void HazardPointerTest2ThreadMethod();
  HazardPointerTest2();
};
} // namespace test } // namespace test
} // namespace containers } // namespace containers
} // namespace embb } // namespace embb
......
...@@ -55,6 +55,7 @@ using embb::containers::test::HazardPointerTest; ...@@ -55,6 +55,7 @@ using embb::containers::test::HazardPointerTest;
using embb::containers::test::QueueTest; using embb::containers::test::QueueTest;
using embb::containers::test::StackTest; using embb::containers::test::StackTest;
using embb::containers::test::ObjectPoolTest; using embb::containers::test::ObjectPoolTest;
using embb::containers::test::HazardPointerTest2;
PT_MAIN("Data Structures C++") { PT_MAIN("Data Structures C++") {
unsigned int max_threads = static_cast<unsigned int>( unsigned int max_threads = static_cast<unsigned int>(
...@@ -64,6 +65,7 @@ PT_MAIN("Data Structures C++") { ...@@ -64,6 +65,7 @@ PT_MAIN("Data Structures C++") {
PT_RUN(PoolTest< WaitFreeArrayValuePool<int COMMA -1> >); PT_RUN(PoolTest< WaitFreeArrayValuePool<int COMMA -1> >);
PT_RUN(PoolTest< LockFreeTreeValuePool<int COMMA -1> >); PT_RUN(PoolTest< LockFreeTreeValuePool<int COMMA -1> >);
PT_RUN(HazardPointerTest); PT_RUN(HazardPointerTest);
PT_RUN(HazardPointerTest2);
PT_RUN(QueueTest< WaitFreeSPSCQueue< ::std::pair<size_t COMMA int> > >); PT_RUN(QueueTest< WaitFreeSPSCQueue< ::std::pair<size_t COMMA int> > >);
PT_RUN(QueueTest< LockFreeMPMCQueue< ::std::pair<size_t COMMA int> > PT_RUN(QueueTest< LockFreeMPMCQueue< ::std::pair<size_t COMMA int> >
COMMA true COMMA true >); COMMA true COMMA true >);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment