Commit a023d6e4 by Christian Kern

This resolves ticket #523 - rework hazard pointer:

   - Do not use lists
   - Remove HelpScan
   - Release memory to the memory manager when destructed (previously, the memory manager had to release this memory itself, in its own destructor).
   - Avoid stdlib algorithms
parent d36f1bdb
......@@ -129,5 +129,13 @@ void embb_internal_thread_index_set_max(unsigned int max) {
}
void embb_internal_thread_index_reset() {
// This function is only called in tests, usually when all other threads
// except the main thread have terminated. However, the main thread may still
// have its old index value stored in its thread-local storage. Because the
// counter is reset below, that stale index could additionally be assigned to
// another thread, which may lead to hard-to-detect bugs. Therefore, also
// reset the thread-local thread id here.
embb_internal_thread_index_var = UINT_MAX;
embb_counter_init(embb_thread_index_counter());
}
......@@ -30,386 +30,381 @@
namespace embb {
namespace containers {
namespace internal {
template< typename ElementT >
FixedSizeList<ElementT>::FixedSizeList(size_t max_size) :
max_size(max_size),
size(0) {
  // Reserve raw storage for max_size elements up front; the list never grows
  // beyond this fixed capacity. Elements are not constructed here -- PushBack
  // copy-assigns into this storage, which presumably requires trivially
  // constructible element types (guards are typically raw pointers) -- TODO
  // confirm.
  const size_t storage_bytes = sizeof(ElementT) * max_size;
  elementsArray = static_cast<ElementT*>(
      embb::base::Allocation::Allocate(storage_bytes));
}
/**
* Returns the number of elements currently stored in the list.
*/
template< typename ElementT >
inline size_t FixedSizeList<ElementT>::GetSize() const {
return size;
}
/**
* Returns the capacity (maximum number of elements) of the list, fixed at
* construction time.
*/
template< typename ElementT >
inline size_t FixedSizeList<ElementT>::GetMaxSize() const {
return max_size;
}
/**
* Logically empties the list by resetting its size. The capacity is
* unchanged and element destructors are not invoked.
*/
template< typename ElementT >
inline void FixedSizeList<ElementT>::clear() {
size = 0;
}
/**
* Returns an iterator (plain pointer) to the first element of the list.
*/
template< typename ElementT >
typename FixedSizeList<ElementT>::iterator
FixedSizeList<ElementT>::begin() const {
return &elementsArray[0];
}
// Visual Studio is complaining, that the return in the last line of this
// function is not reachable. This is true, as long as exceptions are enabled.
// Otherwise, the exception becomes an assertion and with disabling assertions,
// the code becomes reachable. So, disabling this warning.
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4702)
#endif
template< typename GuardType >
unsigned int HazardPointer< GuardType >::GetCurrentThreadIndex() {
template< typename ElementT >
typename FixedSizeList<ElementT>::iterator
FixedSizeList<ElementT>::end() const {
return &elementsArray[size];
}
// first, get the EMBB native thread id.
unsigned int embbThreadIndex;
template< typename ElementT >
FixedSizeList< ElementT > &
FixedSizeList<ElementT>::operator= (const FixedSizeList & other) {
size = 0;
int return_val = embb_internal_thread_index(&embbThreadIndex);
if (max_size < other.size) {
EMBB_THROW(embb::base::ErrorException, "Copy target to small");
if (return_val != EMBB_SUCCESS) {
EMBB_THROW(embb::base::ErrorException, "Could not get thread id");
}
for (const_iterator it = other.begin(); it != other.end(); ++it) {
PushBack(*it);
// iterate over the mappings array
for (unsigned int i = 0; i != accessorCount; ++i) {
// end of mappings? then we need to write our id
if (threadIdMapping[i] == -1) {
// try to CAS the initial value with out thread id
int expected = -1;
if (threadIdMapping[i].CompareAndSwap(expected,
static_cast<int>(embbThreadIndex))) {
//successful, return our mapping
return i;
}
}
return *this;
}
template< typename ElementT >
bool FixedSizeList<ElementT>::PushBack(ElementT const el) {
if (size + 1 > max_size) {
return false;
if (threadIdMapping[i] == static_cast<int>(embbThreadIndex)) {
// found our mapping!
return i;
}
}
elementsArray[size] = el;
size++;
return true;
}
/**
* Destructor: releases the array storage. Element destructors are not
* invoked here -- assumes trivially destructible element types (TODO
* confirm; guards are typically raw pointers).
*/
template< typename ElementT >
FixedSizeList<ElementT>::~FixedSizeList() {
embb::base::Allocation::Free(elementsArray);
}
// when we reach this point, we have too many accessors
// (no mapping possible)
EMBB_THROW(embb::base::ErrorException, "Too many accessors");
template< typename GuardType >
bool HazardPointerThreadEntry<GuardType>::IsActive() {
return is_active;
return 0;
}
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
// Attempts to atomically flip is_active from false to true. Returns true
// iff this call performed the transition, i.e. the entry was inactive and
// the caller has now reserved it.
template< typename GuardType >
bool HazardPointerThreadEntry<GuardType>::TryReserve() {
  bool inactive_state = false;
  const bool reserved = is_active.CompareAndSwap(inactive_state, true);
  return reserved;
}
void HazardPointer< GuardType >::RemoveGuard(int guardPosition){
const unsigned int myThreadId = GetCurrentThreadIndex();
/**
* Marks this entry as inactive. After deactivation, other threads may
* reserve this entry (via TryReserve) to clean up its retired list.
*/
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::Deactivate() {
is_active = false;
}
// check invariants...
assert(guardPosition < guardsPerThread && myThreadId < accessorCount);
template< typename GuardType >
size_t HazardPointerThreadEntry<GuardType>::GetRetiredCounter() {
return retired_list.GetSize();
// set guard
guards[guardPosition*accessorCount + myThreadId] = undefinedGuard;
}
/**
* Returns a reference to this thread's retired list (pointers waiting to be
* released once no guard protects them anymore).
*/
template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>::
GetRetired() {
return retired_list;
}
HazardPointer< GuardType >::HazardPointer(
embb::base::Function<void, GuardType> freeGuardCallback,
GuardType undefinedGuard, int guardsPerThread, int accessors) :
accessorCount(accessors == -1 ?
embb::base::Thread::GetThreadsMaxCount() :
accessors),
undefinedGuard(undefinedGuard),
guardsPerThread(guardsPerThread),
freeGuardCallback(freeGuardCallback) {
threadIdMapping =
static_cast<embb::base::Atomic<int>*>(
embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)
*accessorCount));
for (unsigned int i = 0; i != accessorCount; ++i) {
//in-place new for each cell
new (&threadIdMapping[i]) embb::base::Atomic < int > ;
}
/**
* Returns a reference to the temporary retired list, used during Scan to
* build the new retired list.
*/
template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>::
GetRetiredTemp() {
return retired_list_temp;
}
guards = static_cast<embb::base::Atomic< GuardType >*>
(embb::base::Allocation::Allocate(
sizeof(embb::base::Atomic< GuardType >) *
guardsPerThread * accessorCount
));
/**
* Returns a reference to the temporary list that Scan uses to snapshot all
* currently set guards of active threads.
*/
template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>::
GetHazardTemp() {
return hazard_pointer_list_temp;
}
for (unsigned int i = 0; i != guardsPerThread * accessorCount; ++i) {
//in-place new for each cell
new (&guards[i]) embb::base::Atomic < GuardType > ;
}
/**
* Replaces this thread's retired list by copying the given list into it.
* The copy throws an ErrorException if the target capacity is too small
* (see FixedSizeList::operator=).
*/
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::
SetRetired(internal::FixedSizeList< GuardType > const & retired_list) {
this->retired_list = retired_list;
}
threadLocalRetiredListsTemp = static_cast<GuardType*>
(embb::base::Allocation::Allocate(
sizeof(GuardType) *
guardsPerThread * accessorCount * accessorCount
));
for (unsigned int i = 0; i !=
guardsPerThread * accessorCount * accessorCount; ++i) {
//in-place new for each cell
new (&threadLocalRetiredListsTemp[i]) GuardType;
}
template< typename GuardType >
HazardPointerThreadEntry<GuardType>::
HazardPointerThreadEntry(GuardType undefined_guard, int guards_per_thread,
size_t max_size_retired_list) :
#ifdef EMBB_DEBUG
who_is_scanning(-1),
#endif
undefined_guard(undefined_guard),
guards_per_thread(guards_per_thread),
max_size_retired_list(max_size_retired_list),
// initially, each potential thread is active... if that is not the case
// another thread could call "HelpScan", and block this thread in making
// progress.
// Still, threads can leave the hazard pointer processing (deactivation),
// but this can only be done once, i.e., this is not reversible...
is_active(1),
retired_list(max_size_retired_list),
retired_list_temp(max_size_retired_list),
hazard_pointer_list_temp(embb::base::Thread::GetThreadsMaxCount() *
guards_per_thread) {
// Initialize guarded pointer list
guarded_pointers = static_cast<embb::base::Atomic<GuardType>*>
threadLocalRetiredLists = static_cast<GuardType*>
(embb::base::Allocation::Allocate(
sizeof(embb::base::Atomic<GuardType>)*guards_per_thread));
sizeof(GuardType) *
guardsPerThread * accessorCount * accessorCount
));
for (unsigned int i = 0; i !=
guardsPerThread * accessorCount * accessorCount; ++i) {
//in-place new for each cell
new (&threadLocalRetiredLists[i]) GuardType;
}
for (int i = 0; i != guards_per_thread; ++i) {
new (static_cast<void*>(&guarded_pointers[i]))
embb::base::Atomic<GuardType>(undefined_guard);
// init guards and retired lists to the undefined guard
for (unsigned int i = 0; i != static_cast<unsigned int>(guardsPerThread);
++i) {
for (unsigned int i2 = 0; i2 != accessorCount; ++i2) {
guards[i*accessorCount + i2] = undefinedGuard;
}
}
}
template< typename GuardType >
HazardPointerThreadEntry<GuardType>::~HazardPointerThreadEntry() {
for (int i = 0; i != guards_per_thread; ++i) {
guarded_pointers[i].~Atomic();
for (unsigned int j = 0; j != accessorCount; ++j) {
for (unsigned int i = 0; i != guardsPerThread*accessorCount; ++i) {
threadLocalRetiredListsTemp
[j*(accessorCount*guardsPerThread) + i] =
undefinedGuard;
threadLocalRetiredLists
[j*(accessorCount*guardsPerThread) + i] =
undefinedGuard;
}
}
embb::base::Allocation::Free(guarded_pointers);
for (unsigned int i = 0; i != accessorCount; ++i) {
//in-place new for each cell
threadIdMapping[i] = -1;
}
}
/**
* Returns the guard value currently stored at position \c pos of this
* entry's guard array.
*/
template< typename GuardType >
GuardType HazardPointerThreadEntry<GuardType>::GetGuard(int pos) const {
return guarded_pointers[pos];
}
HazardPointer< GuardType >::~HazardPointer() {
/**
* Appends a pointer to this thread's retired list for deferred release.
*
* NOTE(review): the PushBack return value is ignored; if the retired list
* were ever full, the pointer would be silently dropped (leaked). The list
* capacity is sized via GetRetiredListMaxSize() as a safe upper bound --
* confirm that this invariant always holds.
*/
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::AddRetired(GuardType pointerToGuard) {
retired_list.PushBack(pointerToGuard);
}
// Release references from all retired lists. Note that for this to work, the
// data structure using hazard pointer has still to be active... So first, the
// hazard pointer class shall be destructed, then the memory management class
// (e.g. some pool). Otherwise, the hazard pointer class would try to return
// memory to an already destructed memory manager.
for (unsigned int j = 0; j != accessorCount; ++j) {
for (unsigned int i = 0; i != accessorCount*guardsPerThread; ++i) {
GuardType pointerToFree =
threadLocalRetiredLists
[j * accessorCount * guardsPerThread + i];
if (pointerToFree == undefinedGuard) {
break;
}
freeGuardCallback(pointerToFree);
}
}
/**
* Stores \c pointerToGuard in guard slot \c guardNumber, announcing that
* the owning thread is about to access the pointed-to object.
*/
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::
GuardPointer(int guardNumber, GuardType pointerToGuard) {
guarded_pointers[guardNumber] = pointerToGuard;
}
for (unsigned int i = 0; i != accessorCount; ++i) {
threadIdMapping[i].~Atomic();
}
/**
* Sets the active flag of this entry (\c true: participates in hazard
* pointer management, \c false: inactive).
*/
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::SetActive(bool active) {
is_active = active;
}
embb::base::Allocation::Free(threadIdMapping);
template< typename GuardType >
unsigned int HazardPointer< GuardType >::GetCurrentThreadIndex() {
unsigned int thread_index;
int return_val = embb_internal_thread_index(&thread_index);
for (unsigned int i = 0; i != guardsPerThread * accessorCount; ++i) {
guards[i].~Atomic();
}
embb::base::Allocation::Free(guards);
for (unsigned int i = 0; i !=
guardsPerThread * accessorCount * accessorCount; ++i) {
threadLocalRetiredListsTemp[i].~GuardType();
}
embb::base::Allocation::Free(threadLocalRetiredListsTemp);
if (return_val != EMBB_SUCCESS)
EMBB_THROW(embb::base::ErrorException, "Could not get thread id!");
for (unsigned int i = 0; i !=
guardsPerThread * accessorCount * accessorCount; ++i) {
threadLocalRetiredLists[i].~GuardType();
}
return thread_index;
embb::base::Allocation::Free(threadLocalRetiredLists);
}
template< typename GuardType >
bool HazardPointer< GuardType >::IsThresholdExceeded() {
double retiredCounterLocThread =
static_cast<double>(GetHazardPointerElementForCurrentThread().
GetRetiredCounter());
return (retiredCounterLocThread >=
RETIRE_THRESHOLD *
static_cast<double>(active_hazard_pointer)*
static_cast<double>(guards_per_thread));
void HazardPointer< GuardType >::Guard(int guardPosition,
GuardType guardedElement) {
const unsigned int myThreadId = GetCurrentThreadIndex();
// check invariants...
assert(guardPosition < guardsPerThread && myThreadId < accessorCount);
// set guard
guards[guardPosition*accessorCount + myThreadId] = guardedElement;
}
template< typename GuardType >
size_t HazardPointer< GuardType >::GetActiveHazardPointers() {
return active_hazard_pointer;
size_t HazardPointer< GuardType >::ComputeMaximumRetiredObjectCount(
size_t guardsPerThread, int accessors) {
unsigned int accessorCount = (accessors == -1 ?
embb::base::Thread::GetThreadsMaxCount() :
accessors);
return static_cast<size_t>(
guardsPerThread * accessorCount * accessorCount);
}
template< typename GuardType >
typename HazardPointer< GuardType >::HazardPointerThreadEntry_t &
HazardPointer< GuardType >::GetHazardPointerElementForCurrentThread() {
// For each thread, there is a slot in the hazard pointer array.
// Initially, the active flag of a hazard pointer entry is false.
// Only the respective thread changes the flag from true to false.
// This means that the current thread tells that he is about to
// stop operating, and the others are responsible for his retired
// list.
return hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
void HazardPointer< GuardType >::CopyRetiredList(GuardType* sourceList,
GuardType* targetList, unsigned int retiredListSize,
GuardType undefinedGuard) {
bool done = false;
for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
if (!done) {
GuardType guardToCopy = sourceList[ii];
if (guardToCopy == undefinedGuard) {
done = true;
if (targetList[ii] == undefinedGuard) {
// end of target list
break;
}
}
targetList[ii] = guardToCopy;
}
else {
// we copied the whole source list, remaining values in the target
// have to be zeroed.
if (targetList[ii] == undefinedGuard) {
// end of target list
break;
}
else {
targetList[ii] = undefinedGuard;
}
}
}
}
template< typename GuardType >
void HazardPointer< GuardType >::HelpScan() {
// This is a little bit different than in the paper. In the paper,
// the retired nodes from other threads are added to our retired list.
// To be able to give a bound on memory consumption, we execute scan
// for those threads, without moving elements. The effect shall be
// the same.
void HazardPointer< GuardType >::UpdateRetiredList(GuardType* retiredList,
GuardType* updatedRetiredList, unsigned int retiredListSize,
GuardType guardedElement, GuardType consideredHazard,
GuardType undefinedGuard) {
for (size_t i = 0; i != hazard_pointers; ++i) {
// Try to find non active lists...
if (!hazard_pointer_thread_entry_array[i].IsActive() &&
hazard_pointer_thread_entry_array[i].TryReserve()) {
// Here: grab retired things, first check if there are any...
if (hazard_pointer_thread_entry_array[i].GetRetiredCounter() > 0) {
Scan(&hazard_pointer_thread_entry_array[i]);
}
// no hazard set here
if (consideredHazard == undefinedGuard)
return;
// We are done, mark it as deactivated again
hazard_pointer_thread_entry_array[i].Deactivate();
// if this hazard is currently in the union of
// threadLocalRetiredLists and pointerToRetire, but not yet in
// threadLocalRetiredListsTemp, add it to that list
bool containedInUnion = false;
// first iterate over our retired list
for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
// when reaching 0, we can stop iterating (end of the "list")
if (retiredList[ii] == 0)
break;
// the hazard is contained in the retired list... it shall go
// into the temp list, if not already there
if (retiredList[ii] == consideredHazard) {
containedInUnion = true;
break;
}
}
}
template< typename GuardType >
void HazardPointer< GuardType >::
Scan(HazardPointerThreadEntry_t* currentHazardPointerEntry) {
#ifdef EMBB_DEBUG
// scan should only be executed by one thread at a time, otherwise we have
// a bug... this assertions checks that
int expected = -1;
if (!currentHazardPointerEntry->GetScanningThread().CompareAndSwap(
expected, static_cast<int>(GetCurrentThreadIndex()))) {
assert(false);
// the union also contains pointerToRetire
if (!containedInUnion) {
containedInUnion = (consideredHazard == guardedElement);
}
#endif
// In this function, we compute the intersection between local retired
// pointers and all hazard pointers. This intersection cannot be deleted and
// forms the new local retired pointers list.
// It is assumed that the union of all retired pointers contains no two
// pointers with the same value. However, the union of all hazard guards
// might.
// Here, we store the temporary hazard pointers. We have to store them,
// as iterating multiple time over them might be expensive, as this
// atomic array is shared between threads.
currentHazardPointerEntry->GetHazardTemp().clear();
// add the pointer to temp. retired list, if not already there
if (containedInUnion) {
for (unsigned int iii = 0; iii != retiredListSize; ++iii) {
// is it already there?
if (updatedRetiredList[iii] == consideredHazard)
break;
// Get all active hazard pointers!
for (unsigned int i = 0; i != hazard_pointers; ++i) {
// Only consider guards of active threads
if (hazard_pointer_thread_entry_array[i].IsActive()) {
// For each guard in an hazard pointer entry
for (int pos = 0; pos != guards_per_thread; ++pos) {
GuardType guard = hazard_pointer_thread_entry_array[i].GetGuard(pos);
// end of the list
if (updatedRetiredList[iii] == undefinedGuard) {
// UndefinedGuard means not guarded
if (guard == undefined_guard)
continue;
// add hazard
updatedRetiredList[iii] = consideredHazard;
currentHazardPointerEntry->GetHazardTemp().PushBack(guard);
// we are done here...
break;
}
}
}
}
currentHazardPointerEntry->GetRetiredTemp().clear();
template< typename GuardType >
void HazardPointer< GuardType >::EnqueueForDeletion(GuardType toRetire) {
// Sort them, we will do a binary search on each entry from the retired list
std::sort(
currentHazardPointerEntry->GetHazardTemp().begin(),
currentHazardPointerEntry->GetHazardTemp().end());
unsigned int myThreadId = GetCurrentThreadIndex();
for (
EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME FixedSizeList< GuardType >::iterator
it = currentHazardPointerEntry->GetRetired().begin();
it != currentHazardPointerEntry->GetRetired().end(); ++it) {
if (false == ::std::binary_search(
currentHazardPointerEntry->GetHazardTemp().begin(),
currentHazardPointerEntry->GetHazardTemp().end(), *it)) {
this->free_guard_callback(*it);
} else {
currentHazardPointerEntry->GetRetiredTemp().PushBack(*it);
}
}
currentHazardPointerEntry->SetRetired(
currentHazardPointerEntry->GetRetiredTemp());
// check for invariant
assert(myThreadId < accessorCount);
#ifdef EMBB_DEBUG
currentHazardPointerEntry->GetScanningThread().Store(-1);
#endif
}
unsigned int retiredListSize = accessorCount * guardsPerThread;
// Safe upper bound for the retired-list capacity: threshold factor times
// maximum thread count times guards per thread, truncated to an integer,
// plus one spare slot.
template< typename GuardType >
size_t HazardPointer< GuardType >::GetRetiredListMaxSize() const {
  const double max_threads =
      static_cast<double>(embb::base::Thread::GetThreadsMaxCount());
  const double guard_count = static_cast<double>(guards_per_thread);
  const double bound = RETIRE_THRESHOLD * max_threads * guard_count;
  return static_cast<size_t>(bound) + 1;
}
GuardType* retiredList =
&threadLocalRetiredLists[myThreadId * retiredListSize];
template< typename GuardType >
HazardPointer< GuardType >::HazardPointer(
embb::base::Function<void, GuardType> free_guard_callback,
GuardType undefined_guard, int guards_per_thread) :
undefined_guard(undefined_guard),
guards_per_thread(guards_per_thread),
//initially, all potential hazard pointers are active...
active_hazard_pointer(embb::base::Thread::GetThreadsMaxCount()),
free_guard_callback(free_guard_callback) {
hazard_pointers = embb::base::Thread::GetThreadsMaxCount();
hazard_pointer_thread_entry_array = static_cast<HazardPointerThreadEntry_t*>(
embb::base::Allocation::Allocate(sizeof(HazardPointerThreadEntry_t) *
hazard_pointers));
for (size_t i = 0; i != hazard_pointers; ++i) {
new (static_cast<void*>(&(hazard_pointer_thread_entry_array[i])))
HazardPointerThreadEntry_t(undefined_guard, guards_per_thread,
GetRetiredListMaxSize());
GuardType* retiredListTemp =
&threadLocalRetiredListsTemp[myThreadId * retiredListSize];
// wipe my temp. retired list...
for (unsigned int i = 0; i < retiredListSize; ++i) {
// the list is filled always from left to right, so occurring the first
// undefinedGuard, the remaining ones are also undefinedGuard...
if (retiredListTemp[i] == undefinedGuard)
break;
retiredListTemp[i] = undefinedGuard;
}
}
template< typename GuardType >
HazardPointer< GuardType >::~HazardPointer() {
for (size_t i = 0; i != hazard_pointers; ++i) {
hazard_pointer_thread_entry_array[i].~HazardPointerThreadEntry_t();
// we test each hazard if it is in the union of retiredList and
// guardedElement. If it is, it goes into the new retired list...
for (unsigned int i = 0; i != accessorCount*guardsPerThread; ++i) {
// consider each current active guard
GuardType consideredHazard = guards[i].Load();
UpdateRetiredList(retiredList, retiredListTemp, retiredListSize,
toRetire, consideredHazard, undefinedGuard);
}
embb::base::Allocation::Free(static_cast < void* >
(hazard_pointer_thread_entry_array));
}
// now we created a a new retired list... the elements that are "removed" from
// the old retired list can be safely deleted now...
for (int ii = -1; ii != static_cast<int>(retiredListSize); ++ii) {
// we iterate over the current retired list... -1 is used as dummy element
// in the iteration, to also iterate over the pointerToRetire, which is
// logically also part of the current retired list...
template< typename GuardType >
void HazardPointer< GuardType >::DeactivateCurrentThread() {
HazardPointerThreadEntry_t* current_thread_entry =
&hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
// end of the list, stop iterating
if (ii >= 0 && retiredList[ii] == undefinedGuard)
break;
// Deactivating a non-active hazard pointer entry has no effect!
if (!current_thread_entry->IsActive()) {
return;
} else {
current_thread_entry->SetActive(false);
active_hazard_pointer--;
}
}
GuardType toCheckIfInNewList = undefinedGuard;
/**
* Sets the guard at \c guardPosition for the calling thread by delegating
* to the thread's hazard pointer entry.
*/
template< typename GuardType >
void HazardPointer< GuardType >::GuardPointer(int guardPosition,
GuardType guardedElement) {
GetHazardPointerElementForCurrentThread().GuardPointer(
guardPosition, guardedElement);
}
toCheckIfInNewList = (ii == -1 ? toRetire : retiredList[ii]);
template< typename GuardType >
void HazardPointer< GuardType >::EnqueuePointerForDeletion(
GuardType guardedElement) {
GetHazardPointerElementForCurrentThread().AddRetired(guardedElement);
if (IsThresholdExceeded()) {
HazardPointerThreadEntry_t* currentHazardPointerEntry =
&GetHazardPointerElementForCurrentThread();
// still in the new retired list?
bool stillInList = false;
for (unsigned int iii = 0; iii != retiredListSize; ++iii) {
// end of list
if (retiredListTemp[iii] == undefinedGuard)
break;
Scan(currentHazardPointerEntry);
if (toCheckIfInNewList == retiredListTemp[iii]) {
// still in list, cannot delete!
stillInList = true;
break;
}
}
// Help deactivated threads to clean their retired nodes.
HelpScan();
if (!stillInList) {
this->freeGuardCallback(toCheckIfInNewList);
}
}
// copy the updated retired list (temp) to the retired list...
CopyRetiredList(retiredListTemp, retiredList, retiredListSize,
undefinedGuard);
}
/**
* Threshold factor: a thread's retired list triggers a Scan when its size
* reaches RETIRE_THRESHOLD * active_hazard_pointer * guards_per_thread
* (see IsThresholdExceeded).
*
* Use a double literal for this double constant; the former float literal
* (1.25f) denoted the same exact binary value, so behavior is unchanged.
*/
template<typename GuardType>
const double embb::containers::internal::HazardPointer<GuardType>::
RETIRE_THRESHOLD = 1.25;
} // namespace internal
} // namespace containers
} // namespace embb
......
......@@ -40,451 +40,217 @@
#define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename
#endif
// forward declaration for white-box test, used in friend declaration of
// HazardPointer class.
namespace embb {
namespace containers{
namespace test {
class HazardPointerTest2;
}
}
}
namespace embb {
namespace containers {
namespace internal {
/**
* A list with fixed size, implemented as an array. Replaces std::vector that
* was used in the previous hazard pointer implementation.
*
* Provides iterators, so we can apply algorithms from the STL.
*
* \tparam ElementT Type of the elements contained in the list.
*/
template< typename ElementT >
class FixedSizeList {
private:
/**
* Capacity of the list (fixed at construction time)
*/
size_t max_size;
/**
* Current size of the list (number of stored elements)
*/
size_t size;
/**
* Pointer to the array containing the list
*/
ElementT* elementsArray;
/**
* Copy constructor not implemented. Would require dynamic memory allocation.
*/
FixedSizeList(
const FixedSizeList &
/**< [IN] Other list */);
public:
/**
* Definition of an iterator
*/
typedef ElementT * iterator;
/**
* Definition of a const iterator
*/
typedef const ElementT * const_iterator;
/**
* Constructor, initializes list with given capacity
*/
FixedSizeList(
size_t max_size
/**< [IN] Capacity of the list */);
/**
* Gets the current size of the list
*
* \return Size of the list
*/
inline size_t GetSize() const;
/**
* Gets the capacity of the list
*
* \return The capacity of the list
*/
inline size_t GetMaxSize() const;
/**
* Removes all elements from the list without changing the capacity.
* Element destructors are not invoked.
*/
inline void clear();
/**
* Iterator pointing to the first element
*
* \return Begin iterator
*/
iterator begin() const;
/**
* Iterator pointing beyond the last element
*
* \return End iterator
*/
iterator end() const;
/**
* Copies the elements of another list to this list. The capacity of
* this list has to be greater than or equal to the size of the other list,
* otherwise an ErrorException is thrown.
*/
FixedSizeList & operator=(
const FixedSizeList & other
/**< [IN] Other list */);
/**
* Appends an element to the end of the list
*
* \return \c false if the operation was not successful because the list is
* full, otherwise \c true.
*/
bool PushBack(
ElementT const el
/**< [IN] Element to append to the list */);
/**
* Destructs the list. Frees the array storage; element destructors are
* not invoked.
*/
~FixedSizeList();
};
/**
* Hazard pointer entry for a single thread. Holds the actual guards that
* determine if the current thread is about to use the guarded pointer.
* Guarded pointers are protected and not deleted.
*
* Moreover, the retired list for this thread is contained. It determines
* the pointers that have been allocated from this thread, but are not used
* anymore by this thread. However, another thread could have a guard on it,
* so the pointer cannot be deleted immediately.
*
* For the scan operation, the intersection of the guarded pointers from all
* threads and the retired list has to be computed. For this computation, we
* need thread local temporary lists which are also contained here.
*
* \tparam GuardType The type of guard, usually a pointer.
*/
template< typename GuardType >
class HazardPointerThreadEntry {
#ifdef EMBB_DEBUG
public:
embb::base::Atomic<int>& GetScanningThread() {
return who_is_scanning;
}
private:
/**
* Debug-only: index of the thread currently running Scan on this entry,
* or -1 if none. Used to assert that Scan is not executed concurrently.
*/
embb::base::Atomic<int> who_is_scanning;
#endif
private:
/**
* Value of the undefined guard (means that no guard is set).
*/
GuardType undefined_guard;
/**
* The number of guards per thread. Determines the size of the guard array.
*/
int guards_per_thread;
/**
* The capacity of the retired list. It is determined by number of guards,
* retired threshold, and maximum number of threads.
*/
size_t max_size_retired_list;
/**
* Set to true if the current thread is active. Is used for a thread to
* signal that it is leaving. If a thread has left, the other threads are
* responsible for cleaning up its retired list.
*/
embb::base::Atomic< bool > is_active;
/**
* The guarded pointers of this thread, an array of size
* \c guards_per_thread.
*/
embb::base::Atomic< GuardType >* guarded_pointers;
/**
* The retired list of this thread, contains pointers that shall be released
* when no thread holds a guard on them anymore.
*/
FixedSizeList< GuardType > retired_list;
/**
* Temporary retired list, has same capacity as \c retired_list. It is used
* to compute the intersection of all guards and the \c retired_list.
*/
FixedSizeList< GuardType > retired_list_temp;
/**
* Temporary guards list. Used to compute the intersection of all guards and
* the \c retired_list.
*/
FixedSizeList< GuardType > hazard_pointer_list_temp;
/**
* HazardPointerThreadEntry shall not be copied
*/
HazardPointerThreadEntry(const HazardPointerThreadEntry&);
/**
* HazardPointerThreadEntry shall not be assigned
*/
HazardPointerThreadEntry & operator= (const HazardPointerThreadEntry&);
public:
/**
* Checks if current thread is active (with respect to participating in
* hazard pointer management)
*
* \return \c true if the current thread is active, otherwise \c false.
*/
bool IsActive();
/**
* Tries to atomically set the active flag from false to true. Used by
* another thread as a lock, to reserve an inactive entry before helping
* to clean up its hazard pointers.
*
* \return \c true if this thread was successful setting the active flag,
* otherwise \c false.
*/
bool TryReserve();
/**
* Deactivates current thread by atomically setting active flag to false.
*/
void Deactivate();
/**
* Gets the count of current retired pointer for the current thread.
*
* \return Count of current retired pointer
*/
size_t GetRetiredCounter();
/**
* Gets the retired list.
*
* \return Reference to \c retired_list
*/
FixedSizeList< GuardType >& GetRetired();
/**
* Gets the temporary retired list.
*
* \return Reference to \c retired_list_temp
*/
FixedSizeList< GuardType >& GetRetiredTemp();
/**
* Gets the temporary hazard pointer list.
*
* \return Reference to \c hazard_pointer_list_temp
*/
FixedSizeList< GuardType >& GetHazardTemp();
/**
* Sets the retired list (copies the given list into \c retired_list).
*/
void SetRetired(
embb::containers::internal::FixedSizeList< GuardType > const & retired_list
/**< [IN] Retired list */);
/**
* Constructor
*/
HazardPointerThreadEntry(
GuardType undefined_guard,
/**< [IN] Value of the undefined guard (e.g. NULL) */
int guards_per_thread,
/**< [IN] Number of guards per thread */
size_t max_size_retired_list
/**< [IN] The capacity of the retired list(s) */);
/**
* Destructor
*
* Deallocate lists
*/
~HazardPointerThreadEntry();
/**
* Gets the guard at the specified position.
* Positions are numbered, beginning with 0.
*/
GuardType GetGuard(
int pos
/**< [IN] Position of the guard */) const;
/**
* Adds pointer to the retired list
*/
void AddRetired(
GuardType pointerToGuard
/**< [IN] Guard to retire */);
/**
* Guards pointer
*/
void GuardPointer(
int guardNumber,
/**< [IN] Position of guard */
GuardType pointerToGuard
/**<[IN] Pointer to guard */);
/**
* Sets the current thread active, i.e., announce that the thread
* participates in managing hazard pointer.
*/
void SetActive(
bool active
/**<[IN] \c true for active, \c false for inactive */);
};
/**
* HazardPointer implementation as presented in:
* This class contains a hazard pointer implementation following publication:
*
* Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free
* objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004)
* : 491-504.
*
* In contrast to the original implementation, our implementation only uses
* fixed-size memory. There is a safe upper limit, hazard pointer are guaranteed
* to not consume more memory. Memory is allocated solely at initialization.
*
* Hazard pointers solve the ABA problem for lock-free algorithms. Before
* accessing a pointer, threads announce that they want to access this pointer
* and then check if the pointer is still valid. This announcement is done by
* placing a guard. It is guaranteed that the pointer is not reused until all
* threads remove their guards to this pointer. Objects, these pointers are
* pointing to, can therefore not be deleted directly. Instead, these pointers
* are put into a list for later deletion (retired list). Regularly, this list
* is processed to check which pointers can be deleted. If a pointer can be
* deleted, a callback function provided by the user is called. The user can
* then, e.g., free the respective object, so that the pointer can be safely
* reused.
* Hazard pointer are a wait-free memory reclamation scheme for lock-free
* algorithms. Loosely speaking, they act as garbage collector. The release of
* objects contained within the memory, managed by the hazard pointer class, is
* intercepted and possibly delayed to avoid concurrency bugs.
*
* Before accessing an object, threads announce their intention to do so (i.e.
* the intention to dereference the respective pointer) to the hazard pointer
* class. This is called guarding. From now on, the hazard pointer class will
* prohibit the release or reuse of the guarded object. This is necessary, to
* assure that the object is not released or reused while it is accessed and to
* assure that it has not unnoticed changed (effectively avoiding the ABA
* problem).
*
* Note that after guarding an object, a consecutive check that the object (i.e.
* its pointer) is still valid is necessary; the object release could already
* have been started when guarding the object. Guarding is repeated, until this
* check eventually succeeds. Note that this "guard-and-check" loop makes the
* usage of the hazard pointer class lock-free, even though its implementation
* is wait-free.
*
* Internally, guarding is realized by providing each thread slots, where
* pointers can be placed that should not be freed (so called guards). When
* trying to release an object, it is checked if the object's pointer is
* guarded, and if so this object is not released, but instead put into a
* retired list for later release, when all guards for this object have been
* removed.
*
* In contrast to the original implementation, our implementation consumes only
* fixed-size memory. Note that the number of threads accessing the hazard
* pointer object contributes quadratically to the memory consumption: managed
* objects are provided from outside, and the number of accessors determines,
* quadratically, the minimum count of those objects.
*
* Also in contrast to the original implementation, we do not provide a HelpScan
* functionality, which would give threads the possibility to stop participating
* in the garbage collection: other threads would then help to clean up the
* objects protected by the exiting thread. The reason is that the only use case
* would be a crashing thread that stops participating, but since a thread has
* to signal its exit itself, this cannot be realized anyway. In the end, it is
* still guaranteed that all memory is properly returned (in the
* destructor).
*
* Additionally, the original implementation holds a threshold, which determines
* when objects shall be freed. In this implementation, we free whenever it is
* possibly to do so, as we want to keep the memory footprint as low as
* possible. We also don't see a performance drop in the current algorithms that
* are using hazard pointer, when not using a threshold.
*
* \tparam GuardType the type of the guards. Usually the pointer type of some
* object to protect.
*/
template< typename GuardType >
class HazardPointer {
private:
/**
* Concrete hazard pointer entry type
*/
typedef HazardPointerThreadEntry < GuardType >
HazardPointerThreadEntry_t;
/**
* The guard value denoting "not guarding"
* HazardPointerTest2 is a white-box test, needing access to private members
* of this class. So declaring it as friend.
*/
GuardType undefined_guard;
friend class embb::containers::test::HazardPointerTest2;
/**
* The capacity of the retired list (safe upper bound for retired list size)
* The hazard pointer guards, represented as array. Each thread has a fixed
* set of slots (guardsPerThread) within this array.
*/
int retired_list_max_size;
embb::base::Atomic<GuardType>* guards;
/**
* Guards that can be set per thread
* \see threadLocalRetiredLists documentation
*/
int guards_per_thread;
GuardType* threadLocalRetiredListsTemp;
/**
* Array of HazardPointerElements. Each thread is assigned to one.
* A lists of lists, represented as single array. Each thread maintains a
* list of retired pointers, that are objects that are logically released
* but not released because some thread placed a guard on it.
*/
HazardPointerThreadEntry_t* hazard_pointer_thread_entry_array;
GuardType* threadLocalRetiredLists;
/**
* The threshold, determines at which size of the retired list pointers
* are tried to be deleted.
* This number determines the amount of maximal accessors (threads) that
* will access this hazard pointer instance. Note that a thread once
 * accessing this object will be permanently counted as an accessor, even if not
* participating anymore. If too many threads access this object, an
* assertion is thrown.
*/
static const double RETIRE_THRESHOLD;
unsigned int accessorCount;
/**
* Each thread is assigned a thread index (starting with 0).
* Get the index of the current thread.
* The guard value denoting "not guarded"
*/
static unsigned int GetCurrentThreadIndex();
GuardType undefinedGuard;
/**
* The number of hazard pointers currently active.
* The count of guards that can be set per thread.
*/
size_t active_hazard_pointer;
int guardsPerThread;
/**
* Count of all hazard pointers.
*/
size_t hazard_pointers;
/**
* The callback that is triggered when a retired guard can be
* freed. Usually, the user will call a free here.
*/
embb::base::Function<void, GuardType> free_guard_callback;
/**
* Checks if the current size of the retired list exceeds the threshold, so
* that each retired guard is checked for being not hazardous anymore.
*
* \return \c true is threshold is exceeded, otherwise \c false.
* The functor that is called to release an object. This is called by this
* class, when it is safe to do so, i.e., no thread accesses this object
* anymore.
*/
bool IsThresholdExceeded();
embb::base::Function<void, GuardType> freeGuardCallback;
/**
* Gets the number of hazard pointe, currently active
*
* \return Number of active hazard pointers
 * Mapping from EMBB thread id to internal thread ids. Internal thread ids
 * are in range [0;accessor_count-1]. The position of an EMBB thread id in
 * that array determines the respective internal thread id.
*/
size_t GetActiveHazardPointers();
embb::base::Atomic<int>* threadIdMapping;
/**
* Gets the hazard pointer entry for the current thread
* Each thread is assigned a thread index (starting with 0). Get the index of
* the current thread. Note that this is not the global index, but an internal
* one. The user is free to define less accessors than the amount of default
* threads. This is useful, as the number of accessors accounts quadratic for
* the memory consumption, so the user should have the possibility to avoid
* memory wastage, when only having a small, fixed size, number of accessors.
*
* \return Hazard pointer entry for current thread
* @return current thread index
*/
HazardPointerThreadEntry_t&
GetHazardPointerElementForCurrentThread();
unsigned int GetCurrentThreadIndex();
/**
* Threads might leave from participating in hazard pointer management.
* This method helps all those threads processing their retired list.
* Copy retired list \c sourceList to retired list \c targetList
*/
void HelpScan();
static void CopyRetiredList(GuardType* sourceList,
/**<[IN] the source retired list*/
GuardType* targetList,
/**<[IN] the target retired list*/
unsigned int singleRetiredListSize,
/**<[IN] the size of a thread local retired list*/
GuardType undefinedGuard
/**<[IN] the undefined guard (usually the NULL pointer)*/
);
/**
* Checks the retired list of a hazard pointer entry for elements of the
* retired list that can be freed, and executes the delete callback for those
* elements.
*/
void Scan(
HazardPointerThreadEntry_t* currentHazardPointerEntry
/**<[IN] Hazard pointer entry that should be checked for elements that
can be deleted*/);
static void UpdateRetiredList(
GuardType* retiredList,
/**<[IN] the old retired list*/
GuardType* updatedRetiredList,
/**<[IN] the updated retired list*/
unsigned int retiredListSize,
/**<[IN] the size of a thread local retired list*/
GuardType toRetire,
/**<[IN] the element to retire*/
GuardType consideredHazard,
/**<[IN] the currently considered hazard*/
GuardType undefinedGuard
/**<[IN] the undefined guard (usually the NULL pointer)*/
);
public:
/**
* Gets the capacity of one retired list
* The user of the hazard pointer class has to provide the memory that is
* managed here. The user has to take into account, that releasing of memory
* might be delayed. He has therefore to provide more memory than he wants to
* guarantee at each point in time. More specific, on top of the guaranteed
* count of objects, he has to provide the additional count of objects that
* can be (worst-case) contained in the retired lists and therefore are not
* released yet. The size of all retired lists is guardsPerThread *
* accessorCount * accessorCount, which is computed using this function. So
 * the result of this function denotes to the user how many objects he has to
* allocate additionally to the guaranteed count.
*
* \waitfree
*/
size_t GetRetiredListMaxSize() const;
static size_t ComputeMaximumRetiredObjectCount(
size_t guardsPerThread,
/**<[IN] the count of guards per thread*/
int accessors = -1
/**<[IN] Number of accessors. Determines, how many threads will access
the hazard pointer object. Default value -1 will allow the
maximum amount of threads as defined with
\c embb::base::Thread::GetThreadsMaxCount()*/
);
/**
* Initializes hazard pointer
* Initializes the hazard pointer object
*
* \notthreadsafe
*
* \memory
* - Let \c t be the number of maximal threads determined by EMBB
* - Let \c g be the number of guards per thread
* - Let \c x be 1.25*t*g + 1
* \memory We dynamically allocate the following:
*
* (sizeof(Atomic<int>) * accessorCount) + (sizeof(Atomic<GuardType>) *
* guards_per_thread * accessorCount) + (2*sizeof(GuardType) *
* guards_per_thread * accessorCount^2)
*
* We dynamically allocate \c x*(3*t+1) elements of size \c sizeof(void*).
* The last addend is the dominant one, as accessorCount accounts
* quadratically for it.
*/
HazardPointer(
embb::base::Function<void, GuardType> free_guard_callback,
......@@ -492,35 +258,48 @@ class HazardPointer {
guard can be deleted */
GuardType undefined_guard,
/**<[IN] The guard value denoting "not guarded"*/
int guards_per_thread
/**<[IN] Number of guards per thread*/);
int guards_per_thread,
/**<[IN] Number of guards per thread*/
int accessors = -1
/**<[IN] Number of accessors. Determines, how many threads will access
this hazard pointer object. Default value -1 will allow the
maximum amount of threads as defined with
\c embb::base::Thread::GetThreadsMaxCount()*/
);
/**
* Deallocates lists for hazard pointer management. Note that no objects
* currently in the retired lists are deleted. This is the responsibility
* of the user. Usually, HazardPointer manages pointers of an object pool.
* After destructing HazardPointer, the object pool is deleted, so that
* everything is properly cleaned up.
* Deallocates internal data structures. Additionally releases all objects
* currently held in the retired lists, using the release functor passed in
* the constructor.
*
* \notthreadsafe
*/
~HazardPointer();
/**
* Announces that the current thread stops participating in hazard pointer
* management. The other threads now take care of his retired list.
* Guards \c toGuard. If the guardedElement is passed to \c EnqueueForDeletion
* it is prevented from release from now on. The user must have a check, that
* EnqueueForDeletion has not been called on toGuard, before the guarding took
* effect.
*
* \waitfree
*/
void DeactivateCurrentThread();
void Guard(int guardPosition, GuardType toGuard);
/**
* Guards \c guardedElement with the guard at position \c guardPosition
* Enqueue a pointer for deletion. If not guarded, it is deleted immediately.
* If it is guarded, it is added to a thread local retired list, and deleted
* in a subsequent call to \c EnqueueForDeletion, when no guard is placed on
* it anymore.
*/
void GuardPointer(int guardPosition, GuardType guardedElement);
void EnqueueForDeletion(GuardType guardedElement);
/**
* Enqueue a pointer for deletion. It is added to the retired list and
* deleted when no thread accesses it anymore.
* Explicitly remove guard from thread local slot.
*
* \waitfree
*/
void EnqueuePointerForDeletion(GuardType guardedElement);
void RemoveGuard(int guardPosition);
};
} // namespace internal
} // namespace containers
......
......@@ -77,7 +77,12 @@ LockFreeMPMCQueue<Type, ValuePool>::~LockFreeMPMCQueue() {
template< typename Type, typename ValuePool >
LockFreeMPMCQueue<Type, ValuePool>::LockFreeMPMCQueue(size_t capacity) :
capacity(capacity),
capacity(capacity),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse. +1 for dummy node.
objectPool(
MPMCQueueNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(2) +
capacity + 1),
// Disable "this is used in base member initializer" warning.
// We explicitly want this.
#ifdef EMBB_PLATFORM_COMPILER_MSVC
......@@ -89,13 +94,7 @@ delete_pointer_callback(*this,
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
hazardPointer(delete_pointer_callback, NULL, 2),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse. +1 for dummy node.
objectPool(
hazardPointer.GetRetiredListMaxSize()*
embb::base::Thread::GetThreadsMaxCount() +
capacity + 1) {
hazardPointer(delete_pointer_callback, NULL, 2) {
// Allocate dummy node to reduce the number of special cases to consider.
internal::LockFreeMPMCQueueNode<Type>* dummyNode = objectPool.Allocate();
// Initially, head and tail point to the dummy node.
......@@ -120,7 +119,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryEnqueue(Type const& element) {
for (;;) {
my_tail = tail;
hazardPointer.GuardPointer(0, my_tail);
hazardPointer.Guard(0, my_tail);
// Check if pointer is still valid after guarding.
if (my_tail != tail) {
......@@ -163,12 +162,12 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
Type data;
for (;;) {
my_head = head;
hazardPointer.GuardPointer(0, my_head);
hazardPointer.Guard(0, my_head);
if (my_head != head) continue;
my_tail = tail;
my_next = my_head->GetNext();
hazardPointer.GuardPointer(1, my_next);
hazardPointer.Guard(1, my_next);
if (head != my_head) continue;
if (my_next == NULL)
......@@ -187,7 +186,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
break;
}
hazardPointer.EnqueuePointerForDeletion(my_head);
hazardPointer.EnqueueForDeletion(my_head);
element = data;
return true;
}
......
......@@ -81,13 +81,13 @@ capacity(capacity),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
hazardPointer(delete_pointer_callback, NULL, 1),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse:
objectPool(
hazardPointer.GetRetiredListMaxSize()*
embb::base::Thread::GetThreadsMaxCount() +
capacity) {
StackNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(1) +
capacity),
hazardPointer(delete_pointer_callback, NULL, 1)
{
}
template< typename Type, typename ValuePool >
......@@ -128,7 +128,7 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
return false;
// Guard top_cached
hazardPointer.GuardPointer(0, top_cached);
hazardPointer.Guard(0, top_cached);
// Check if top is still top. If this is the case, it has not been
// retired yet (because before retiring that thing, the retiring thread
......@@ -144,16 +144,16 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
break;
} else {
// We continue with the next and can unguard top_cached
hazardPointer.GuardPointer(0, NULL);
hazardPointer.Guard(0, NULL);
}
}
Type data = top_cached->GetElement();
// We don't need to read from this reference anymore, unguard it
hazardPointer.GuardPointer(0, NULL);
hazardPointer.Guard(0, NULL);
hazardPointer.EnqueuePointerForDeletion(top_cached);
hazardPointer.EnqueueForDeletion(top_cached);
element = data;
return true;
......
......@@ -113,8 +113,17 @@ class LockFreeMPMCQueue {
* least as many elements, maybe more.
*/
size_t capacity;
// Do not change the ordering of class local variables.
// Important for initialization.
/**
* The object pool, used for lock-free memory allocation.
*
* Warning: the objectPool has to be initialized before the hazardPointer
* object, to be sure that the hazardPointer object is destructed before the
* Pool as the hazardPointer object might return elements to the pool in its
* destructor. So the ordering of the members objectPool and hazardPointer is
* important here!
*/
ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
/**
* Callback to the method that is called by hazard pointers if a pointer is
......@@ -124,15 +133,17 @@ class LockFreeMPMCQueue {
delete_pointer_callback;
/**
* The hazard pointer object, used for memory management.
* Definition of the used hazard pointer type
*/
embb::containers::internal::HazardPointer
< internal::LockFreeMPMCQueueNode<Type>* > hazardPointer;
typedef embb::containers::internal::HazardPointer
< internal::LockFreeMPMCQueueNode<Type>* >
MPMCQueueNodeHazardPointer_t;
/**
* The object pool, used for lock-free memory allocation.
* The hazard pointer object, used for memory management.
*/
ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
MPMCQueueNodeHazardPointer_t hazardPointer;
/**
* Atomic pointer to the head node of the queue
......
......@@ -187,11 +187,6 @@ class LockFreeStack {
delete_pointer_callback;
/**
* The hazard pointer object, used for memory management.
*/
internal::HazardPointer<internal::LockFreeStackNode<Type>*> hazardPointer;
/**
* The callback function, used to cleanup non-hazardous pointers.
* \see delete_pointer_callback
*/
......@@ -199,10 +194,27 @@ class LockFreeStack {
/**
* The object pool, used for lock-free memory allocation.
*
* Warning: the objectPool has to be initialized before the hazardPointer
* object, to be sure that the hazardPointer object is destructed before the
* Pool as the hazardPointer object might return elements to the pool in its
* destructor. So the ordering of the members objectPool and hazardPointer is
* important here!
*/
ObjectPool< internal::LockFreeStackNode<Type>, ValuePool > objectPool;
/**
* Definition of the used hazard pointer type
*/
typedef internal::HazardPointer < internal::LockFreeStackNode<Type>* >
StackNodeHazardPointer_t;
/**
* The hazard pointer object, used for memory management.
*/
StackNodeHazardPointer_t hazardPointer;
/**
* Atomic pointer to the top node of the stack (element that is popped next)
*/
embb::base::Atomic<internal::LockFreeStackNode<Type>*> top;
......
......@@ -31,24 +31,73 @@
namespace embb {
namespace containers {
namespace test {
// Creates a pool with \c poolSize slots. Each slot consists of a payload
// integer (simplePoolObjects) and an atomic state cell (simplePool) that is
// initially marked as free.
IntObjectTestPool::IntObjectTestPool(unsigned int poolSize) :
poolSize(poolSize) {
  simplePoolObjects = static_cast<int*>(
    embb::base::Allocation::Allocate(sizeof(int) * poolSize));

  simplePool = static_cast<embb::base::Atomic<int>*>(
    embb::base::Allocation::Allocate(
      sizeof(embb::base::Atomic<int>) * poolSize));

  // In-place construct each atomic state cell, then mark the slot as free
  // and zero its payload. Cells are independent, so one pass suffices.
  for (unsigned int cell = 0; cell != poolSize; ++cell) {
    new (&simplePool[cell]) embb::base::Atomic<int>;
    simplePool[cell] = FREE_MARKER;
    simplePoolObjects[cell] = 0;
  }
}
// Releases both backing arrays. The atomic cells were constructed via
// placement new, so their destructors must be invoked explicitly before
// freeing the raw memory.
IntObjectTestPool::~IntObjectTestPool() {
  embb::base::Allocation::Free(simplePoolObjects);

  for (unsigned int i = 0; i != poolSize; ++i) {
    // explicitly call the destructor for each in-place constructed cell
    simplePool[i].~Atomic();
  }

  embb::base::Allocation::Free(simplePool);
}
// Scans the slots and atomically claims the first free one via
// compare-and-swap. Returns the payload address of the claimed slot, or
// NULL (0) when no free slot exists.
int* IntObjectTestPool::Allocate() {
  unsigned int slot = 0;
  while (slot != poolSize) {
    int expected = FREE_MARKER;
    bool claimed =
      simplePool[slot].CompareAndSwap(expected, ALLOCATED_MARKER);
    if (claimed) {
      return simplePoolObjects + slot;
    }
    ++slot;
  }
  return 0;
}
// Returns an object to the pool by marking its slot as free again. The slot
// index is recovered from the pointer's offset into the payload array, so
// \c objectPointer must have been obtained from Allocate().
void IntObjectTestPool::Release(int* objectPointer) {
  int slotIndex = static_cast<int>(objectPointer - simplePoolObjects);
  simplePool[slotIndex].Store(FREE_MARKER);
}
HazardPointerTest::HazardPointerTest() :
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4355)
#endif
delete_pointer_callback(*this, &HazardPointerTest::DeletePointerCallback),
deletePointerCallback(*this, &HazardPointerTest::DeletePointerCallback),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
object_pool(NULL),
objectPool(NULL),
stack(NULL),
hp(NULL),
n_threads(static_cast<int>
hazardPointer(NULL),
nThreads(static_cast<int>
(partest::TestSuite::GetDefaultNumThreads())) {
n_elements_per_thread = 100;
n_elements = n_threads*n_elements_per_thread;
nElementsPerThread = 100;
nElements = nThreads*nElementsPerThread;
embb::base::Function < void, embb::base::Atomic<int>* >
delete_pointer_callback(
deletePointerCallback(
*this,
&HazardPointerTest::DeletePointerCallback);
......@@ -59,39 +108,46 @@ n_threads(static_cast<int>
// placed, the pointer is not allowed to be deleted until the second thread
// removes this guard.
CreateUnit("HazardPointerTestThatGuardWorks").
Pre(&HazardPointerTest::HazardPointerTest1_Pre, this).
Pre(&HazardPointerTest::HazardPointerTest1Pre, this).
Add(
&HazardPointerTest::HazardPointerTest1_ThreadMethod,
this, static_cast<size_t>(n_threads)).
Post(&HazardPointerTest::HazardPointerTest1_Post, this);
&HazardPointerTest::HazardPointerTest1ThreadMethod,
this, static_cast<size_t>(nThreads)).
Post(&HazardPointerTest::HazardPointerTest1Post, this);
}
void HazardPointerTest::HazardPointerTest1_Pre() {
void HazardPointerTest::HazardPointerTest1Pre() {
embb_internal_thread_index_reset();
object_pool = new embb::containers::ObjectPool< embb::base::Atomic<int> >
(static_cast<size_t>(n_elements));
stack = new embb::containers::LockFreeStack< embb::base::Atomic<int>* >
(static_cast<size_t>(n_elements));
hp = new embb::containers::internal::HazardPointer< embb::base::Atomic<int>*>
(delete_pointer_callback,
NULL,
objectPool =
embb::base::Allocation::
New<embb::containers::ObjectPool< embb::base::Atomic<int> > >
(static_cast<size_t>(nElements));
stack = embb::base::Allocation::
New<embb::containers::LockFreeStack< embb::base::Atomic<int>* > >
(static_cast<size_t>(nElements));
hazardPointer = embb::base::Allocation::
New<embb::containers::internal::HazardPointer < embb::base::Atomic<int>* > >
(deletePointerCallback,
static_cast<embb::base::Atomic<int>*>(NULL),
1);
}
void HazardPointerTest::HazardPointerTest1_Post() {
delete object_pool;
delete stack;
delete hp;
void HazardPointerTest::HazardPointerTest1Post() {
embb::base::Allocation::Delete(hazardPointer);
embb::base::Allocation::Delete(objectPool);
embb::base::Allocation::Delete(stack);
}
void HazardPointerTest::HazardPointerTest1_ThreadMethod() {
void HazardPointerTest::HazardPointerTest1ThreadMethod() {
unsigned int thread_index;
embb_internal_thread_index(&thread_index);
for (int i = 0; i != n_elements_per_thread; ++i) {
embb::base::Atomic<int>* allocated_object = object_pool->Allocate(0);
for (int i = 0; i != nElementsPerThread; ++i) {
embb::base::Atomic<int>* allocated_object = objectPool->Allocate(0);
hp->GuardPointer(0, allocated_object);
hazardPointer->Guard(0, allocated_object);
bool success = stack->TryPush(allocated_object);
......@@ -120,36 +176,360 @@ void HazardPointerTest::HazardPointerTest1_ThreadMethod() {
}
PT_ASSERT(success_pop == true);
allocated_object->Store(1);
hp->EnqueuePointerForDeletion(allocated_object);
hazardPointer->EnqueueForDeletion(allocated_object);
if (!same) {
hp->GuardPointer(0, allocated_object_from_different_thread);
hazardPointer->Guard(0, allocated_object_from_different_thread);
// if this holds, we were successful in guarding... otherwise we
      // were too late, because the pointer has already been added
// to the retired list.
if (*allocated_object_from_different_thread == 0) {
// the pointer must not be deleted here!
vector_mutex.Lock();
vectorMutex.Lock();
for (std::vector< embb::base::Atomic<int>* >::iterator
it = deleted_vector.begin();
it != deleted_vector.end();
it = deletedVector.begin();
it != deletedVector.end();
++it) {
PT_ASSERT(*it != allocated_object_from_different_thread);
}
vector_mutex.Unlock();
vectorMutex.Unlock();
}
hp->GuardPointer(0, NULL);
hazardPointer->Guard(0, NULL);
}
}
}
void HazardPointerTest::DeletePointerCallback
(embb::base::Atomic<int>* to_delete) {
vector_mutex.Lock();
deleted_vector.push_back(to_delete);
vector_mutex.Unlock();
vectorMutex.Lock();
deletedVector.push_back(to_delete);
vectorMutex.Unlock();
}
// Release callback passed to the hazard pointer object: invoked when a
// retired pointer is not guarded anymore and can safely be released.
// Simply returns the element to the test pool.
void HazardPointerTest2::DeletePointerCallback(int* toDelete) {
  testPool->Release(toDelete);
}
// Tries to guard this thread's share of the currently allocated objects.
// Each thread owns guardsPerThreadCount consecutive positions of the shared
// arrays, starting at guardsPerThreadCount * threadIndex. Returns true when
// all of this thread's positions are already guarded.
bool HazardPointerTest2::SetRelativeGuards() {
  unsigned int threadIndex;
  embb_internal_thread_index(&threadIndex);

  unsigned int my_begin = guardsPerThreadCount*threadIndex;
  int guardNumber = 0;
  unsigned int alreadyGuarded = 0;

  for (unsigned int i = my_begin; i != my_begin + guardsPerThreadCount; ++i){
    // Position already guarded (possibly in an earlier call) — count it and
    // keep the guard slot reserved for it.
    if (sharedGuarded[i] != 0) {
      alreadyGuarded++;
      guardNumber++;
      continue;
    }

    int * toGuard = sharedAllocated[i];
    if (toGuard) {
      hazardPointer->Guard(guardNumber, toGuard);

      // Re-check after guarding: only if the pointer is still published is
      // the guard known to have taken effect (guard-and-check pattern).
      if (toGuard == sharedAllocated[i].Load()) {
        // guard was successful. Communicate to other threads.
        sharedGuarded[i] = toGuard;
      }
      else {
        // The master retired the pointer in the meantime; the guard came
        // too late, so take it back.
        hazardPointer->RemoveGuard(guardNumber);
      }
    }
    guardNumber++;
  }
  return(alreadyGuarded == guardsPerThreadCount);
}
// Master role: repeatedly allocates guaranteedCapacityPool objects,
// publishes them via sharedAllocated, guards its own share, and immediately
// retires them again — driving the retired lists toward their worst case.
// Terminates once every shared position carries a guard.
void HazardPointerTest2::HazardPointerTest2Master() {
  // Local scratch array remembering what was allocated in this round, so it
  // can be retired after being unpublished.
  int** allocatedLocal = static_cast<int**>(
  embb::base::Allocation::Allocate(sizeof(int*)*guaranteedCapacityPool));

  bool full = false;
  while (!full) {
    // Check whether the slaves (and this thread) have guarded everything.
    full = true;
    for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
      if (sharedGuarded[i] == 0) {
        full = false;
        break;
      }
    }

    // Not all guards set yet: allocate a fresh batch and publish it.
    for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
      allocatedLocal[i] = testPool->Allocate();
      sharedAllocated[i].Store(allocatedLocal[i]);
    }

    // set my hazards. We do not have to check, this must be successful
    // here.
    SetRelativeGuards();

    // Unpublish and retire the whole batch; guarded elements stay in the
    // retired lists until their guards are removed.
    for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
      sharedAllocated[i].Store(0);
      hazardPointer->EnqueueForDeletion(allocatedLocal[i]);
    }
  }
  embb::base::Allocation::Free(allocatedLocal);
}
// Slave role: spin on SetRelativeGuards() until all of this thread's shared
// positions are guarded.
void HazardPointerTest2::HazardPointerTest2Slave() {
  // The index value itself is not used here; the call presumably ensures
  // this thread has an internal index assigned — TODO confirm it is needed.
  unsigned int thread_index;
  embb_internal_thread_index(&thread_index);

  while (!SetRelativeGuards()) {};
}
// Test fixture setup (single-threaded): resets the internal thread indices,
// creates the test pool and the hazard pointer instance under test, and
// allocates the shared communication arrays, all starting at 0 ("not set").
void HazardPointerTest2::HazardPointerTest2Pre() {
  embb_internal_thread_index_reset();
  currentMaster = 0;
  sync1 = 0;
  sync2 = 0;

  // The pool must exist before the hazard pointer object, because the
  // delete callback releases elements back into the pool.
  testPool = embb::base::Allocation::New<IntObjectTestPool>(
    poolSizeUsingHazardPointer);

  hazardPointer = embb::base::Allocation::New<
    embb::containers::internal::HazardPointer<int*> >(
      deletePointerCallback, static_cast<int*>(NULL),
      static_cast<int>(guardsPerThreadCount), nThreads);

  size_t const arrayBytes =
    sizeof(embb::base::Atomic<int*>) * guaranteedCapacityPool;

  sharedGuarded = static_cast<embb::base::Atomic<int*>*>(
    embb::base::Allocation::Allocate(arrayBytes));
  sharedAllocated = static_cast<embb::base::Atomic<int*>*>(
    embb::base::Allocation::Allocate(arrayBytes));

  // In-place construct each atomic cell and initialize it with the null
  // pointer; cells are independent, so a single pass is enough.
  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    new (&sharedGuarded[i]) embb::base::Atomic<int*>;
    new (&sharedAllocated[i]) embb::base::Atomic<int*>;
    sharedGuarded[i] = 0;
    sharedAllocated[i] = 0;
  }
}
// Test fixture teardown and white-box verification: checks that after the
// worst-case run all retired lists are completely filled with pairwise
// distinct pointers, that the guaranteed pool capacity is still available,
// and that destructing the hazard pointer object returns every retired
// element to the pool. Accesses hazardPointer->threadLocalRetiredLists
// directly (this test is a friend of HazardPointer). The retired lists form
// a flat array: thread i owns the nThreads*guardsPerThreadCount entries
// starting at i*nThreads*guardsPerThreadCount.
void HazardPointerTest2::HazardPointerTest2Post() {
  for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) {
    for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)*
      guardsPerThreadCount; ++i2) {
      if (hazardPointer->threadLocalRetiredLists
        [i2 + i*nThreads*guardsPerThreadCount] == NULL) {
        // all retired lists must be completely filled
        PT_ASSERT(false);
      }
    }
  }

  unsigned int checks = 0;
  for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) {
    for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)*
      guardsPerThreadCount; ++i2) {
      for (unsigned int j = 0; j != static_cast<unsigned int>(nThreads); ++j) {
        for (unsigned int j2 = 0; j2 != static_cast<unsigned int>(nThreads)*
          guardsPerThreadCount; ++j2) {
          // skip comparing an entry with itself
          if (i2 == j2 && i == j)
            continue;

          // all retired elements have to be disjoint
          PT_ASSERT(
            hazardPointer->threadLocalRetiredLists
            [i2 + i*nThreads*guardsPerThreadCount] !=
            hazardPointer->threadLocalRetiredLists
            [j2 + j*nThreads*guardsPerThreadCount]
            );

          checks++;
        }
      }
    }
  }

  // sanity check on the count of expected comparisons: N*(N-1) ordered
  // pairs for N = nThreads*nThreads*guardsPerThreadCount entries.
  PT_ASSERT(
    checks ==
    nThreads*nThreads*guardsPerThreadCount *
    (nThreads*nThreads*guardsPerThreadCount - 1)
    );

  std::vector< int* > additionallyAllocated;

  // we should be able to still allocate the guaranteed capacity of
  // elements from the pool.
  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    int* allocated = testPool->Allocate();

    // allocated is not allowed to be zero
    PT_ASSERT(allocated != NULL);

    // push to vector, to check that elements are disjoint and to release
    // them afterwards.
    additionallyAllocated.push_back(allocated);
  }

  // the pool should now be empty
  PT_ASSERT(testPool->Allocate() == NULL);

  // release allocated elements...
  for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
    testPool->Release(additionallyAllocated[i]);
  }

  // the additionallyAllocated elements shall be disjoint
  for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
    for (unsigned int i2 = 0; i2 != additionallyAllocated.size(); ++i2) {
      if (i == i2)
        continue;

      PT_ASSERT(additionallyAllocated[i] !=
        additionallyAllocated[i2]);
    }
  }

  // no allocated element should be in any retired list...
  for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) {
    for (unsigned int i = 0; i != static_cast<unsigned int>(nThreads); ++i) {
      for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(nThreads)*
        guardsPerThreadCount; ++i2) {
        PT_ASSERT(
          hazardPointer->threadLocalRetiredLists
          [i2 + i*nThreads*guardsPerThreadCount] !=
          additionallyAllocated[a]
          );
      }
    }
  }

  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    // explicitly call the destructor for each in-place constructed cell
    sharedGuarded[i].~Atomic();
  }

  embb::base::Allocation::Free(sharedGuarded);

  for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
    // explicitly call the destructor for each in-place constructed cell
    sharedAllocated[i].~Atomic();
  }

  embb::base::Allocation::Free(sharedAllocated);

  embb::base::Allocation::Delete(hazardPointer);

  // after deleting the hazard pointer object, all retired pointers have
  // to be returned to the pool!
  std::vector<int*> elementsInPool;

  int* nextElement;
  while ((nextElement = testPool->Allocate()) != NULL) {
    for (unsigned int i = 0; i != elementsInPool.size(); ++i) {
      // all elements need to be disjoint
      PT_ASSERT(elementsInPool[i] != nextElement);
    }
    elementsInPool.push_back(nextElement);
  }

  // all elements should have been returned by the hp object, so we should be
  // able to acquire all elements.
  PT_ASSERT(elementsInPool.size() == poolSizeUsingHazardPointer);

  embb::base::Allocation::Delete(testPool);
}
// Per-thread entry point: runs nThreads rounds. In each round, exactly one
// thread (currentMaster) plays the master and the rest play slaves. Between
// rounds, the threads synchronize via sync1/sync2 so that one thread wipes
// the shared arrays and advances the master before the next round starts.
void HazardPointerTest2::HazardPointerTest2ThreadMethod() {
  for (;;) {
    unsigned int threadIndex;
    embb_internal_thread_index(&threadIndex);

    if (threadIndex == currentMaster) {
      HazardPointerTest2Master();
    }
    else {
      HazardPointerTest2Slave();
    }

    // Announce that this thread finished its role for the round.
    sync1.FetchAndAdd(1);

    // wait until cleanup thread signals to be finished
    while (sync1 != 0) {
      int expected = nThreads;
      int desired = finishMarker;
      // The thread whose CAS hits sync1 == nThreads becomes responsible for
      // the round cleanup; finishMarker keeps others from racing it.
      if (sync1.CompareAndSwap(expected, desired)) {
        // wipe the shared arrays for the next round
        for (unsigned int i = 0; i != guaranteedCapacityPool; ++i) {
          sharedGuarded[i] = 0;
          sharedAllocated[i] = 0;
        }

        // hand the master role to the next thread
        currentMaster.FetchAndAdd(1);
        // reset the barrier counters; sync1.Store(0) releases the waiters
        sync2 = 0;
        sync1.Store(0);
      }
    }

    // wait for all threads to reach this position
    sync2.FetchAndAdd(1);
    while (sync2 != static_cast<unsigned int>(nThreads)) {}

    // if each thread was master once, terminate.
    if (currentMaster == static_cast<unsigned int>(nThreads)) {
      return;
    }
  }
}
// Sets up the worst-case memory test: computes the pool sizes from the
// number of test threads and registers the test unit with its pre/post
// hooks. poolSizeUsingHazardPointer is the guaranteed capacity plus the
// worst-case size of all retired lists
// (guardsPerThreadCount * nThreads * nThreads).
HazardPointerTest2::HazardPointerTest2() :
  nThreads(static_cast<int>
  (partest::TestSuite::GetDefaultNumThreads())),

// Disable "this is used in base member initializer" warning.
// We explicitly want this.
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4355)
#endif
  deletePointerCallback(
  *this,
  &HazardPointerTest2::DeletePointerCallback)
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
{
  guardsPerThreadCount = 5;
  guaranteedCapacityPool = guardsPerThreadCount*nThreads;
  poolSizeUsingHazardPointer = guaranteedCapacityPool +
    guardsPerThreadCount*nThreads*nThreads;

  // Removed a stray "embb::base::Thread::GetThreadsMaxCount();" statement
  // here: its return value was discarded, so it had no effect.
  CreateUnit("HazardPointerTestSimulateMemoryWorstCase").
    Pre(&HazardPointerTest2::HazardPointerTest2Pre, this).
    Add(
    &HazardPointerTest2::HazardPointerTest2ThreadMethod,
    this, static_cast<size_t>(nThreads)).
    Post(&HazardPointerTest2::HazardPointerTest2Post, this);
}
} // namespace test
} // namespace containers
} // namespace embb
......@@ -36,33 +36,116 @@
namespace embb {
namespace containers {
namespace test {
/**
 * @brief A very simple wait-free object pool implementation, keeping the
 * tests independent of the EMBB object pool implementation.
 */
class IntObjectTestPool {
 private:
  // payload array; Allocate() hands out pointers into this array
  int* simplePoolObjects;

  // per-slot state flags (FREE_MARKER / ALLOCATED_MARKER)
  embb::base::Atomic<int>* simplePool;

 public:
  static const int ALLOCATED_MARKER = 1;
  static const int FREE_MARKER = 0;

  // number of slots in the pool
  unsigned int poolSize;

  /**
   * Create a pool with \c poolSize slots; all slots start out free.
   */
  IntObjectTestPool(unsigned int poolSize);

  /**
   * Destroy the pool and free its backing arrays.
   */
  ~IntObjectTestPool();

  /**
   * Allocate an object from the pool
   *
   * @return the allocated object, or NULL if the pool is exhausted
   */
  int* Allocate();

  /**
   * Return an element to the pool
   *
   * @param objectPointer the object to be freed
   */
  void Release(int* objectPointer);
};
class HazardPointerTest : public partest::TestCase {
private:
embb::base::Function<void, embb::base::Atomic<int>*> delete_pointer_callback;
embb::base::Function<void, embb::base::Atomic<int>*> deletePointerCallback;
//used to allocate random stuff, we will just use the pointers, not the
//contents
embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool;
embb::containers::ObjectPool< embb::base::Atomic<int> >* objectPool;
//used to move pointer between threads
embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack;
embb::base::Mutex vector_mutex;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>* hp;
std::vector< embb::base::Atomic<int>* > deleted_vector;
int n_threads;
int n_elements_per_thread;
int n_elements;
embb::base::Mutex vectorMutex;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>*
hazardPointer;
std::vector< embb::base::Atomic<int>* > deletedVector;
int nThreads;
int nElementsPerThread;
int nElements;
public:
/**
* Adds test methods.
*/
HazardPointerTest();
void HazardPointerTest1_Pre();
void HazardPointerTest1_Post();
void HazardPointerTest1_ThreadMethod();
void DeletePointerCallback(embb::base::Atomic<int>* to_delete);
void HazardPointerTest1Pre();
void HazardPointerTest1Post();
void HazardPointerTest1ThreadMethod();
void DeletePointerCallback(embb::base::Atomic<int>* toDelete);
};
// Stress test for the reworked hazard pointer: threads take turns being the
// "master", which allocates objects from IntObjectTestPool and shares them;
// the slaves try to guard them before the master retires them.
class HazardPointerTest2 : public partest::TestCase {
private:
// number of threads, participating in that test
int nThreads;
// Deletion callback registered with the hazard-pointer instance (see
// DeletePointerCallback below).
embb::base::Function<void, int*> deletePointerCallback;
// the thread id of the master
embb::base::Atomic<unsigned int> currentMaster;
// variables, to synchronize threads. At each point in time, one master,
// the master changes each round until each thread was assigned master once.
embb::base::Atomic<int> sync1;
embb::base::Atomic<unsigned int> sync2;
// Number of guards each thread may hold at once.
unsigned int guardsPerThreadCount;
// Capacity the hazard pointer guarantees to be retirable without blocking.
unsigned int guaranteedCapacityPool;
// Size of the test pool used together with the hazard pointer.
unsigned int poolSizeUsingHazardPointer;
// The threads write here, if they guarded an object successfully. Used to
// determine when all allocated objects were guarded successfully.
embb::base::Atomic<int*>* sharedGuarded;
// This array is used by the master, to communicate and share what he has
// allocated with the slaves.
embb::base::Atomic<int*>* sharedAllocated;
// Reference to the object pool
IntObjectTestPool* testPool;
// The hazard-pointer instance under test.
embb::containers::internal::HazardPointer<int*>* hazardPointer;
// Sentinel written into the shared arrays to signal the end of a round.
static const int finishMarker = -1;
public:
// Deletion callback: returns the object to the test pool.
void DeletePointerCallback(int* toDelete);
// Re-points this thread's guards relative to the current master's shares.
bool SetRelativeGuards();
// Role played by the thread currently elected master.
void HazardPointerTest2Master();
// Role played by every non-master thread.
void HazardPointerTest2Slave();
// Set-up before the threaded phase.
void HazardPointerTest2Pre();
// Tear-down/validation after the threaded phase.
void HazardPointerTest2Post();
// Per-thread entry point; dispatches to master or slave role.
void HazardPointerTest2ThreadMethod();
HazardPointerTest2();
};
} // namespace test
} // namespace containers
} // namespace embb
......
......@@ -55,6 +55,7 @@ using embb::containers::test::HazardPointerTest;
using embb::containers::test::QueueTest;
using embb::containers::test::StackTest;
using embb::containers::test::ObjectPoolTest;
using embb::containers::test::HazardPointerTest2;
PT_MAIN("Data Structures C++") {
unsigned int max_threads = static_cast<unsigned int>(
......@@ -64,6 +65,7 @@ PT_MAIN("Data Structures C++") {
PT_RUN(PoolTest< WaitFreeArrayValuePool<int COMMA -1> >);
PT_RUN(PoolTest< LockFreeTreeValuePool<int COMMA -1> >);
PT_RUN(HazardPointerTest);
PT_RUN(HazardPointerTest2);
PT_RUN(QueueTest< WaitFreeSPSCQueue< ::std::pair<size_t COMMA int> > >);
PT_RUN(QueueTest< LockFreeMPMCQueue< ::std::pair<size_t COMMA int> >
COMMA true COMMA true >);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment