Commit 3fdf419d by bernhard-gatzhammer

Revert "Merge remote-tracking branch 'origin/development' into embb453_rwlock"

This reverts commit 554c76f7, reversing
changes made to fc660db0.
parent 554c76f7
Embedded Multicore Building Blocks (EMB²)
=========================================
Version 0.3.1
-------------
### Features:
- None
### Changes and improvements:
- Removed one function argument from algorithms::Invoke
- Added "explicit" specifier to base type constructor of Atomic<BaseType*>
- Added "const" qualifier to dereference operator and member access operator of AtomicPointer<>
- Changed AtomicBase<>::CompareAndSwap to atomically return expected value
- Replaced constant in dataflow_cpp_test_simple.cc with corresponding macro
- Added initialization of atomic variable in hazard_pointer_test.cc to avoid warning with GCC 5.1
- Changed initial value of allocated_object_from_different_thread
- Added tests for the ID pool and checks for memory leaks
- Updated unit test for UniqueLock::Swap
### Bug fixes:
- Fixed implementation of ID pool (provided fewer elements than specified by capacity)
- Fixed unsigned overflow bug in timed wait function of condition variables
- Fixed implementation of UniqueLock::Swap
### Build system:
- Improved CMake output for automatic initialization option
- Fixed cpplint and unsigned/signed warnings
### Documentation:
- Fixed documentation of UniqueLock class
- Updated README file
Version 0.3.0
-------------
......
......@@ -28,7 +28,7 @@ cmake_minimum_required (VERSION 2.8.9)
# Version number
set (EMBB_BASE_VERSION_MAJOR 0)
set (EMBB_BASE_VERSION_MINOR 3)
set (EMBB_BASE_VERSION_PATCH 1)
set (EMBB_BASE_VERSION_PATCH 0)
# Fix compilation for CMake versions >= 3.1
#
......@@ -59,9 +59,7 @@ IF(NOT OpenCL_FOUND)
MESSAGE( STATUS "OpenCL is not there, will build without MTAPI OpenCL Plugin." )
ENDIF()
# give the user the possibility to append compiler flags
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CMAKE_CXX_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CMAKE_C_FLAGS}")
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release" CACHE STRING
......@@ -102,13 +100,6 @@ else()
endif()
message(" (set with command line option -DWARNINGS_ARE_ERRORS=ON/OFF)")
if (USE_AUTOMATIC_INITIALIZATION STREQUAL ON)
message("-- MTAPI/Tasks automatic initialization enabled (default)")
else()
message("-- MTAPI/Tasks automatic initialization disabled")
endif()
message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)")
include(CMakeCommon/SetCompilerFlags.cmake)
SetGNUCompilerFlags(compiler_libs compiler_flags)
SetVisualStudioCompilerFlags(compiler_libs compiler_flags)
......
......@@ -270,8 +270,8 @@ If you want to use the C++ functionalities of EMB², you have to link the
following libraries (names will be different on Windows and on Linux) in the
given order:
embb_dataflow_cpp, embb_algorithms_cpp, embb_containers_cpp,
embb_mtapi_cpp, embb_mtapi_c, embb_base_cpp, embb_base_c
embb_base, embb_base_cpp, embb_mtapi_c, embb_mtapi_cpp, embb_containers_cpp,
embb_algorithms_cpp, embb_dataflow_cpp
The C++ header files can be included as follows:
......@@ -284,7 +284,7 @@ The C++ header files can be included as follows:
The following libraries have to be linked in the given order:
embb_mtapi_c, embb_base_c
embb_base_c, mtapi_c
The C header files can be included as follows:
......@@ -323,8 +323,6 @@ Known Bugs and Limitations
is bounded by a predefined but modifiable constant (see functions
embb_thread_get_max_count() / embb_thread_set_max_count() and class
embb::base::Thread).
- While MTAPI fully supports heterogeneous systems, the algorithms and
dataflow components are currently limited to homogeneous systems.
Development and Contribution
......
......@@ -49,37 +49,33 @@ typedef embb::base::Function<void> InvokeFunctionType;
#ifdef DOXYGEN
/**
* Spawns two to ten function objects at once and runs them in parallel.
* Spawns one to ten function objects at once and runs them in parallel.
*
* Blocks until all of them are done.
*
* \ingroup CPP_ALGORITHMS_INVOKE
*/
template<typename Function1, typename Function2, ...>
template<typename Function1, ...>
void Invoke(
Function1 func1,
/**< [in] First function object to invoke */
Function2 func2,
/**< [in] Second function object to invoke */
...);
/**
* Spawns two to ten function objects at once and runs them in parallel using the
* Spawns one to ten function objects at once and runs them in parallel using the
* given embb::mtapi::ExecutionPolicy.
*
* Blocks until all of them are done.
*
* \ingroup CPP_ALGORITHMS_INVOKE
*/
template<typename Function1, typename Function2, ...>
template<typename Function1, ...>
void Invoke(
Function1 func1,
/**< [in] Function object to invoke */
Function2 func2,
/**< [in] Second function object to invoke */
...,
const embb::tasks::ExecutionPolicy & policy
/**< [in] embb::tasks::ExecutionPolicy to use */
const embb::mtapi::ExecutionPolicy & policy
/**< [in] embb::mtapi::ExecutionPolicy to use */
);
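For illustration, a minimal usage sketch of these overloads (header path and the embb::tasks::ExecutionPolicy type are assumed from the implementation below; the worker functions are hypothetical):

    #include <embb/algorithms/invoke.h>

    void WorkA() { /* ... */ }
    void WorkB() { /* ... */ }

    void RunInParallel() {
      // One to ten callables, run in parallel; blocks until all are done.
      embb::algorithms::Invoke(&WorkA);
      embb::algorithms::Invoke(&WorkA, &WorkB);
      // Same, with an explicit execution policy as the last argument.
      embb::algorithms::Invoke(&WorkA, &WorkB, embb::tasks::ExecutionPolicy());
    }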
#else // DOXYGEN
......@@ -122,6 +118,13 @@ class TaskWrapper {
};
} // namespace internal
template<typename Function1>
void Invoke(
Function1 func1,
const embb::tasks::ExecutionPolicy& policy) {
internal::TaskWrapper<Function1> wrap1(func1, policy);
}
template<typename Function1, typename Function2>
void Invoke(
Function1 func1,
......@@ -287,6 +290,12 @@ template<typename Function1, typename Function2, typename Function3,
internal::TaskWrapper<Function10> wrap10(func10, policy);
}
template<typename Function1>
void Invoke(
Function1 func1) {
Invoke(func1, embb::tasks::ExecutionPolicy());
}
template<typename Function1, typename Function2>
void Invoke(
Function1 func1,
......
......@@ -44,6 +44,7 @@ static void Invocable10() {}
void InvokeTest::Test() {
using embb::algorithms::Invoke;
Invoke(&Invocable1);
Invoke(&Invocable1, &Invocable2);
Invoke(&Invocable1, &Invocable2, &Invocable3);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4);
......@@ -60,24 +61,4 @@ void InvokeTest::Test() {
&Invocable6, &Invocable7, &Invocable8, &Invocable9);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, &Invocable7, &Invocable8, &Invocable9, &Invocable10);
embb::tasks::ExecutionPolicy policy;
Invoke(&Invocable1, &Invocable2, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, &Invocable7, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, &Invocable7, &Invocable8, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, &Invocable7, &Invocable8, &Invocable9, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, &Invocable7, &Invocable8, &Invocable9, policy);
Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5,
&Invocable6, &Invocable7, &Invocable8, &Invocable9, &Invocable10,
policy);
}
......@@ -83,8 +83,8 @@ int embb_condition_wait_until(embb_condition_t* condition_var,
embb_time_t now;
embb_time_now(&now);
/* Check if the absolute time point (in milliseconds) is still in the future */
if ((time->seconds * 1000 + time->nanoseconds / 1000000)
> (now.seconds * 1000 + now.nanoseconds / 1000000)) {
if (time->seconds * 1000 + time->nanoseconds / 1000000
- now.seconds * 1000 - now.nanoseconds / 1000000 > 0) {
/* Convert to (unsigned type) milliseconds and round up */
DWORD time_diff = (DWORD) (
time->seconds * 1000 + time->nanoseconds / 1000000
......
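The two variants of this check differ in how they behave under unsigned arithmetic; a small self-contained sketch of the difference (values are illustrative, not taken from the diff):

    #include <iostream>

    int main() {
      // Absolute timeout (in ms) that already lies in the past relative to now.
      unsigned long long time_ms = 1000;
      unsigned long long now_ms  = 2000;
      // Unsigned subtraction wraps around instead of becoming negative, so a
      // difference-based check claims the timeout is still in the future.
      std::cout << (time_ms - now_ms > 0) << std::endl;  // prints 1 (wrong)
      // Comparing the two values directly avoids the overflow.
      std::cout << (time_ms > now_ms) << std::endl;      // prints 0 (correct)
      return 0;
    }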
......@@ -128,20 +128,6 @@ void embb_internal_thread_index_set_max(unsigned int max) {
*embb_max_number_thread_indices() = max;
}
/**
* \pre the calling thread is the only active thread
*
* \post the thread indices count and calling thread index is reset
*/
void embb_internal_thread_index_reset() {
/** This function is only called in tests, usually when all other threads
* except the main thread have terminated. However, the main thread still has
* potentially stored its old index value in its thread local storage,
* which might be assigned additionally to another thread (as the counter is
* reset), which may lead to hard to detect bugs. Therefore, reset the thread
* local thread id here.
*/
embb_internal_thread_index_var = UINT_MAX;
embb_counter_init(embb_thread_index_counter());
}
\ No newline at end of file
}
......@@ -38,7 +38,7 @@ ConditionVarTest::ConditionVarTest()
embb_condition_init(&cond_wait_);
embb_mutex_init(&mutex_cond_wait_, EMBB_MUTEX_PLAIN);
CreateUnit("Timed wait timeouts")
CreateUnit("Timed wait timouts")
.Add(&ConditionVarTest::TestTimedWaitTimeouts, this);
if (num_threads_ >= 2) {
CreateUnit("Condition Notify Test")
......@@ -64,10 +64,10 @@ void ConditionVarTest::TestNotify() {
while (embb_counter_get(&counter_)
< static_cast<unsigned int>(num_threads_-1))
{} // All threads entered critical section
{} // all threads entered critical section
embb_mutex_lock(&mutex_cond_notify_);
embb_mutex_unlock(&mutex_cond_notify_);
// All threads called wait on the condition (even last thread)
// All threads called wait on the condition (Even last thread)
embb_counter_init(&counter_);
......@@ -75,7 +75,7 @@ void ConditionVarTest::TestNotify() {
embb_mutex_lock(&mutex_cond_wait_);
embb_condition_wait_for(&cond_wait_, &mutex_cond_wait_, &duration);
while (embb_counter_get(&counter_) == 0)
{} // If test hangs here, signalling has not succeeded
{} //if hangs here signal has not succeded
PT_ASSERT_EQ_MSG(embb_counter_get(&counter_), static_cast<unsigned int>(1),
"Only one thread notified");
......@@ -85,7 +85,7 @@ void ConditionVarTest::TestNotify() {
while (embb_counter_get(&counter_) !=
static_cast<unsigned int>(num_threads_-1))
{} // If test hangs here, not all threads were notified
{} // If this hangs then not all threads were notified.
embb_mutex_unlock(&mutex_cond_wait_);
embb_mutex_destroy(&mutex_cond_wait_);
......@@ -105,13 +105,13 @@ void ConditionVarTest::TestTimedWaitTimeouts() {
embb_time_t time;
embb_duration_t duration = EMBB_DURATION_INIT;
// Wait for "now" tests already passed time point
// Wait for now tests already passed time point
embb_time_now(&time);
embb_mutex_lock(&mutex);
int status = embb_condition_wait_until(&cond, &mutex, &time);
PT_EXPECT_EQ(status, EMBB_TIMEDOUT);
// Wait for a future time point
// Wait for a future timepoint
status = embb_duration_set_milliseconds(&duration, 1);
PT_EXPECT_EQ(status, EMBB_SUCCESS);
status = embb_time_in(&time, &duration); // Time now
......
......@@ -36,9 +36,6 @@ namespace test {
TimeTest::TimeTest() {
CreateUnit("Time in duration").Add(&TimeTest::TestTimeInDuration, this);
CreateUnit("Monotonicity").Add(
&TimeTest::TestMonotonicity, this,
1, partest::TestSuite::GetDefaultNumIterations() * 10);
}
void TimeTest::TestTimeInDuration() {
......@@ -51,20 +48,6 @@ void TimeTest::TestTimeInDuration() {
PT_EXPECT_EQ(status, EMBB_SUCCESS);
}
void TimeTest::TestMonotonicity() {
embb_time_t first;
embb_time_t second;
int status1 = embb_time_in(&first, embb_duration_zero());
int status2 = embb_time_in(&second, embb_duration_zero());
PT_EXPECT_EQ(status1, EMBB_SUCCESS);
PT_EXPECT_EQ(status2, EMBB_SUCCESS);
unsigned long long first_abs = first.seconds * 1000 +
first.nanoseconds / 1000000;
unsigned long long second_abs = second.seconds * 1000 +
second.nanoseconds / 1000000;
PT_EXPECT_GE(second_abs, first_abs);
}
} // namespace test
} // namespace base
} // namespace embb
......@@ -42,14 +42,9 @@ class TimeTest : public partest::TestCase {
private:
/**
* Tests time-in-duration method.
* Tests time in duration method.
*/
void TestTimeInDuration();
/**
* Tests that succeedingly taken times are monotonously increasing.
*/
void TestMonotonicity();
};
} // namespace test
......
......@@ -478,7 +478,7 @@ class Atomic<BaseType*> : public embb::base::internal::atomic::
public:
Atomic() : embb::base::internal::atomic::
AtomicPointer<BaseType, ptrdiff_t, sizeof(BaseType*)>() {}
explicit Atomic(BaseType* p) : embb::base::internal::atomic::
Atomic(BaseType* p) : embb::base::internal::atomic::
AtomicPointer<BaseType, ptrdiff_t, sizeof(BaseType*)>(p) {}
BaseType* operator=(BaseType* p) {
......
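The "explicit" specifier on the pointer constructor (one of the 0.3.1 changelog items above) rules out implicit conversions from a raw pointer; a hedged sketch of the effect, with hypothetical names and an assumed header path:

    #include <embb/base/atomic.h>

    void ExplicitConstructionSketch() {
      int value = 0;
      // Direct initialization still compiles with the explicit constructor:
      embb::base::Atomic<int*> guarded(&value);
      // Copy initialization via implicit conversion is rejected once the
      // constructor is explicit:
      //   embb::base::Atomic<int*> implicit = &value;
      (void)guarded;
    }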
......@@ -177,7 +177,8 @@ CompareAndSwap(BaseType& expected, BaseType desired) {
compare_and_swap(&AtomicValue, &native_expected, native_desired)) !=0
? true : false;
memcpy(&expected, &native_expected, sizeof(expected));
if (!return_val)
expected = Load();
return return_val;
}
......
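With the variant that reloads the observed value on failure, a typical compare-and-swap retry loop does not need an extra Load(); a hedged sketch (hypothetical function, assumed header path):

    #include <embb/base/atomic.h>

    // Increments the counter only while it is positive, retrying on contention.
    void IncrementIfPositive(embb::base::Atomic<int>& counter) {
      int expected = counter.Load();
      while (expected > 0) {
        if (counter.CompareAndSwap(expected, expected + 1)) {
          return;  // swap succeeded
        }
        // On failure, "expected" now holds the value observed atomically by
        // CompareAndSwap, so the loop can retry immediately.
      }
    }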
......@@ -65,8 +65,8 @@ class AtomicPointer : public AtomicArithmetic<BaseType*, DifferenceType, S> {
bool IsPointer() const;
// The methods below are documented in atomic.h
BaseType* operator->() const;
BaseType& operator*() const;
BaseType* operator->();
BaseType& operator*();
};
template<typename BaseType, typename DifferenceType, size_t S>
......@@ -93,13 +93,13 @@ inline bool AtomicPointer<BaseType, DifferenceType, S>::
template<typename BaseType, typename DifferenceType, size_t S>
inline BaseType* AtomicPointer<BaseType, DifferenceType, S>::
operator->() const {
operator->() {
return this->Load();
}
template<typename BaseType, typename DifferenceType, size_t S>
inline BaseType& AtomicPointer<BaseType, DifferenceType, S>::
operator*() const {
operator*() {
return *(this->Load());
}
......
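With the const-qualified dereference and member access operators (the variant listed in the 0.3.1 changelog above), read access through a const reference to an atomic pointer compiles; a minimal sketch with an assumed header path:

    #include <embb/base/atomic.h>

    // Reads the pointee through a const atomic pointer; relies on the
    // const-qualified operator*().
    int ReadThrough(embb::base::Atomic<int*> const& pointer) {
      return *pointer;
    }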
......@@ -28,7 +28,6 @@
#define EMBB_BASE_INTERNAL_MUTEX_INL_H_
#include <cassert>
#include <algorithm>
namespace embb {
namespace base {
......@@ -96,8 +95,8 @@ void UniqueLock<Mutex>::Unlock() {
template<typename Mutex>
void UniqueLock<Mutex>::Swap(UniqueLock<Mutex>& other) {
std::swap(mutex_, other.mutex_);
std::swap(locked_, other.locked_);
locked_ = other.locked_;
mutex_ = other.Release();
}
template<typename Mutex>
......
......@@ -439,11 +439,11 @@ class UniqueLock {
void Unlock();
/**
* Exchanges ownership of the wrapped mutex with another lock.
* Transfers ownership of a mutex to this lock.
*/
void Swap(
UniqueLock<Mutex>& other
/**< [IN/OUT] The lock to exchange ownership with */
/**< [IN/OUT] Lock from which ownership shall be transferred */
);
/**
......
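Independent of which wording is kept, the intended Swap semantics can be summarized in a short sketch (mirrors the unit test in the next hunk; header path and default template argument are assumed):

    #include <embb/base/mutex.h>

    void SwapSketch() {
      embb::base::Mutex mutex;
      embb::base::UniqueLock<> lock1;         // owns no mutex
      embb::base::UniqueLock<> lock2(mutex);  // owns and holds "mutex"
      lock1.Swap(lock2);
      // Now lock1 owns "mutex" and lock2 owns nothing; the mutex is released
      // when lock1 goes out of scope.
    }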
......@@ -191,21 +191,13 @@ void MutexTest::TestUniqueLock() {
}
{ // Test lock swapping
UniqueLock<> lock1(mutex_);
UniqueLock<> lock1;
UniqueLock<> lock2(mutex_);
PT_EXPECT_EQ(lock1.OwnsLock(), false);
PT_EXPECT_EQ(lock2.OwnsLock(), true);
lock1.Swap(lock2);
PT_EXPECT_EQ(lock1.OwnsLock(), true);
{
UniqueLock<> lock2;
PT_EXPECT_EQ(lock2.OwnsLock(), false);
lock1.Swap(lock2);
PT_EXPECT_EQ(lock1.OwnsLock(), false);
PT_EXPECT_EQ(lock2.OwnsLock(), true);
}
// At this point, "lock2" was destroyed and "mutex_" must be unlocked.
UniqueLock<> lock3(mutex_, embb::base::try_lock);
PT_EXPECT_EQ(lock3.OwnsLock(), true);
PT_EXPECT_EQ(lock2.OwnsLock(), false);
}
}
......
......@@ -30,360 +30,386 @@
namespace embb {
namespace containers {
namespace internal {
// Visual Studio complains that the return in the last line of this
// function is not reachable. This is true as long as exceptions are enabled.
// Otherwise, the exception becomes an assertion, and with assertions disabled,
// the code becomes reachable. So, disable this warning.
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4702)
#endif
template< typename GuardType >
unsigned int HazardPointer< GuardType >::GetObjectLocalThreadIndex() {
// first, get the EMBB native thread id.
unsigned int embb_thread_index;
int return_val = embb_internal_thread_index(&embb_thread_index);
if (return_val != EMBB_SUCCESS) {
EMBB_THROW(embb::base::ErrorException, "Could not get thread id");
}
// iterate over the mappings array
for (unsigned int i = 0; i != max_accessors_count_; ++i) {
// end of mappings? then we need to write our id
if (thread_id_mapping_[i] == -1) {
// try to CAS the initial value with our thread id
int expected = -1;
if (thread_id_mapping_[i].CompareAndSwap(expected,
static_cast<int>(embb_thread_index))) {
//successful, return our mapping
return i;
}
}
if (thread_id_mapping_[i] == static_cast<int>(embb_thread_index)) {
// found our mapping!
return i;
}
}
template< typename ElementT >
FixedSizeList<ElementT>::FixedSizeList(size_t max_size) :
max_size(max_size),
size(0) {
elementsArray = static_cast<ElementT*>(
embb::base::Allocation::Allocate(sizeof(ElementT) *
max_size));
}
template< typename ElementT >
inline size_t FixedSizeList<ElementT>::GetSize() const {
return size;
}
template< typename ElementT >
inline size_t FixedSizeList<ElementT>::GetMaxSize() const {
return max_size;
}
template< typename ElementT >
inline void FixedSizeList<ElementT>::clear() {
size = 0;
}
template< typename ElementT >
typename FixedSizeList<ElementT>::iterator
FixedSizeList<ElementT>::begin() const {
return &elementsArray[0];
}
template< typename ElementT >
typename FixedSizeList<ElementT>::iterator
FixedSizeList<ElementT>::end() const {
return &elementsArray[size];
}
template< typename ElementT >
FixedSizeList< ElementT > &
FixedSizeList<ElementT>::operator= (const FixedSizeList & other) {
size = 0;
if (max_size < other.size) {
EMBB_THROW(embb::base::ErrorException, "Copy target to small");
}
// when we reach this point, we have too many accessors
// (no mapping possible)
EMBB_THROW(embb::base::ErrorException, "Too many accessors");
for (const_iterator it = other.begin(); it != other.end(); ++it) {
PushBack(*it);
}
return *this;
}
return 0;
template< typename ElementT >
bool FixedSizeList<ElementT>::PushBack(ElementT const el) {
if (size + 1 > max_size) {
return false;
}
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
elementsArray[size] = el;
size++;
return true;
}
template< typename ElementT >
FixedSizeList<ElementT>::~FixedSizeList() {
embb::base::Allocation::Free(elementsArray);
}
template< typename GuardType >
bool HazardPointerThreadEntry<GuardType>::IsActive() {
return is_active;
}
template< typename GuardType >
bool HazardPointerThreadEntry<GuardType>::TryReserve() {
bool expected = false;
return is_active.CompareAndSwap(expected, true);
}
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::Deactivate() {
is_active = false;
}
template< typename GuardType >
size_t HazardPointerThreadEntry<GuardType>::GetRetiredCounter() {
return retired_list.GetSize();
}
template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>::
GetRetired() {
return retired_list;
}
template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>::
GetRetiredTemp() {
return retired_list_temp;
}
template< typename GuardType >
FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>::
GetHazardTemp() {
return hazard_pointer_list_temp;
}
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::
SetRetired(internal::FixedSizeList< GuardType > const & retired_list) {
this->retired_list = retired_list;
}
template< typename GuardType >
HazardPointerThreadEntry<GuardType>::
HazardPointerThreadEntry(GuardType undefined_guard, int guards_per_thread,
size_t max_size_retired_list) :
#ifdef EMBB_DEBUG
who_is_scanning(-1),
#endif
template< typename GuardType >
void HazardPointer< GuardType >::RemoveGuard(int guard_position) {
const unsigned int my_thread_id = GetObjectLocalThreadIndex();
// check invariants...
assert(guard_position < max_guards_per_thread_);
assert(my_thread_id < max_accessors_count_);
// set guard
guards_[guard_position*max_accessors_count_ + my_thread_id] =
undefined_guard_;
undefined_guard(undefined_guard),
guards_per_thread(guards_per_thread),
max_size_retired_list(max_size_retired_list),
// initially, each potential thread is active... if that is not the case
// another thread could call "HelpScan", and block this thread in making
// progress.
// Still, threads can leave the hazard pointer processing (deactivation),
// but this can only be done once, i.e., it is not reversible...
is_active(1),
retired_list(max_size_retired_list),
retired_list_temp(max_size_retired_list),
hazard_pointer_list_temp(embb::base::Thread::GetThreadsMaxCount() *
guards_per_thread) {
// Initialize guarded pointer list
guarded_pointers = static_cast<embb::base::Atomic<GuardType>*>
(embb::base::Allocation::Allocate(
sizeof(embb::base::Atomic<GuardType>)*guards_per_thread));
for (int i = 0; i != guards_per_thread; ++i) {
new (static_cast<void*>(&guarded_pointers[i]))
embb::base::Atomic<GuardType>(undefined_guard);
}
}
template< typename GuardType >
HazardPointer< GuardType >::HazardPointer(
embb::base::Function<void, GuardType> freeGuardCallback,
GuardType undefined_guard, int guardsPerThread, int accessors) :
max_accessors_count_(accessors < 0 ?
embb::base::Thread::GetThreadsMaxCount() : accessors),
undefined_guard_(undefined_guard),
max_guards_per_thread_(guardsPerThread),
release_object_callback_(freeGuardCallback),
thread_id_mapping_(static_cast<embb::base::Atomic<int>*>(
embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)
*max_accessors_count_))),
guards_(static_cast<embb::base::Atomic< GuardType >*>
(embb::base::Allocation::Allocate(
sizeof(embb::base::Atomic< GuardType >) * max_guards_per_thread_ *
max_accessors_count_))),
thread_local_retired_lists_temp_(static_cast<GuardType*>
(embb::base::Allocation::Allocate(
sizeof(GuardType) * max_guards_per_thread_ * max_accessors_count_ *
max_accessors_count_
))),
thread_local_retired_lists_(static_cast<GuardType*>
(embb::base::Allocation::Allocate(
sizeof(GuardType) * max_guards_per_thread_ * max_accessors_count_ *
max_accessors_count_
))) {
const unsigned int count_guards =
max_guards_per_thread_ * max_accessors_count_;
const unsigned int count_ret_elements =
count_guards * max_accessors_count_;
for (unsigned int i = 0; i != max_accessors_count_; ++i) {
//in-place new for each cell
new (&thread_id_mapping_[i]) embb::base::Atomic < int >(-1);
}
for (unsigned int i = 0; i != count_guards; ++i) {
//in-place new for each cell
new (&guards_[i]) embb::base::Atomic < GuardType >(undefined_guard);
}
for (unsigned int i = 0; i != count_ret_elements; ++i) {
//in-place new for each cell
new (&thread_local_retired_lists_temp_[i]) GuardType(undefined_guard);
}
for (unsigned int i = 0; i != count_ret_elements; ++i) {
//in-place new for each cell
new (&thread_local_retired_lists_[i]) GuardType(undefined_guard);
}
template< typename GuardType >
HazardPointerThreadEntry<GuardType>::~HazardPointerThreadEntry() {
for (int i = 0; i != guards_per_thread; ++i) {
guarded_pointers[i].~Atomic();
}
template< typename GuardType >
HazardPointer< GuardType >::~HazardPointer() {
const unsigned int count_guards =
max_guards_per_thread_ * max_accessors_count_;
const unsigned int count_ret_elements =
count_guards * max_accessors_count_;
// Release references from all retired lists. Note that for this to work,
// the data structure using hazard pointers still has to be active... So
// first, the hazard pointer class shall be destructed, then the memory
// management class (e.g. some pool). Otherwise, the hazard pointer class
// would try to return memory to an already destructed memory manager.
for (unsigned int i = 0; i != count_ret_elements; ++i) {
GuardType pointerToFree =
thread_local_retired_lists_[i];
if (pointerToFree == undefined_guard_) {
break;
embb::base::Allocation::Free(guarded_pointers);
}
template< typename GuardType >
GuardType HazardPointerThreadEntry<GuardType>::GetGuard(int pos) const {
return guarded_pointers[pos];
}
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::AddRetired(GuardType pointerToGuard) {
retired_list.PushBack(pointerToGuard);
}
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::
GuardPointer(int guardNumber, GuardType pointerToGuard) {
guarded_pointers[guardNumber] = pointerToGuard;
}
template< typename GuardType >
void HazardPointerThreadEntry<GuardType>::SetActive(bool active) {
is_active = active;
}
template< typename GuardType >
unsigned int HazardPointer< GuardType >::GetCurrentThreadIndex() {
unsigned int thread_index;
int return_val = embb_internal_thread_index(&thread_index);
if (return_val != EMBB_SUCCESS)
EMBB_THROW(embb::base::ErrorException, "Could not get thread id!");
return thread_index;
}
template< typename GuardType >
bool HazardPointer< GuardType >::IsThresholdExceeded() {
double retiredCounterLocThread =
static_cast<double>(GetHazardPointerElementForCurrentThread().
GetRetiredCounter());
return (retiredCounterLocThread >=
RETIRE_THRESHOLD *
static_cast<double>(active_hazard_pointer)*
static_cast<double>(guards_per_thread));
}
template< typename GuardType >
size_t HazardPointer< GuardType >::GetActiveHazardPointers() {
return active_hazard_pointer;
}
template< typename GuardType >
typename HazardPointer< GuardType >::HazardPointerThreadEntry_t &
HazardPointer< GuardType >::GetHazardPointerElementForCurrentThread() {
// For each thread, there is a slot in the hazard pointer array.
// Initially, the active flag of a hazard pointer entry is false.
// Only the respective thread changes the flag from true to false.
// This means that the current thread announces that it is about to
// stop operating, and the others are responsible for its retired
// list.
return hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
}
template< typename GuardType >
void HazardPointer< GuardType >::HelpScan() {
// This is a little bit different than in the paper. In the paper,
// the retired nodes from other threads are added to our retired list.
// To be able to give a bound on memory consumption, we execute scan
// for those threads, without moving elements. The effect shall be
// the same.
for (size_t i = 0; i != hazard_pointers; ++i) {
// Try to find non active lists...
if (!hazard_pointer_thread_entry_array[i].IsActive() &&
hazard_pointer_thread_entry_array[i].TryReserve()) {
// Here: grab retired things, first check if there are any...
if (hazard_pointer_thread_entry_array[i].GetRetiredCounter() > 0) {
Scan(&hazard_pointer_thread_entry_array[i]);
}
release_object_callback_(pointerToFree);
}
for (unsigned int i = 0; i != max_accessors_count_; ++i) {
thread_id_mapping_[i].~Atomic();
}
embb::base::Allocation::Free(thread_id_mapping_);
for (unsigned int i = 0; i != count_guards; ++i) {
guards_[i].~Atomic();
}
embb::base::Allocation::Free(guards_);
for (unsigned int i = 0; i != count_ret_elements; ++i) {
thread_local_retired_lists_temp_[i].~GuardType();
// We are done, mark it as deactivated again
hazard_pointer_thread_entry_array[i].Deactivate();
}
embb::base::Allocation::Free(thread_local_retired_lists_temp_);
for (unsigned int i = 0; i != count_ret_elements; ++i) {
thread_local_retired_lists_[i].~GuardType();
}
embb::base::Allocation::Free(thread_local_retired_lists_);
}
template< typename GuardType >
void HazardPointer< GuardType >::Guard(int guardPosition,
GuardType guardedElement) {
const unsigned int my_thread_id = GetObjectLocalThreadIndex();
// check invariants...
assert(guardPosition < max_guards_per_thread_);
assert(my_thread_id < max_accessors_count_);
// set guard
guards_[guardPosition*max_accessors_count_ + my_thread_id] = guardedElement;
}
template< typename GuardType >
void HazardPointer< GuardType >::
Scan(HazardPointerThreadEntry_t* currentHazardPointerEntry) {
#ifdef EMBB_DEBUG
// scan should only be executed by one thread at a time, otherwise we have
// a bug... this assertion checks that
int expected = -1;
if (!currentHazardPointerEntry->GetScanningThread().CompareAndSwap(
expected, static_cast<int>(GetCurrentThreadIndex()))) {
assert(false);
}
template< typename GuardType >
size_t HazardPointer< GuardType >::ComputeMaximumRetiredObjectCount(
size_t guardsPerThread, int accessors) {
unsigned int accessorCount = (accessors == -1 ?
embb::base::Thread::GetThreadsMaxCount() :
accessors);
return static_cast<size_t>(
guardsPerThread * accessorCount * accessorCount);
}
/**
* Remark: it might be faster to just swap pointers for temp retired list and
* retired list. However, with the current implementation (one array for all
* retired and retired temp lists, respectively) this is not possible. This is
* not changed until this copying accounts for a performance problem. The
* copying is not the bottleneck currently.
*/
template< typename GuardType >
void HazardPointer< GuardType >::CopyRetiredList(GuardType* sourceList,
GuardType* targetList, unsigned int retiredListSize,
GuardType undefinedGuard) {
bool done = false;
for (unsigned int ii = 0; ii != retiredListSize; ++ii) {
if (!done) {
GuardType guardToCopy = sourceList[ii];
if (guardToCopy == undefinedGuard) {
done = true;
if (targetList[ii] == undefinedGuard) {
// end of target list
break;
}
}
targetList[ii] = guardToCopy;
} else {
// we copied the whole source list, remaining values in the target
// have to be zeroed.
if (targetList[ii] == undefinedGuard) {
// end of target list
break;
} else {
targetList[ii] = undefinedGuard;
}
#endif
// In this function, we compute the intersection between local retired
// pointers and all hazard pointers. This intersection cannot be deleted and
// forms the new local retired pointers list.
// It is assumed that the union of all retired pointers contains no two
// pointers with the same value. However, the union of all hazard guards
// might.
// Here, we store the temporary hazard pointers. We have to store them,
// as iterating multiple time over them might be expensive, as this
// atomic array is shared between threads.
currentHazardPointerEntry->GetHazardTemp().clear();
// Get all active hazard pointers!
for (unsigned int i = 0; i != hazard_pointers; ++i) {
// Only consider guards of active threads
if (hazard_pointer_thread_entry_array[i].IsActive()) {
// For each guard in a hazard pointer entry
for (int pos = 0; pos != guards_per_thread; ++pos) {
GuardType guard = hazard_pointer_thread_entry_array[i].GetGuard(pos);
// UndefinedGuard means not guarded
if (guard == undefined_guard)
continue;
currentHazardPointerEntry->GetHazardTemp().PushBack(guard);
}
}
}
template< typename GuardType >
void HazardPointer< GuardType >::UpdateRetiredList(GuardType* retired_list,
GuardType* updated_retired_list, unsigned int retired_list_size,
GuardType guarded_element, GuardType considered_hazard,
GuardType undefined_guard) {
// no hazard set here
if (considered_hazard == undefined_guard)
return;
// if this hazard is currently in the union of
// threadLocalRetiredLists and pointerToRetire, but not yet in
// threadLocalRetiredListsTemp, add it to that list
bool contained_in_union = false;
// first iterate over our retired list
for (unsigned int i = 0; i != retired_list_size; ++i) {
// when reaching 0, we can stop iterating (end of the "list")
if (retired_list[i] == 0)
break;
// the hazard is contained in the retired list... it shall go
// into the temp list, if not already there
if (retired_list[i] == considered_hazard) {
contained_in_union = true;
break;
}
}
// the union also contains pointerToRetire
if (!contained_in_union) {
contained_in_union = (considered_hazard == guarded_element);
}
// add the pointer to temp. retired list, if not already there
if (contained_in_union) {
for (unsigned int ii = 0; ii != retired_list_size; ++ii) {
// is it already there?
if (updated_retired_list[ii] == considered_hazard)
break;
// end of the list
if (updated_retired_list[ii] == undefined_guard) {
// add hazard
updated_retired_list[ii] = considered_hazard;
// we are done here...
break;
}
}
currentHazardPointerEntry->GetRetiredTemp().clear();
// Sort them, we will do a binary search on each entry from the retired list
std::sort(
currentHazardPointerEntry->GetHazardTemp().begin(),
currentHazardPointerEntry->GetHazardTemp().end());
for (
EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME FixedSizeList< GuardType >::iterator
it = currentHazardPointerEntry->GetRetired().begin();
it != currentHazardPointerEntry->GetRetired().end(); ++it) {
if (false == ::std::binary_search(
currentHazardPointerEntry->GetHazardTemp().begin(),
currentHazardPointerEntry->GetHazardTemp().end(), *it)) {
this->free_guard_callback(*it);
} else {
currentHazardPointerEntry->GetRetiredTemp().PushBack(*it);
}
}
currentHazardPointerEntry->SetRetired(
currentHazardPointerEntry->GetRetiredTemp());
template< typename GuardType >
void HazardPointer< GuardType >::EnqueueForDeletion(GuardType toRetire) {
unsigned int my_thread_id = GetObjectLocalThreadIndex();
// check for invariant
assert(my_thread_id < max_accessors_count_);
const unsigned int retired_list_size = max_accessors_count_ *
max_guards_per_thread_;
const unsigned int count_guards = max_accessors_count_ *
max_guards_per_thread_;
GuardType* retired_list =
&thread_local_retired_lists_[my_thread_id * retired_list_size];
GuardType* retired_list_temp =
&thread_local_retired_lists_temp_[my_thread_id * retired_list_size];
// wipe my temp. retired list...
for (unsigned int i = 0; i < retired_list_size; ++i) {
// the list is always filled from left to right, so once the first
// undefinedGuard occurs, the remaining entries are also undefinedGuard...
if (retired_list_temp[i] == undefined_guard_)
break;
retired_list_temp[i] = undefined_guard_;
}
// we test each hazard if it is in the union of retiredList and
// guardedElement. If it is, it goes into the new retired list...
for (unsigned int i = 0; i != count_guards; ++i) {
// consider each current active guard
GuardType considered_hazard = guards_[i].Load();
UpdateRetiredList(retired_list, retired_list_temp, retired_list_size,
toRetire, considered_hazard, undefined_guard_);
}
int retired_list_size_signed = static_cast<int>(retired_list_size);
assert(retired_list_size_signed >= 0);
// now we have created a new retired list... the elements that are "removed"
// from the old retired list can be safely deleted now...
for (int i = -1; i != retired_list_size_signed; ++i) {
// we iterate over the current retired list... -1 is used as dummy element
// in the iteration, to also iterate over the pointerToRetire, which is
// logically also part of the current retired list...
// end of the list, stop iterating
if (i >= 0 && retired_list[i] == undefined_guard_)
break;
GuardType to_check_if_in_new_list = undefined_guard_;
to_check_if_in_new_list = (i == -1 ? toRetire : retired_list[i]);
// still in the new retired list?
bool still_in_list = false;
for (unsigned int ii = 0; ii != retired_list_size; ++ii) {
// end of list
if (retired_list_temp[ii] == undefined_guard_)
break;
if (to_check_if_in_new_list == retired_list_temp[ii]) {
// still in list, cannot delete element!
still_in_list = true;
break;
}
}
#ifdef EMBB_DEBUG
currentHazardPointerEntry->GetScanningThread().Store(-1);
#endif
}
template< typename GuardType >
size_t HazardPointer< GuardType >::GetRetiredListMaxSize() const {
return static_cast<size_t>(RETIRE_THRESHOLD *
static_cast<double>(embb::base::Thread::GetThreadsMaxCount()) *
static_cast<double>(guards_per_thread)) + 1;
}
template< typename GuardType >
HazardPointer< GuardType >::HazardPointer(
embb::base::Function<void, GuardType> free_guard_callback,
GuardType undefined_guard, int guards_per_thread) :
undefined_guard(undefined_guard),
guards_per_thread(guards_per_thread),
//initially, all potential hazard pointers are active...
active_hazard_pointer(embb::base::Thread::GetThreadsMaxCount()),
free_guard_callback(free_guard_callback) {
hazard_pointers = embb::base::Thread::GetThreadsMaxCount();
hazard_pointer_thread_entry_array = static_cast<HazardPointerThreadEntry_t*>(
embb::base::Allocation::Allocate(sizeof(HazardPointerThreadEntry_t) *
hazard_pointers));
for (size_t i = 0; i != hazard_pointers; ++i) {
new (static_cast<void*>(&(hazard_pointer_thread_entry_array[i])))
HazardPointerThreadEntry_t(undefined_guard, guards_per_thread,
GetRetiredListMaxSize());
}
}
if (!still_in_list) {
this->release_object_callback_(to_check_if_in_new_list);
}
}
template< typename GuardType >
HazardPointer< GuardType >::~HazardPointer() {
for (size_t i = 0; i != hazard_pointers; ++i) {
hazard_pointer_thread_entry_array[i].~HazardPointerThreadEntry_t();
}
// copy the updated retired list (temp) to the retired list...
CopyRetiredList(retired_list_temp, retired_list, retired_list_size,
undefined_guard_);
embb::base::Allocation::Free(static_cast < void* >
(hazard_pointer_thread_entry_array));
}
template< typename GuardType >
void HazardPointer< GuardType >::DeactivateCurrentThread() {
HazardPointerThreadEntry_t* current_thread_entry =
&hazard_pointer_thread_entry_array[GetCurrentThreadIndex()];
// Deactivating a non-active hazard pointer entry has no effect!
if (!current_thread_entry->IsActive()) {
return;
} else {
current_thread_entry->SetActive(false);
active_hazard_pointer--;
}
}
template< typename GuardType >
void HazardPointer< GuardType >::GuardPointer(int guardPosition,
GuardType guardedElement) {
GetHazardPointerElementForCurrentThread().GuardPointer(
guardPosition, guardedElement);
}
template< typename GuardType >
void HazardPointer< GuardType >::EnqueuePointerForDeletion(
GuardType guardedElement) {
GetHazardPointerElementForCurrentThread().AddRetired(guardedElement);
if (IsThresholdExceeded()) {
HazardPointerThreadEntry_t* currentHazardPointerEntry =
&GetHazardPointerElementForCurrentThread();
Scan(currentHazardPointerEntry);
// Help deactivated threads to clean their retired nodes.
HelpScan();
}
}
template<typename GuardType>
const double embb::containers::internal::HazardPointer<GuardType>::
RETIRE_THRESHOLD = 1.25f;
} // namespace internal
} // namespace containers
} // namespace embb
......
......@@ -40,274 +40,487 @@
#define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename
#endif
// forward declaration for white-box test, used in friend declaration of
// HazardPointer class.
namespace embb {
namespace containers {
namespace test {
class HazardPointerTest2;
}
}
}
namespace embb {
namespace containers {
namespace internal {
/**
* This class contains a hazard pointer implementation following publication:
*
* Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free
* objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004)
* : 491-504.
* A list with fixed size, implemented as an array. Replaces the std::vector that
* was used in the previous hazard pointer implementation.
*
* Hazard pointers are a wait-free memory reclamation scheme for lock-free
* algorithms. Loosely speaking, they act as a garbage collector. The release of
* objects contained within the memory, managed by the hazard pointer class, is
* intercepted and possibly delayed to avoid concurrency bugs.
* Provides iterators, so we can apply algorithms from the STL.
*
* Before accessing an object, threads announce their intention to do so (i.e.
* the intention to dereference the respective pointer) to the hazard pointer
* class. This is called guarding. From now on, the hazard pointer class will
* prohibit the release or reuse of the guarded object. This is necessary to
* assure that the object is not released or reused while it is accessed, and to
* assure that it has not changed unnoticed (effectively avoiding the ABA
* problem).
*
* Note that after guarding an object, a subsequent check that the object (i.e.,
* its pointer) is still valid is necessary; the object's release could already
* have been started when the guard was placed. Guarding is repeated until this
* check eventually succeeds. Note that this "guard-and-check" loop makes the
* usage of the hazard pointer class lock-free, even though its implementation
* is wait-free.
*
* Internally, guarding is realized by providing each thread slots, where
* pointers can be placed that should not be freed (so called guards). When
* trying to release an object, it is checked if the object's pointer is
* guarded, and if so this object is not released, but instead put into a
* retired list for later release, when all guards for this object have been
* removed.
*
* In contrast to the original implementation, our implementation consumes only
* fixed-size memory. Note that the number of threads accessing the hazard
* pointer object accounts quadratically for the memory consumption: managed
* objects are provided from outside, and the number of accessors accounts
* quadratically for the minimum count of those objects.
* \tparam ElementT Type of the elements contained in the list.
*/
template< typename ElementT >
class FixedSizeList {
private:
/**
* Capacity of the list
*/
size_t max_size;
/**
* Size of the list
*/
size_t size;
/**
* Pointer to the array containing the list
*/
ElementT* elementsArray;
/**
* Copy constructor not implemented. Would require dynamic memory allocation.
*/
FixedSizeList(
const FixedSizeList &
/**< [IN] Other list */);
public:
/**
* Definition of an iterator
*/
typedef ElementT * iterator;
/**
* Definition of a const iterator
*/
typedef const ElementT * const_iterator;
/**
* Constructor, initializes list with given capacity
*/
FixedSizeList(
size_t max_size
/**< [IN] Capacity of the list */);
/**
* Gets the current size of the list
*
* \return Size of the list
*/
inline size_t GetSize() const;
/**
* Gets the capacity of the list
*
* \return The capacity of the list
*/
inline size_t GetMaxSize() const;
/**
* Removes all elements from the list without changing the capacity
*/
inline void clear();
/**
* Iterator pointing to the first element
*
* \return Begin iterator
*/
iterator begin() const;
/**
* Iterator pointing beyond the last element
*
* \return End iterator
*/
iterator end() const;
/**
* Copies the elements of another list to this list. The capacity of
* this list has to be greater than or equal to the size of the other list.
*/
FixedSizeList & operator=(
const FixedSizeList & other
/**< [IN] Other list */);
/**
* Appends an element to the end of the list
*
* \return \c false if the operation was not successful because the list is
* full, otherwise \c true.
*/
bool PushBack(
ElementT const el
/**< [IN] Element to append to the list */);
/**
* Destructs the list.
*/
~FixedSizeList();
};
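The guard-and-check loop described in the hazard pointer documentation above can be sketched as follows (hypothetical Node type, assumed header paths; the slot-based guard call is named Guard in one variant of the class and GuardPointer in the other, and the same pattern appears in LockFreeMPMCQueue::TryEnqueue further below):

    #include <embb/base/atomic.h>
    #include <embb/containers/internal/hazard_pointer.h>

    struct Node { int value; };

    // Reads a shared pointer and guards it so that it cannot be released or
    // reused while this thread dereferences it.
    Node* AcquireGuarded(
        embb::base::Atomic<Node*>& shared,
        embb::containers::internal::HazardPointer<Node*>& hazard_pointer) {
      for (;;) {
        Node* observed = shared;
        hazard_pointer.Guard(0, observed);  // announce intent to dereference
        if (observed == shared) {           // still current? guard took effect
          return observed;
        }
        // Otherwise the node may already have been retired; repeat guarding.
      }
    }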
/**
* Hazard pointer entry for a single thread. Holds the actual guards that
* determine if the current thread is about to use the guarded pointer.
* Guarded pointers are protected and not deleted.
*
* Also in contrast to the original implementation, we do not provide a HelpScan
* functionality, which would allow threads to stop participating in the
* garbage collection: other threads would then help to clean up the objects
* protected by the exiting thread. The reason is that the only use case would
* be a crashing thread that no longer participates. However, as the thread has
* to signal its exit itself, this cannot be realized anyway. In the
* end, it is still guaranteed that all memory is properly returned (in the
* destructor).
* Moreover, the retired list for this thread is contained. It determines
* the pointers that have been allocated from this thread, but are not used
* anymore by this thread. However, another thread could have a guard on it,
* so the pointer cannot be deleted immediately.
*
* Additionally, the original implementation holds a threshold, which determines
* when objects shall be freed. In this implementation, we free whenever it is
* possible to do so, as we want to keep the memory footprint as low as
* possible. We also do not see a performance drop in the current algorithms
* that use hazard pointers when no threshold is used.
* For the scan operation, the intersection of the guarded pointers from all
* threads and the retired list has to be computed. For this computation, we
* need thread local temporary lists which are also contained here.
*
* \tparam GuardType the type of the guards. Usually the pointer type of some
* object to protect.
* \tparam GuardType The type of guard, usually a pointer.
*/
template< typename GuardType >
class HazardPointer {
class HazardPointerThreadEntry {
#ifdef EMBB_DEBUG
public:
embb::base::Atomic<int>& GetScanningThread() {
return who_is_scanning;
}
private:
embb::base::Atomic<int> who_is_scanning;
#endif
private:
/**
* Value of the undefined guard (means that no guard is set).
*/
GuardType undefined_guard;
/**
* The number of guards per thread. Determines the size of the guard array.
*/
int guards_per_thread;
/**
* The capacity of the retired list. It is determined by number of guards,
* retired threshold, and maximum number of threads.
*/
size_t max_size_retired_list;
/**
* Set to true if the current thread is active. Is used for a thread to
* signal that it is leaving. If a thread has left, the other threads are
* responsible for cleaning up its retired list.
*/
embb::base::Atomic< bool > is_active;
/**
* The guarded pointer of this thread, has size \c guard_per_thread.
*/
embb::base::Atomic< GuardType >* guarded_pointers;
/**
* The retired list of this thread, contains pointer that shall be released
* when no thread holds a guard on it anymore.
*/
FixedSizeList< GuardType > retired_list;
/**
* Temporary retired list; it has the same capacity as \c retired_list. It is used
* to compute the intersection of all guards and the \c retired_list.
*/
FixedSizeList< GuardType > retired_list_temp;
/**
* Temporary guards list. Used to compute the intersection of all guards and
* the \c retired_list.
*/
FixedSizeList< GuardType > hazard_pointer_list_temp;
/**
* HazardPointerThreadEntry shall not be copied
*/
HazardPointerThreadEntry(const HazardPointerThreadEntry&);
/**
* HazardPointerThreadEntry shall not be assigned
*/
HazardPointerThreadEntry & operator= (const HazardPointerThreadEntry&);
public:
/**
* The user of the hazard pointer class has to provide the memory that is
* managed here. The user has to take into account that the release of memory
* might be delayed. The user therefore has to provide more memory than shall be
* guaranteed at each point in time. More specifically, on top of the guaranteed
* count of objects, the user has to provide the additional count of objects that
* can be (worst-case) contained in the retired lists and are therefore not
* released yet. The size sum of all retired lists is guardsPerThread *
* accessorCount * accessorCount, which is computed using this function. So
* the result of this function tells the user how many objects have to be
* allocated in addition to the guaranteed count.
* Checks if current thread is active (with respect to participating in hazard
* pointer management)
*
* \waitfree
* \return \c true if the current thread is active, otherwise \c false.
*/
static size_t ComputeMaximumRetiredObjectCount(
size_t guardsPerThread,
/**<[IN] the count of guards per thread*/
int accessors = -1
/**<[IN] Number of accessors. Determines, how many threads will access
the hazard pointer object. Default value -1 will allow the
maximum amount of threads as defined with
\c embb::base::Thread::GetThreadsMaxCount()*/
);
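A worked instance of the sizing rule described above (illustrative numbers): with 2 guards per thread and 4 accessing threads, up to 2 * 4 * 4 = 32 retired objects may be withheld from reuse at any time, so that many objects must be provided on top of the guaranteed count, e.g.:

    #include <cstddef>
    #include <embb/containers/internal/hazard_pointer.h>

    // Additional objects a pool must provide for 2 guards and 4 accessors;
    // yields 2 * 4 * 4 = 32 (assumes the declaration above).
    const size_t extra_objects =
        embb::containers::internal::HazardPointer<void*>::
            ComputeMaximumRetiredObjectCount(2, 4);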
bool IsActive();
/**
* Initializes the hazard pointer object
* Tries to set the active flag to true (atomically). Used as a lock when the
* current thread is not active anymore, so that another thread can help
* cleaning up its hazard pointers.
*
* \notthreadsafe
* \return \c true if this thread was successful setting the active flag,
* otherwise \c false.
*/
bool TryReserve();
/**
* Deactivates current thread by atomically setting active flag to false.
*/
void Deactivate();
/**
* Gets the count of currently retired pointers for the current thread.
*
* \memory We dynamically allocate the following:
* \return Count of currently retired pointers
*/
size_t GetRetiredCounter();
/**
* Gets the retired list.
*
* (sizeof(Atomic<int>) * accessors) + (sizeof(Atomic<GuardType>) *
* guards_per_thread * accessors) + (2*sizeof(GuardType) *
* guards_per_thread * accessors^2)
* \return Reference to \c retired_list
*/
FixedSizeList< GuardType >& GetRetired();
/**
* Gets the temporary retired list.
*
* The last addend is the dominant one, as accessorCount accounts
* quadratically for it.
* \return Reference to \c retired_list_temp
*/
HazardPointer(
embb::base::Function<void, GuardType> free_guard_callback,
/**<[IN] Callback to the function that shall be called when a retired
guard can be deleted */
FixedSizeList< GuardType >& GetRetiredTemp();
/**
* Gets the temporary hazard pointer list.
*
* \return Reference to \c hazard_pointer_list_temp
*/
FixedSizeList< GuardType >& GetHazardTemp();
/**
* Sets the retired list.
*/
void SetRetired(
embb::containers::internal::FixedSizeList< GuardType > const & retired_list
/**< [IN] Retired list */);
/**
* Constructor
*/
HazardPointerThreadEntry(
GuardType undefined_guard,
/**<[IN] The guard value denoting "not guarded"*/
/**< [IN] Value of the undefined guard (e.g. NULL) */
int guards_per_thread,
/**<[IN] Number of guards per thread*/
int accessors = -1
/**<[IN] Number of accessors. Determines, how many threads will access
this hazard pointer object. Default value -1 will allow the
maximum amount of threads as defined with
\c embb::base::Thread::GetThreadsMaxCount()*/
);
/**
* Deallocates internal data structures. Additionally releases all objects
* currently held in the retired lists, using the release functor passed in
* the constructor.
/**< [IN] Number of guards per thread */
size_t max_size_retired_list
/**< [IN] The capacity of the retired list(s) */);
/**
* Destructor
*
* \notthreadsafe
* Deallocate lists
*/
~HazardPointer();
~HazardPointerThreadEntry();
/**
* Guards \c to_guard. If the guarded element is passed to \c EnqueueForDeletion,
* it is prevented from being released from now on. The user must check that
* EnqueueForDeletion has not been called on \c to_guard before the guard took
* effect.
*
* \waitfree
* Gets the guard at the specified position.
* Positions are numbered, beginning with 0.
*/
void Guard(
int guard_position,
/**<[IN] position to place guard*/
GuardType to_guard
/**<[IN] element to guard*/
);
GuardType GetGuard(
int pos
/**< [IN] Position of the guard */) const;
/**
* Enqueue guarded element for deletion. If not guarded, it is deleted
* immediately. If it is guarded, it is added to a thread local retired list,
* and deleted in a subsequent call to \c EnqueueForDeletion, when no guard is
* placed on it anymore.
* Adds pointer to the retired list
*/
void EnqueueForDeletion(
GuardType guarded_element
/**<[IN] element to logically delete*/
);
void AddRetired(
GuardType pointerToGuard
/**< [IN] Guard to retire */);
/**
* Explicitly remove guard from thread local slot.
*
* \waitfree
* Guards pointer
*/
void RemoveGuard(int guard_position);
void GuardPointer(
int guardNumber,
/**< [IN] Position of guard */
GuardType pointerToGuard
/**<[IN] Pointer to guard */);
/**
* Sets the current thread active, i.e., announce that the thread
* participates in managing hazard pointer.
*/
void SetActive(
bool active
/**<[IN] \c true for active, \c false for inactive */);
};
/**
* HazardPointer implementation as presented in:
*
* Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free
* objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004)
* : 491-504.
*
* In contrast to the original implementation, our implementation only uses
* fixed-size memory. There is a safe upper limit, hazard pointer are guaranteed
* to not consume more memory. Memory is allocated solely at initialization.
*
* Hazard pointers solve the ABA problem for lock-free algorithms. Before
* accessing a pointer, threads announce that they want to access this pointer
* and then check if the pointer is still valid. This announcement is done by
* placing a guard. It is guaranteed that the pointer is not reused until all
* threads remove their guards on this pointer. Objects that these pointers
* point to can therefore not be deleted directly. Instead, these pointers
* are put into a list for later deletion (retired list). Regularly, this list
* is processed to check which pointers can be deleted. If a pointer can be
* deleted, a callback function provided by the user is called. The user can
* then, e.g., free the respective object, so that the pointer can be safely
* reused.
*/
template< typename GuardType >
class HazardPointer {
private:
/**
* Concrete hazard pointer entry type
*/
typedef HazardPointerThreadEntry < GuardType >
HazardPointerThreadEntry_t;
/**
* The guard value denoting "not guarding"
*/
GuardType undefined_guard;
/**
* HazardPointerTest2 is a white-box test that needs access to private members
* of this class, so it is declared as a friend.
* The capacity of the retired list (safe upper bound for retired list size)
*/
friend class embb::containers::test::HazardPointerTest2;
int retired_list_max_size;
/**
* This number determines the maximum number of accessors (threads) that
* will access this hazard pointer instance. Note that a thread that has once
* accessed this object is permanently counted as an accessor, even if it no
* longer participates. If too many threads access this object, an
* exception is thrown.
* Guards that can be set per thread
*/
unsigned int max_accessors_count_;
int guards_per_thread;
/**
* The guard value denoting "not guarded"
* Array of HazardPointerElements. Each thread is assigned to one.
*/
GuardType undefined_guard_;
HazardPointerThreadEntry_t* hazard_pointer_thread_entry_array;
/**
* The maximal count of guards that can be set per thread.
* The threshold determines at which size of the retired list the pointers
* are attempted to be deleted.
*/
int max_guards_per_thread_;
static const double RETIRE_THRESHOLD;
/**
* The functor that is called to release an object. This is called by this
* class, when it is safe to do so, i.e., no thread accesses this object
* anymore.
* Each thread is assigned a thread index (starting with 0).
* Get the index of the current thread.
*/
embb::base::Function<void, GuardType> release_object_callback_;
static unsigned int GetCurrentThreadIndex();
/**
* Mapping from EMBB thread id to hazard pointer thread ids. Hazard pointer
* thread ids are in the range [0; accessor_count-1]. The position of an EMBB thread
* id in that array determines the respective hazard pointer thread id.
* The number of hazard pointers currently active.
*/
embb::base::Atomic<int>* thread_id_mapping_;
size_t active_hazard_pointer;
/**
* The hazard pointer guards, represented as array. Each thread has a fixed
* set of slots (guardsPerThread) within this array.
* Count of all hazard pointers.
*/
embb::base::Atomic<GuardType>* guards_;
size_t hazard_pointers;
/**
* \see threadLocalRetiredLists documentation
* The callback that is triggered when a retired guard can be
* freed. Usually, the user will call a free here.
*/
GuardType* thread_local_retired_lists_temp_;
embb::base::Function<void, GuardType> free_guard_callback;
/**
* A list of lists, represented as a single array. Each thread maintains a list
* of retired pointers: objects that have been logically released but are not
* yet freed because some thread still holds a guard on them.
* Checks if the current size of the retired list exceeds the threshold, so
* that each retired guard is checked for being not hazardous anymore.
*
* \return \c true if the threshold is exceeded, otherwise \c false.
*/
bool IsThresholdExceeded();
/**
* Gets the number of hazard pointers currently active
*
* \return Number of active hazard pointers
*/
size_t GetActiveHazardPointers();
/**
* Gets the hazard pointer entry for the current thread
*
* \return Hazard pointer entry for current thread
*/
HazardPointerThreadEntry_t&
GetHazardPointerElementForCurrentThread();
/**
* Threads might stop participating in hazard pointer management.
* This method helps all those threads by processing their retired lists.
*/
void HelpScan();
/**
* Checks the retired list of a hazard pointer entry for elements that can be
* freed, and executes the delete callback for those
* elements.
*/
void Scan(
HazardPointerThreadEntry_t* currentHazardPointerEntry
/**<[IN] Hazard pointer entry that should be checked for elements that
can be deleted*/);
public:
/**
* Gets the capacity of one retired list
*
* \waitfree
*/
GuardType* thread_local_retired_lists_;
size_t GetRetiredListMaxSize() const;
/**
* Each thread is assigned a thread index (starting with 0). Get the index of
* the current thread. Note that this is not the global index, but a hazard
* pointer class internal one. The user is free to define fewer accessors than
* the default number of threads. This is useful, as the number of accessors
* accounts quadratically for the memory consumption, so the user should have
* the possibility to avoid wasting memory when only a small, fixed number of
* accessors is needed.
* Initializes hazard pointer
*
* @return current (hazard pointer object local) thread index
*/
unsigned int GetObjectLocalThreadIndex();
/**
* Copy retired list \c sourceList to retired list \c targetList
*/
static void CopyRetiredList(
GuardType* source_list,
/**<[IN] the source retired list*/
GuardType* target_list,
/**<[IN] the target retired list*/
unsigned int single_retired_list_size,
/**<[IN] the size of a thread local retired list*/
GuardType undefined_guard
/**<[IN] the undefined guard (usually the NULL pointer)*/
);
static void UpdateRetiredList(
GuardType* retired_list,
/**<[IN] the old retired list*/
GuardType* updated_retired_list,
/**<[IN] the updated retired list*/
unsigned int retired_list_size,
/**<[IN] the size of a thread local retired list*/
GuardType to_retire,
/**<[IN] the element to retire*/
GuardType considered_hazard,
/**<[IN] the currently considered hazard*/
GuardType undefined_guard
/**<[IN] the undefined guard (usually the NULL pointer)*/
);
/**
* Initializes the hazard pointer object.
*
* \notthreadsafe
*
* \memory
* - Let \c t be the number of maximal threads determined by EMBB
* - Let \c g be the number of guards per thread
* - Let \c x be 1.25*t*g + 1
*
* We dynamically allocate \c x*(3*t+1) elements of size \c sizeof(void*).
*/
HazardPointer(
embb::base::Function<void, GuardType> free_guard_callback,
/**<[IN] Callback to the function that shall be called when a retired
guard can be deleted */
GuardType undefined_guard,
/**<[IN] The guard value denoting "not guarded"*/
int guards_per_thread
/**<[IN] Number of guards per thread*/);
/**
* Deallocates lists for hazard pointer management. Note that no objects
* currently in the retired lists are deleted. This is the responsibility
* of the user. Usually, HazardPointer manages pointers of an object pool.
* After destructing HazardPointer, the object pool is deleted, so that
* everything is properly cleaned up.
*/
~HazardPointer();
/**
* Announces that the current thread stops participating in hazard pointer
* management. The other threads now take care of its retired list.
*
* \waitfree
*/
void DeactivateCurrentThread();
/**
* Guards \c guardedElement with the guard at position \c guardPosition
*/
void GuardPointer(int guardPosition, GuardType guardedElement);
/**
* Enqueue a pointer for deletion. It is added to the retired list and
* deleted when no thread accesses it anymore.
*/
void EnqueuePointerForDeletion(GuardType guardedElement);
};
} // namespace internal
} // namespace containers
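For orientation (not part of the original sources), the renamed interface above
is typically used in a guard-validate-retire pattern, as in the queue and stack
changes further below. A minimal sketch, assuming a HazardPointer<Node*>
instance hp with one guard slot and a shared embb::base::Atomic<Node*> head
(Node, head, and hp are illustrative names):

    // Sketch only: Node, head, and hp are assumed names for illustration.
    Node* my_head;
    for (;;) {
      my_head = head;                       // read the shared pointer
      hp.GuardPointer(0, my_head);          // publish the guard in slot 0
      if (my_head == head) break;           // re-read to validate the guard
    }
    // ... my_head may now be dereferenced safely ...
    // After unlinking the node from the data structure:
    hp.GuardPointer(0, NULL);               // clear the guard
    hp.EnqueuePointerForDeletion(my_head);  // retire; freed via the callback
                                            // once no thread guards it anymore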
......
......@@ -77,12 +77,7 @@ LockFreeMPMCQueue<Type, ValuePool>::~LockFreeMPMCQueue() {
template< typename Type, typename ValuePool >
LockFreeMPMCQueue<Type, ValuePool>::LockFreeMPMCQueue(size_t capacity) :
capacity(capacity),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse. +1 for dummy node.
objectPool(
MPMCQueueNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(2) +
capacity + 1),
capacity(capacity),
// Disable "this is used in base member initializer" warning.
// We explicitly want this.
#ifdef EMBB_PLATFORM_COMPILER_MSVC
......@@ -94,7 +89,13 @@ delete_pointer_callback(*this,
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
hazardPointer(delete_pointer_callback, NULL, 2) {
hazardPointer(delete_pointer_callback, NULL, 2),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse. +1 for dummy node.
objectPool(
hazardPointer.GetRetiredListMaxSize()*
embb::base::Thread::GetThreadsMaxCount() +
capacity + 1) {
// Allocate dummy node to reduce the number of special cases to consider.
internal::LockFreeMPMCQueueNode<Type>* dummyNode = objectPool.Allocate();
// Initially, head and tail point to the dummy node.
......@@ -119,7 +120,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryEnqueue(Type const& element) {
for (;;) {
my_tail = tail;
hazardPointer.Guard(0, my_tail);
hazardPointer.GuardPointer(0, my_tail);
// Check if pointer is still valid after guarding.
if (my_tail != tail) {
......@@ -162,12 +163,12 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
Type data;
for (;;) {
my_head = head;
hazardPointer.Guard(0, my_head);
hazardPointer.GuardPointer(0, my_head);
if (my_head != head) continue;
my_tail = tail;
my_next = my_head->GetNext();
hazardPointer.Guard(1, my_next);
hazardPointer.GuardPointer(1, my_next);
if (head != my_head) continue;
if (my_next == NULL)
......@@ -186,7 +187,7 @@ bool LockFreeMPMCQueue<Type, ValuePool>::TryDequeue(Type & element) {
break;
}
hazardPointer.EnqueueForDeletion(my_head);
hazardPointer.EnqueuePointerForDeletion(my_head);
element = data;
return true;
}
......
......@@ -81,12 +81,13 @@ capacity(capacity),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
hazardPointer(delete_pointer_callback, NULL, 1),
// Object pool, size with respect to the maximum number of retired nodes not
// eligible for reuse:
objectPool(
StackNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(1) +
capacity),
hazardPointer(delete_pointer_callback, NULL, 1) {
hazardPointer.GetRetiredListMaxSize()*
embb::base::Thread::GetThreadsMaxCount() +
capacity) {
}
template< typename Type, typename ValuePool >
......@@ -127,7 +128,7 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
return false;
// Guard top_cached
hazardPointer.Guard(0, top_cached);
hazardPointer.GuardPointer(0, top_cached);
// Check if top is still top. If this is the case, it has not been
// retired yet (because before retiring that thing, the retiring thread
......@@ -143,16 +144,16 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) {
break;
} else {
// We continue with the next and can unguard top_cached
hazardPointer.Guard(0, NULL);
hazardPointer.GuardPointer(0, NULL);
}
}
Type data = top_cached->GetElement();
// We don't need to read from this reference anymore, unguard it
hazardPointer.Guard(0, NULL);
hazardPointer.GuardPointer(0, NULL);
hazardPointer.EnqueueForDeletion(top_cached);
hazardPointer.EnqueuePointerForDeletion(top_cached);
element = data;
return true;
......
......@@ -42,7 +42,7 @@ template<typename Type, Type Undefined, class PoolAllocator,
class TreeAllocator >
bool LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
IsLeaf(int node) {
if (node >= size_ - 1 && node <= 2 * size_ - 1) {
if (node >= size - 1 && node <= 2 * size - 1) {
return true;
}
return false;
......@@ -52,7 +52,7 @@ template<typename Type, Type Undefined, class PoolAllocator,
class TreeAllocator >
bool LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
IsValid(int node) {
return (node >= 0 && node <= 2 * size_ - 1);
return (node >= 0 && node <= 2 * size - 1);
}
template<typename Type, Type Undefined, class PoolAllocator,
......@@ -77,14 +77,14 @@ template<typename T, T Undefined, class PoolAllocator, class TreeAllocator >
int LockFreeTreeValuePool<T, Undefined, PoolAllocator, TreeAllocator>::
NodeIndexToPoolIndex(int node) {
assert(IsLeaf(node));
return(node - (size_ - 1));
return(node - (size - 1));
}
template<typename Type, Type Undefined, class PoolAllocator,
class TreeAllocator >
int LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
PoolIndexToNodeIndex(int index) {
int node = index + (size_ - 1);
int node = index + (size - 1);
assert(IsLeaf(node));
return node;
}
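As a side note (not from the original sources): the complete binary tree behind
this pool stores the free-element counters in its inner nodes and maps the
leaves to pool slots. For size = 4, the tree has 2*4 - 1 = 7 nodes numbered
0..6, the inner nodes are 0..2, and the leaves 3..6 correspond to pool indices
0..3, so the two conversions above are simply node = index + (size - 1) and
index = node - (size - 1):

    #include <cassert>

    // Illustrative check of the leaf <-> pool index mapping, for a pool with
    // size = 4 leaves (standalone arithmetic, not the pool class itself).
    int main() {
      const int size = 4;                        // number of leaves (power of two)
      const int pool_index = 2;
      const int node = pool_index + (size - 1);  // PoolIndexToNodeIndex
      assert(node == 5);                         // leaves occupy nodes 3..6
      assert(node - (size - 1) == pool_index);   // NodeIndexToPoolIndex
      assert(node >= size - 1 && node <= 2 * size - 1);  // IsLeaf condition
      return 0;
    }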
......@@ -100,7 +100,7 @@ template<typename T, T Undefined, class PoolAllocator, class TreeAllocator >
int LockFreeTreeValuePool<T, Undefined, PoolAllocator, TreeAllocator>::
GetParentNode(int node) {
int parent = (node - 1) / 2;
assert(parent >= 0 && parent < size_ - 1);
assert(parent >= 0 && parent < size - 1);
return parent;
}
......@@ -112,11 +112,11 @@ allocate_rec(int node, Type& element) {
if (IsLeaf(node)) {
int pool_index = NodeIndexToPoolIndex(node);
Type expected = pool_[pool_index];
Type expected = pool[pool_index];
if (expected == Undefined)
return -1;
if (pool_[pool_index].CompareAndSwap(expected, Undefined)) {
if (pool[pool_index].CompareAndSwap(expected, Undefined)) {
element = expected;
return pool_index;
}
......@@ -131,11 +131,11 @@ allocate_rec(int node, Type& element) {
// atomically decrement the value in the node if the result is greater than
// or equal to zero. This cannot be done atomically.
do {
current = tree_[node];
current = tree[node];
desired = current - 1;
if (desired < 0)
return -1;
} while (!tree_[node].CompareAndSwap(current, desired));
} while (!tree[node].CompareAndSwap(current, desired));
int leftResult = allocate_rec(GetLeftChildIndex(node), element);
if (leftResult != -1) {
......@@ -156,7 +156,7 @@ Fill(int node, int elementsToStore, int power2Value) {
if (IsLeaf(node))
return;
tree_[node] = elementsToStore;
tree[node] = elementsToStore;
int postPower2Value = power2Value >> 1;
......@@ -188,14 +188,14 @@ Free(Type element, int index) {
assert(element != Undefined);
// Put the element back
pool_[index].Store(element);
pool[index].Store(element);
assert(index >= 0 && index < size_);
assert(index >= 0 && index < size);
int node = PoolIndexToNodeIndex(index);
while (!IsRoot(node)) {
node = GetParentNode(node);
tree_[node].FetchAndAdd(1);
tree[node].FetchAndAdd(1);
}
}
......@@ -205,76 +205,37 @@ template< typename ForwardIterator >
LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
LockFreeTreeValuePool(ForwardIterator first, ForwardIterator last) {
// Number of elements to store
real_size_ = static_cast<int>(::std::distance(first, last));
real_size = static_cast<int>(::std::distance(first, last));
// Let k be smallest number so that real_size <= 2^k, size = 2^k
size_ = GetSmallestPowerByTwoValue(real_size_);
size = GetSmallestPowerByTwoValue(real_size);
// Size of binary tree without the leaves
tree_size_ = size_ - 1;
// make sure, signed values are not negative
assert(tree_size_ >= 0);
assert(real_size_ >= 0);
size_t tree_size_unsigned = static_cast<size_t>(tree_size_);
size_t real_size_unsigned = static_cast<size_t>(real_size_);
tree_size = size - 1;
// Pool stores elements of type T
pool_ = pool_allocator_.allocate(real_size_unsigned);
// invoke inplace new for each pool element
for (size_t i = 0; i != real_size_unsigned; ++i) {
new (&pool_[i]) embb::base::Atomic<Type>();
}
pool = poolAllocator.allocate(static_cast<size_t>(real_size));
// Tree holds the counter of not allocated elements
tree_ = tree_allocator_.allocate(tree_size_unsigned);
// invoke inplace new for each tree element
for (size_t i = 0; i != tree_size_unsigned; ++i) {
new (&tree_[i]) embb::base::Atomic<int>();
}
tree = treeAllocator.allocate(static_cast<size_t>(tree_size));
int i = 0;
// Store the elements from the range
for (ForwardIterator curIter(first); curIter != last; ++curIter) {
pool_[i++] = *curIter;
pool[i++] = *curIter;
}
// Initialize the binary tree without leaves (counters)
Fill(0, static_cast<int>(::std::distance(first, last)), size_);
Fill(0, static_cast<int>(::std::distance(first, last)), size);
}
template<typename Type, Type Undefined, class PoolAllocator,
class TreeAllocator >
LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
~LockFreeTreeValuePool() {
size_t tree_size_unsigned = static_cast<size_t>(tree_size_);
size_t real_size_unsigned = static_cast<size_t>(real_size_);
// invoke destructor for each pool element
for (size_t i = 0; i != real_size_unsigned; ++i) {
pool_[i].~Atomic();
}
pool_allocator_.deallocate(pool_, real_size_unsigned);
// invoke destructor for each tree element
for (size_t i = 0; i != tree_size_unsigned; ++i) {
tree_[i].~Atomic();
}
tree_allocator_.deallocate(tree_, tree_size_unsigned);
}
template<typename Type, Type Undefined, class PoolAllocator,
class TreeAllocator >
size_t LockFreeTreeValuePool<Type, Undefined, PoolAllocator, TreeAllocator>::
GetMinimumElementCountForGuaranteedCapacity(size_t capacity) {
// for this value pool, this is just capacity...
return capacity;
poolAllocator.deallocate(pool, static_cast<size_t>(real_size));
treeAllocator.deallocate(tree, static_cast<size_t>(tree_size));
}
} // namespace containers
......
......@@ -83,8 +83,7 @@ ReturningTrueIterator::operator!=(const self_type& rhs) {
template<class Type, typename ValuePool, class ObjectAllocator>
bool ObjectPool<Type, ValuePool, ObjectAllocator>::
IsContained(const Type &obj) const {
if ((&obj < &objects_array_[0]) ||
(&obj > &objects_array_[value_pool_size_ - 1])) {
if ((&obj < &objects[0]) || (&obj > &objects[capacity - 1])) {
return false;
} else {
return true;
......@@ -95,17 +94,17 @@ template<class Type, typename ValuePool, class ObjectAllocator>
int ObjectPool<Type, ValuePool, ObjectAllocator>::
GetIndexOfObject(const Type &obj) const {
assert(IsContained(obj));
return(static_cast<int>(&obj - &objects_array_[0]));
return(static_cast<int>(&obj - &objects[0]));
}
template<class Type, typename ValuePool, class ObjectAllocator>
Type* ObjectPool<Type, ValuePool, ObjectAllocator>::AllocateRaw() {
bool val;
int allocated_index = value_pool_.Allocate(val);
int allocated_index = p.Allocate(val);
if (allocated_index == -1) {
return NULL;
} else {
Type* ret_pointer = &(objects_array_[allocated_index]);
Type* ret_pointer = &(objects[allocated_index]);
return ret_pointer;
}
......@@ -113,17 +112,15 @@ Type* ObjectPool<Type, ValuePool, ObjectAllocator>::AllocateRaw() {
template<class Type, typename ValuePool, class ObjectAllocator>
size_t ObjectPool<Type, ValuePool, ObjectAllocator>::GetCapacity() {
return capacity_;
return capacity;
}
template<class Type, typename ValuePool, class ObjectAllocator>
ObjectPool<Type, ValuePool, ObjectAllocator>::ObjectPool(size_t capacity) :
capacity_(capacity),
value_pool_size_(
ValuePool::GetMinimumElementCountForGuaranteedCapacity(capacity)),
value_pool_(ReturningTrueIterator(0), ReturningTrueIterator(
value_pool_size_)),
objects_array_(object_allocator_.allocate(value_pool_size_)) {
capacity(capacity),
p(ReturningTrueIterator(0), ReturningTrueIterator(capacity)) {
// Allocate the objects (without construction, just get the memory)
objects = objectAllocator.allocate(capacity);
}
template<class Type, typename ValuePool, class ObjectAllocator>
......@@ -131,7 +128,7 @@ void ObjectPool<Type, ValuePool, ObjectAllocator>::Free(Type* obj) {
int index = GetIndexOfObject(*obj);
obj->~Type();
value_pool_.Free(true, index);
p.Free(true, index);
}
template<class Type, typename ValuePool, class ObjectAllocator>
......@@ -192,7 +189,7 @@ Type* ObjectPool<Type, ValuePool, ObjectAllocator>::Allocate(
template<class Type, typename ValuePool, class ObjectAllocator>
ObjectPool<Type, ValuePool, ObjectAllocator>::~ObjectPool() {
// Deallocate the objects
object_allocator_.deallocate(objects_array_, value_pool_size_);
objectAllocator.deallocate(objects, capacity);
}
} // namespace containers
} // namespace embb
......
......@@ -35,21 +35,21 @@ Free(Type element, int index) {
assert(element != Undefined);
// Just put back the element
pool_array_[index].Store(element);
pool[index].Store(element);
}
template<typename Type, Type Undefined, class Allocator >
int WaitFreeArrayValuePool<Type, Undefined, Allocator>::
Allocate(Type & element) {
for (int i = 0; i != size_; ++i) {
for (int i = 0; i != size; ++i) {
Type expected;
// If the memory cell is not available, go ahead
if (Undefined == (expected = pool_array_[i].Load()))
if (Undefined == (expected = pool[i].Load()))
continue;
// Try to get the memory cell
if (pool_array_[i].CompareAndSwap(expected, Undefined)) {
if (pool[i].CompareAndSwap(expected, Undefined)) {
// When the CAS was successful, this element is ours
element = expected;
return i;
......@@ -64,45 +64,23 @@ WaitFreeArrayValuePool<Type, Undefined, Allocator>::
WaitFreeArrayValuePool(ForwardIterator first, ForwardIterator last) {
size_t dist = static_cast<size_t>(std::distance(first, last));
size_ = static_cast<int>(dist);
// conversion may result in negative number. check!
assert(size_ >= 0);
size = static_cast<int>(dist);
// Use the allocator to allocate an array of size dist
pool_array_ = allocator_.allocate(dist);
// invoke inplace new for each pool element
for ( size_t i = 0; i != dist; ++i ) {
new (&pool_array_[i]) embb::base::Atomic<Type>();
}
pool = allocator.allocate(dist);
int i = 0;
// Store the elements of the range
for (ForwardIterator curIter(first); curIter != last; ++curIter) {
pool_array_[i++] = *curIter;
pool[i++] = *curIter;
}
}
template<typename Type, Type Undefined, class Allocator >
WaitFreeArrayValuePool<Type, Undefined, Allocator>::~WaitFreeArrayValuePool() {
// invoke destructor for each pool element
for (int i = 0; i != size_; ++i) {
pool_array_[i].~Atomic();
}
// free memory
allocator_.deallocate(pool_array_, static_cast<size_t>(size_));
allocator.deallocate(pool, (size_t)size);
}
template<typename Type, Type Undefined, class Allocator >
size_t WaitFreeArrayValuePool<Type, Undefined, Allocator>::
GetMinimumElementCountForGuaranteedCapacity(size_t capacity) {
// for this value pool, this is just capacity...
return capacity;
}
} // namespace containers
} // namespace embb
......
......@@ -113,17 +113,8 @@ class LockFreeMPMCQueue {
* least as many elements, maybe more.
*/
size_t capacity;
/**
* The object pool, used for lock-free memory allocation.
*
* Warning: the objectPool has to be initialized before the hazardPointer
* object to ensure that the hazardPointer object is destructed before the
* pool, as the hazardPointer object might return elements to the pool in its
* destructor. So the ordering of the members objectPool and hazardPointer is
* important here!
*/
ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
// Do not change the ordering of class local variables.
// Important for initialization.
/**
* Callback to the method that is called by hazard pointers if a pointer is
......@@ -133,17 +124,15 @@ class LockFreeMPMCQueue {
delete_pointer_callback;
/**
* Definition of the used hazard pointer type
* The hazard pointer object, used for memory management.
*/
typedef embb::containers::internal::HazardPointer
< internal::LockFreeMPMCQueueNode<Type>* >
MPMCQueueNodeHazardPointer_t;
embb::containers::internal::HazardPointer
< internal::LockFreeMPMCQueueNode<Type>* > hazardPointer;
/**
* The hazard pointer object, used for memory management.
* The object pool, used for lock-free memory allocation.
*/
MPMCQueueNodeHazardPointer_t hazardPointer;
ObjectPool< internal::LockFreeMPMCQueueNode<Type>, ValuePool > objectPool;
/**
* Atomic pointer to the head node of the queue
......
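The member reordering above relies on a C++ rule worth spelling out: non-static
data members are constructed in declaration order and destructed in reverse
order, regardless of how the constructor's initializer list is written. A
minimal sketch of that rule, with hypothetical types A, B, and Holder:

    #include <iostream>

    // Hypothetical types for illustration: members are constructed in the
    // order they are declared and destructed in reverse, which is why the
    // relative declaration order of hazardPointer and objectPool matters.
    struct A {
      A()  { std::cout << "A constructed\n"; }
      ~A() { std::cout << "A destructed\n"; }
    };
    struct B {
      B()  { std::cout << "B constructed\n"; }
      ~B() { std::cout << "B destructed\n"; }
    };
    struct Holder {
      A a;  // declared first  -> constructed first, destructed last
      B b;  // declared second -> constructed second, destructed first
      Holder() : a(), b() {}  // declaration order governs, not this list
    };

    int main() {
      Holder h;  // prints: A constructed, B constructed,
                 //         B destructed, A destructed
      return 0;
    }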
......@@ -187,6 +187,11 @@ class LockFreeStack {
delete_pointer_callback;
/**
* The hazard pointer object, used for memory management.
*/
internal::HazardPointer<internal::LockFreeStackNode<Type>*> hazardPointer;
/**
* The callback function, used to cleanup non-hazardous pointers.
* \see delete_pointer_callback
*/
......@@ -194,27 +199,10 @@ class LockFreeStack {
/**
* The object pool, used for lock-free memory allocation.
*
* Warning: the objectPool has to be initialized before the hazardPointer
* object to ensure that the hazardPointer object is destructed before the
* pool, as the hazardPointer object might return elements to the pool in its
* destructor. So the ordering of the members objectPool and hazardPointer is
* important here!
*/
ObjectPool< internal::LockFreeStackNode<Type>, ValuePool > objectPool;
/**
* Definition of the used hazard pointer type
*/
typedef internal::HazardPointer < internal::LockFreeStackNode<Type>* >
StackNodeHazardPointer_t;
/**
* The hazard pointer object, used for memory management.
*/
StackNodeHazardPointer_t hazardPointer;
/**
* Atomic pointer to the top node of the stack (element that is popped next)
*/
embb::base::Atomic<internal::LockFreeStackNode<Type>*> top;
......
......@@ -123,25 +123,22 @@ class LockFreeTreeValuePool {
LockFreeTreeValuePool& operator=(const LockFreeTreeValuePool&);
// See algorithm description above
int size_;
int size;
// See algorithm description above
int tree_size_;
int tree_size;
// See algorithm description above
int real_size_;
int real_size;
// The tree above the pool
embb::base::Atomic<int>* tree_;
embb::base::Atomic<int>* tree;
// The actual pool
embb::base::Atomic<Type>* pool_;
embb::base::Atomic<Type>* pool;
// respective allocator
PoolAllocator pool_allocator_;
// respective allocator
TreeAllocator tree_allocator_;
PoolAllocator poolAllocator;
TreeAllocator treeAllocator;
/**
* Computes smallest power of two fitting the specified value
......@@ -281,18 +278,6 @@ class LockFreeTreeValuePool {
);
/**
* Due to concurrency effects, a pool might provide fewer elements than it
* manages. However, usually one wants to guarantee a minimal capacity. The
* count of elements that must be given to the pool in order to guarantee \c
* capacity elements is computed using this function.
*
* \return count of indices the pool has to be initialized with
*/
static size_t GetMinimumElementCountForGuaranteedCapacity(
size_t capacity
/**< [IN] count of indices that shall be guaranteed */);
/**
* Destructs the pool.
*
* \notthreadsafe
......
......@@ -35,6 +35,7 @@
namespace embb {
namespace containers {
/**
* \defgroup CPP_CONTAINERS_POOLS Pools
* Concurrent pools
......@@ -61,29 +62,22 @@ class ObjectPool {
/**
* Allocator used to allocate elements of the object pool
*/
ObjectAllocator object_allocator_;
ObjectAllocator objectAllocator;
/**
* Capacity of the object pool
* Array holding the allocated object
*/
size_t capacity_;
Type* objects;
/**
* The size of the underlying value pool. This is also the size of the object
* array in this class. It is assumed, that the valuepool manages indices in
* range [0;value_pool_size_-1].
* Capacity of the object pool
*/
size_t value_pool_size_;
size_t capacity;
/**
* Underlying value pool
*/
ValuePool value_pool_;
/**
* Array holding the allocated object
*/
Type* objects_array_;
ValuePool p;
/**
* Helper providing a virtual iterator that just returns true in each
......
......@@ -39,30 +39,12 @@ namespace containers {
* \ingroup CPP_CONCEPT
* \{
* \par Description
* A value pool is a multi-set of elements, where each element has a unique,
* continuous (starting with 0) index. The elements cannot be modified and are
* given at construction time by providing first/last iterators.
*
* \par
* A value pool provides two primary operations: \c Allocate and \c Free. \c
* Allocate allocates an element/index "pair" (index via return, element via
* reference parameter) from the pool, and \c Free returns an element/index pair
* to the pool. To guarantee linearizability, \c element is not allowed to be
* modified between \c Allocate and \c Free. It is only allowed to free elements
* that have previously been allocated. The \c Allocate function does not
* guarantee an order on which indices are allocated. The count of elements that
* can be allocated with \c Allocate might be smaller than the count of
* elements the pool is initialized with. This might be due to implementation
* details and the resulting concurrency effects: for example, if indices are
* managed within a queue, one has to protect queue elements from concurrency
* effects (reuse and access). As long as a thread potentially accesses a node
* (and with that an index), the respective index cannot be given out to the
* user, even if it is logically no longer part of the pool. However, the user
* might want a certain number of indices to be guaranteed. For that, the
* static \c GetMinimumElementCountForGuaranteedCapacity method is used. The
* user passes the count of indices that shall be guaranteed by the pool to
* this method. The method returns the count of indices the pool has to be
* initialized with in order to guarantee this count of indices.
* A value pool is a fixed-size multiset of elements, where each element has a
* unique index. The elements cannot be modified and are given at construction
* time (by providing first/last iterators). A value pool provides two
* operations: \c Allocate and \c Free. \c Allocate removes an element from the
* pool, and \c Free returns an element to the pool. It is only allowed to
* free elements that have previously been allocated.
*
* \par Requirements
* - Let \c Pool be the pool class
......@@ -72,7 +54,6 @@ namespace containers {
* - Let \c i, j be forward iterators supporting \c std::distance.
* - Let \c c be an object of type \c Type&
* - Let \c e be a value of type \c int
* - Let \c f be a value of type \c int
*
* \par Valid Expressions
*
......@@ -91,7 +72,7 @@ namespace containers {
* the bottom element. The bottom element cannot be stored in the pool, it
* is exclusively used to mark empty cells. The pool initially contains
* \c std::distance(i, j) elements which are copied during construction from
* the range \c [i, j]. A concrete class satisfying the value pool concept
* the range \c [i, j). A concrete class satisfying the value pool concept
* might provide additional template parameters for specifying allocators.
* </td>
* </tr>
......@@ -99,10 +80,9 @@ namespace containers {
* <td>\code{.cpp} Allocate(c) \endcode</td>
* <td>\c int</td>
* <td>
* Allocates an element/index "pair" from the pool. Returns -1, if no
* element is available, i.e., the pool is empty. Otherwise, returns the
* index of the element in the pool. The value of the pool element is
* written into parameter reference \c c.
* Gets an element from the pool. Returns -1, if no element is available,
* i.e., the pool is empty. Otherwise, returns the index of the element in
* the pool. The value of the pool element is written into reference \c c.
* </td>
* </tr>
* <tr>
......@@ -113,15 +93,6 @@ namespace containers {
* \c Allocate. For each allocated element, \c Free must be called exactly
* once.</td>
* </tr>
* <tr>
* <td>\code{.cpp} GetMinimumElementCountForGuaranteedCapacity(f)
* \endcode</td>
* <td>\c void</td>
* <td>Static method, returns the count of indices, the user has to
* initialize the pool with in order to guarantee a count of \c f elements
* (irrespective of concurrency effects).
* </td>
* </tr>
* </table>
*
* \}
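A minimal usage sketch of this concept, assuming the WaitFreeArrayValuePool
declared further below; the header path is an assumption for illustration,
elements are ints, and -1 serves as the bottom element:

    #include <embb/containers/wait_free_array_value_pool.h>  // assumed path

    int main() {
      int elements[4] = {10, 20, 30, 40};
      // -1 is the bottom element and must not occur in the range.
      embb::containers::WaitFreeArrayValuePool<int, -1>
          pool(elements, elements + 4);

      int value;
      int index = pool.Allocate(value);  // >= 0 on success, -1 if pool is empty
      if (index != -1) {
        // ... use value, do not modify it ...
        pool.Free(value, index);         // return the exact element/index pair
      }
      return 0;
    }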
......@@ -145,10 +116,10 @@ template<typename Type,
class Allocator = embb::base::Allocator< embb::base::Atomic<Type> > >
class WaitFreeArrayValuePool {
private:
int size_;
embb::base::Atomic<Type>* pool_array_;
int size;
embb::base::Atomic<Type>* pool;
WaitFreeArrayValuePool();
Allocator allocator_;
Allocator allocator;
// Prevent copy-construction
WaitFreeArrayValuePool(const WaitFreeArrayValuePool&);
......@@ -179,18 +150,6 @@ class WaitFreeArrayValuePool {
);
/**
* Due to concurrency effects, a pool might provide fewer elements than it
* manages. However, usually one wants to guarantee a minimal capacity. The
* count of elements that must be given to the pool in order to guarantee \c
* capacity elements is computed using this function.
*
* \return count of indices the pool has to be initialized with
*/
static size_t GetMinimumElementCountForGuaranteedCapacity(
size_t capacity
/**< [IN] count of indices that shall be guaranteed */);
/**
* Destructs the pool.
*
* \notthreadsafe
......@@ -216,7 +175,7 @@ class WaitFreeArrayValuePool {
* Returns an element to the pool.
*
* \note The element must have been allocated with Allocate().
*
*
* \waitfree
*
* \see CPP_CONCEPTS_VALUE_POOL
......
......@@ -31,71 +31,24 @@
namespace embb {
namespace containers {
namespace test {
IntObjectTestPool::IntObjectTestPool(unsigned int pool_size) :
poolSize(pool_size) {
simplePoolObjects = static_cast<int*>(
embb::base::Allocation::Allocate(sizeof(int)*pool_size));
simplePool = static_cast<embb::base::Atomic<int>*> (
embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>)*
pool_size));
for (unsigned int i = 0; i != pool_size; ++i) {
// in-place new for each array cell
new (&simplePool[i]) embb::base::Atomic<int>;
}
for (unsigned int i = 0; i != pool_size; ++i) {
simplePool[i] = FREE_MARKER;
simplePoolObjects[i] = 0;
}
}
IntObjectTestPool::~IntObjectTestPool() {
embb::base::Allocation::Free(simplePoolObjects);
for (unsigned int i = 0; i != poolSize; ++i) {
// explicitly call the destructor for each array cell
simplePool[i].~Atomic();
}
embb::base::Allocation::Free(simplePool);
}
int* IntObjectTestPool::Allocate() {
for (unsigned int i = 0; i != poolSize; ++i) {
int expected = FREE_MARKER;
if (simplePool[i].CompareAndSwap
(expected, ALLOCATED_MARKER)) {
return &simplePoolObjects[i];
}
}
return 0;
}
void IntObjectTestPool::Release(int* object_pointer) {
int cell = object_pointer - simplePoolObjects;
simplePool[cell].Store(FREE_MARKER);
}
HazardPointerTest::HazardPointerTest() :
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4355)
#endif
delete_pointer_callback_(*this, &HazardPointerTest::DeletePointerCallback),
delete_pointer_callback(*this, &HazardPointerTest::DeletePointerCallback),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
object_pool_(NULL),
stack_(NULL),
hazard_pointer_(NULL),
n_threads_(static_cast<int>
object_pool(NULL),
stack(NULL),
hp(NULL),
n_threads(static_cast<int>
(partest::TestSuite::GetDefaultNumThreads())) {
n_elements_per_thread_ = 100;
n_elements_ = n_threads_*n_elements_per_thread_;
n_elements_per_thread = 100;
n_elements = n_threads*n_elements_per_thread;
embb::base::Function < void, embb::base::Atomic<int>* >
deletePointerCallback(
delete_pointer_callback(
*this,
&HazardPointerTest::DeletePointerCallback);
......@@ -106,52 +59,45 @@ delete_pointer_callback_(*this, &HazardPointerTest::DeletePointerCallback),
// placed, the pointer is not allowed to be deleted until the second thread
// removes this guard.
CreateUnit("HazardPointerTestThatGuardWorks").
Pre(&HazardPointerTest::HazardPointerTest1Pre, this).
Pre(&HazardPointerTest::HazardPointerTest1_Pre, this).
Add(
&HazardPointerTest::HazardPointerTest1ThreadMethod,
this, static_cast<size_t>(n_threads_)).
Post(&HazardPointerTest::HazardPointerTest1Post, this);
&HazardPointerTest::HazardPointerTest1_ThreadMethod,
this, static_cast<size_t>(n_threads)).
Post(&HazardPointerTest::HazardPointerTest1_Post, this);
}
void HazardPointerTest::HazardPointerTest1Pre() {
void HazardPointerTest::HazardPointerTest1_Pre() {
embb_internal_thread_index_reset();
object_pool_ =
embb::base::Allocation::
New<embb::containers::ObjectPool< embb::base::Atomic<int> > >
(static_cast<size_t>(n_elements_));
stack_ = embb::base::Allocation::
New<embb::containers::LockFreeStack< embb::base::Atomic<int>* > >
(static_cast<size_t>(n_elements_));
hazard_pointer_ = embb::base::Allocation::
New<embb::containers::internal::HazardPointer < embb::base::Atomic<int>* > >
(delete_pointer_callback_,
static_cast<embb::base::Atomic<int>*>(NULL),
object_pool = new embb::containers::ObjectPool< embb::base::Atomic<int> >
(static_cast<size_t>(n_elements));
stack = new embb::containers::LockFreeStack< embb::base::Atomic<int>* >
(static_cast<size_t>(n_elements));
hp = new embb::containers::internal::HazardPointer< embb::base::Atomic<int>*>
(delete_pointer_callback,
NULL,
1);
}
void HazardPointerTest::HazardPointerTest1Post() {
embb::base::Allocation::Delete(hazard_pointer_);
embb::base::Allocation::Delete(object_pool_);
embb::base::Allocation::Delete(stack_);
void HazardPointerTest::HazardPointerTest1_Post() {
delete object_pool;
delete stack;
delete hp;
}
void HazardPointerTest::HazardPointerTest1ThreadMethod() {
void HazardPointerTest::HazardPointerTest1_ThreadMethod() {
unsigned int thread_index;
embb_internal_thread_index(&thread_index);
for (int i = 0; i != n_elements_per_thread_; ++i) {
embb::base::Atomic<int>* allocated_object = object_pool_->Allocate(0);
for (int i = 0; i != n_elements_per_thread; ++i) {
embb::base::Atomic<int>* allocated_object = object_pool->Allocate(0);
hazard_pointer_->Guard(0, allocated_object);
hp->GuardPointer(0, allocated_object);
bool success = stack_->TryPush(allocated_object);
bool success = stack->TryPush(allocated_object);
PT_ASSERT(success == true);
embb::base::Atomic<int>* allocated_object_from_different_thread(0);
embb::base::Atomic<int>* allocated_object_from_different_thread;
int diff_count = 0;
......@@ -159,366 +105,51 @@ void HazardPointerTest::HazardPointerTest1ThreadMethod() {
bool success_pop;
while (
(success_pop = stack_->TryPop(allocated_object_from_different_thread))
(success_pop = stack->TryPop(allocated_object_from_different_thread))
== true
&& allocated_object_from_different_thread == allocated_object
) {
// try to make it probable to get an element from a different thread
// however, can be the same. Try 10000 times to get a different element.
//try to make it probable to get an element from a different thread
//however, can be the same. Try 10000 times to get a different element.
if (diff_count++ > 10000) {
same = true;
break;
}
bool success = stack_->TryPush(allocated_object_from_different_thread);
bool success = stack->TryPush(allocated_object_from_different_thread);
PT_ASSERT(success == true);
}
PT_ASSERT(success_pop == true);
allocated_object->Store(1);
hazard_pointer_->EnqueueForDeletion(allocated_object);
hp->EnqueuePointerForDeletion(allocated_object);
if (!same) {
hazard_pointer_->Guard(0, allocated_object_from_different_thread);
hp->GuardPointer(0, allocated_object_from_different_thread);
// if this holds, we were successful in guarding... otherwise we
// were too late, because the pointer has already been added
// to the retired list.
if (*allocated_object_from_different_thread == 0) {
// the pointer must not be deleted here!
vector_mutex_.Lock();
vector_mutex.Lock();
for (std::vector< embb::base::Atomic<int>* >::iterator
it = deleted_vector_.begin();
it != deleted_vector_.end();
it = deleted_vector.begin();
it != deleted_vector.end();
++it) {
PT_ASSERT(*it != allocated_object_from_different_thread);
}
vector_mutex_.Unlock();
vector_mutex.Unlock();
}
hazard_pointer_->Guard(0, NULL);
hp->GuardPointer(0, NULL);
}
}
}
void HazardPointerTest::DeletePointerCallback
(embb::base::Atomic<int>* to_delete) {
vector_mutex_.Lock();
deleted_vector_.push_back(to_delete);
vector_mutex_.Unlock();
}
void HazardPointerTest2::DeletePointerCallback(int* to_delete) {
test_pool_->Release(to_delete);
}
bool HazardPointerTest2::SetRelativeGuards() {
unsigned int thread_index;
embb_internal_thread_index(&thread_index);
unsigned int my_begin = guards_per_phread_count_*thread_index;
int guard_number = 0;
unsigned int alreadyGuarded = 0;
for (unsigned int i = my_begin; i != my_begin + guards_per_phread_count_;
++i) {
if (shared_guarded_[i] != 0) {
alreadyGuarded++;
guard_number++;
continue;
}
int * to_guard = shared_allocated_[i];
if (to_guard) {
hazard_pointer_->Guard(guard_number, to_guard);
// changed in the meantime?
if (to_guard == shared_allocated_[i].Load()) {
// guard was successful. Communicate to other threads.
shared_guarded_[i] = to_guard;
} else {
// reset the guard, couldn't guard...
hazard_pointer_->RemoveGuard(guard_number);
}
}
guard_number++;
}
return(alreadyGuarded == guards_per_phread_count_);
}
void HazardPointerTest2::HazardPointerTest2Master() {
// while the hazard pointer guard array is not full
int** allocatedLocal = static_cast<int**>(
embb::base::Allocation::Allocate(sizeof(int*)*guaranteed_capacity_pool_));
bool full = false;
while (!full) {
full = true;
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
if (shared_guarded_[i] == 0) {
full = false;
break;
}
}
// not all guards set
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
allocatedLocal[i] = test_pool_->Allocate();
shared_allocated_[i].Store(allocatedLocal[i]);
}
// set my hazards. We do not have to check, this must be successful
// here.
SetRelativeGuards();
// free
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
shared_allocated_[i].Store(0);
hazard_pointer_->EnqueueForDeletion(allocatedLocal[i]);
}
}
embb::base::Allocation::Free(allocatedLocal);
}
void HazardPointerTest2::HazardPointerTest2Slave() {
unsigned int thread_index;
embb_internal_thread_index(&thread_index);
while (!SetRelativeGuards()) {}
}
void HazardPointerTest2::HazardPointerTest2Pre() {
embb_internal_thread_index_reset();
current_master_ = 0;
sync1_ = 0;
sync2_ = 0;
// first the test pool has to be created
test_pool_ = embb::base::Allocation::New<IntObjectTestPool>
(pool_size_using_hazard_pointer_);
// after the pool has been created, we create the hp class
hazard_pointer_ = embb::base::Allocation::New <
embb::containers::internal::HazardPointer<int*> >
(delete_pointer_callback_, static_cast<int*>(NULL),
static_cast<int>(guards_per_phread_count_), n_threads);
shared_guarded_ = static_cast<embb::base::Atomic<int*>*>(
embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int*>)*
guaranteed_capacity_pool_));
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
// in-place new for each array cell
new (&shared_guarded_[i]) embb::base::Atomic < int* >;
}
shared_allocated_ = static_cast<embb::base::Atomic<int*>*>(
embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int*>)*
guaranteed_capacity_pool_));
for (unsigned int i = 0; i !=
guaranteed_capacity_pool_; ++i) {
// in-place new for each array cell
new (&shared_allocated_[i]) embb::base::Atomic < int* >;
}
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
shared_guarded_[i] = 0;
shared_allocated_[i] = 0;
}
}
void HazardPointerTest2::HazardPointerTest2Post() {
for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(n_threads)*
guards_per_phread_count_; ++i2) {
if (hazard_pointer_->thread_local_retired_lists_
[i2 + i*n_threads*guards_per_phread_count_] == NULL) {
// all retired lists must be completely filled
PT_ASSERT(false);
}
}
}
unsigned int checks = 0;
for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(n_threads)*
guards_per_phread_count_; ++i2) {
for (unsigned int j = 0; j != static_cast<unsigned int>(n_threads); ++j) {
for (unsigned int j2 = 0; j2 != static_cast<unsigned int>(n_threads)*
guards_per_phread_count_; ++j2) {
if (i2 == j2 && i == j)
continue;
// all retired elements have to be disjoint
PT_ASSERT(
hazard_pointer_->thread_local_retired_lists_
[i2 + i*n_threads*guards_per_phread_count_] !=
hazard_pointer_->thread_local_retired_lists_
[j2 + j*n_threads*guards_per_phread_count_]);
checks++;
}
}
}
}
// sanity check on the count of expected comparisons.
PT_ASSERT(
checks ==
n_threads*n_threads*guards_per_phread_count_ *
(n_threads*n_threads*guards_per_phread_count_ - 1));
std::vector< int* > additionallyAllocated;
// we should be able to still allocate the guaranteed capacity of
// elements from the pool.
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
int* allocated = test_pool_->Allocate();
// allocated is not allowed to be zero
PT_ASSERT(allocated != NULL);
// push to vector, to check that elements are disjoint and to release
// afterwards.
additionallyAllocated.push_back(allocated);
}
// the pool should now be empty
PT_ASSERT(test_pool_->Allocate() == NULL);
// release allocated elements...
for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
test_pool_->Release(additionallyAllocated[i]);
}
// the additionallyAllocated elements shall be disjoint
for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) {
for (unsigned int i2 = 0; i2 != additionallyAllocated.size(); ++i2) {
if (i == i2)
continue;
PT_ASSERT(additionallyAllocated[i] !=
additionallyAllocated[i2]);
}
}
// no allocated element should be in any retired list...
for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) {
for (unsigned int i = 0; i != static_cast<unsigned int>(n_threads); ++i) {
for (unsigned int i2 = 0; i2 != static_cast<unsigned int>(n_threads)*
guards_per_phread_count_; ++i2) {
PT_ASSERT(
hazard_pointer_->thread_local_retired_lists_
[i2 + i*n_threads*guards_per_phread_count_] !=
additionallyAllocated[a]);
}
}
}
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
// explicitly call the destructor for each array cell
shared_guarded_[i].~Atomic();
}
embb::base::Allocation::Free(shared_guarded_);
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
// explicitly call the destructor for each array cell
shared_allocated_[i].~Atomic();
}
embb::base::Allocation::Free(shared_allocated_);
embb::base::Allocation::Delete(hazard_pointer_);
// after deleting the hazard pointer object, all retired pointers have
// to be returned to the pool!
std::vector<int*> elementsInPool;
int* nextElement;
while ((nextElement = test_pool_->Allocate()) != NULL) {
for (unsigned int i = 0; i != elementsInPool.size(); ++i) {
// all elements need to be disjoint
PT_ASSERT(elementsInPool[i] != nextElement);
}
elementsInPool.push_back(nextElement);
}
// all elements should have been returned by the hp object, so we should be
// able to acquire all elements.
PT_ASSERT(elementsInPool.size() == pool_size_using_hazard_pointer_);
embb::base::Allocation::Delete(test_pool_);
}
void HazardPointerTest2::HazardPointerTest2ThreadMethod() {
for (;;) {
unsigned int thread_index;
embb_internal_thread_index(&thread_index);
if (thread_index == current_master_) {
HazardPointerTest2Master();
} else {
HazardPointerTest2Slave();
}
sync1_.FetchAndAdd(1);
// wait until cleanup thread signals to be finished
while (sync1_ != 0) {
int expected = n_threads;
int desired = FINISH_MARKER;
// select thread, responsible for cleanup
if (sync1_.CompareAndSwap(expected, desired)) {
// wipe arrays!
for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) {
shared_guarded_[i] = 0;
shared_allocated_[i] = 0;
}
// increase master
current_master_.FetchAndAdd(1);
sync2_ = 0;
sync1_.Store(0);
}
}
// wait for all threads to reach this position
sync2_.FetchAndAdd(1);
while (sync2_ != static_cast<unsigned int>(n_threads)) {}
// if each thread was master once, terminate.
if (current_master_ == static_cast<unsigned int>(n_threads)) {
return;
}
}
}
HazardPointerTest2::HazardPointerTest2() :
n_threads(static_cast<int>
(partest::TestSuite::GetDefaultNumThreads())),
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(push)
#pragma warning(disable:4355)
#endif
delete_pointer_callback_(
*this,
&HazardPointerTest2::DeletePointerCallback)
#ifdef EMBB_PLATFORM_COMPILER_MSVC
#pragma warning(pop)
#endif
{
guards_per_phread_count_ = 5;
guaranteed_capacity_pool_ = guards_per_phread_count_*n_threads;
pool_size_using_hazard_pointer_ = guaranteed_capacity_pool_ +
guards_per_phread_count_*n_threads*n_threads;
embb::base::Thread::GetThreadsMaxCount();
CreateUnit("HazardPointerTestSimulateMemoryWorstCase").
Pre(&HazardPointerTest2::HazardPointerTest2Pre, this).
Add(
&HazardPointerTest2::HazardPointerTest2ThreadMethod,
this, static_cast<size_t>(n_threads)).
Post(&HazardPointerTest2::HazardPointerTest2Post, this);
vector_mutex.Lock();
deleted_vector.push_back(to_delete);
vector_mutex.Unlock();
}
} // namespace test
} // namespace containers
} // namespace embb
} // namespace test
} // namespace containers
} // namespace embb
......@@ -36,112 +36,32 @@
namespace embb {
namespace containers {
namespace test {
/**
* @brief a very simple wait-free object pool implementation to have tests
* being independent of the EMBB object pool implementation.
*/
class IntObjectTestPool {
class HazardPointerTest : public partest::TestCase {
private:
int* simplePoolObjects;
embb::base::Atomic<int>* simplePool;
public:
static const int ALLOCATED_MARKER = 1;
static const int FREE_MARKER = 0;
unsigned int poolSize;
explicit IntObjectTestPool(unsigned int pool_size);
embb::base::Function<void, embb::base::Atomic<int>*> delete_pointer_callback;
~IntObjectTestPool();
/**
* Allocate object from the pool
*
* @return the allocated object
*/
int* Allocate();
//used to allocate random stuff, we will just use the pointers, not the
//contents
embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool;
/**
* Return an element to the pool
*
* @param objectPointer the object to be freed
*/
void Release(int* object_pointer);
};
//used to move pointer between threads
embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack;
embb::base::Mutex vector_mutex;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>* hp;
std::vector< embb::base::Atomic<int>* > deleted_vector;
int n_threads;
int n_elements_per_thread;
int n_elements;
class HazardPointerTest : public partest::TestCase {
public:
/**
* Adds test methods.
*/
HazardPointerTest();
void HazardPointerTest1Pre();
void HazardPointerTest1Post();
void HazardPointerTest1ThreadMethod();
void HazardPointerTest1_Pre();
void HazardPointerTest1_Post();
void HazardPointerTest1_ThreadMethod();
void DeletePointerCallback(embb::base::Atomic<int>* to_delete);
private:
embb::base::Function<void, embb::base::Atomic<int>*> delete_pointer_callback_;
//used to allocate random stuff, we will just use the pointers, not the
//contents
embb::containers::ObjectPool< embb::base::Atomic<int> >* object_pool_;
//used to move pointer between threads
embb::containers::LockFreeStack< embb::base::Atomic<int>* >* stack_;
embb::base::Mutex vector_mutex_;
embb::containers::internal::HazardPointer<embb::base::Atomic<int>*>*
hazard_pointer_;
std::vector< embb::base::Atomic<int>* > deleted_vector_;
int n_threads_;
int n_elements_per_thread_;
int n_elements_;
};
class HazardPointerTest2 : public partest::TestCase {
public:
void DeletePointerCallback(int* to_delete);
bool SetRelativeGuards();
void HazardPointerTest2Master();
void HazardPointerTest2Slave();
void HazardPointerTest2Pre();
void HazardPointerTest2Post();
void HazardPointerTest2ThreadMethod();
HazardPointerTest2();
private:
// number of threads, participating in that test
int n_threads;
embb::base::Function<void, int*> delete_pointer_callback_;
// the thread id of the master
embb::base::Atomic<unsigned int> current_master_;
// variables to synchronize threads. At each point in time there is one
// master; the master changes each round until each thread was master once.
embb::base::Atomic<int> sync1_;
embb::base::Atomic<unsigned int> sync2_;
unsigned int guards_per_phread_count_;
unsigned int guaranteed_capacity_pool_;
unsigned int pool_size_using_hazard_pointer_;
// The threads write here, if they guarded an object successfully. Used to
// determine when all allocated objects were guarded successfully.
embb::base::Atomic<int*>* shared_guarded_;
// This array is used by the master, to communicate and share what he has
// allocated with the slaves.
embb::base::Atomic<int*>* shared_allocated_;
// Reference to the object pool
IntObjectTestPool* test_pool_;
embb::containers::internal::HazardPointer<int*>* hazard_pointer_;
static const int FINISH_MARKER = -1;
};
} // namespace test
} // namespace containers
......
......@@ -55,7 +55,6 @@ using embb::containers::test::HazardPointerTest;
using embb::containers::test::QueueTest;
using embb::containers::test::StackTest;
using embb::containers::test::ObjectPoolTest;
using embb::containers::test::HazardPointerTest2;
PT_MAIN("Data Structures C++") {
unsigned int max_threads = static_cast<unsigned int>(
......@@ -65,7 +64,6 @@ PT_MAIN("Data Structures C++") {
PT_RUN(PoolTest< WaitFreeArrayValuePool<int COMMA -1> >);
PT_RUN(PoolTest< LockFreeTreeValuePool<int COMMA -1> >);
PT_RUN(HazardPointerTest);
PT_RUN(HazardPointerTest2);
PT_RUN(QueueTest< WaitFreeSPSCQueue< ::std::pair<size_t COMMA int> > >);
PT_RUN(QueueTest< LockFreeMPMCQueue< ::std::pair<size_t COMMA int> >
COMMA true COMMA true >);
......
......@@ -39,7 +39,7 @@
#define NUM_SLICES 8
#define TEST_COUNT 12
typedef embb::dataflow::Network<NUM_SLICES> MyNetwork;
typedef embb::dataflow::Network<8> MyNetwork;
typedef MyNetwork::ConstantSource< int > MyConstantSource;
typedef MyNetwork::Source< int > MySource;
typedef MyNetwork::SerialProcess< MyNetwork::Inputs<int>::Type,
......@@ -156,7 +156,9 @@ void SimpleTest::TestBasic() {
core_set,
1024, // max tasks (default: 1024)
128, // max groups (default: 128)
num_cores, // max queues (default: 16)
// Currently needs to be initialized
// with (max_queues + 1), see defect embb449
num_cores + 1, // max queues (default: 16)
1024, // queue capacity (default: 1024)
4); // num priorities (default: 4)
......
......@@ -71,7 +71,7 @@ mtapi_uint_t embb_mtapi_id_pool_allocate(embb_mtapi_id_pool_t * that) {
/* acquire position to fetch id from */
mtapi_uint_t id_position = that->get_id_position;
that->get_id_position++;
if (that->capacity < that->get_id_position) {
if (that->capacity <= that->get_id_position) {
that->get_id_position = 0;
}
......@@ -97,7 +97,7 @@ void embb_mtapi_id_pool_deallocate(
/* acquire position to put id to */
mtapi_uint_t id_position = that->put_id_position;
that->put_id_position++;
if (that->capacity < that->put_id_position) {
if (that->capacity <= that->put_id_position) {
that->put_id_position = 0;
}
......
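The two hunks above change the wrap-around check from "<" to "<=": after the
post-increment, a position is valid only in the range [0, capacity-1], so it
must be reset as soon as it reaches capacity. A standalone illustration of the
corrected arithmetic (not the pool code itself):

    #include <cassert>

    // Standalone illustration of the corrected wrap-around check:
    // valid positions are 0 .. capacity-1.
    int main() {
      unsigned int capacity = 4;
      unsigned int position = 3;
      position++;                  // position == 4, one past the last valid slot
      if (capacity <= position) {  // the old check (capacity < position) would
        position = 0;              // not trigger here and leave position == 4
      }
      assert(position == 0);
      return 0;
    }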
/*
* Copyright (c) 2014-2015, Siemens AG. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <embb_mtapi_test_id_pool.h>
#include <vector>
IdPoolTest::IdPoolTest() {
CreateUnit("mtapi id pool test single threaded").
Add(&IdPoolTest::TestBasic, this, 1, 1000).
Pre(&IdPoolTest::TestBasicPre, this).
Post(&IdPoolTest::TestBasicPost, this);
CreateUnit("mtapi id pool test concurrent").
Add(&IdPoolTest::TestParallel, this, concurrent_accessors_id_pool_2
, 20).
Post(&IdPoolTest::TestParallelPost, this).
Pre(&IdPoolTest::TestParallelPre, this);
}
void IdPoolTest::TestParallel() {
// allocate ID_ELEMENTS_PER_ACCESSOR elements. Each test thread is
// guaranteed to be able to allocate this amount of elements.
TestAllocateDeallocateNElementsFromPool(id_pool_parallel,
id_elements_per_accessor);
}
void IdPoolTest::TestParallelPre() {
// create second id pool with CONCURRENT_ACCESSORS_ID_POOL_2*
// ID_ELEMENTS_PER_ACCESSOR elements
embb_mtapi_id_pool_initialize(&id_pool_parallel,
concurrent_accessors_id_pool_2*id_elements_per_accessor);
}
void IdPoolTest::TestParallelPost() {
// after the parallel tests, try to again allocate and deallocate all
// elements sequentially.
TestAllocateDeallocateNElementsFromPool(id_pool_parallel,
concurrent_accessors_id_pool_2*id_elements_per_accessor, true);
// finalize pool
embb_mtapi_id_pool_finalize(&id_pool_parallel);
}
void IdPoolTest::TestBasic() {
TestAllocateDeallocateNElementsFromPool(id_pool, id_pool_size_1, true);
}
void IdPoolTest::TestBasicPre() {
// create id pool with ID_POOL_SIZE_1 elements
embb_mtapi_id_pool_initialize(&id_pool, id_pool_size_1);
}
void IdPoolTest::TestBasicPost() {
// finalize pool
embb_mtapi_id_pool_finalize(&id_pool);
}
void IdPoolTest::TestAllocateDeallocateNElementsFromPool(
embb_mtapi_id_pool_t &pool,
int count_elements,
bool empty_check) {
std::vector<unsigned int> allocated;
for (int i = 0; i != count_elements; ++i) {
allocated.push_back(embb_mtapi_id_pool_allocate(&pool));
}
// the allocated elements should be pairwise distinct and never the invalid element
for (unsigned int x = 0; x != allocated.size(); ++x) {
PT_ASSERT(allocated[x] != EMBB_MTAPI_IDPOOL_INVALID_ID);
for (unsigned int y = 0; y != allocated.size(); ++y) {
if (x == y) {
continue;
}
PT_ASSERT(allocated[x] != allocated[y]);
}
}
// now the id pool should be empty... try ten times to get an id,
// we should always get the invalid element
if (empty_check) {
for (int i = 0; i != 10; ++i) {
PT_ASSERT_EQ(embb_mtapi_id_pool_allocate(&pool),
static_cast<unsigned int>(EMBB_MTAPI_IDPOOL_INVALID_ID)
)
}
}
// now return allocated elements in a shuffled manner.
::std::random_shuffle(allocated.begin(), allocated.end());
for (int i = 0; i != count_elements; ++i) {
embb_mtapi_id_pool_deallocate(&pool,
allocated[static_cast<unsigned int>(i)]);
}
}
/*
* Copyright (c) 2014-2015, Siemens AG. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_
#define MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_
#include <partest/partest.h>
#include <embb_mtapi_id_pool_t.h>
// for shuffling a vector
#include <algorithm>
class IdPoolTest : public partest::TestCase {
public:
embb_mtapi_id_pool_t id_pool;
embb_mtapi_id_pool_t id_pool_parallel;
IdPoolTest();
private:
static const unsigned int id_pool_size_1 = 100;
static const unsigned int concurrent_accessors_id_pool_2 = 10;
static const unsigned int id_elements_per_accessor = 10;
/**
* We create a pool of size number_accessors*elements_per_accessor, so that
* at any time each thread is guaranteed to be able to allocate
* elements_per_accessor elements.
* We create number_accessors threads, where each thread iteratively
* allocates and frees elements_per_accessor elements, which in each case
* has to be successful. Additionally, the sanity checks from the basic tests
* are repeated. The TestParallelPost function also repeats all
* sequential tests.
*/
void TestParallel();
void TestParallelPre();
void TestParallelPost();
/**
* Create a pool of size N. We repeatedly allocate and free N elements, check
* if the pool always returns distinct ids and check that the pool never
* returns the invalid element if the pool is not empty. Check that the
* invalid element is returned if the pool is empty.
*/
void TestBasic();
void TestBasicPre();
void TestBasicPost();
static void TestAllocateDeallocateNElementsFromPool(
embb_mtapi_id_pool_t &pool,
int count_elements,
bool empty_check = false);
};
#endif // MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_
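For reference, a minimal sketch of the C id pool API exercised by this test;
the calls and constants below are the ones appearing in the test code above:

    #include <embb_mtapi_id_pool_t.h>

    int main() {
      embb_mtapi_id_pool_t pool;
      embb_mtapi_id_pool_initialize(&pool, 4);     // pool managing 4 ids

      mtapi_uint_t id = embb_mtapi_id_pool_allocate(&pool);
      if (id != EMBB_MTAPI_IDPOOL_INVALID_ID) {
        // ... use the id ...
        embb_mtapi_id_pool_deallocate(&pool, id);  // return each id exactly once
      }

      embb_mtapi_id_pool_finalize(&pool);
      return 0;
    }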
......@@ -37,9 +37,6 @@
#include <embb_mtapi_test_group.h>
#include <embb_mtapi_test_queue.h>
#include <embb_mtapi_test_error.h>
#include <embb_mtapi_test_id_pool.h>
#include <embb/base/c/memory_allocation.h>
PT_MAIN("MTAPI C") {
embb_log_set_log_level(EMBB_LOG_LEVEL_NONE);
......@@ -51,7 +48,4 @@ PT_MAIN("MTAPI C") {
PT_RUN(InitFinalizeTest);
PT_RUN(GroupTest);
PT_RUN(QueueTest);
PT_RUN(IdPoolTest);
PT_EXPECT(embb_get_bytes_allocated() == 0);
}
......@@ -5,10 +5,14 @@ file(GLOB_RECURSE EMBB_MTAPI_CPP_HEADERS "include/*.h")
file(GLOB_RECURSE EMBB_MTAPI_CPP_TEST_SOURCES "test/*.cc" "test/*.h")
if (USE_AUTOMATIC_INITIALIZATION STREQUAL ON)
message("-- Automatic initialization enabled (default)")
set(MTAPI_CPP_AUTOMATIC_INITIALIZE 1)
else()
set(MTAPI_CPP_AUTOMATIC_INITIALIZE 0)
message("-- Automatic initialization disabled")
endif()
message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)")
# Execute the GroupSources macro
include(${CMAKE_SOURCE_DIR}/CMakeCommon/GroupSourcesMSVC.cmake)
......
......@@ -5,10 +5,13 @@ file(GLOB_RECURSE EMBB_TASKS_CPP_HEADERS "include/*.h")
file(GLOB_RECURSE EMBB_TASKS_CPP_TEST_SOURCES "test/*.cc" "test/*.h")
if (USE_AUTOMATIC_INITIALIZATION STREQUAL ON)
message("-- Automatic initialization enabled (default)")
set(TASKS_CPP_AUTOMATIC_INITIALIZE 1)
else()
set(TASKS_CPP_AUTOMATIC_INITIALIZE 0)
message("-- Automatic initialization disabled")
endif()
message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)")
configure_file("include/embb/tasks/internal/cmake_config.h.in"
"include/embb/tasks/internal/cmake_config.h")
......
......@@ -78,19 +78,13 @@ void TaskTest::TestBasic() {
PT_EXPECT_EQ(policy.GetPriority(), 0u);
policy.AddWorker(0u);
PT_EXPECT_EQ(policy.GetAffinity(), 1u);
if (policy.GetCoreCount() > 1) {
policy.AddWorker(1u);
PT_EXPECT_EQ(policy.GetAffinity(), 3u);
}
policy.AddWorker(1u);
PT_EXPECT_EQ(policy.GetAffinity(), 3u);
policy.RemoveWorker(0u);
PT_EXPECT_EQ(policy.GetAffinity(), 2u);
PT_EXPECT_EQ(policy.IsSetWorker(0), false);
PT_EXPECT_EQ(policy.IsSetWorker(1), true);
if (policy.GetCoreCount() > 1) {
PT_EXPECT_EQ(policy.GetAffinity(), 2u);
PT_EXPECT_EQ(policy.IsSetWorker(1), true);
}
std::string test;
embb::tasks::Task task = node.Spawn(
embb::base::Bind(
......