diff --git a/CHANGELOG.md b/CHANGELOG.md index b4fa4ed..6a0946d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,37 @@ Embedded Multicore Building Blocks (EMB²) ========================================= +Version 0.3.1 +------------- + +### Features: +- None + +### Changes and improvements: +- Removed one function argument from algorithms::Invoke +- Added "explicit" specifier to base type constructor of Atomic +- Added "const" qualifier to dereference operator and member access operator of AtomicPointer<> +- Changed AtomicBase<>::CompareAndSwap to atomically return expected value +- Replaced constant in dataflow_cpp_test_simple.cc with corresponding macro +- Added initialization of atomic variable in hazard_pointer_test.cc to avoid warning with GCC 5.1 +- Changed initial value of allocated_object_from_different_thread +- Added tests for ID pool and a check for memory leaks +- Updated unit test for UniqueLock::Swap + +### Bug fixes: +- Fixed implementation of ID pool (provided fewer elements than specified by capacity) +- Fixed unsigned overflow bug in timed wait function of condition variables +- Fixed implementation of UniqueLock::Swap + +### Build system: +- Improved CMake output for automatic initialization option +- Fixed cpplint and unsigned/signed warnings + +### Documentation: +- Fixed documentation of UniqueLock class +- Updated README file + + Version 0.3.0 ------------- diff --git a/CMakeLists.txt b/CMakeLists.txt index ed453d0..281a772 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,7 +28,7 @@ cmake_minimum_required (VERSION 2.8.9) # Version number set (EMBB_BASE_VERSION_MAJOR 0) set (EMBB_BASE_VERSION_MINOR 3) -set (EMBB_BASE_VERSION_PATCH 0) +set (EMBB_BASE_VERSION_PATCH 1) # Fix compilation for CMake versions >= 3.1 # @@ -59,7 +59,9 @@ IF(NOT OpenCL_FOUND) MESSAGE( STATUS "OpenCL is not there, will build without MTAPI OpenCL Plugin."
) ENDIF() - +# Give the user the possibility to append compiler flags +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CMAKE_CXX_FLAGS}") +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CMAKE_C_FLAGS}") if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "Release" CACHE STRING @@ -100,6 +102,13 @@ else() endif() message(" (set with command line option -DWARNINGS_ARE_ERRORS=ON/OFF)") +if (USE_AUTOMATIC_INITIALIZATION STREQUAL ON) + message("-- MTAPI/Tasks automatic initialization enabled (default)") +else() + message("-- MTAPI/Tasks automatic initialization disabled") +endif() +message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)") + include(CMakeCommon/SetCompilerFlags.cmake) SetGNUCompilerFlags(compiler_libs compiler_flags) SetVisualStudioCompilerFlags(compiler_libs compiler_flags) diff --git a/README.md b/README.md index 7996d94..58efb20 100644 --- a/README.md +++ b/README.md @@ -270,8 +270,8 @@ If you want to use the C++ functionalities of EMB², you have to link the following libraries (names will be different on Windows and on Linux) in the given order: - embb_base, embb_base_cpp, embb_mtapi_c, embb_mtapi_cpp, embb_containers_cpp, - embb_algorithms_cpp, embb_dataflow_cpp + embb_dataflow_cpp, embb_algorithms_cpp, embb_containers_cpp, + embb_mtapi_cpp, embb_mtapi_c, embb_base_cpp, embb_base_c The C++ header files can be included as follows: @@ -284,7 +284,7 @@ The C++ header files can be included as follows: The following libraries have to be linked in the given order: - embb_base_c, mtapi_c + embb_mtapi_c, embb_base_c The C header files can be included as follows: @@ -323,6 +323,8 @@ Known Bugs and Limitations is bounded by a predefined but modifiable constant (see functions embb_thread_get_max_count() / embb_thread_set_max_count() and class embb::base::Thread). +- While MTAPI fully supports heterogeneous systems, the algorithms and + dataflow components are currently limited to homogeneous systems. Development and Contribution diff --git a/algorithms_cpp/include/embb/algorithms/invoke.h b/algorithms_cpp/include/embb/algorithms/invoke.h index 10dd818..a3884a4 100644 --- a/algorithms_cpp/include/embb/algorithms/invoke.h +++ b/algorithms_cpp/include/embb/algorithms/invoke.h @@ -49,33 +49,37 @@ typedef embb::base::Function<void> InvokeFunctionType; #ifdef DOXYGEN /** - * Spawns one to ten function objects at once and runs them in parallel. + * Spawns two to ten function objects at once and runs them in parallel. * * Blocks until all of them are done. * * \ingroup CPP_ALGORITHMS_INVOKE */ -template<typename Function1, ...> +template<typename Function1, typename Function2, ...> void Invoke( Function1 func1, /**< [in] First function object to invoke */ + Function2 func2, + /**< [in] Second function object to invoke */ ...); /** -* Spawns one to ten function objects at once and runs them in parallel using the +* Spawns two to ten function objects at once and runs them in parallel using the * given embb::mtapi::ExecutionPolicy. * * Blocks until all of them are done.
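* * A brief usage sketch (WorkA and WorkB are hypothetical parameterless functions, named here for illustration only): * \code * embb::tasks::ExecutionPolicy policy; * embb::algorithms::Invoke(WorkA, WorkB, policy); * \endcode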
* * \ingroup CPP_ALGORITHMS_INVOKE */ -template<typename Function1, ...> +template<typename Function1, typename Function2, ...> void Invoke( Function1 func1, /**< [in] Function object to invoke */ + Function2 func2, + /**< [in] Second function object to invoke */ ..., - const embb::mtapi::ExecutionPolicy & policy - /**< [in] embb::mtapi::ExecutionPolicy to use */ + const embb::tasks::ExecutionPolicy & policy + /**< [in] embb::tasks::ExecutionPolicy to use */ ); #else // DOXYGEN @@ -118,13 +122,6 @@ class TaskWrapper { }; } // namespace internal -template<typename Function1> -void Invoke( - Function1 func1, - const embb::tasks::ExecutionPolicy& policy) { - internal::TaskWrapper<Function1> wrap1(func1, policy); -} - template<typename Function1, typename Function2> void Invoke( Function1 func1, @@ -290,12 +287,6 @@ template wrap10(func10, policy); } -template<typename Function1> -void Invoke( - Function1 func1) { - Invoke(func1, embb::tasks::ExecutionPolicy()); -} - template<typename Function1, typename Function2> void Invoke( Function1 func1, diff --git a/algorithms_cpp/test/invoke_test.cc b/algorithms_cpp/test/invoke_test.cc index 361e73a..1481747 100644 --- a/algorithms_cpp/test/invoke_test.cc +++ b/algorithms_cpp/test/invoke_test.cc @@ -44,7 +44,6 @@ static void Invocable10() {} void InvokeTest::Test() { using embb::algorithms::Invoke; - Invoke(&Invocable1); Invoke(&Invocable1, &Invocable2); Invoke(&Invocable1, &Invocable2, &Invocable3); Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4); @@ -61,4 +60,22 @@ void InvokeTest::Test() { &Invocable6, &Invocable7, &Invocable8, &Invocable9); Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, &Invocable6, &Invocable7, &Invocable8, &Invocable9, &Invocable10); + + embb::tasks::ExecutionPolicy policy; + Invoke(&Invocable1, &Invocable2, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, + policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, + &Invocable6, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, + &Invocable6, &Invocable7, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, + &Invocable6, &Invocable7, &Invocable8, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, + &Invocable6, &Invocable7, &Invocable8, &Invocable9, policy); + Invoke(&Invocable1, &Invocable2, &Invocable3, &Invocable4, &Invocable5, + &Invocable6, &Invocable7, &Invocable8, &Invocable9, &Invocable10, + policy); } diff --git a/base_c/src/condition_variable.c b/base_c/src/condition_variable.c index 81f8c3b..575bb33 100644 --- a/base_c/src/condition_variable.c +++ b/base_c/src/condition_variable.c @@ -83,8 +83,8 @@ int embb_condition_wait_until(embb_condition_t* condition_var, embb_time_t now; embb_time_now(&now); /* Check if absolute timepoint (in milliseconds) still is in the future */ - if (time->seconds * 1000 + time->nanoseconds / 1000000 - - now.seconds * 1000 - now.nanoseconds / 1000000 > 0) { + if ((time->seconds * 1000 + time->nanoseconds / 1000000) + > (now.seconds * 1000 + now.nanoseconds / 1000000)) { /* Convert to (unsigned type) milliseconds and round up */ DWORD time_diff = (DWORD) ( time->seconds * 1000 + time->nanoseconds / 1000000 diff --git a/base_c/src/internal/thread_index.c b/base_c/src/internal/thread_index.c index 80f6d1f..414559b 100644 --- a/base_c/src/internal/thread_index.c +++
b/base_c/src/internal/thread_index.c @@ -128,6 +128,20 @@ void embb_internal_thread_index_set_max(unsigned int max) { *embb_max_number_thread_indices() = max; } +/** + * \pre the calling thread is the only active thread + * + * \post the thread indices count and the calling thread's index are reset + */ void embb_internal_thread_index_reset() { + /** This function is only called in tests, usually when all other threads + * except the main thread have terminated. However, the main thread may still + * have its old index value stored in its thread-local storage. As the counter + * is reset, that index could additionally be assigned to another thread, + * which may lead to hard-to-detect bugs. Therefore, the thread-local + * thread id is reset here. + */ + embb_internal_thread_index_var = UINT_MAX; + embb_counter_init(embb_thread_index_counter()); -} +} \ No newline at end of file diff --git a/base_c/test/condition_var_test.cc b/base_c/test/condition_var_test.cc index f292d3c..3a526d2 100644 --- a/base_c/test/condition_var_test.cc +++ b/base_c/test/condition_var_test.cc @@ -38,7 +38,7 @@ ConditionVarTest::ConditionVarTest() embb_condition_init(&cond_wait_); embb_mutex_init(&mutex_cond_wait_, EMBB_MUTEX_PLAIN); - CreateUnit("Timed wait timouts") + CreateUnit("Timed wait timeouts") .Add(&ConditionVarTest::TestTimedWaitTimeouts, this); if (num_threads_ >= 2) { CreateUnit("Condition Notify Test") @@ -64,10 +64,10 @@ void ConditionVarTest::TestNotify() { while (embb_counter_get(&counter_) < static_cast<unsigned int>(num_threads_-1)) - {} // all threads entered critical section + {} // All threads entered critical section embb_mutex_lock(&mutex_cond_notify_); embb_mutex_unlock(&mutex_cond_notify_); - // All threads called wait on the condition (Even last thread) + // All threads called wait on the condition (even last thread) embb_counter_init(&counter_); @@ -75,7 +75,7 @@ void ConditionVarTest::TestNotify() { embb_mutex_lock(&mutex_cond_wait_); embb_condition_wait_for(&cond_wait_, &mutex_cond_wait_, &duration); while (embb_counter_get(&counter_) == 0) - {} //if hangs here signal has not succeded + {} // If test hangs here, signalling has not succeeded PT_ASSERT_EQ_MSG(embb_counter_get(&counter_), static_cast<unsigned int>(1), "Only one thread notified"); @@ -85,7 +85,7 @@ void ConditionVarTest::TestNotify() { while (embb_counter_get(&counter_) != static_cast<unsigned int>(num_threads_-1)) - {} // If this hangs then not all threads were notified.
+ {} // If test hangs here, not all threads were notified embb_mutex_unlock(&mutex_cond_wait_); embb_mutex_destroy(&mutex_cond_wait_); @@ -105,13 +105,13 @@ void ConditionVarTest::TestTimedWaitTimeouts() { embb_time_t time; embb_duration_t duration = EMBB_DURATION_INIT; - // Wait for now tests already passed time point + // Waiting for "now" tests an already passed time point embb_time_now(&time); embb_mutex_lock(&mutex); int status = embb_condition_wait_until(&cond, &mutex, &time); PT_EXPECT_EQ(status, EMBB_TIMEDOUT); - // Wait for a future timepoint + // Wait for a future time point status = embb_duration_set_milliseconds(&duration, 1); PT_EXPECT_EQ(status, EMBB_SUCCESS); status = embb_time_in(&time, &duration); // Time now diff --git a/base_c/test/time_test.cc b/base_c/test/time_test.cc index 94d6d1c..9797350 100644 --- a/base_c/test/time_test.cc +++ b/base_c/test/time_test.cc @@ -36,6 +36,9 @@ namespace test { TimeTest::TimeTest() { CreateUnit("Time in duration").Add(&TimeTest::TestTimeInDuration, this); + CreateUnit("Monotonicity").Add( + &TimeTest::TestMonotonicity, this, + 1, partest::TestSuite::GetDefaultNumIterations() * 10); } void TimeTest::TestTimeInDuration() { @@ -48,6 +51,20 @@ void TimeTest::TestTimeInDuration() { PT_EXPECT_EQ(status, EMBB_SUCCESS); } +void TimeTest::TestMonotonicity() { + embb_time_t first; + embb_time_t second; + int status1 = embb_time_in(&first, embb_duration_zero()); + int status2 = embb_time_in(&second, embb_duration_zero()); + PT_EXPECT_EQ(status1, EMBB_SUCCESS); + PT_EXPECT_EQ(status2, EMBB_SUCCESS); + unsigned long long first_abs = first.seconds * 1000 + + first.nanoseconds / 1000000; + unsigned long long second_abs = second.seconds * 1000 + + second.nanoseconds / 1000000; + PT_EXPECT_GE(second_abs, first_abs); +} + } // namespace test } // namespace base } // namespace embb diff --git a/base_c/test/time_test.h b/base_c/test/time_test.h index 629befc..419d26d 100644 --- a/base_c/test/time_test.h +++ b/base_c/test/time_test.h @@ -42,9 +42,14 @@ class TimeTest : public partest::TestCase { private: /** - * Tests time in duration method. + * Tests time-in-duration method. */ void TestTimeInDuration(); + + /** + * Tests that successively taken times are monotonically increasing. + */ + void TestMonotonicity(); }; } // namespace test diff --git a/base_cpp/include/embb/base/atomic.h b/base_cpp/include/embb/base/atomic.h index 990b2e0..d39f494 100644 --- a/base_cpp/include/embb/base/atomic.h +++ b/base_cpp/include/embb/base/atomic.h @@ -478,7 +478,7 @@ class Atomic : public embb::base::internal::atomic:: public: Atomic() : embb::base::internal::atomic:: AtomicPointer() {} - Atomic(BaseType* p) : embb::base::internal::atomic:: + explicit Atomic(BaseType* p) : embb::base::internal::atomic:: AtomicPointer(p) {} BaseType* operator=(BaseType* p) { diff --git a/base_cpp/include/embb/base/internal/atomic/atomic_base.h b/base_cpp/include/embb/base/internal/atomic/atomic_base.h index 258a686..1be01e5 100644 --- a/base_cpp/include/embb/base/internal/atomic/atomic_base.h +++ b/base_cpp/include/embb/base/internal/atomic/atomic_base.h @@ -177,8 +177,7 @@ CompareAndSwap(BaseType& expected, BaseType desired) { compare_and_swap(&AtomicValue, &native_expected, native_desired)) !=0 ?
true : false; - if (!return_val) - expected = Load(); + memcpy(&expected, &native_expected, sizeof(expected)); return return_val; } diff --git a/base_cpp/include/embb/base/internal/atomic/atomic_pointer.h b/base_cpp/include/embb/base/internal/atomic/atomic_pointer.h index d7fd970..a689054 100644 --- a/base_cpp/include/embb/base/internal/atomic/atomic_pointer.h +++ b/base_cpp/include/embb/base/internal/atomic/atomic_pointer.h @@ -65,8 +65,8 @@ class AtomicPointer : public AtomicArithmetic { bool IsPointer() const; // The methods below are documented in atomic.h - BaseType* operator->(); - BaseType& operator*(); + BaseType* operator->() const; + BaseType& operator*() const; }; template @@ -93,13 +93,13 @@ inline bool AtomicPointer:: template inline BaseType* AtomicPointer:: - operator->() { + operator->() const { return this->Load(); } template inline BaseType& AtomicPointer:: - operator*() { + operator*() const { return *(this->Load()); } diff --git a/base_cpp/include/embb/base/internal/mutex-inl.h b/base_cpp/include/embb/base/internal/mutex-inl.h index 0d9b336..86f66ac 100644 --- a/base_cpp/include/embb/base/internal/mutex-inl.h +++ b/base_cpp/include/embb/base/internal/mutex-inl.h @@ -28,6 +28,7 @@ #define EMBB_BASE_INTERNAL_MUTEX_INL_H_ #include +#include <algorithm> namespace embb { namespace base { @@ -95,8 +96,8 @@ void UniqueLock<Mutex>::Unlock() { template<typename Mutex> void UniqueLock<Mutex>::Swap(UniqueLock<Mutex>& other) { - locked_ = other.locked_; - mutex_ = other.Release(); + std::swap(mutex_, other.mutex_); + std::swap(locked_, other.locked_); } template diff --git a/base_cpp/include/embb/base/mutex.h b/base_cpp/include/embb/base/mutex.h index 0b8c7e3..1d63027 100644 --- a/base_cpp/include/embb/base/mutex.h +++ b/base_cpp/include/embb/base/mutex.h @@ -439,11 +439,11 @@ class UniqueLock { void Unlock(); /** - * Transfers ownership of a mutex to this lock. + * Exchanges ownership of the wrapped mutex with another lock. */ void Swap( UniqueLock& other - /**< [IN/OUT] Lock from which ownership shall be transferred */ + /**< [IN/OUT] The lock to exchange ownership with */ ); /** diff --git a/base_cpp/test/mutex_test.cc b/base_cpp/test/mutex_test.cc index 17e5c9e..48cc0a9 100644 --- a/base_cpp/test/mutex_test.cc +++ b/base_cpp/test/mutex_test.cc @@ -191,13 +191,21 @@ void MutexTest::TestUniqueLock() { } { // Test lock swapping - UniqueLock<> lock1; - UniqueLock<> lock2(mutex_); - PT_EXPECT_EQ(lock1.OwnsLock(), false); - PT_EXPECT_EQ(lock2.OwnsLock(), true); - lock1.Swap(lock2); + UniqueLock<> lock1(mutex_); PT_EXPECT_EQ(lock1.OwnsLock(), true); - PT_EXPECT_EQ(lock2.OwnsLock(), false); + + { + UniqueLock<> lock2; + PT_EXPECT_EQ(lock2.OwnsLock(), false); + + lock1.Swap(lock2); + PT_EXPECT_EQ(lock1.OwnsLock(), false); + PT_EXPECT_EQ(lock2.OwnsLock(), true); + } + + // At this point, "lock2" was destroyed and "mutex_" must be unlocked.
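+ // Since the mutex is free again, acquiring it with try_lock must succeed immediately.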
+ UniqueLock<> lock3(mutex_, embb::base::try_lock); + PT_EXPECT_EQ(lock3.OwnsLock(), true); } } diff --git a/containers_cpp/include/embb/containers/internal/hazard_pointer-inl.h b/containers_cpp/include/embb/containers/internal/hazard_pointer-inl.h index 3873dba..83f80aa 100644 --- a/containers_cpp/include/embb/containers/internal/hazard_pointer-inl.h +++ b/containers_cpp/include/embb/containers/internal/hazard_pointer-inl.h @@ -30,386 +30,360 @@ namespace embb { namespace containers { namespace internal { -template< typename ElementT > -FixedSizeList<ElementT>::FixedSizeList(size_t max_size) : - max_size(max_size), - size(0) { - elementsArray = static_cast<ElementT*>( - embb::base::Allocation::Allocate(sizeof(ElementT) * - max_size)); -} - -template< typename ElementT > -inline size_t FixedSizeList<ElementT>::GetSize() const { - return size; } - -template< typename ElementT > -inline size_t FixedSizeList<ElementT>::GetMaxSize() const { - return max_size; -} - -template< typename ElementT > -inline void FixedSizeList<ElementT>::clear() { - size = 0; -} - -template< typename ElementT > -typename FixedSizeList<ElementT>::iterator -FixedSizeList<ElementT>::begin() const { - return &elementsArray[0]; -} - -template< typename ElementT > -typename FixedSizeList<ElementT>::iterator -FixedSizeList<ElementT>::end() const { - return &elementsArray[size]; } - -template< typename ElementT > -FixedSizeList< ElementT > & -FixedSizeList<ElementT>::operator= (const FixedSizeList & other) { - size = 0; - - if (max_size < other.size) { - EMBB_THROW(embb::base::ErrorException, "Copy target to small"); - } +// Visual Studio complains that the return in the last line of this +// function is not reachable. This is true as long as exceptions are enabled. +// Otherwise, the exception becomes an assertion, and with assertions disabled, +// the code becomes reachable. So, this warning is disabled. +#ifdef EMBB_PLATFORM_COMPILER_MSVC +#pragma warning(push) +#pragma warning(disable:4702) +#endif + template< typename GuardType > + unsigned int HazardPointer< GuardType >::GetObjectLocalThreadIndex() { + // first, get the EMBB native thread id. + unsigned int embb_thread_index; - for (const_iterator it = other.begin(); it != other.end(); ++it) { - PushBack(*it); - } - return *this; -} + int return_val = embb_internal_thread_index(&embb_thread_index); + + if (return_val != EMBB_SUCCESS) { + EMBB_THROW(embb::base::ErrorException, "Could not get thread id"); + } + + // iterate over the mappings array + for (unsigned int i = 0; i != max_accessors_count_; ++i) { + // end of mappings? then we need to write our id + if (thread_id_mapping_[i] == -1) { + // try to CAS the initial value with our thread id + int expected = -1; + if (thread_id_mapping_[i].CompareAndSwap(expected, + static_cast<int>(embb_thread_index))) { + // successful, return our mapping + return i; + } + } + + if (thread_id_mapping_[i] == static_cast<int>(embb_thread_index)) { + // found our mapping!
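+ // (this thread's id was registered here by an earlier call)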
+ return i; + } + } + + // when we reach this point, we have too many accessors + // (no mapping possible) + EMBB_THROW(embb::base::ErrorException, "Too many accessors"); -template< typename ElementT > -bool FixedSizeList<ElementT>::PushBack(ElementT const el) { - if (size + 1 > max_size) { - return false; + return 0; } - elementsArray[size] = el; - size++; - return true; -} - -template< typename ElementT > -FixedSizeList<ElementT>::~FixedSizeList() { - embb::base::Allocation::Free(elementsArray); -} - -template< typename GuardType > -bool HazardPointerThreadEntry<GuardType>::IsActive() { - return is_active; -} - -template< typename GuardType > -bool HazardPointerThreadEntry<GuardType>::TryReserve() { - bool expected = false; - return is_active.CompareAndSwap(expected, true); -} - -template< typename GuardType > -void HazardPointerThreadEntry<GuardType>::Deactivate() { - is_active = false; -} - -template< typename GuardType > -size_t HazardPointerThreadEntry<GuardType>::GetRetiredCounter() { - return retired_list.GetSize(); -} - -template< typename GuardType > -FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>:: -GetRetired() { - return retired_list; -} - -template< typename GuardType > -FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>:: -GetRetiredTemp() { - return retired_list_temp; -} - -template< typename GuardType > -FixedSizeList< GuardType >& HazardPointerThreadEntry<GuardType>:: -GetHazardTemp() { - return hazard_pointer_list_temp; -} - -template< typename GuardType > -void HazardPointerThreadEntry<GuardType>:: -SetRetired(internal::FixedSizeList< GuardType > const & retired_list) { - this->retired_list = retired_list; -} - -template< typename GuardType > -HazardPointerThreadEntry<GuardType>:: -HazardPointerThreadEntry(GuardType undefined_guard, int guards_per_thread, - size_t max_size_retired_list) : -#ifdef EMBB_DEBUG - who_is_scanning(-1), +#ifdef EMBB_PLATFORM_COMPILER_MSVC +#pragma warning(pop) #endif - undefined_guard(undefined_guard), - guards_per_thread(guards_per_thread), - max_size_retired_list(max_size_retired_list), - // initially, each potential thread is active... if that is not the case - // another thread could call "HelpScan", and block this thread in making - // progress. - // Still, threads can be leave the hazard pointer processing (deactivation), - // but this can only be done once, i.e., this is not revertable... - is_active(1), - retired_list(max_size_retired_list), - retired_list_temp(max_size_retired_list), - hazard_pointer_list_temp(embb::base::Thread::GetThreadsMaxCount() * - guards_per_thread) { - // Initialize guarded pointer list - guarded_pointers = static_cast<embb::base::Atomic<GuardType>*> - (embb::base::Allocation::Allocate( - sizeof(embb::base::Atomic<GuardType>)*guards_per_thread)); - - for (int i = 0; i != guards_per_thread; ++i) { - new (static_cast<void*>(&guarded_pointers[i])) - embb::base::Atomic<GuardType>(undefined_guard); - } -} -template< typename GuardType > -HazardPointerThreadEntry<GuardType>::~HazardPointerThreadEntry() { - for (int i = 0; i != guards_per_thread; ++i) { - guarded_pointers[i].~Atomic(); + template< typename GuardType > + void HazardPointer< GuardType >::RemoveGuard(int guard_position) { + const unsigned int my_thread_id = GetObjectLocalThreadIndex(); + + // check invariants...
+ assert(guard_position < max_guards_per_thread_); + assert(my_thread_id < max_accessors_count_); + + // set guard + guards_[guard_position*max_accessors_count_ + my_thread_id] = + undefined_guard_; } - embb::base::Allocation::Free(guarded_pointers); -} - -template< typename GuardType > -GuardType HazardPointerThreadEntry<GuardType>::GetGuard(int pos) const { - return guarded_pointers[pos]; -} - -template< typename GuardType > -void HazardPointerThreadEntry<GuardType>::AddRetired(GuardType pointerToGuard) { - retired_list.PushBack(pointerToGuard); -} - -template< typename GuardType > -void HazardPointerThreadEntry<GuardType>:: -GuardPointer(int guardNumber, GuardType pointerToGuard) { - guarded_pointers[guardNumber] = pointerToGuard; -} - -template< typename GuardType > -void HazardPointerThreadEntry<GuardType>::SetActive(bool active) { - is_active = active; -} - -template< typename GuardType > -unsigned int HazardPointer< GuardType >::GetCurrentThreadIndex() { - unsigned int thread_index; - int return_val = embb_internal_thread_index(&thread_index); - - if (return_val != EMBB_SUCCESS) - EMBB_THROW(embb::base::ErrorException, "Could not get thread id!"); - - return thread_index; -} -template< typename GuardType > -bool HazardPointer< GuardType >::IsThresholdExceeded() { - double retiredCounterLocThread = - static_cast<double>(GetHazardPointerElementForCurrentThread(). - GetRetiredCounter()); - - return (retiredCounterLocThread >= - RETIRE_THRESHOLD * - static_cast<double>(active_hazard_pointer)* - static_cast<double>(guards_per_thread)); -} - -template< typename GuardType > -size_t HazardPointer< GuardType >::GetActiveHazardPointers() { - return active_hazard_pointer; -} -template< typename GuardType > -typename HazardPointer< GuardType >::HazardPointerThreadEntry_t & -HazardPointer< GuardType >::GetHazardPointerElementForCurrentThread() { - // For each thread, there is a slot in the hazard pointer array. - // Initially, the active flag of a hazard pointer entry is false. - // Only the respective thread changes the flag from true to false. - // This means that the current thread tells that he is about to - // stop operating, and the others are responsible for his retired - // list. - - return hazard_pointer_thread_entry_array[GetCurrentThreadIndex()]; -} - -template< typename GuardType > -void HazardPointer< GuardType >::HelpScan() { - // This is a little bit different than in the paper. In the paper, - // the retired nodes from other threads are added to our retired list. - // To be able to give a bound on memory consumption, we execute scan - // for those threads, without moving elements. The effect shall be - // the same. - - for (size_t i = 0; i != hazard_pointers; ++i) { - // Try to find non active lists... - if (!hazard_pointer_thread_entry_array[i].IsActive() && - hazard_pointer_thread_entry_array[i].TryReserve()) { - // Here: grab retired things, first check if there are any... - if (hazard_pointer_thread_entry_array[i].GetRetiredCounter() > 0) { - Scan(&hazard_pointer_thread_entry_array[i]); - } + template< typename GuardType > + HazardPointer< GuardType >::HazardPointer( + embb::base::Function<void, GuardType> freeGuardCallback, + GuardType undefined_guard, int guardsPerThread, int accessors) : + max_accessors_count_(accessors < 0 ?
+ embb::base::Thread::GetThreadsMaxCount() : accessors), + undefined_guard_(undefined_guard), + max_guards_per_thread_(guardsPerThread), + release_object_callback_(freeGuardCallback), + thread_id_mapping_(static_cast<embb::base::Atomic<int>*>( + embb::base::Allocation::Allocate(sizeof(embb::base::Atomic<int>) + *max_accessors_count_))), + guards_(static_cast<embb::base::Atomic<GuardType>*> + (embb::base::Allocation::Allocate( + sizeof(embb::base::Atomic< GuardType >) * max_guards_per_thread_ * + max_accessors_count_))), + thread_local_retired_lists_temp_(static_cast<GuardType*> + (embb::base::Allocation::Allocate( + sizeof(GuardType) * max_guards_per_thread_ * max_accessors_count_ * + max_accessors_count_ + ))), + thread_local_retired_lists_(static_cast<GuardType*> + (embb::base::Allocation::Allocate( + sizeof(GuardType) * max_guards_per_thread_ * max_accessors_count_ * + max_accessors_count_ + ))) { + const unsigned int count_guards = + max_guards_per_thread_ * max_accessors_count_; + + const unsigned int count_ret_elements = + count_guards * max_accessors_count_; + + for (unsigned int i = 0; i != max_accessors_count_; ++i) { + // in-place new for each cell + new (&thread_id_mapping_[i]) embb::base::Atomic < int >(-1); + } - // We are done, mark it as deactivated again + hazard_pointer_thread_entry_array[i].Deactivate(); + for (unsigned int i = 0; i != count_guards; ++i) { + // in-place new for each cell + new (&guards_[i]) embb::base::Atomic < GuardType >(undefined_guard); + } + + for (unsigned int i = 0; i != count_ret_elements; ++i) { + // in-place new for each cell + new (&thread_local_retired_lists_temp_[i]) GuardType(undefined_guard); + } + + for (unsigned int i = 0; i != count_ret_elements; ++i) { + // in-place new for each cell + new (&thread_local_retired_lists_[i]) GuardType(undefined_guard); } } -} - -template< typename GuardType > -void HazardPointer< GuardType >:: -Scan(HazardPointerThreadEntry_t* currentHazardPointerEntry) { -#ifdef EMBB_DEBUG - // scan should only be executed by one thread at a time, otherwise we have - // a bug... this assertions checks that - int expected = -1; - if (!currentHazardPointerEntry->GetScanningThread().CompareAndSwap( - expected, static_cast<int>(GetCurrentThreadIndex()))) { - assert(false); - } -#endif - // In this function, we compute the intersection between local retired - // pointers and all hazard pointers. This intersection cannot be deleted and - // forms the new local retired pointers list. - // It is assumed that the union of all retired pointers contains no two - // pointers with the same value. However, the union of all hazard guards - // might. - - // Here, we store the temporary hazard pointers. We have to store them, - // as iterating multiple time over them might be expensive, as this - // atomic array is shared between threads. - currentHazardPointerEntry->GetHazardTemp().clear(); - - // Get all active hazard pointers!
- for (unsigned int i = 0; i != hazard_pointers; ++i) { - // Only consider guards of active threads - if (hazard_pointer_thread_entry_array[i].IsActive()) { - // For each guard in an hazard pointer entry - for (int pos = 0; pos != guards_per_thread; ++pos) { - GuardType guard = hazard_pointer_thread_entry_array[i].GetGuard(pos); - - // UndefinedGuard means not guarded - if (guard == undefined_guard) - continue; - - currentHazardPointerEntry->GetHazardTemp().PushBack(guard); + + template< typename GuardType > + HazardPointer< GuardType >::~HazardPointer() { + const unsigned int count_guards = + max_guards_per_thread_ * max_accessors_count_; + + const unsigned int count_ret_elements = + count_guards * max_accessors_count_; + + // Release references from all retired lists. Note that for this to work, + // the data structure using hazard pointers still has to be active... So + // the hazard pointer class must be destructed first, then the memory + // management class (e.g., some pool). Otherwise, the hazard pointer class + // would try to return memory to an already destructed memory manager. + for (unsigned int i = 0; i != count_ret_elements; ++i) { + GuardType pointerToFree = + thread_local_retired_lists_[i]; + if (pointerToFree == undefined_guard_) { + break; } + release_object_callback_(pointerToFree); } - } - currentHazardPointerEntry->GetRetiredTemp().clear(); - - // Sort them, we will do a binary search on each entry from the retired list - std::sort( - currentHazardPointerEntry->GetHazardTemp().begin(), - currentHazardPointerEntry->GetHazardTemp().end()); - - for ( - EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME FixedSizeList< GuardType >::iterator - it = currentHazardPointerEntry->GetRetired().begin(); - it != currentHazardPointerEntry->GetRetired().end(); ++it) { - if (false == ::std::binary_search( - currentHazardPointerEntry->GetHazardTemp().begin(), - currentHazardPointerEntry->GetHazardTemp().end(), *it)) { - this->free_guard_callback(*it); - } else { - currentHazardPointerEntry->GetRetiredTemp().PushBack(*it); + for (unsigned int i = 0; i != max_accessors_count_; ++i) { + thread_id_mapping_[i].~Atomic(); + } + + embb::base::Allocation::Free(thread_id_mapping_); + + for (unsigned int i = 0; i != count_guards; ++i) { + guards_[i].~Atomic(); } + + embb::base::Allocation::Free(guards_); + + for (unsigned int i = 0; i != count_ret_elements; ++i) { + thread_local_retired_lists_temp_[i].~GuardType(); + } + + embb::base::Allocation::Free(thread_local_retired_lists_temp_); + + for (unsigned int i = 0; i != count_ret_elements; ++i) { + thread_local_retired_lists_[i].~GuardType(); + } + + embb::base::Allocation::Free(thread_local_retired_lists_); } - currentHazardPointerEntry->SetRetired( - currentHazardPointerEntry->GetRetiredTemp()); -#ifdef EMBB_DEBUG - currentHazardPointerEntry->GetScanningThread().Store(-1); -#endif -} - -template< typename GuardType > -size_t HazardPointer< GuardType >::GetRetiredListMaxSize() const { - return static_cast<size_t>(RETIRE_THRESHOLD * - static_cast<double>(embb::base::Thread::GetThreadsMaxCount()) * - static_cast<double>(guards_per_thread)) + 1; -} - -template< typename GuardType > -HazardPointer< GuardType >::HazardPointer( - embb::base::Function<void, GuardType> free_guard_callback, - GuardType undefined_guard, int guards_per_thread) : - undefined_guard(undefined_guard), - guards_per_thread(guards_per_thread), - //initially, all potential hazard pointers are active...
- active_hazard_pointer(embb::base::Thread::GetThreadsMaxCount()), - free_guard_callback(free_guard_callback) { - hazard_pointers = embb::base::Thread::GetThreadsMaxCount(); - - hazard_pointer_thread_entry_array = static_cast<HazardPointerThreadEntry_t*>( - embb::base::Allocation::Allocate(sizeof(HazardPointerThreadEntry_t) * - hazard_pointers)); - - for (size_t i = 0; i != hazard_pointers; ++i) { - new (static_cast<void*>(&(hazard_pointer_thread_entry_array[i]))) - HazardPointerThreadEntry_t(undefined_guard, guards_per_thread, - GetRetiredListMaxSize()); + template< typename GuardType > + void HazardPointer< GuardType >::Guard(int guardPosition, + GuardType guardedElement) { + const unsigned int my_thread_id = GetObjectLocalThreadIndex(); + + // check invariants... + assert(guardPosition < max_guards_per_thread_); + assert(my_thread_id < max_accessors_count_); + + // set guard + guards_[guardPosition*max_accessors_count_ + my_thread_id] = guardedElement; } -} -template< typename GuardType > -HazardPointer< GuardType >::~HazardPointer() { - for (size_t i = 0; i != hazard_pointers; ++i) { - hazard_pointer_thread_entry_array[i].~HazardPointerThreadEntry_t(); + template< typename GuardType > + size_t HazardPointer< GuardType >::ComputeMaximumRetiredObjectCount( + size_t guardsPerThread, int accessors) { + unsigned int accessorCount = (accessors == -1 ? + embb::base::Thread::GetThreadsMaxCount() : + accessors); + + return static_cast<size_t>( + guardsPerThread * accessorCount * accessorCount); } - embb::base::Allocation::Free(static_cast < void* > - (hazard_pointer_thread_entry_array)); -} - -template< typename GuardType > -void HazardPointer< GuardType >::DeactivateCurrentThread() { - HazardPointerThreadEntry_t* current_thread_entry = - &hazard_pointer_thread_entry_array[GetCurrentThreadIndex()]; - - // Deactivating a non-active hazard pointer entry has no effect! - if (!current_thread_entry->IsActive()) { - return; - } else { - current_thread_entry->SetActive(false); - active_hazard_pointer--; + /** + * Remark: it might be faster to just swap the pointers of the temporary + * retired list and the retired list. However, with the current implementation + * (one array for all retired and temporary retired lists, respectively) this + * is not possible. This will not be changed unless the copying turns out to + * be a performance problem; currently, the copying is not the bottleneck. + */ + template< typename GuardType > + void HazardPointer< GuardType >::CopyRetiredList(GuardType* sourceList, + GuardType* targetList, unsigned int retiredListSize, + GuardType undefinedGuard) { + bool done = false; + for (unsigned int ii = 0; ii != retiredListSize; ++ii) { + if (!done) { + GuardType guardToCopy = sourceList[ii]; + + if (guardToCopy == undefinedGuard) { + done = true; + + if (targetList[ii] == undefinedGuard) { + // end of target list + break; + } + } + targetList[ii] = guardToCopy; + } else { + // we copied the whole source list, remaining values in the target + // have to be reset to undefinedGuard.
+ if (targetList[ii] == undefinedGuard) { + // end of target list + break; + } else { + targetList[ii] = undefinedGuard; + } + } + } + } + + template< typename GuardType > + void HazardPointer< GuardType >::UpdateRetiredList(GuardType* retired_list, + GuardType* updated_retired_list, unsigned int retired_list_size, + GuardType guarded_element, GuardType considered_hazard, + GuardType undefined_guard) { + // no hazard set here + if (considered_hazard == undefined_guard) + return; + + // if this hazard is currently in the union of + // threadLocalRetiredLists and pointerToRetire, but not yet in + // threadLocalRetiredListsTemp, add it to that list + bool contained_in_union = false; + + // first iterate over our retired list + for (unsigned int i = 0; i != retired_list_size; ++i) { + // when reaching the undefined guard, we can stop iterating + // (end of the "list") + if (retired_list[i] == undefined_guard) + break; + + // the hazard is contained in the retired list... it shall go + // into the temp list, if not already there + if (retired_list[i] == considered_hazard) { + contained_in_union = true; + break; + } + } + + // the union also contains pointerToRetire + if (!contained_in_union) { + contained_in_union = (considered_hazard == guarded_element); + } + + // add the pointer to temp. retired list, if not already there + if (contained_in_union) { + for (unsigned int ii = 0; ii != retired_list_size; ++ii) { + // is it already there? + if (updated_retired_list[ii] == considered_hazard) + break; + + // end of the list + if (updated_retired_list[ii] == undefined_guard) { + // add hazard + updated_retired_list[ii] = considered_hazard; + + // we are done here... + break; + } + } + } } -template -const double embb::containers::internal::HazardPointer<GuardType>:: - RETIRE_THRESHOLD = 1.25f; + template< typename GuardType > + void HazardPointer< GuardType >::EnqueueForDeletion(GuardType toRetire) { + unsigned int my_thread_id = GetObjectLocalThreadIndex(); + + // check for invariant + assert(my_thread_id < max_accessors_count_); + + const unsigned int retired_list_size = max_accessors_count_ * + max_guards_per_thread_; + + const unsigned int count_guards = max_accessors_count_ * + max_guards_per_thread_; + + GuardType* retired_list = + &thread_local_retired_lists_[my_thread_id * retired_list_size]; + + GuardType* retired_list_temp = + &thread_local_retired_lists_temp_[my_thread_id * retired_list_size]; + + // wipe my temp. retired list... + for (unsigned int i = 0; i < retired_list_size; ++i) { + // the list is always filled from left to right; once the first + // undefinedGuard occurs, the remaining entries are also undefinedGuard... + if (retired_list_temp[i] == undefined_guard_) + break; + + retired_list_temp[i] = undefined_guard_; + } + + // we test for each hazard whether it is in the union of retiredList and + // guardedElement. If it is, it goes into the new retired list...
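+ // (each guard slot of every accessor is inspected exactly once)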
+ for (unsigned int i = 0; i != count_guards; ++i) { + // consider each current active guard + GuardType considered_hazard = guards_[i].Load(); + UpdateRetiredList(retired_list, retired_list_temp, retired_list_size, + toRetire, considered_hazard, undefined_guard_); + } + + int retired_list_size_signed = static_cast<int>(retired_list_size); + assert(retired_list_size_signed >= 0); + + // now we have created a new retired list... the elements that are "removed" + // from the old retired list can be safely deleted now... + for (int i = -1; i != retired_list_size_signed; ++i) { + // we iterate over the current retired list... -1 is used as dummy element + // in the iteration, to also iterate over the pointerToRetire, which is + // logically also part of the current retired list... + + // end of the list, stop iterating + if (i >= 0 && retired_list[i] == undefined_guard_) + break; + + GuardType to_check_if_in_new_list = undefined_guard_; + + to_check_if_in_new_list = (i == -1 ? toRetire : retired_list[i]); + + // still in the new retired list? + bool still_in_list = false; + for (unsigned int ii = 0; ii != retired_list_size; ++ii) { + // end of list + if (retired_list_temp[ii] == undefined_guard_) + break; + + if (to_check_if_in_new_list == retired_list_temp[ii]) { + // still in list, cannot delete element! + still_in_list = true; + break; + } + } + + if (!still_in_list) { + this->release_object_callback_(to_check_if_in_new_list); + } + } + + // copy the updated retired list (temp) to the retired list... + CopyRetiredList(retired_list_temp, retired_list, retired_list_size, + undefined_guard_); + } } // namespace internal } // namespace containers } // namespace embb diff --git a/containers_cpp/include/embb/containers/internal/hazard_pointer.h b/containers_cpp/include/embb/containers/internal/hazard_pointer.h index e1e5f49..2a25b67 100644 --- a/containers_cpp/include/embb/containers/internal/hazard_pointer.h +++ b/containers_cpp/include/embb/containers/internal/hazard_pointer.h @@ -40,487 +40,274 @@ #define EMBB_CONTAINERS_CPP_DEPENDANT_TYPENAME typename #endif +// forward declaration for white-box test, used in friend declaration of +// HazardPointer class. +namespace embb { +namespace containers { +namespace test { +class HazardPointerTest2; +} +} +} + namespace embb { namespace containers { namespace internal { /** - * A list with fixed size, implemented as an array. Replaces std::vector that - * was used in previous hazard pointer implementation. + * This class contains a hazard pointer implementation following the publication: * - * Provides iterators, so we can apply algorithms from the STL. + * Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free + * objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004) + * : 491-504. * - * \tparam ElementT Type of the elements contained in the list. - */ -template< typename ElementT > -class FixedSizeList { - private: - /** - * Capacity of the list - */ - size_t max_size; - - /** - * Size of the list - */ - size_t size; - - /** - * Pointer to the array containing the list - */ - ElementT* elementsArray; - - /** - * Copy constructor not implemented. Would require dynamic memory allocation.
- */ - FixedSizeList( - const FixedSizeList & - /**< [IN] Other list */); - - public: - /** - * Definition of an iterator - */ - typedef ElementT * iterator; - - /** - * Definition of a const iterator - */ - typedef const ElementT * const_iterator; - - /** - * Constructor, initializes list with given capacity - */ - FixedSizeList( - size_t max_size - /**< [IN] Capacity of the list */); - - /** - * Gets the current size of the list - * - * \return Size of the list - */ - inline size_t GetSize() const; - - /** - * Gets the capacity of the list - * - * \return The capacity of the list - */ - inline size_t GetMaxSize() const; - - /** - * Removes all elements from the list without changing the capacity - */ - inline void clear(); - - /** - * Iterator pointing to the first element - * - * \return Begin iterator - */ - iterator begin() const; - - /** - * Iterator pointing beyond the last element - * - * \return End iterator - */ - iterator end() const; - - /** - * Copies the elements of another list to this list. The capacity of - * this list has to be greater than or equal to the size of the other list. - */ - FixedSizeList & operator=( - const FixedSizeList & other - /**< [IN] Other list */); - - /** - * Appends an element to the end of the list - * - * \return \c false if the operation was not successful because the list is - * full, otherwise \c true. - */ - bool PushBack( - ElementT const el - /**< [IN] Element to append to the list */); - - /** - * Destructs the list. - */ - ~FixedSizeList(); -}; - -/** - * Hazard pointer entry for a single thread. Holds the actual guards that - * determine if the current thread is about to use the guarded pointer. - * Guarded pointers are protected and not deleted. + * Hazard pointers are a wait-free memory reclamation scheme for lock-free + * algorithms. Loosely speaking, they act as a garbage collector. The release of + * objects contained within the memory managed by the hazard pointer class is + * intercepted and possibly delayed to avoid concurrency bugs. + * + * Before accessing an object, threads announce their intention to do so (i.e. + * the intention to dereference the respective pointer) to the hazard pointer + * class. This is called guarding. From now on, the hazard pointer class will + * prohibit the release or reuse of the guarded object. This is necessary to + * assure that the object is not released or reused while it is accessed, and + * that it has not changed unnoticed (effectively avoiding the ABA + * problem). + * + * Note that after guarding an object, a consecutive check that the object (i.e. + * its pointer) is still valid is necessary; the object release could already + * have been started when guarding the object. Guarding is repeated until this + * check eventually succeeds. Note that this "guard-and-check" loop makes the + * usage of the hazard pointer class lock-free, even though its implementation + * is wait-free. + * + * Internally, guarding is realized by providing each thread with slots where + * pointers that should not be freed can be placed (so-called guards). When + * trying to release an object, it is checked if the object's pointer is + * guarded, and if so, the object is not released but instead put into a + * retired list, for later release when all guards for this object have been + * removed. + * + * In contrast to the original implementation, our implementation consumes only + * fixed-size memory.
Note that the number of threads accessing the hazard + * pointer object accounts quadratically for the memory consumption: managed objects + * are provided from outside and the number of accessors accounts quadratically for + * the minimum count of those objects. * - * Moreover, the retired list for this thread is contained. It determines - * the pointers that have been allocated from this thread, but are not used - * anymore by this thread. However, another thread could have a guard on it, - * so the pointer cannot be deleted immediately. + * Also in contrast to the original implementation, we do not provide a HelpScan + * functionality, which gives threads the possibility to stop participating in the + * garbage collection: other threads will help to clean up the objects + * protected by the exiting thread. The reason is that the only use case would + * be a crashing thread that does not participate anymore. However, as the thread + * has to signal its exit itself, this cannot be realized anyway. In the + * end, it is still guaranteed that all memory is properly returned (in the + * destructor). * - * For the scan operation, the intersection of the guarded pointers from all - * threads and the retired list has to be computed. For this computation, we - * need thread local temporary lists which are also contained here. + * Additionally, the original implementation holds a threshold, which determines + * when objects shall be freed. In this implementation, we free whenever it is + * possible to do so, as we want to keep the memory footprint as low as + * possible. We also don't see a performance drop in the current algorithms that + * use hazard pointers when not using a threshold. * - * \tparam GuardType The type of guard, usually a pointer. + * \tparam GuardType the type of the guards. Usually the pointer type of some + * object to protect. */ template< typename GuardType > -class HazardPointerThreadEntry {#ifdef EMBB_DEBUG - - public: - embb::base::Atomic<int>& GetScanningThread() { - return who_is_scanning; - } - - private: - embb::base::Atomic<int> who_is_scanning;#endif - - private: - /** - * Value of the undefined guard (means that no guard is set). - */ - GuardType undefined_guard; - - /** - * The number of guards per thread. Determines the size of the guard array. - */ - int guards_per_thread; - - /** - * The capacity of the retired list. It is determined by number of guards, - * retired threshold, and maximum number of threads. - */ - size_t max_size_retired_list; - - /** - * Set to true if the current thread is active. Is used for a thread to - * signal that it is leaving. If a thread has left, the other threads are - * responsible for cleaning up its retired list. - */ - embb::base::Atomic< bool > is_active; - - /** - * The guarded pointer of this thread, has size \c guard_per_thread. - */ - embb::base::Atomic< GuardType >* guarded_pointers; - - /** - * The retired list of this thread, contains pointer that shall be released - * when no thread holds a guard on it anymore. - */ - FixedSizeList< GuardType > retired_list; - - /** - * Temporary retired list, has same capacity as \c retired_list, It is used to - * compute the intersection of all guards and the \c retired list. - */ - FixedSizeList< GuardType > retired_list_temp; - - /** - * Temporary guards list. Used to compute the intersection of all guards and - * the \c retired_list.
- */ - FixedSizeList< GuardType > hazard_pointer_list_temp; - - /** - * HazardPointerThreadEntry shall not be copied - */ - HazardPointerThreadEntry(const HazardPointerThreadEntry&); - - /** - * HazardPointerThreadEntry shall not be assigned - */ - HazardPointerThreadEntry & operator= (const HazardPointerThreadEntry&); - +class HazardPointer { public: /** - * Checks if current thread is active (with respect to participating in hazard - * pointer management) + * The user of the hazard pointer class has to provide the memory that is + * managed here. The user has to take into account that the release of memory + * might be delayed. He therefore has to provide more memory than he wants to + * guarantee at each point in time. More specifically, on top of the guaranteed + * count of objects, he has to provide the additional count of objects that + * can be (worst-case) contained in the retired lists and therefore are not + * released yet. The total size of all retired lists is guardsPerThread * + * accessorCount * accessorCount, which is computed using this function. So + * the result of this function tells the user how many objects he has to + * allocate in addition to the guaranteed count. * - * \return \c true if the current thread is active, otherwise \c false. - */ - bool IsActive(); - - /** - * Tries to set the active flag to true (atomically). Used if the current - * thread is not active anymore as lock for another thread to help cleaning - * up hazard pointer. - * - * \return \c true if this thread was successful setting the active flag, - * otherwise \c false. - */ - bool TryReserve(); - - /** - * Deactivates current thread by atomically setting active flag to false. + * \waitfree */ - void Deactivate(); + static size_t ComputeMaximumRetiredObjectCount( + size_t guardsPerThread, + /**<[IN] the count of guards per thread*/ + int accessors = -1 + /**<[IN] Number of accessors. Determines how many threads will access + the hazard pointer object. Default value -1 will allow the + maximum amount of threads as defined with + \c embb::base::Thread::GetThreadsMaxCount()*/ + ); /** - * Gets the count of current retired pointer for the current thread. + * Initializes the hazard pointer object * - * \return Count of current retired pointer - */ - size_t GetRetiredCounter(); - - /** - * Gets the retired list. + * \notthreadsafe * - * \return Reference to \c retired_list - */ - FixedSizeList< GuardType >& GetRetired(); - - /** - * Gets the temporary retired list. + * \memory We dynamically allocate the following: * - * \return Reference to \c retired_list_temp - */ - FixedSizeList< GuardType >& GetRetiredTemp(); - - /** - * Gets the temporary hazard pointer list. + * (sizeof(Atomic<int>) * accessors) + (sizeof(Atomic<GuardType>) * + * guards_per_thread * accessors) + (2*sizeof(GuardType) * + * guards_per_thread * accessors^2) * - * \return Reference to \c hazard_pointer_list_temp - */ - FixedSizeList< GuardType >& GetHazardTemp(); - - /** - * Sets the retired list. - */ - void SetRetired( - embb::containers::internal::FixedSizeList< GuardType > const & retired_list - /**< [IN] Retired list */); - - /** - * Constructor + * The last addend is the dominant one, as accessorCount accounts + * quadratically for it. */ - HazardPointerThreadEntry( + HazardPointer( + embb::base::Function<void, GuardType> free_guard_callback, + /**<[IN] Callback to the function that shall be called when a retired + guard can be deleted */ GuardType undefined_guard, - /**< [IN] Value of the undefined guard (e.g.
NULL) */ + /**<[IN] The guard value denoting "not guarded"*/ int guards_per_thread, - /**< [IN] Number of guards per thread */ - size_t max_size_retired_list - /**< [IN] The capacity of the retired list(s) */); - - /** - * Destructor + /**<[IN] Number of guards per thread*/ + int accessors = -1 + /**<[IN] Number of accessors. Determines how many threads will access + this hazard pointer object. Default value -1 will allow the + maximum amount of threads as defined with + \c embb::base::Thread::GetThreadsMaxCount()*/ + ); + + /** + * Deallocates internal data structures. Additionally releases all objects + * currently held in the retired lists, using the release functor passed in + * the constructor. * - * Deallocate lists - */ - ~HazardPointerThreadEntry(); - - /** - * Gets the guard at the specified position. - * Positions are numbered, beginning with 0. + * \notthreadsafe */ - GuardType GetGuard( - int pos - /**< [IN] Position of the guard */) const; + ~HazardPointer(); /** - * Adds pointer to the retired list + * Guards \c to_guard. If \c to_guard is passed to \c EnqueueForDeletion, + * it is prevented from release from now on. The user must check that + * \c EnqueueForDeletion has not been called on \c to_guard before the + * guarding took effect. + * + * \waitfree */ - void AddRetired( - GuardType pointerToGuard - /**< [IN] Guard to retire */); + void Guard( + int guard_position, + /**<[IN] position to place guard*/ + GuardType to_guard + /**<[IN] element to guard*/ + ); /** - * Guards pointer + * Enqueues a guarded element for deletion. If not guarded, it is deleted + * immediately. If it is guarded, it is added to a thread-local retired list, + * and deleted in a subsequent call to \c EnqueueForDeletion, when no guard is + * placed on it anymore. */ - void GuardPointer( - int guardNumber, - /**< [IN] Position of guard */ - GuardType pointerToGuard - /**<[IN] Pointer to guard */); + void EnqueueForDeletion( + GuardType guarded_element + /**<[IN] element to logically delete*/ + ); /** - * Sets the current thread active, i.e., announce that the thread - * participates in managing hazard pointer. + * Explicitly remove guard from thread local slot. + * + * \waitfree */ - void SetActive( - bool active - /**<[IN] \c true for active, \c false for inactive */); -}; + void RemoveGuard(int guard_position); -/** - * HazardPointer implementation as presented in: - * - * Maged M. Michael. "Hazard pointers: Safe memory reclamation for lock-free - * objects." IEEE Transactions on Parallel and Distributed Systems, 15.6 (2004) - * : 491-504. - * - * In contrast to the original implementation, our implementation only uses - * fixed-size memory. There is a safe upper limit, hazard pointer are guaranteed - * to not consume more memory. Memory is allocated solely at initialization. - * - * Hazard pointers solve the ABA problem for lock-free algorithms. Before - * accessing a pointer, threads announce that they want to access this pointer - * and then check if the pointer is still valid. This announcement is done by - * placing a guard. It is guaranteed that the pointer is not reused until all - * threads remove their guards to this pointer. Objects, these pointers are - * pointing to, can therefore not be deleted directly. Instead, these pointers - * are put into a list for later deletion (retired list). Regularly, this list - * is processed to check which pointers can be deleted. If a pointer can be - * deleted, a callback function provided by the user is called.
The user can - then, e.g., free the respective object, so that the pointer can be safely - reused. - */ -template< typename GuardType > -class HazardPointer { private: - /** - * Concrete hazard pointer entry type - */ - typedef HazardPointerThreadEntry < GuardType > - HazardPointerThreadEntry_t; - - /** - * The guard value denoting "not guarding" - */ - GuardType undefined_guard; - - /** - * The capacity of the retired list (safe upper bound for retired list size) + * HazardPointerTest2 is a white-box test that needs access to private members + * of this class, so it is declared as friend. */ - int retired_list_max_size; + friend class embb::containers::test::HazardPointerTest2; /** - * Guards that can be set per thread + * This number determines the maximal amount of accessors (threads) that + * will access this hazard pointer instance. Note that a thread that has once + * accessed this object will permanently count as an accessor, even if it is + * not participating anymore. If too many threads access this object, an + * exception is thrown. */ - int guards_per_thread; + unsigned int max_accessors_count_; /** - * Array of HazardPointerElements. Each thread is assigned to one. + * The guard value denoting "not guarded" */ - HazardPointerThreadEntry_t* hazard_pointer_thread_entry_array; + GuardType undefined_guard_; /** - * The threshold, determines at which size of the retired list pointers - * are tried to be deleted. + * The maximal count of guards that can be set per thread. */ - static const double RETIRE_THRESHOLD; + int max_guards_per_thread_; /** - * Each thread is assigned a thread index (starting with 0). - * Get the index of the current thread. + * The functor that is called to release an object. This is called by this + * class, when it is safe to do so, i.e., when no thread accesses the object + * anymore. */ - static unsigned int GetCurrentThreadIndex(); + embb::base::Function<void, GuardType> release_object_callback_; /** - * The number of hazard pointers currently active. + * Mapping from EMBB thread id to hazard pointer thread ids. Hazard pointer + * thread ids are in range [0;accessor_count-1]. The position of an EMBB thread + * id in that array determines the respective hazard pointer thread id. */ - size_t active_hazard_pointer; + embb::base::Atomic<int>* thread_id_mapping_; /** - * Count of all hazard pointers. + * The hazard pointer guards, represented as an array. Each thread has a fixed + * set of slots (guardsPerThread) within this array. */ - size_t hazard_pointers; + embb::base::Atomic<GuardType>* guards_; /** - * The callback that is triggered when a retired guard can be - * freed. Usually, the user will call a free here. + * \see threadLocalRetiredLists documentation */ - embb::base::Function<void, GuardType> free_guard_callback; + GuardType* thread_local_retired_lists_temp_; /** - * Checks if the current size of the retired list exceeds the threshold, so - * that each retired guard is checked for being not hazardous anymore. - * - * \return \c true is threshold is exceeded, otherwise \c false. - */ - bool IsThresholdExceeded(); - - /** - * Gets the number of hazard pointe, currently active - * - * \return Number of active hazard pointers - */ - size_t GetActiveHazardPointers(); - - /** - * Gets the hazard pointer entry for the current thread - * - * \return Hazard pointer entry for current thread - */ - HazardPointerThreadEntry_t& - GetHazardPointerElementForCurrentThread(); - - /** - * Threads might leave from participating in hazard pointer management. - * This method helps all those threads processing their retired list.
- */ - void HelpScan(); - - /** - * Checks the retired list of a hazard pointer entry for elements of the - * retired list that can be freed, and executes the delete callback for those - * elements. - */ - void Scan( - HazardPointerThreadEntry_t* currentHazardPointerEntry - /**<[IN] Hazard pointer entry that should be checked for elements that - can be deleted*/); - - public: - /** - * Gets the capacity of one retired list - * - * \waitfree + * A list of lists, represented as single array. Each thread maintains a list + * of retired pointers, that are objects that are logically released but not + * released because some thread placed a guard on it. */ - size_t GetRetiredListMaxSize() const; + GuardType* thread_local_retired_lists_; /** - * Initializes hazard pointer + * Each thread is assigned a thread index (starting with 0). Get the index of + * the current thread. Note that this is not the global index, but an hazard + * pointer class internal one. The user is free to define less accessors than + * the amount of default threads. This is useful, as the number of accessors + * accounts quadratic for the memory consumption, so the user should have the + * possibility to avoid memory wastage when only having a small, fixed size, + * number of accessors. * - * \notthreadsafe - * - * \memory - * - Let \c t be the number of maximal threads determined by EMBB - * - Let \c g be the number of guards per thread - * - Let \c x be 1.25*t*g + 1 - * - * We dynamically allocate \c x*(3*t+1) elements of size \c sizeof(void*). - */ - HazardPointer( - embb::base::Function free_guard_callback, - /**<[IN] Callback to the function that shall be called when a retired - guard can be deleted */ - GuardType undefined_guard, - /**<[IN] The guard value denoting "not guarded"*/ - int guards_per_thread - /**<[IN] Number of guards per thread*/); - - /** - * Deallocates lists for hazard pointer management. Note that no objects - * currently in the retired lists are deleted. This is the responsibility - * of the user. Usually, HazardPointer manages pointers of an object pool. - * After destructing HazardPointer, the object pool is deleted, so that - * everything is properly cleaned up. - */ - ~HazardPointer(); - - /** - * Announces that the current thread stops participating in hazard pointer - * management. The other threads now take care of his retired list. - * - * \waitfree - */ - void DeactivateCurrentThread(); - - /** - * Guards \c guardedElement with the guard at position \c guardPosition - */ - void GuardPointer(int guardPosition, GuardType guardedElement); - /** - * Enqueue a pointer for deletion. It is added to the retired list and - * deleted when no thread accesses it anymore. 
- */ - void EnqueuePointerForDeletion(GuardType guardedElement); + * @return current (hazard pointer object local) thread index + */ + unsigned int GetObjectLocalThreadIndex(); + + /** + * Copy retired list \c sourceList to retired list \c targetList + */ + static void CopyRetiredList( + GuardType* source_list, + /**<[IN] the source retired list*/ + GuardType* target_list, + /**<[IN] the target retired list*/ + unsigned int single_retired_list_size, + /**<[IN] the size of a thread local retired list*/ + GuardType undefined_guard + /**<[IN] the undefined guard (usually the NULL pointer)*/ + ); + + static void UpdateRetiredList( + GuardType* retired_list, + /**<[IN] the old retired list*/ + GuardType* updated_retired_list, + /**<[IN] the updated retired list*/ + unsigned int retired_list_size, + /**<[IN] the size of a thread local retired list*/ + GuardType to_retire, + /**<[IN] the element to retire*/ + GuardType considered_hazard, + /**<[IN] the currently considered hazard*/ + GuardType undefined_guard + /**<[IN] the undefined guard (usually the NULL pointer)*/ + ); }; } // namespace internal } // namespace containers diff --git a/containers_cpp/include/embb/containers/internal/lock_free_mpmc_queue-inl.h b/containers_cpp/include/embb/containers/internal/lock_free_mpmc_queue-inl.h index c4914e9..d087706 100644 --- a/containers_cpp/include/embb/containers/internal/lock_free_mpmc_queue-inl.h +++ b/containers_cpp/include/embb/containers/internal/lock_free_mpmc_queue-inl.h @@ -77,7 +77,12 @@ LockFreeMPMCQueue::~LockFreeMPMCQueue() { template< typename Type, typename ValuePool > LockFreeMPMCQueue::LockFreeMPMCQueue(size_t capacity) : -capacity(capacity), + capacity(capacity), + // Object pool, size with respect to the maximum number of retired nodes not + // eligible for reuse. +1 for dummy node. + objectPool( + MPMCQueueNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(2) + + capacity + 1), // Disable "this is used in base member initializer" warning. // We explicitly want this. #ifdef EMBB_PLATFORM_COMPILER_MSVC @@ -89,13 +94,7 @@ delete_pointer_callback(*this, #ifdef EMBB_PLATFORM_COMPILER_MSVC #pragma warning(pop) #endif - hazardPointer(delete_pointer_callback, NULL, 2), - // Object pool, size with respect to the maximum number of retired nodes not - // eligible for reuse. +1 for dummy node. - objectPool( - hazardPointer.GetRetiredListMaxSize()* - embb::base::Thread::GetThreadsMaxCount() + - capacity + 1) { + hazardPointer(delete_pointer_callback, NULL, 2) { // Allocate dummy node to reduce the number of special cases to consider. internal::LockFreeMPMCQueueNode* dummyNode = objectPool.Allocate(); // Initially, head and tail point to the dummy node. @@ -120,7 +119,7 @@ bool LockFreeMPMCQueue::TryEnqueue(Type const& element) { for (;;) { my_tail = tail; - hazardPointer.GuardPointer(0, my_tail); + hazardPointer.Guard(0, my_tail); // Check if pointer is still valid after guarding. 
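      // (The re-read below implements the hazard pointer handshake: a guard
      // only protects a pointer that is still reachable *after* the guard
      // has become visible. If tail changed in the meantime, my_tail may
      // already have been retired and must not be dereferenced, so the
      // loop starts over.)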
if (my_tail != tail) { @@ -163,12 +162,12 @@ bool LockFreeMPMCQueue::TryDequeue(Type & element) { Type data; for (;;) { my_head = head; - hazardPointer.GuardPointer(0, my_head); + hazardPointer.Guard(0, my_head); if (my_head != head) continue; my_tail = tail; my_next = my_head->GetNext(); - hazardPointer.GuardPointer(1, my_next); + hazardPointer.Guard(1, my_next); if (head != my_head) continue; if (my_next == NULL) @@ -187,7 +186,7 @@ bool LockFreeMPMCQueue::TryDequeue(Type & element) { break; } - hazardPointer.EnqueuePointerForDeletion(my_head); + hazardPointer.EnqueueForDeletion(my_head); element = data; return true; } diff --git a/containers_cpp/include/embb/containers/internal/lock_free_stack-inl.h b/containers_cpp/include/embb/containers/internal/lock_free_stack-inl.h index 2c29395..5aa032a 100644 --- a/containers_cpp/include/embb/containers/internal/lock_free_stack-inl.h +++ b/containers_cpp/include/embb/containers/internal/lock_free_stack-inl.h @@ -81,13 +81,12 @@ capacity(capacity), #ifdef EMBB_PLATFORM_COMPILER_MSVC #pragma warning(pop) #endif - hazardPointer(delete_pointer_callback, NULL, 1), // Object pool, size with respect to the maximum number of retired nodes not // eligible for reuse: objectPool( - hazardPointer.GetRetiredListMaxSize()* - embb::base::Thread::GetThreadsMaxCount() + - capacity) { + StackNodeHazardPointer_t::ComputeMaximumRetiredObjectCount(1) + + capacity), + hazardPointer(delete_pointer_callback, NULL, 1) { } template< typename Type, typename ValuePool > @@ -128,7 +127,7 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) { return false; // Guard top_cached - hazardPointer.GuardPointer(0, top_cached); + hazardPointer.Guard(0, top_cached); // Check if top is still top. If this is the case, it has not been // retired yet (because before retiring that thing, the retiring thread @@ -144,16 +143,16 @@ bool LockFreeStack< Type, ValuePool >::TryPop(Type & element) { break; } else { // We continue with the next and can unguard top_cached - hazardPointer.GuardPointer(0, NULL); + hazardPointer.Guard(0, NULL); } } Type data = top_cached->GetElement(); // We don't need to read from this reference anymore, unguard it - hazardPointer.GuardPointer(0, NULL); + hazardPointer.Guard(0, NULL); - hazardPointer.EnqueuePointerForDeletion(top_cached); + hazardPointer.EnqueueForDeletion(top_cached); element = data; return true; diff --git a/containers_cpp/include/embb/containers/internal/lock_free_tree_value_pool-inl.h b/containers_cpp/include/embb/containers/internal/lock_free_tree_value_pool-inl.h index 2049174..eedc22d 100644 --- a/containers_cpp/include/embb/containers/internal/lock_free_tree_value_pool-inl.h +++ b/containers_cpp/include/embb/containers/internal/lock_free_tree_value_pool-inl.h @@ -42,7 +42,7 @@ template bool LockFreeTreeValuePool:: IsLeaf(int node) { - if (node >= size - 1 && node <= 2 * size - 1) { + if (node >= size_ - 1 && node <= 2 * size_ - 1) { return true; } return false; @@ -52,7 +52,7 @@ template bool LockFreeTreeValuePool:: IsValid(int node) { - return (node >= 0 && node <= 2 * size - 1); + return (node >= 0 && node <= 2 * size_ - 1); } template int LockFreeTreeValuePool:: NodeIndexToPoolIndex(int node) { assert(IsLeaf(node)); - return(node - (size - 1)); + return(node - (size_ - 1)); } template int LockFreeTreeValuePool:: PoolIndexToNodeIndex(int index) { - int node = index + (size - 1); + int node = index + (size_ - 1); assert(IsLeaf(node)); return node; } @@ -100,7 +100,7 @@ template int LockFreeTreeValuePool:: GetParentNode(int node) { 
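  // The tree is embedded in an array using the usual heap layout: the
  // children of node i are nodes 2*i+1 and 2*i+2, so the parent of node i
  // is (i-1)/2. The leaves are the last size_ entries of the array and map
  // one-to-one to pool indices (see NodeIndexToPoolIndex above).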
int parent = (node - 1) / 2; - assert(parent >= 0 && parent < size - 1); + assert(parent >= 0 && parent < size_ - 1); return parent; } @@ -112,11 +112,11 @@ allocate_rec(int node, Type& element) { if (IsLeaf(node)) { int pool_index = NodeIndexToPoolIndex(node); - Type expected = pool[pool_index]; + Type expected = pool_[pool_index]; if (expected == Undefined) return -1; - if (pool[pool_index].CompareAndSwap(expected, Undefined)) { + if (pool_[pool_index].CompareAndSwap(expected, Undefined)) { element = expected; return pool_index; } @@ -131,11 +131,11 @@ allocate_rec(int node, Type& element) { // atomically decrement the value in the node if the result is greater than // or equal to zero. This cannot be done atomically. do { - current = tree[node]; + current = tree_[node]; desired = current - 1; if (desired < 0) return -1; - } while (!tree[node].CompareAndSwap(current, desired)); + } while (!tree_[node].CompareAndSwap(current, desired)); int leftResult = allocate_rec(GetLeftChildIndex(node), element); if (leftResult != -1) { @@ -156,7 +156,7 @@ Fill(int node, int elementsToStore, int power2Value) { if (IsLeaf(node)) return; - tree[node] = elementsToStore; + tree_[node] = elementsToStore; int postPower2Value = power2Value >> 1; @@ -188,14 +188,14 @@ Free(Type element, int index) { assert(element != Undefined); // Put the element back - pool[index].Store(element); + pool_[index].Store(element); - assert(index >= 0 && index < size); + assert(index >= 0 && index < size_); int node = PoolIndexToNodeIndex(index); while (!IsRoot(node)) { node = GetParentNode(node); - tree[node].FetchAndAdd(1); + tree_[node].FetchAndAdd(1); } } @@ -205,37 +205,76 @@ template< typename ForwardIterator > LockFreeTreeValuePool:: LockFreeTreeValuePool(ForwardIterator first, ForwardIterator last) { // Number of elements to store - real_size = static_cast(::std::distance(first, last)); + real_size_ = static_cast(::std::distance(first, last)); // Let k be smallest number so that real_size <= 2^k, size = 2^k - size = GetSmallestPowerByTwoValue(real_size); + size_ = GetSmallestPowerByTwoValue(real_size_); // Size of binary tree without the leaves - tree_size = size - 1; + tree_size_ = size_ - 1; + + // make sure, signed values are not negative + assert(tree_size_ >= 0); + assert(real_size_ >= 0); + + size_t tree_size_unsigned = static_cast(tree_size_); + size_t real_size_unsigned = static_cast(real_size_); // Pool stores elements of type T - pool = poolAllocator.allocate(static_cast(real_size)); + pool_ = pool_allocator_.allocate(real_size_unsigned); + + // invoke inplace new for each pool element + for (size_t i = 0; i != real_size_unsigned; ++i) { + new (&pool_[i]) embb::base::Atomic(); + } // Tree holds the counter of not allocated elements - tree = treeAllocator.allocate(static_cast(tree_size)); + tree_ = tree_allocator_.allocate(tree_size_unsigned); + + // invoke inplace new for each tree element + for (size_t i = 0; i != tree_size_unsigned; ++i) { + new (&tree_[i]) embb::base::Atomic(); + } int i = 0; // Store the elements from the range for (ForwardIterator curIter(first); curIter != last; ++curIter) { - pool[i++] = *curIter; + pool_[i++] = *curIter; } // Initialize the binary tree without leaves (counters) - Fill(0, static_cast(::std::distance(first, last)), size); + Fill(0, static_cast(::std::distance(first, last)), size_); } template LockFreeTreeValuePool:: ~LockFreeTreeValuePool() { - poolAllocator.deallocate(pool, static_cast(real_size)); - treeAllocator.deallocate(tree, static_cast(tree_size)); + size_t 
tree_size_unsigned = static_cast(tree_size_); + size_t real_size_unsigned = static_cast(real_size_); + + // invoke destructor for each pool element + for (size_t i = 0; i != real_size_unsigned; ++i) { + pool_[i].~Atomic(); + } + + pool_allocator_.deallocate(pool_, real_size_unsigned); + + // invoke destructor for each tree element + for (size_t i = 0; i != tree_size_unsigned; ++i) { + tree_[i].~Atomic(); + } + + tree_allocator_.deallocate(tree_, tree_size_unsigned); +} + +template +size_t LockFreeTreeValuePool:: +GetMinimumElementCountForGuaranteedCapacity(size_t capacity) { + // for this value pool, this is just capacity... + return capacity; } } // namespace containers diff --git a/containers_cpp/include/embb/containers/internal/object_pool-inl.h b/containers_cpp/include/embb/containers/internal/object_pool-inl.h index 61711d5..9458a55 100644 --- a/containers_cpp/include/embb/containers/internal/object_pool-inl.h +++ b/containers_cpp/include/embb/containers/internal/object_pool-inl.h @@ -83,7 +83,8 @@ ReturningTrueIterator::operator!=(const self_type& rhs) { template bool ObjectPool:: IsContained(const Type &obj) const { - if ((&obj < &objects[0]) || (&obj > &objects[capacity - 1])) { + if ((&obj < &objects_array_[0]) || + (&obj > &objects_array_[value_pool_size_ - 1])) { return false; } else { return true; @@ -94,17 +95,17 @@ template int ObjectPool:: GetIndexOfObject(const Type &obj) const { assert(IsContained(obj)); - return(static_cast(&obj - &objects[0])); + return(static_cast(&obj - &objects_array_[0])); } template Type* ObjectPool::AllocateRaw() { bool val; - int allocated_index = p.Allocate(val); + int allocated_index = value_pool_.Allocate(val); if (allocated_index == -1) { return NULL; } else { - Type* ret_pointer = &(objects[allocated_index]); + Type* ret_pointer = &(objects_array_[allocated_index]); return ret_pointer; } @@ -112,15 +113,17 @@ Type* ObjectPool::AllocateRaw() { template size_t ObjectPool::GetCapacity() { - return capacity; + return capacity_; } template ObjectPool::ObjectPool(size_t capacity) : -capacity(capacity), - p(ReturningTrueIterator(0), ReturningTrueIterator(capacity)) { - // Allocate the objects (without construction, just get the memory) - objects = objectAllocator.allocate(capacity); + capacity_(capacity), + value_pool_size_( + ValuePool::GetMinimumElementCountForGuaranteedCapacity(capacity)), + value_pool_(ReturningTrueIterator(0), ReturningTrueIterator( + value_pool_size_)), + objects_array_(object_allocator_.allocate(value_pool_size_)) { } template @@ -128,7 +131,7 @@ void ObjectPool::Free(Type* obj) { int index = GetIndexOfObject(*obj); obj->~Type(); - p.Free(true, index); + value_pool_.Free(true, index); } template @@ -189,7 +192,7 @@ Type* ObjectPool::Allocate( template ObjectPool::~ObjectPool() { // Deallocate the objects - objectAllocator.deallocate(objects, capacity); + object_allocator_.deallocate(objects_array_, value_pool_size_); } } // namespace containers } // namespace embb diff --git a/containers_cpp/include/embb/containers/internal/wait_free_array_value_pool-inl.h b/containers_cpp/include/embb/containers/internal/wait_free_array_value_pool-inl.h index 1453f1f..18ef996 100644 --- a/containers_cpp/include/embb/containers/internal/wait_free_array_value_pool-inl.h +++ b/containers_cpp/include/embb/containers/internal/wait_free_array_value_pool-inl.h @@ -35,21 +35,21 @@ Free(Type element, int index) { assert(element != Undefined); // Just put back the element - pool[index].Store(element); + pool_array_[index].Store(element); } template 
int WaitFreeArrayValuePool:: Allocate(Type & element) { - for (int i = 0; i != size; ++i) { + for (int i = 0; i != size_; ++i) { Type expected; // If the memory cell is not available, go ahead - if (Undefined == (expected = pool[i].Load())) + if (Undefined == (expected = pool_array_[i].Load())) continue; // Try to get the memory cell - if (pool[i].CompareAndSwap(expected, Undefined)) { + if (pool_array_[i].CompareAndSwap(expected, Undefined)) { // When the CAS was successful, this element is ours element = expected; return i; @@ -64,23 +64,45 @@ WaitFreeArrayValuePool:: WaitFreeArrayValuePool(ForwardIterator first, ForwardIterator last) { size_t dist = static_cast(std::distance(first, last)); - size = static_cast(dist); + size_ = static_cast(dist); + + // conversion may result in negative number. check! + assert(size_ >= 0); // Use the allocator to allocate an array of size dist - pool = allocator.allocate(dist); + pool_array_ = allocator_.allocate(dist); + + // invoke inplace new for each pool element + for ( size_t i = 0; i != dist; ++i ) { + new (&pool_array_[i]) embb::base::Atomic(); + } int i = 0; // Store the elements of the range for (ForwardIterator curIter(first); curIter != last; ++curIter) { - pool[i++] = *curIter; + pool_array_[i++] = *curIter; } } template WaitFreeArrayValuePool::~WaitFreeArrayValuePool() { - allocator.deallocate(pool, (size_t)size); + // invoke destructor for each pool element + for (int i = 0; i != size_; ++i) { + pool_array_[i].~Atomic(); + } + + // free memory + allocator_.deallocate(pool_array_, static_cast(size_)); } + +template +size_t WaitFreeArrayValuePool:: +GetMinimumElementCountForGuaranteedCapacity(size_t capacity) { + // for this value pool, this is just capacity... + return capacity; +} + } // namespace containers } // namespace embb diff --git a/containers_cpp/include/embb/containers/lock_free_mpmc_queue.h b/containers_cpp/include/embb/containers/lock_free_mpmc_queue.h index 37c5439..8340c53 100644 --- a/containers_cpp/include/embb/containers/lock_free_mpmc_queue.h +++ b/containers_cpp/include/embb/containers/lock_free_mpmc_queue.h @@ -113,8 +113,17 @@ class LockFreeMPMCQueue { * least as many elements, maybe more. */ size_t capacity; - // Do not change the ordering of class local variables. - // Important for initialization. + + /** + * The object pool, used for lock-free memory allocation. + * + * Warning: the objectPool has to be initialized before the hazardPointer + * object, to be sure that the hazardPointer object is destructed before the + * Pool as the hazardPointer object might return elements to the pool in its + * destructor. So the ordering of the members objectPool and hazardPointer is + * important here! + */ + ObjectPool< internal::LockFreeMPMCQueueNode, ValuePool > objectPool; /** * Callback to the method that is called by hazard pointers if a pointer is @@ -124,15 +133,17 @@ class LockFreeMPMCQueue { delete_pointer_callback; /** - * The hazard pointer object, used for memory management. + * Definition of the used hazard pointer type */ - embb::containers::internal::HazardPointer - < internal::LockFreeMPMCQueueNode* > hazardPointer; + typedef embb::containers::internal::HazardPointer + < internal::LockFreeMPMCQueueNode* > + MPMCQueueNodeHazardPointer_t; /** - * The object pool, used for lock-free memory allocation. + * The hazard pointer object, used for memory management. 
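+   * Declared after objectPool on purpose: members are destructed in
+   * reverse order of declaration, so the hazard pointer object is
+   * destructed first and may still return retired nodes to the pool via
+   * the delete callback (see the ordering note at objectPool).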
*/ - ObjectPool< internal::LockFreeMPMCQueueNode, ValuePool > objectPool; + MPMCQueueNodeHazardPointer_t hazardPointer; + /** * Atomic pointer to the head node of the queue diff --git a/containers_cpp/include/embb/containers/lock_free_stack.h b/containers_cpp/include/embb/containers/lock_free_stack.h index fdcb70c..7fb6d5e 100644 --- a/containers_cpp/include/embb/containers/lock_free_stack.h +++ b/containers_cpp/include/embb/containers/lock_free_stack.h @@ -187,11 +187,6 @@ class LockFreeStack { delete_pointer_callback; /** - * The hazard pointer object, used for memory management. - */ - internal::HazardPointer*> hazardPointer; - - /** * The callback function, used to cleanup non-hazardous pointers. * \see delete_pointer_callback */ @@ -199,10 +194,27 @@ class LockFreeStack { /** * The object pool, used for lock-free memory allocation. + * + * Warning: the objectPool has to be initialized before the hazardPointer + * object, to be sure that the hazardPointer object is destructed before the + * Pool as the hazardPointer object might return elements to the pool in its + * destructor. So the ordering of the members objectPool and hazardPointer is + * important here! */ ObjectPool< internal::LockFreeStackNode, ValuePool > objectPool; /** + * Definition of the used hazard pointer type + */ + typedef internal::HazardPointer < internal::LockFreeStackNode* > + StackNodeHazardPointer_t; + + /** + * The hazard pointer object, used for memory management. + */ + StackNodeHazardPointer_t hazardPointer; + + /** * Atomic pointer to the top node of the stack (element that is popped next) */ embb::base::Atomic*> top; diff --git a/containers_cpp/include/embb/containers/lock_free_tree_value_pool.h b/containers_cpp/include/embb/containers/lock_free_tree_value_pool.h index 2a14d4c..7b218fc 100644 --- a/containers_cpp/include/embb/containers/lock_free_tree_value_pool.h +++ b/containers_cpp/include/embb/containers/lock_free_tree_value_pool.h @@ -123,22 +123,25 @@ class LockFreeTreeValuePool { LockFreeTreeValuePool& operator=(const LockFreeTreeValuePool&); // See algorithm description above - int size; + int size_; // See algorithm description above - int tree_size; + int tree_size_; // See algorithm description above - int real_size; + int real_size_; // The tree above the pool - embb::base::Atomic* tree; + embb::base::Atomic* tree_; // The actual pool - embb::base::Atomic* pool; + embb::base::Atomic* pool_; - PoolAllocator poolAllocator; - TreeAllocator treeAllocator; + // respective allocator + PoolAllocator pool_allocator_; + + // respective allocator + TreeAllocator tree_allocator_; /** * Computes smallest power of two fitting the specified value @@ -278,6 +281,18 @@ class LockFreeTreeValuePool { ); /** + * Due to concurrency effects, a pool might provide less elements than managed + * by it. However, usually one wants to guarantee a minimal capacity. The + * count of elements, that must be given to the pool when to guarantee \c + * capacity elements is computed using this function. + * + * \return count of indices the pool has to be initialized with + */ + static size_t GetMinimumElementCountForGuaranteedCapacity( + size_t capacity + /**< [IN] count of indices that shall be guaranteed */); + + /** * Destructs the pool. 
 *
 * \notthreadsafe

diff --git a/containers_cpp/include/embb/containers/object_pool.h b/containers_cpp/include/embb/containers/object_pool.h
index 0a94708..889f5cc 100644
--- a/containers_cpp/include/embb/containers/object_pool.h
+++ b/containers_cpp/include/embb/containers/object_pool.h
@@ -35,7 +35,6 @@
 namespace embb {
 namespace containers {
-
 /**
  * \defgroup CPP_CONTAINERS_POOLS Pools
  * Concurrent pools
@@ -62,22 +61,29 @@ class ObjectPool {
  /**
   * Allocator used to allocate elements of the object pool
   */
-  ObjectAllocator objectAllocator;
+  ObjectAllocator object_allocator_;

  /**
-   * Array holding the allocated object
+   * Capacity of the object pool
   */
-  Type* objects;
+  size_t capacity_;

  /**
-   * Capacity of the object pool
+   * The size of the underlying value pool. This is also the size of the
+   * object array in this class. It is assumed that the value pool manages
+   * indices in the range [0;value_pool_size_-1].
   */
-  size_t capacity;
+  size_t value_pool_size_;

  /**
   * Underlying value pool
   */
-  ValuePool p;
+  ValuePool value_pool_;
+
+  /**
+   * Array holding the allocated objects
+   */
+  Type* objects_array_;

  /**
   * Helper providing a virtual iterator that just returns true in each
diff --git a/containers_cpp/include/embb/containers/wait_free_array_value_pool.h b/containers_cpp/include/embb/containers/wait_free_array_value_pool.h
index 8289ca6..5a6bfda 100644
--- a/containers_cpp/include/embb/containers/wait_free_array_value_pool.h
+++ b/containers_cpp/include/embb/containers/wait_free_array_value_pool.h
@@ -39,12 +39,30 @@ namespace containers {
 * \ingroup CPP_CONCEPT
 * \{
 * \par Description
- * A value pool is a fixed-size multiset of elements, where each element has a
- * unique index. The elements cannot be modified and are given at construction
- * time (by providing first/last iterators). A value pool provides two
- * operations: \c Allocate and \c Free. \c Allocate removes an element from the
- * pool, and \c Free returns an element to the pool. It is only allowed to
- * free elements that have previously been allocated.
+ * A value pool is a multiset of elements, where each element has a unique,
+ * contiguous index (starting with 0). The elements cannot be modified and
+ * are given at construction time by providing first/last iterators.
+ *
+ * \par
+ * A value pool provides two primary operations: \c Allocate and \c Free.
+ * \c Allocate allocates an element/index "pair" (index via return value,
+ * element via reference parameter) from the pool, and \c Free returns an
+ * element/index pair to the pool. To guarantee linearizability, the element
+ * must not be modified between \c Allocate and \c Free. It is only allowed
+ * to free elements that have previously been allocated. The \c Allocate
+ * function does not guarantee any order in which indices are allocated.
+ * Furthermore, the count of elements that can be allocated with \c Allocate
+ * might be smaller than the count of elements the pool was initialized
+ * with. This is due to implementation details and the resulting concurrency
+ * effects: for example, if indices are managed within a queue, the queue
+ * nodes have to be protected against concurrent reuse and access. As long
+ * as a thread potentially accesses a node (and with it an index), the
+ * respective index cannot be handed out to the user, even if it is
+ * logically no longer part of the pool. However, the user usually wants a
+ * guaranteed number of allocatable indices. Therefore, the static method
+ * \c GetMinimumElementCountForGuaranteedCapacity is provided: the user
+ * passes the count of indices the pool shall guarantee, and the method
+ * returns the count of indices the pool has to be initialized with in
+ * order to give that guarantee.
 *
 * \par Requirements
 * - Let \c Pool be the pool class
@@ -54,6 +72,7 @@ namespace containers {
 * - Let \c i, j be forward iterators supporting \c std::distance.
 * - Let \c c be an object of type \c Type&
 * - Let \c e be a value of type \c int
+ * - Let \c f be a value of type \c int
 *
 * \par Valid Expressions
 *
@@ -72,7 +91,7 @@ namespace containers {
 * the bottom element. The bottom element cannot be stored in the pool, it
 * is exclusively used to mark empty cells. The pool initially contains
 * \c std::distance(i, j) elements which are copied during construction from
 * the range \c [i, j). A concrete class satisfying the value pool concept
 * might provide additional template parameters for specifying allocators.
 *
 *
@@ -80,9 +99,10 @@ namespace containers {
 * \code{.cpp} Allocate(c) \endcode
 * \c int
 *
- * Gets an element from the pool. Returns -1, if no element is available,
- * i.e., the pool is empty. Otherwise, returns the index of the element in
- * the pool. The value of the pool element is written into reference \c c.
+ * Allocates an element/index "pair" from the pool. Returns -1 if no
+ * element is available, i.e., the pool is empty. Otherwise, returns the
+ * index of the element in the pool. The value of the pool element is
+ * written into the reference parameter \c c.
 *
 *
@@ -93,6 +113,15 @@ namespace containers {
 * \c Allocate. For each allocated element, \c Free must be called exactly
 * once.
 *
+ *
+ * \code{.cpp} GetMinimumElementCountForGuaranteedCapacity(f)
+ * \endcode
+ * \c size_t
+ * Static method; returns the count of indices the pool has to be
+ * initialized with in order to guarantee a capacity of \c f elements
+ * (irrespective of concurrency effects).
+ *
+ *
 *
 *
 * \}
@@ -116,10 +145,10 @@
 template<typename Type,
   Type Undefined,
   class Allocator = embb::base::Allocator< embb::base::Atomic<Type> > >
 class WaitFreeArrayValuePool {
  private:
-  int size;
-  embb::base::Atomic<Type>* pool;
+  int size_;
+  embb::base::Atomic<Type>* pool_array_;
  WaitFreeArrayValuePool();
-  Allocator allocator;
+  Allocator allocator_;

  // Prevent copy-construction
  WaitFreeArrayValuePool(const WaitFreeArrayValuePool&);
@@ -150,6 +179,18 @@ class WaitFreeArrayValuePool {
  );

  /**
+   * Due to concurrency effects, a pool might provide fewer elements than
+   * it manages. However, usually one wants to guarantee a minimal
+   * capacity. The count of elements that must be given to the pool in
+   * order to guarantee \c capacity allocatable elements is computed by
+   * this function.
+   *
+   * \return count of indices the pool has to be initialized with
+   */
+  static size_t GetMinimumElementCountForGuaranteedCapacity(
+    size_t capacity
+    /**< [IN] count of indices that shall be guaranteed */);
+
+  /**
   * Destructs the pool.
   *
   * \notthreadsafe
@@ -175,7 +216,7 @@ class WaitFreeArrayValuePool {
  * Returns an element to the pool.
  *
  * \note The element must have been allocated with Allocate().
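  *
  * \par Example
  * A minimal usage sketch of the allocate/free cycle (values chosen for
  * illustration only), assuming a pool of three ints with -1 as the bottom
  * element:
  * \code{.cpp}
  * int values[] = { 10, 20, 30 };
  * embb::containers::WaitFreeArrayValuePool<int, -1>
  *   pool(values, values + 3);
  *
  * int element;
  * int index = pool.Allocate(element); // e.g., index 0 with element == 10
  * if (index != -1) {
  *   // use element read-only here ...
  *   pool.Free(element, index); // return the unmodified element/index pair
  * }
  * \endcode
  * For this array-based pool, GetMinimumElementCountForGuaranteedCapacity
  * simply returns its argument; pool implementations with internal
  * bookkeeping may require more elements than the guaranteed capacity.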
- * + * * \waitfree * * \see CPP_CONCEPTS_VALUE_POOL diff --git a/containers_cpp/test/hazard_pointer_test.cc b/containers_cpp/test/hazard_pointer_test.cc index 17a4c38..71f1d2e 100644 --- a/containers_cpp/test/hazard_pointer_test.cc +++ b/containers_cpp/test/hazard_pointer_test.cc @@ -31,24 +31,71 @@ namespace embb { namespace containers { namespace test { +IntObjectTestPool::IntObjectTestPool(unsigned int pool_size) : +poolSize(pool_size) { + simplePoolObjects = static_cast( + embb::base::Allocation::Allocate(sizeof(int)*pool_size)); + + simplePool = static_cast*> ( + embb::base::Allocation::Allocate(sizeof(embb::base::Atomic)* + pool_size)); + + for (unsigned int i = 0; i != pool_size; ++i) { + // in-place new for each array cell + new (&simplePool[i]) embb::base::Atomic; + } + + for (unsigned int i = 0; i != pool_size; ++i) { + simplePool[i] = FREE_MARKER; + simplePoolObjects[i] = 0; + } +} + +IntObjectTestPool::~IntObjectTestPool() { + embb::base::Allocation::Free(simplePoolObjects); + + for (unsigned int i = 0; i != poolSize; ++i) { + // in-place new for each array cell + simplePool[i].~Atomic(); + } + + embb::base::Allocation::Free(simplePool); +} + +int* IntObjectTestPool::Allocate() { + for (unsigned int i = 0; i != poolSize; ++i) { + int expected = FREE_MARKER; + if (simplePool[i].CompareAndSwap + (expected, ALLOCATED_MARKER)) { + return &simplePoolObjects[i]; + } + } + return 0; +} + +void IntObjectTestPool::Release(int* object_pointer) { + int cell = object_pointer - simplePoolObjects; + simplePool[cell].Store(FREE_MARKER); +} + HazardPointerTest::HazardPointerTest() : #ifdef EMBB_PLATFORM_COMPILER_MSVC #pragma warning(push) #pragma warning(disable:4355) #endif - delete_pointer_callback(*this, &HazardPointerTest::DeletePointerCallback), +delete_pointer_callback_(*this, &HazardPointerTest::DeletePointerCallback), #ifdef EMBB_PLATFORM_COMPILER_MSVC #pragma warning(pop) #endif - object_pool(NULL), - stack(NULL), - hp(NULL), -n_threads(static_cast + object_pool_(NULL), + stack_(NULL), + hazard_pointer_(NULL), + n_threads_(static_cast (partest::TestSuite::GetDefaultNumThreads())) { - n_elements_per_thread = 100; - n_elements = n_threads*n_elements_per_thread; + n_elements_per_thread_ = 100; + n_elements_ = n_threads_*n_elements_per_thread_; embb::base::Function < void, embb::base::Atomic* > - delete_pointer_callback( + deletePointerCallback( *this, &HazardPointerTest::DeletePointerCallback); @@ -59,45 +106,52 @@ n_threads(static_cast // placed, the pointer is not allowed to be deleted until the second thread // removes this guard. CreateUnit("HazardPointerTestThatGuardWorks"). - Pre(&HazardPointerTest::HazardPointerTest1_Pre, this). + Pre(&HazardPointerTest::HazardPointerTest1Pre, this). Add( - &HazardPointerTest::HazardPointerTest1_ThreadMethod, - this, static_cast(n_threads)). - Post(&HazardPointerTest::HazardPointerTest1_Post, this); + &HazardPointerTest::HazardPointerTest1ThreadMethod, + this, static_cast(n_threads_)). 
+ Post(&HazardPointerTest::HazardPointerTest1Post, this); } -void HazardPointerTest::HazardPointerTest1_Pre() { +void HazardPointerTest::HazardPointerTest1Pre() { embb_internal_thread_index_reset(); - object_pool = new embb::containers::ObjectPool< embb::base::Atomic > - (static_cast(n_elements)); - stack = new embb::containers::LockFreeStack< embb::base::Atomic* > - (static_cast(n_elements)); - hp = new embb::containers::internal::HazardPointer< embb::base::Atomic*> - (delete_pointer_callback, - NULL, + + object_pool_ = + embb::base::Allocation:: + New > > + (static_cast(n_elements_)); + + stack_ = embb::base::Allocation:: + New* > > + (static_cast(n_elements_)); + + hazard_pointer_ = embb::base::Allocation:: + New* > > + (delete_pointer_callback_, + static_cast*>(NULL), 1); } -void HazardPointerTest::HazardPointerTest1_Post() { - delete object_pool; - delete stack; - delete hp; +void HazardPointerTest::HazardPointerTest1Post() { + embb::base::Allocation::Delete(hazard_pointer_); + embb::base::Allocation::Delete(object_pool_); + embb::base::Allocation::Delete(stack_); } -void HazardPointerTest::HazardPointerTest1_ThreadMethod() { +void HazardPointerTest::HazardPointerTest1ThreadMethod() { unsigned int thread_index; embb_internal_thread_index(&thread_index); - for (int i = 0; i != n_elements_per_thread; ++i) { - embb::base::Atomic* allocated_object = object_pool->Allocate(0); + for (int i = 0; i != n_elements_per_thread_; ++i) { + embb::base::Atomic* allocated_object = object_pool_->Allocate(0); - hp->GuardPointer(0, allocated_object); + hazard_pointer_->Guard(0, allocated_object); - bool success = stack->TryPush(allocated_object); + bool success = stack_->TryPush(allocated_object); PT_ASSERT(success == true); - embb::base::Atomic* allocated_object_from_different_thread; + embb::base::Atomic* allocated_object_from_different_thread(0); int diff_count = 0; @@ -105,51 +159,366 @@ void HazardPointerTest::HazardPointerTest1_ThreadMethod() { bool success_pop; while ( - (success_pop = stack->TryPop(allocated_object_from_different_thread)) + (success_pop = stack_->TryPop(allocated_object_from_different_thread)) == true && allocated_object_from_different_thread == allocated_object ) { - //try to make it probable to get an element from a different thread - //however, can be the same. Try 10000 times to get a different element. + // try to make it probable to get an element from a different thread + // however, can be the same. Try 10000 times to get a different element. if (diff_count++ > 10000) { same = true; break; } - bool success = stack->TryPush(allocated_object_from_different_thread); + bool success = stack_->TryPush(allocated_object_from_different_thread); PT_ASSERT(success == true); } PT_ASSERT(success_pop == true); allocated_object->Store(1); - hp->EnqueuePointerForDeletion(allocated_object); + hazard_pointer_->EnqueueForDeletion(allocated_object); if (!same) { - hp->GuardPointer(0, allocated_object_from_different_thread); + hazard_pointer_->Guard(0, allocated_object_from_different_thread); // if this holds, we were successful in guarding... otherwise we // were to late, because the pointer has already been added // to the retired list. if (*allocated_object_from_different_thread == 0) { // the pointer must not be deleted here! 
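          // DeletePointerCallback records every pointer that was actually
          // released, so scanning deleted_vector_ under the mutex proves
          // that the callback has not fired for the still-guarded object.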
- vector_mutex.Lock(); + vector_mutex_.Lock(); for (std::vector< embb::base::Atomic* >::iterator - it = deleted_vector.begin(); - it != deleted_vector.end(); + it = deleted_vector_.begin(); + it != deleted_vector_.end(); ++it) { PT_ASSERT(*it != allocated_object_from_different_thread); } - vector_mutex.Unlock(); + vector_mutex_.Unlock(); } - hp->GuardPointer(0, NULL); + hazard_pointer_->Guard(0, NULL); } } } void HazardPointerTest::DeletePointerCallback (embb::base::Atomic* to_delete) { - vector_mutex.Lock(); - deleted_vector.push_back(to_delete); - vector_mutex.Unlock(); + vector_mutex_.Lock(); + deleted_vector_.push_back(to_delete); + vector_mutex_.Unlock(); +} + +void HazardPointerTest2::DeletePointerCallback(int* to_delete) { + test_pool_->Release(to_delete); +} + +bool HazardPointerTest2::SetRelativeGuards() { + unsigned int thread_index; + embb_internal_thread_index(&thread_index); + + unsigned int my_begin = guards_per_phread_count_*thread_index; + int guard_number = 0; + unsigned int alreadyGuarded = 0; + + for (unsigned int i = my_begin; i != my_begin + guards_per_phread_count_; + ++i) { + if (shared_guarded_[i] != 0) { + alreadyGuarded++; + guard_number++; + continue; + } + + int * to_guard = shared_allocated_[i]; + if (to_guard) { + hazard_pointer_->Guard(guard_number, to_guard); + + // changed in the meantime? + if (to_guard == shared_allocated_[i].Load()) { + // guard was successful. Communicate to other threads. + shared_guarded_[i] = to_guard; + } else { + // reset the guard, couldn't guard... + hazard_pointer_->RemoveGuard(guard_number); + } + } + guard_number++; + } + return(alreadyGuarded == guards_per_phread_count_); +} + +void HazardPointerTest2::HazardPointerTest2Master() { + // while the hazard pointer guard array is not full + int** allocatedLocal = static_cast( + embb::base::Allocation::Allocate(sizeof(int*)*guaranteed_capacity_pool_)); + + bool full = false; + while (!full) { + full = true; + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + if (shared_guarded_[i] == 0) { + full = false; + break; + } + } + + // not all guards set + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + allocatedLocal[i] = test_pool_->Allocate(); + shared_allocated_[i].Store(allocatedLocal[i]); + } + + // set my hazards. We do not have to check, this must be successful + // here. 
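+      // (The master is the only writer of shared_allocated_, so the
+      // revalidation inside SetRelativeGuards cannot fail for the
+      // master's own slots.)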
+ SetRelativeGuards(); + + // free + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + shared_allocated_[i].Store(0); + hazard_pointer_->EnqueueForDeletion(allocatedLocal[i]); + } + } + + embb::base::Allocation::Free(allocatedLocal); +} + +void HazardPointerTest2::HazardPointerTest2Slave() { + unsigned int thread_index; + embb_internal_thread_index(&thread_index); + + while (!SetRelativeGuards()) {} +} + +void HazardPointerTest2::HazardPointerTest2Pre() { + embb_internal_thread_index_reset(); + current_master_ = 0; + sync1_ = 0; + sync2_ = 0; + + // first the test pool has to be created + test_pool_ = embb::base::Allocation::New + (pool_size_using_hazard_pointer_); + + // after the pool has been created, we create the hp class + hazard_pointer_ = embb::base::Allocation::New < + embb::containers::internal::HazardPointer > + (delete_pointer_callback_, static_cast(NULL), + static_cast(guards_per_phread_count_), n_threads); + + shared_guarded_ = static_cast*>( + embb::base::Allocation::Allocate(sizeof(embb::base::Atomic)* + guaranteed_capacity_pool_)); + + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + // in-place new for each array cell + new (&shared_guarded_[i]) embb::base::Atomic < int* >; + } + + shared_allocated_ = static_cast*>( + embb::base::Allocation::Allocate(sizeof(embb::base::Atomic)* + guaranteed_capacity_pool_)); + + for (unsigned int i = 0; i != + guaranteed_capacity_pool_; ++i) { + // in-place new for each array cell + new (&shared_allocated_[i]) embb::base::Atomic < int* >; + } + + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + shared_guarded_[i] = 0; + shared_allocated_[i] = 0; + } +} + +void HazardPointerTest2::HazardPointerTest2Post() { + for (unsigned int i = 0; i != static_cast(n_threads); ++i) { + for (unsigned int i2 = 0; i2 != static_cast(n_threads)* + guards_per_phread_count_; ++i2) { + if (hazard_pointer_->thread_local_retired_lists_ + [i2 + i*n_threads*guards_per_phread_count_] == NULL) { + // all retired lists must be completely filled + PT_ASSERT(false); + } + } + } + + unsigned int checks = 0; + for (unsigned int i = 0; i != static_cast(n_threads); ++i) { + for (unsigned int i2 = 0; i2 != static_cast(n_threads)* + guards_per_phread_count_; ++i2) { + for (unsigned int j = 0; j != static_cast(n_threads); ++j) { + for (unsigned int j2 = 0; j2 != static_cast(n_threads)* + guards_per_phread_count_; ++j2) { + if (i2 == j2 && i == j) + continue; + + // all retired elements have to be disjoint + PT_ASSERT( + hazard_pointer_->thread_local_retired_lists_ + [i2 + i*n_threads*guards_per_phread_count_] != + hazard_pointer_->thread_local_retired_lists_ + [j2 + j*n_threads*guards_per_phread_count_]); + + checks++; + } + } + } + } + + // sanity check on the count of expected comparisons. + PT_ASSERT( + checks == + n_threads*n_threads*guards_per_phread_count_ * + (n_threads*n_threads*guards_per_phread_count_ - 1)); + + std::vector< int* > additionallyAllocated; + + // we should be able to still allocate the guaranteed capacity of + // elements from the pool. + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + int* allocated = test_pool_->Allocate(); + + // allocated is not allowed to be zero + PT_ASSERT(allocated != NULL); + + // push to vector, to check if elements are disjunctive and to release + // afterwards. + additionallyAllocated.push_back(allocated); + } + + // the pool should now be empty + PT_ASSERT(test_pool_->Allocate() == NULL); + + // release allocated elements... 
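+  // (they were only needed to show that the guaranteed capacity survives
+  // the worst case of n_threads * n_threads * guards_per_thread retired,
+  // but not yet releasable, objects)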
+ for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) { + test_pool_->Release(additionallyAllocated[i]); + } + + // the additionallyAllocated elements shall be disjoint + for (unsigned int i = 0; i != additionallyAllocated.size(); ++i) { + for (unsigned int i2 = 0; i2 != additionallyAllocated.size(); ++i2) { + if (i == i2) + continue; + PT_ASSERT(additionallyAllocated[i] != + additionallyAllocated[i2]); + } + } + + // no allocated element should be in any retired list... + for (unsigned int a = 0; a != additionallyAllocated.size(); ++a) { + for (unsigned int i = 0; i != static_cast(n_threads); ++i) { + for (unsigned int i2 = 0; i2 != static_cast(n_threads)* + guards_per_phread_count_; ++i2) { + PT_ASSERT( + hazard_pointer_->thread_local_retired_lists_ + [i2 + i*n_threads*guards_per_phread_count_] != + additionallyAllocated[a]); + } + } + } + + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + // in-place new for each array cell + shared_guarded_[i].~Atomic(); + } + + embb::base::Allocation::Free(shared_guarded_); + + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + // in-place new for each array cell + shared_allocated_[i].~Atomic(); + } + + embb::base::Allocation::Free(shared_allocated_); + embb::base::Allocation::Delete(hazard_pointer_); + + // after deleting the hazard pointer object, all retired pointers have + // to be returned to the pool! + std::vector elementsInPool; + + int* nextElement; + while ((nextElement = test_pool_->Allocate()) != NULL) { + for (unsigned int i = 0; i != elementsInPool.size(); ++i) { + // all elements need to be disjoint + PT_ASSERT(elementsInPool[i] != nextElement); + } + elementsInPool.push_back(nextElement); + } + + // all elements should have been returned by the hp object, so we should be + // able to acquire all elements. + PT_ASSERT(elementsInPool.size() == pool_size_using_hazard_pointer_); + + embb::base::Allocation::Delete(test_pool_); +} + +void HazardPointerTest2::HazardPointerTest2ThreadMethod() { + for (;;) { + unsigned int thread_index; + embb_internal_thread_index(&thread_index); + + if (thread_index == current_master_) { + HazardPointerTest2Master(); + } else { + HazardPointerTest2Slave(); + } + + sync1_.FetchAndAdd(1); + + // wait until cleanup thread signals to be finished + while (sync1_ != 0) { + int expected = n_threads; + int desired = FINISH_MARKER; + // select thread, responsible for cleanup + if (sync1_.CompareAndSwap(expected, desired)) { + // wipe arrays! + for (unsigned int i = 0; i != guaranteed_capacity_pool_; ++i) { + shared_guarded_[i] = 0; + shared_allocated_[i] = 0; + } + + // increase master + current_master_.FetchAndAdd(1); + sync2_ = 0; + sync1_.Store(0); + } + } + + // wait for all threads to reach this position + sync2_.FetchAndAdd(1); + while (sync2_ != static_cast(n_threads)) {} + + // if each thread was master once, terminate. 
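+    // (current_master_ is advanced exactly once per round by the cleanup
+    // thread, so after n_threads rounds every thread has been master.)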
+ if (current_master_ == static_cast(n_threads)) { + return; + } + } +} + +HazardPointerTest2::HazardPointerTest2() : +n_threads(static_cast +(partest::TestSuite::GetDefaultNumThreads())), + +#ifdef EMBB_PLATFORM_COMPILER_MSVC +#pragma warning(push) +#pragma warning(disable:4355) +#endif + delete_pointer_callback_( + *this, + &HazardPointerTest2::DeletePointerCallback) +#ifdef EMBB_PLATFORM_COMPILER_MSVC +#pragma warning(pop) +#endif +{ + guards_per_phread_count_ = 5; + guaranteed_capacity_pool_ = guards_per_phread_count_*n_threads; + pool_size_using_hazard_pointer_ = guaranteed_capacity_pool_ + + guards_per_phread_count_*n_threads*n_threads; + + embb::base::Thread::GetThreadsMaxCount(); + CreateUnit("HazardPointerTestSimulateMemoryWorstCase"). + Pre(&HazardPointerTest2::HazardPointerTest2Pre, this). + Add( + &HazardPointerTest2::HazardPointerTest2ThreadMethod, + this, static_cast(n_threads)). + Post(&HazardPointerTest2::HazardPointerTest2Post, this); } -} // namespace test -} // namespace containers -} // namespace embb +} // namespace test +} // namespace containers +} // namespace embb diff --git a/containers_cpp/test/hazard_pointer_test.h b/containers_cpp/test/hazard_pointer_test.h index 3e02c6a..f6631cd 100644 --- a/containers_cpp/test/hazard_pointer_test.h +++ b/containers_cpp/test/hazard_pointer_test.h @@ -36,32 +36,112 @@ namespace embb { namespace containers { namespace test { -class HazardPointerTest : public partest::TestCase { +/** + * @brief a very simple wait-free object pool implementation to have tests + * being independent of the EMBB object pool implementation. + */ +class IntObjectTestPool { private: - embb::base::Function*> delete_pointer_callback; + int* simplePoolObjects; + embb::base::Atomic* simplePool; - //used to allocate random stuff, we will just use the pointers, not the - //contents - embb::containers::ObjectPool< embb::base::Atomic >* object_pool; + public: + static const int ALLOCATED_MARKER = 1; + static const int FREE_MARKER = 0; + unsigned int poolSize; - //used to move pointer between threads - embb::containers::LockFreeStack< embb::base::Atomic* >* stack; - embb::base::Mutex vector_mutex; - embb::containers::internal::HazardPointer*>* hp; - std::vector< embb::base::Atomic* > deleted_vector; - int n_threads; - int n_elements_per_thread; - int n_elements; + explicit IntObjectTestPool(unsigned int pool_size); + + ~IntObjectTestPool(); + + /** + * Allocate object from the pool + * + * @return the allocated object + */ + int* Allocate(); + /** + * Return an element to the pool + * + * @param objectPointer the object to be freed + */ + void Release(int* object_pointer); +}; + +class HazardPointerTest : public partest::TestCase { public: /** * Adds test methods. 
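   * Registers the unit "HazardPointerTestThatGuardWorks" together with
   * its Pre and Post methods.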
*/ HazardPointerTest(); - void HazardPointerTest1_Pre(); - void HazardPointerTest1_Post(); - void HazardPointerTest1_ThreadMethod(); + void HazardPointerTest1Pre(); + void HazardPointerTest1Post(); + void HazardPointerTest1ThreadMethod(); void DeletePointerCallback(embb::base::Atomic* to_delete); + + private: + embb::base::Function*> delete_pointer_callback_; + + //used to allocate random stuff, we will just use the pointers, not the + //contents + embb::containers::ObjectPool< embb::base::Atomic >* object_pool_; + + //used to move pointer between threads + embb::containers::LockFreeStack< embb::base::Atomic* >* stack_; + embb::base::Mutex vector_mutex_; + embb::containers::internal::HazardPointer*>* + hazard_pointer_; + std::vector< embb::base::Atomic* > deleted_vector_; + int n_threads_; + int n_elements_per_thread_; + int n_elements_; +}; + +class HazardPointerTest2 : public partest::TestCase { + public: + void DeletePointerCallback(int* to_delete); + bool SetRelativeGuards(); + void HazardPointerTest2Master(); + void HazardPointerTest2Slave(); + + void HazardPointerTest2Pre(); + void HazardPointerTest2Post(); + + void HazardPointerTest2ThreadMethod(); + + HazardPointerTest2(); + + private: + // number of threads, participating in that test + int n_threads; + + embb::base::Function delete_pointer_callback_; + // the thread id of the master + embb::base::Atomic current_master_; + + // variables, to synchronize threads. At each point in time, one master, + // the master changes each round until each thread was assigned master once. + embb::base::Atomic sync1_; + embb::base::Atomic sync2_; + + unsigned int guards_per_phread_count_; + unsigned int guaranteed_capacity_pool_; + unsigned int pool_size_using_hazard_pointer_; + + // The threads write here, if they guarded an object successfully. Used to + // determine when all allocated objects were guarded successfully. + embb::base::Atomic* shared_guarded_; + + // This array is used by the master, to communicate and share what he has + // allocated with the slaves. 
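+  // The slaves read it in SetRelativeGuards and acknowledge successfully
+  // placed guards via shared_guarded_; the master spins until every slot
+  // is guarded.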
+ embb::base::Atomic* shared_allocated_; + + // Reference to the object pool + IntObjectTestPool* test_pool_; + + embb::containers::internal::HazardPointer* hazard_pointer_; + static const int FINISH_MARKER = -1; }; } // namespace test } // namespace containers diff --git a/containers_cpp/test/main.cc b/containers_cpp/test/main.cc index 0e26fee..e4cb128 100644 --- a/containers_cpp/test/main.cc +++ b/containers_cpp/test/main.cc @@ -55,6 +55,7 @@ using embb::containers::test::HazardPointerTest; using embb::containers::test::QueueTest; using embb::containers::test::StackTest; using embb::containers::test::ObjectPoolTest; +using embb::containers::test::HazardPointerTest2; PT_MAIN("Data Structures C++") { unsigned int max_threads = static_cast( @@ -64,6 +65,7 @@ PT_MAIN("Data Structures C++") { PT_RUN(PoolTest< WaitFreeArrayValuePool >); PT_RUN(PoolTest< LockFreeTreeValuePool >); PT_RUN(HazardPointerTest); + PT_RUN(HazardPointerTest2); PT_RUN(QueueTest< WaitFreeSPSCQueue< ::std::pair > >); PT_RUN(QueueTest< LockFreeMPMCQueue< ::std::pair > COMMA true COMMA true >); diff --git a/dataflow_cpp/test/dataflow_cpp_test_simple.cc b/dataflow_cpp/test/dataflow_cpp_test_simple.cc index 78af07c..b434625 100644 --- a/dataflow_cpp/test/dataflow_cpp_test_simple.cc +++ b/dataflow_cpp/test/dataflow_cpp_test_simple.cc @@ -39,7 +39,7 @@ #define NUM_SLICES 8 #define TEST_COUNT 12 -typedef embb::dataflow::Network<8> MyNetwork; +typedef embb::dataflow::Network MyNetwork; typedef MyNetwork::ConstantSource< int > MyConstantSource; typedef MyNetwork::Source< int > MySource; typedef MyNetwork::SerialProcess< MyNetwork::Inputs::Type, @@ -156,9 +156,7 @@ void SimpleTest::TestBasic() { core_set, 1024, // max tasks (default: 1024) 128, // max groups (default: 128) - // Currently needs to be initialized - // with (max_queues + 1), see defect embb449 - num_cores + 1, // max queues (default: 16) + num_cores, // max queues (default: 16) 1024, // queue capacity (default: 1024) 4); // num priorities (default: 4) diff --git a/mtapi_c/src/embb_mtapi_id_pool_t.c b/mtapi_c/src/embb_mtapi_id_pool_t.c index 570fa98..184a9f6 100644 --- a/mtapi_c/src/embb_mtapi_id_pool_t.c +++ b/mtapi_c/src/embb_mtapi_id_pool_t.c @@ -71,7 +71,7 @@ mtapi_uint_t embb_mtapi_id_pool_allocate(embb_mtapi_id_pool_t * that) { /* acquire position to fetch id from */ mtapi_uint_t id_position = that->get_id_position; that->get_id_position++; - if (that->capacity <= that->get_id_position) { + if (that->capacity < that->get_id_position) { that->get_id_position = 0; } @@ -97,7 +97,7 @@ void embb_mtapi_id_pool_deallocate( /* acquire position to put id to */ mtapi_uint_t id_position = that->put_id_position; that->put_id_position++; - if (that->capacity <= that->put_id_position) { + if (that->capacity < that->put_id_position) { that->put_id_position = 0; } diff --git a/mtapi_c/test/embb_mtapi_test_id_pool.cc b/mtapi_c/test/embb_mtapi_test_id_pool.cc new file mode 100644 index 0000000..f7c7855 --- /dev/null +++ b/mtapi_c/test/embb_mtapi_test_id_pool.cc @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2014-2015, Siemens AG. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +IdPoolTest::IdPoolTest() { + CreateUnit("mtapi id pool test single threaded"). + Add(&IdPoolTest::TestBasic, this, 1, 1000). + Pre(&IdPoolTest::TestBasicPre, this). + Post(&IdPoolTest::TestBasicPost, this); + + CreateUnit("mtapi id pool test concurrent"). + Add(&IdPoolTest::TestParallel, this, concurrent_accessors_id_pool_2 + , 20). + Post(&IdPoolTest::TestParallelPost, this). + Pre(&IdPoolTest::TestParallelPre, this); +} + +void IdPoolTest::TestParallel() { + // allocate ID_ELEMENTS_PER_ACCESSOR elements. Each test thread is + // guaranteed to be able to allocate this amount of elements. + TestAllocateDeallocateNElementsFromPool(id_pool_parallel, + id_elements_per_accessor); +} + +void IdPoolTest::TestParallelPre() { + // create second id pool with CONCURRENT_ACCESSORS_ID_POOL_2* + // ID_ELEMENTS_PER_ACCESSOR elements + embb_mtapi_id_pool_initialize(&id_pool_parallel, + concurrent_accessors_id_pool_2*id_elements_per_accessor); +} + +void IdPoolTest::TestParallelPost() { + // after the parallel tests, try to again allocate and deallocate all + // elements sequentially. + TestAllocateDeallocateNElementsFromPool(id_pool_parallel, + concurrent_accessors_id_pool_2*id_elements_per_accessor, true); + + // finalize pool + embb_mtapi_id_pool_finalize(&id_pool_parallel); +} + +void IdPoolTest::TestBasic() { + TestAllocateDeallocateNElementsFromPool(id_pool, id_pool_size_1, true); +} + +void IdPoolTest::TestBasicPre() { + // create id pool with ID_POOL_SIZE_1 elements + embb_mtapi_id_pool_initialize(&id_pool, id_pool_size_1); +} + +void IdPoolTest::TestBasicPost() { + // finalize pool + embb_mtapi_id_pool_finalize(&id_pool); +} + +void IdPoolTest::TestAllocateDeallocateNElementsFromPool( + embb_mtapi_id_pool_t &pool, + int count_elements, + bool empty_check) { + std::vector allocated; + + for (int i = 0; i != count_elements; ++i) { + allocated.push_back(embb_mtapi_id_pool_allocate(&pool)); + } + + // the allocated elements should be disjunctive, and never invalid element + for (unsigned int x = 0; x != allocated.size(); ++x) { + PT_ASSERT(allocated[x] != EMBB_MTAPI_IDPOOL_INVALID_ID); + for (unsigned int y = 0; y != allocated.size(); ++y) { + if (x == y) { + continue; + } + PT_ASSERT(allocated[x] != allocated[y]); + } + } + + // now the id pool should be empty... 
try ten times to get an id, + // we should always get the invalid element + if (empty_check) { + for (int i = 0; i != 10; ++i) { + PT_ASSERT_EQ(embb_mtapi_id_pool_allocate(&pool), + static_cast(EMBB_MTAPI_IDPOOL_INVALID_ID) + ) + } + } + + // now return allocated elements in a shuffled manner. + ::std::random_shuffle(allocated.begin(), allocated.end()); + + for (int i = 0; i != count_elements; ++i) { + embb_mtapi_id_pool_deallocate(&pool, + allocated[static_cast(i)]); + } +} + diff --git a/mtapi_c/test/embb_mtapi_test_id_pool.h b/mtapi_c/test/embb_mtapi_test_id_pool.h new file mode 100644 index 0000000..a85a284 --- /dev/null +++ b/mtapi_c/test/embb_mtapi_test_id_pool.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014-2015, Siemens AG. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_ +#define MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_ + +#include +#include + +// for shuffling a vector +#include + +class IdPoolTest : public partest::TestCase { + public: + embb_mtapi_id_pool_t id_pool; + embb_mtapi_id_pool_t id_pool_parallel; + + IdPoolTest(); + + private: + static const unsigned int id_pool_size_1 = 100; + static const unsigned int concurrent_accessors_id_pool_2 = 10; + static const unsigned int id_elements_per_accessor = 10; + + /** + * We create a pool of size number_accessors*elements_per_accessor, so + * at each time we can guarantee each thread to be able to allocate + * elements_per_accessor elements. + * We create number_accessor threads, where each thread iteratively + * allocates and frees elements_per_accessor elements, which in each case + * has to be successful. Additionally, the sanity checks from the basic tests + * are repeated. The TestParallelPost function also repeats all + * sequential tests. + */ + void TestParallel(); + void TestParallelPre(); + void TestParallelPost(); + + /** + * Create a pool of size N. We repeatedly allocate and free N elements, check + * if the pool always returns disjunctive ids and check that the pool never + * returns the invalid element, if the pool is not empty. Check that the + * invalid element is returned if the pool is empty. 
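+   * (For a pool of capacity N this means: N allocations yield N pairwise
+   * distinct valid ids, and any further allocation returns
+   * EMBB_MTAPI_IDPOOL_INVALID_ID until an id is deallocated again.)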
+ */ + void TestBasic(); + void TestBasicPre(); + void TestBasicPost(); + + static void TestAllocateDeallocateNElementsFromPool( + embb_mtapi_id_pool_t &pool, + int count_elements, + bool empty_check = false); +}; + +#endif // MTAPI_C_TEST_EMBB_MTAPI_TEST_ID_POOL_H_ diff --git a/mtapi_c/test/main.cc b/mtapi_c/test/main.cc index a92b25d..85b2e57 100644 --- a/mtapi_c/test/main.cc +++ b/mtapi_c/test/main.cc @@ -37,6 +37,9 @@ #include #include #include +#include + +#include PT_MAIN("MTAPI C") { embb_log_set_log_level(EMBB_LOG_LEVEL_NONE); @@ -48,4 +51,7 @@ PT_MAIN("MTAPI C") { PT_RUN(InitFinalizeTest); PT_RUN(GroupTest); PT_RUN(QueueTest); + PT_RUN(IdPoolTest); + + PT_EXPECT(embb_get_bytes_allocated() == 0); } diff --git a/mtapi_cpp/CMakeLists.txt b/mtapi_cpp/CMakeLists.txt index 3652d63..3a343f2 100644 --- a/mtapi_cpp/CMakeLists.txt +++ b/mtapi_cpp/CMakeLists.txt @@ -5,14 +5,10 @@ file(GLOB_RECURSE EMBB_MTAPI_CPP_HEADERS "include/*.h") file(GLOB_RECURSE EMBB_MTAPI_CPP_TEST_SOURCES "test/*.cc" "test/*.h") if (USE_AUTOMATIC_INITIALIZATION STREQUAL ON) - message("-- Automatic initialization enabled (default)") set(MTAPI_CPP_AUTOMATIC_INITIALIZE 1) else() set(MTAPI_CPP_AUTOMATIC_INITIALIZE 0) - message("-- Automatic initialization disabled") endif() -message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)") - # Execute the GroupSources macro include(${CMAKE_SOURCE_DIR}/CMakeCommon/GroupSourcesMSVC.cmake) diff --git a/tasks_cpp/CMakeLists.txt b/tasks_cpp/CMakeLists.txt index 31effbf..397be86 100644 --- a/tasks_cpp/CMakeLists.txt +++ b/tasks_cpp/CMakeLists.txt @@ -5,13 +5,10 @@ file(GLOB_RECURSE EMBB_TASKS_CPP_HEADERS "include/*.h") file(GLOB_RECURSE EMBB_TASKS_CPP_TEST_SOURCES "test/*.cc" "test/*.h") if (USE_AUTOMATIC_INITIALIZATION STREQUAL ON) - message("-- Automatic initialization enabled (default)") set(TASKS_CPP_AUTOMATIC_INITIALIZE 1) else() set(TASKS_CPP_AUTOMATIC_INITIALIZE 0) - message("-- Automatic initialization disabled") endif() -message(" (set with command line option -DUSE_AUTOMATIC_INITIALIZATION=ON/OFF)") configure_file("include/embb/tasks/internal/cmake_config.h.in" "include/embb/tasks/internal/cmake_config.h") diff --git a/tasks_cpp/test/tasks_cpp_test_task.cc b/tasks_cpp/test/tasks_cpp_test_task.cc index 103098d..1c51631 100644 --- a/tasks_cpp/test/tasks_cpp_test_task.cc +++ b/tasks_cpp/test/tasks_cpp_test_task.cc @@ -78,13 +78,19 @@ void TaskTest::TestBasic() { PT_EXPECT_EQ(policy.GetPriority(), 0u); policy.AddWorker(0u); PT_EXPECT_EQ(policy.GetAffinity(), 1u); - policy.AddWorker(1u); - PT_EXPECT_EQ(policy.GetAffinity(), 3u); + + if (policy.GetCoreCount() > 1) { + policy.AddWorker(1u); + PT_EXPECT_EQ(policy.GetAffinity(), 3u); + } + policy.RemoveWorker(0u); - PT_EXPECT_EQ(policy.GetAffinity(), 2u); PT_EXPECT_EQ(policy.IsSetWorker(0), false); - PT_EXPECT_EQ(policy.IsSetWorker(1), true); + if (policy.GetCoreCount() > 1) { + PT_EXPECT_EQ(policy.GetAffinity(), 2u); + PT_EXPECT_EQ(policy.IsSetWorker(1), true); + } std::string test; embb::tasks::Task task = node.Spawn( embb::base::Bind(